Diffstat (limited to 'net')
-rw-r--r-- net/8021q/vlan_core.c | 6
-rw-r--r-- net/8021q/vlan_dev.c | 62
-rw-r--r-- net/appletalk/ddp.c | 2
-rw-r--r-- net/atm/svc.c | 10
-rw-r--r-- net/batman-adv/debugfs.c | 13
-rw-r--r-- net/batman-adv/distributed-arp-table.c | 3
-rw-r--r-- net/batman-adv/main.h | 2
-rw-r--r-- net/batman-adv/network-coding.c | 3
-rw-r--r-- net/batman-adv/soft-interface.c | 2
-rw-r--r-- net/batman-adv/sysfs.c | 14
-rw-r--r-- net/bluetooth/6lowpan.c | 65
-rw-r--r-- net/bluetooth/hci_conn.c | 84
-rw-r--r-- net/bluetooth/hci_core.c | 84
-rw-r--r-- net/bluetooth/hci_event.c | 311
-rw-r--r-- net/bluetooth/hci_sock.c | 17
-rw-r--r-- net/bluetooth/l2cap_core.c | 6
-rw-r--r-- net/bluetooth/l2cap_sock.c | 5
-rw-r--r-- net/bluetooth/lib.c | 1
-rw-r--r-- net/bluetooth/mgmt.c | 278
-rw-r--r-- net/bluetooth/rfcomm/core.c | 2
-rw-r--r-- net/bluetooth/rfcomm/tty.c | 20
-rw-r--r-- net/bluetooth/smp.c | 160
-rw-r--r-- net/bluetooth/smp.h | 30
-rw-r--r-- net/bridge/Makefile | 4
-rw-r--r-- net/bridge/br.c | 98
-rw-r--r-- net/bridge/br_device.c | 16
-rw-r--r-- net/bridge/br_fdb.c | 134
-rw-r--r-- net/bridge/br_if.c | 126
-rw-r--r-- net/bridge/br_input.c | 8
-rw-r--r-- net/bridge/br_mdb.c | 4
-rw-r--r-- net/bridge/br_multicast.c | 378
-rw-r--r-- net/bridge/br_netfilter.c | 2
-rw-r--r-- net/bridge/br_netlink.c | 3
-rw-r--r-- net/bridge/br_notify.c | 118
-rw-r--r-- net/bridge/br_private.h | 75
-rw-r--r-- net/bridge/br_sysfs_br.c | 26
-rw-r--r-- net/bridge/br_sysfs_if.c | 30
-rw-r--r-- net/bridge/br_vlan.c | 154
-rw-r--r-- net/bridge/netfilter/Kconfig | 17
-rw-r--r-- net/bridge/netfilter/Makefile | 1
-rw-r--r-- net/bridge/netfilter/nft_meta_bridge.c | 139
-rw-r--r-- net/can/af_can.c | 31
-rw-r--r-- net/can/af_can.h | 9
-rw-r--r-- net/can/proc.c | 76
-rw-r--r-- net/ceph/osd_client.c | 2
-rw-r--r-- net/core/Makefile | 2
-rw-r--r-- net/core/datagram.c | 14
-rw-r--r-- net/core/dev.c | 93
-rw-r--r-- net/core/dev_addr_lists.c | 85
-rw-r--r-- net/core/ethtool.c | 215
-rw-r--r-- net/core/filter.c | 1289
-rw-r--r-- net/core/net_namespace.c | 2
-rw-r--r-- net/core/pktgen.c | 50
-rw-r--r-- net/core/ptp_classifier.c | 4
-rw-r--r-- net/core/rtnetlink.c | 38
-rw-r--r-- net/core/secure_seq.c | 25
-rw-r--r-- net/core/skbuff.c | 25
-rw-r--r-- net/core/sock.c | 4
-rw-r--r-- net/core/tso.c | 77
-rw-r--r-- net/dccp/ipv4.c | 1
-rw-r--r-- net/dccp/proto.c | 9
-rw-r--r-- net/dccp/sysctl.c | 3
-rw-r--r-- net/dccp/timer.c | 2
-rw-r--r-- net/decnet/af_decnet.c | 2
-rw-r--r-- net/dns_resolver/dns_query.c | 4
-rw-r--r-- net/dsa/slave.c | 2
-rw-r--r-- net/ieee802154/6lowpan_rtnl.c | 207
-rw-r--r-- net/ieee802154/dgram.c | 104
-rw-r--r-- net/ieee802154/header_ops.c | 52
-rw-r--r-- net/ieee802154/ieee802154.h | 19
-rw-r--r-- net/ieee802154/netlink.c | 20
-rw-r--r-- net/ieee802154/nl-mac.c | 809
-rw-r--r-- net/ieee802154/nl_policy.c | 16
-rw-r--r-- net/ieee802154/reassembly.c | 48
-rw-r--r-- net/ipv4/af_inet.c | 110
-rw-r--r-- net/ipv4/datagram.c | 20
-rw-r--r-- net/ipv4/devinet.c | 9
-rw-r--r-- net/ipv4/gre_demux.c | 27
-rw-r--r-- net/ipv4/gre_offload.c | 16
-rw-r--r-- net/ipv4/icmp.c | 23
-rw-r--r-- net/ipv4/igmp.c | 16
-rw-r--r-- net/ipv4/inet_connection_sock.c | 11
-rw-r--r-- net/ipv4/inet_hashtables.c | 6
-rw-r--r-- net/ipv4/inetpeer.c | 20
-rw-r--r-- net/ipv4/ip_forward.c | 2
-rw-r--r-- net/ipv4/ip_gre.c | 7
-rw-r--r-- net/ipv4/ip_options.c | 6
-rw-r--r-- net/ipv4/ip_output.c | 22
-rw-r--r-- net/ipv4/ip_tunnel.c | 9
-rw-r--r-- net/ipv4/ip_tunnel_core.c | 10
-rw-r--r-- net/ipv4/ipip.c | 1
-rw-r--r-- net/ipv4/ipmr.c | 4
-rw-r--r-- net/ipv4/netfilter/iptable_nat.c | 14
-rw-r--r-- net/ipv4/netfilter/nf_defrag_ipv4.c | 2
-rw-r--r-- net/ipv4/netfilter/nft_chain_nat_ipv4.c | 12
-rw-r--r-- net/ipv4/proc.c | 24
-rw-r--r-- net/ipv4/raw.c | 2
-rw-r--r-- net/ipv4/route.c | 52
-rw-r--r-- net/ipv4/syncookies.c | 3
-rw-r--r-- net/ipv4/sysctl_net_ipv4.c | 45
-rw-r--r-- net/ipv4/tcp.c | 8
-rw-r--r-- net/ipv4/tcp_bic.c | 5
-rw-r--r-- net/ipv4/tcp_cong.c | 24
-rw-r--r-- net/ipv4/tcp_cubic.c | 5
-rw-r--r-- net/ipv4/tcp_fastopen.c | 219
-rw-r--r-- net/ipv4/tcp_highspeed.c | 4
-rw-r--r-- net/ipv4/tcp_htcp.c | 4
-rw-r--r-- net/ipv4/tcp_hybla.c | 7
-rw-r--r-- net/ipv4/tcp_illinois.c | 5
-rw-r--r-- net/ipv4/tcp_input.c | 36
-rw-r--r-- net/ipv4/tcp_ipv4.c | 303
-rw-r--r-- net/ipv4/tcp_lp.c | 5
-rw-r--r-- net/ipv4/tcp_metrics.c | 5
-rw-r--r-- net/ipv4/tcp_minisocks.c | 31
-rw-r--r-- net/ipv4/tcp_offload.c | 9
-rw-r--r-- net/ipv4/tcp_output.c | 122
-rw-r--r-- net/ipv4/tcp_scalable.c | 5
-rw-r--r-- net/ipv4/tcp_vegas.c | 7
-rw-r--r-- net/ipv4/tcp_veno.c | 9
-rw-r--r-- net/ipv4/tcp_yeah.c | 5
-rw-r--r-- net/ipv4/udp.c | 135
-rw-r--r-- net/ipv4/udp_offload.c | 8
-rw-r--r-- net/ipv4/udplite.c | 1
-rw-r--r-- net/ipv4/xfrm4_mode_tunnel.c | 2
-rw-r--r-- net/ipv4/xfrm4_output.c | 2
-rw-r--r-- net/ipv6/addrconf.c | 49
-rw-r--r-- net/ipv6/addrconf_core.c | 2
-rw-r--r-- net/ipv6/af_inet6.c | 45
-rw-r--r-- net/ipv6/icmp.c | 41
-rw-r--r-- net/ipv6/inet6_connection_sock.c | 2
-rw-r--r-- net/ipv6/ip6_checksum.c | 63
-rw-r--r-- net/ipv6/ip6_fib.c | 12
-rw-r--r-- net/ipv6/ip6_flowlabel.c | 1
-rw-r--r-- net/ipv6/ip6_gre.c | 64
-rw-r--r-- net/ipv6/ip6_offload.c | 2
-rw-r--r-- net/ipv6/ip6_output.c | 24
-rw-r--r-- net/ipv6/ip6_tunnel.c | 1
-rw-r--r-- net/ipv6/ip6_vti.c | 3
-rw-r--r-- net/ipv6/netfilter/ip6table_nat.c | 14
-rw-r--r-- net/ipv6/netfilter/nf_conntrack_reasm.c | 2
-rw-r--r-- net/ipv6/netfilter/nft_chain_nat_ipv6.c | 12
-rw-r--r-- net/ipv6/output_core.c | 26
-rw-r--r-- net/ipv6/ping.c | 8
-rw-r--r-- net/ipv6/proc.c | 14
-rw-r--r-- net/ipv6/raw.c | 11
-rw-r--r-- net/ipv6/route.c | 4
-rw-r--r-- net/ipv6/sit.c | 1
-rw-r--r-- net/ipv6/syncookies.c | 4
-rw-r--r-- net/ipv6/sysctl_net_ipv6.c | 7
-rw-r--r-- net/ipv6/tcp_ipv6.c | 86
-rw-r--r-- net/ipv6/udp.c | 74
-rw-r--r-- net/ipv6/udp_offload.c | 5
-rw-r--r-- net/ipv6/udplite.c | 1
-rw-r--r-- net/ipv6/xfrm6_output.c | 6
-rw-r--r-- net/ipx/af_ipx.c | 2
-rw-r--r-- net/ipx/ipx_route.c | 3
-rw-r--r-- net/iucv/af_iucv.c | 32
-rw-r--r-- net/key/af_key.c | 34
-rw-r--r-- net/l2tp/l2tp_core.c | 118
-rw-r--r-- net/l2tp/l2tp_core.h | 4
-rw-r--r-- net/l2tp/l2tp_ip.c | 1
-rw-r--r-- net/l2tp/l2tp_ip6.c | 11
-rw-r--r-- net/l2tp/l2tp_netlink.c | 10
-rw-r--r-- net/mac80211/Makefile | 3
-rw-r--r-- net/mac80211/aes_ccm.c | 37
-rw-r--r-- net/mac80211/cfg.c | 591
-rw-r--r-- net/mac80211/chan.c | 614
-rw-r--r-- net/mac80211/debugfs.c | 2
-rw-r--r-- net/mac80211/debugfs.h | 2
-rw-r--r-- net/mac80211/debugfs_netdev.c | 6
-rw-r--r-- net/mac80211/debugfs_netdev.h | 2
-rw-r--r-- net/mac80211/driver-ops.h | 178
-rw-r--r-- net/mac80211/ht.c | 22
-rw-r--r-- net/mac80211/ibss.c | 78
-rw-r--r-- net/mac80211/ieee80211_i.h | 50
-rw-r--r-- net/mac80211/iface.c | 45
-rw-r--r-- net/mac80211/key.c | 7
-rw-r--r-- net/mac80211/main.c | 10
-rw-r--r-- net/mac80211/mesh.c | 38
-rw-r--r-- net/mac80211/mesh_hwmp.c | 5
-rw-r--r-- net/mac80211/mesh_pathtbl.c | 6
-rw-r--r-- net/mac80211/mesh_sync.c | 2
-rw-r--r-- net/mac80211/michael.h | 1
-rw-r--r-- net/mac80211/mlme.c | 50
-rw-r--r-- net/mac80211/rc80211_minstrel.c | 12
-rw-r--r-- net/mac80211/rc80211_minstrel_ht.c | 22
-rw-r--r-- net/mac80211/rx.c | 19
-rw-r--r-- net/mac80211/scan.c | 25
-rw-r--r-- net/mac80211/sta_info.c | 4
-rw-r--r-- net/mac80211/status.c | 25
-rw-r--r-- net/mac80211/tdls.c | 325
-rw-r--r-- net/mac80211/trace.h | 117
-rw-r--r-- net/mac80211/tx.c | 189
-rw-r--r-- net/mac80211/util.c | 191
-rw-r--r-- net/mac80211/wpa.c | 5
-rw-r--r-- net/mac802154/Kconfig | 4
-rw-r--r-- net/mac802154/Makefile | 3
-rw-r--r-- net/mac802154/llsec.c | 1070
-rw-r--r-- net/mac802154/llsec.h | 108
-rw-r--r-- net/mac802154/mac802154.h | 44
-rw-r--r-- net/mac802154/mac_cmd.c | 42
-rw-r--r-- net/mac802154/mib.c | 187
-rw-r--r-- net/mac802154/monitor.c | 3
-rw-r--r-- net/mac802154/rx.c | 13
-rw-r--r-- net/mac802154/wpan.c | 176
-rw-r--r-- net/mpls/mpls_gso.c | 1
-rw-r--r-- net/netfilter/ipset/ip_set_core.c | 5
-rw-r--r-- net/netfilter/ipvs/ip_vs_core.c | 2
-rw-r--r-- net/netfilter/ipvs/ip_vs_xmit.c | 22
-rw-r--r-- net/netfilter/nf_nat_core.c | 24
-rw-r--r-- net/netfilter/nf_tables_api.c | 1269
-rw-r--r-- net/netfilter/nfnetlink.c | 10
-rw-r--r-- net/netfilter/nfnetlink_acct.c | 86
-rw-r--r-- net/netfilter/nft_ct.c | 96
-rw-r--r-- net/netfilter/nft_hash.c | 59
-rw-r--r-- net/netfilter/nft_lookup.c | 10
-rw-r--r-- net/netfilter/nft_meta.c | 103
-rw-r--r-- net/netfilter/nft_rbtree.c | 43
-rw-r--r-- net/netfilter/xt_bpf.c | 5
-rw-r--r-- net/netfilter/xt_nfacct.c | 5
-rw-r--r-- net/netfilter/xt_recent.c | 5
-rw-r--r-- net/netlink/af_netlink.c | 70
-rw-r--r-- net/netlink/af_netlink.h | 6
-rw-r--r-- net/netlink/genetlink.c | 6
-rw-r--r-- net/nfc/digital.h | 1
-rw-r--r-- net/nfc/digital_core.c | 26
-rw-r--r-- net/nfc/digital_dep.c | 5
-rw-r--r-- net/nfc/digital_technology.c | 230
-rw-r--r-- net/nfc/hci/command.c | 6
-rw-r--r-- net/nfc/hci/core.c | 47
-rw-r--r-- net/nfc/llcp_commands.c | 2
-rw-r--r-- net/nfc/llcp_core.c | 13
-rw-r--r-- net/nfc/nci/core.c | 9
-rw-r--r-- net/nfc/nci/ntf.c | 7
-rw-r--r-- net/nfc/nfc.h | 6
-rw-r--r-- net/nfc/rawsock.c | 94
-rw-r--r-- net/openvswitch/actions.c | 4
-rw-r--r-- net/openvswitch/datapath.c | 778
-rw-r--r-- net/openvswitch/datapath.h | 8
-rw-r--r-- net/openvswitch/flow.c | 188
-rw-r--r-- net/openvswitch/flow.h | 53
-rw-r--r-- net/openvswitch/flow_netlink.c | 186
-rw-r--r-- net/openvswitch/flow_netlink.h | 1
-rw-r--r-- net/openvswitch/flow_table.c | 121
-rw-r--r-- net/openvswitch/flow_table.h | 4
-rw-r--r-- net/openvswitch/vport-gre.c | 4
-rw-r--r-- net/openvswitch/vport-internal_dev.c | 2
-rw-r--r-- net/openvswitch/vport-vxlan.c | 7
-rw-r--r-- net/openvswitch/vport.h | 6
-rw-r--r-- net/rds/ib_send.c | 4
-rw-r--r-- net/rds/iw_send.c | 4
-rw-r--r-- net/rds/iw_sysctl.c | 3
-rw-r--r-- net/rds/rdma_transport.c | 2
-rw-r--r-- net/rds/sysctl.c | 3
-rw-r--r-- net/rds/tcp_listen.c | 2
-rw-r--r-- net/rfkill/rfkill-gpio.c | 59
-rw-r--r-- net/sched/cls_api.c | 26
-rw-r--r-- net/sched/cls_basic.c | 10
-rw-r--r-- net/sched/cls_bpf.c | 14
-rw-r--r-- net/sched/cls_cgroup.c | 4
-rw-r--r-- net/sched/cls_flow.c | 4
-rw-r--r-- net/sched/cls_fw.c | 10
-rw-r--r-- net/sched/cls_route.c | 11
-rw-r--r-- net/sched/cls_rsvp.h | 4
-rw-r--r-- net/sched/cls_tcindex.c | 8
-rw-r--r-- net/sched/cls_u32.c | 10
-rw-r--r-- net/sched/sch_api.c | 10
-rw-r--r-- net/sched/sch_choke.c | 7
-rw-r--r-- net/sched/sch_drr.c | 4
-rw-r--r-- net/sched/sch_fq.c | 5
-rw-r--r-- net/sched/sch_fq_codel.c | 7
-rw-r--r-- net/sched/sch_hhf.c | 9
-rw-r--r-- net/sched/sch_netem.c | 7
-rw-r--r-- net/sched/sch_sfq.c | 7
-rw-r--r-- net/sctp/associola.c | 168
-rw-r--r-- net/sctp/endpointola.c | 2
-rw-r--r-- net/sctp/ipv6.c | 4
-rw-r--r-- net/sctp/output.c | 2
-rw-r--r-- net/sctp/proc.c | 2
-rw-r--r-- net/sctp/protocol.c | 11
-rw-r--r-- net/sctp/sm_make_chunk.c | 2
-rw-r--r-- net/sctp/socket.c | 13
-rw-r--r-- net/sctp/sysctl.c | 21
-rw-r--r-- net/sctp/transport.c | 2
-rw-r--r-- net/sctp/ulpqueue.c | 4
-rw-r--r-- net/sunrpc/socklib.c | 3
-rw-r--r-- net/sunrpc/xprtsock.c | 3
-rw-r--r-- net/tipc/Makefile | 2
-rw-r--r-- net/tipc/bcast.c | 194
-rw-r--r-- net/tipc/bcast.h | 9
-rw-r--r-- net/tipc/bearer.c | 153
-rw-r--r-- net/tipc/bearer.h | 47
-rw-r--r-- net/tipc/config.c | 12
-rw-r--r-- net/tipc/core.c | 14
-rw-r--r-- net/tipc/core.h | 10
-rw-r--r-- net/tipc/discover.c | 281
-rw-r--r-- net/tipc/discover.h | 1
-rw-r--r-- net/tipc/eth_media.c | 51
-rw-r--r-- net/tipc/handler.c | 134
-rw-r--r-- net/tipc/ib_media.c | 34
-rw-r--r-- net/tipc/link.c | 216
-rw-r--r-- net/tipc/link.h | 21
-rw-r--r-- net/tipc/msg.c | 55
-rw-r--r-- net/tipc/msg.h | 5
-rw-r--r-- net/tipc/name_distr.c | 78
-rw-r--r-- net/tipc/name_distr.h | 35
-rw-r--r-- net/tipc/name_table.c | 14
-rw-r--r-- net/tipc/net.c | 71
-rw-r--r-- net/tipc/net.h | 4
-rw-r--r-- net/tipc/node.c | 110
-rw-r--r-- net/tipc/node.h | 88
-rw-r--r-- net/tipc/node_subscr.c | 9
-rw-r--r-- net/tipc/node_subscr.h | 2
-rw-r--r-- net/tipc/port.c | 39
-rw-r--r-- net/tipc/port.h | 10
-rw-r--r-- net/tipc/socket.c | 121
-rw-r--r-- net/tipc/socket.h | 4
-rw-r--r-- net/unix/af_unix.c | 8
-rw-r--r-- net/wireless/Kconfig | 37
-rw-r--r-- net/wireless/ap.c | 4
-rw-r--r-- net/wireless/chan.c | 175
-rw-r--r-- net/wireless/core.c | 147
-rw-r--r-- net/wireless/core.h | 53
-rw-r--r-- net/wireless/ethtool.c | 10
-rw-r--r-- net/wireless/genregdb.awk | 14
-rw-r--r-- net/wireless/ibss.c | 43
-rw-r--r-- net/wireless/mesh.c | 32
-rw-r--r-- net/wireless/mlme.c | 38
-rw-r--r-- net/wireless/nl80211.c | 654
-rw-r--r-- net/wireless/nl80211.h | 3
-rw-r--r-- net/wireless/rdev-ops.h | 15
-rw-r--r-- net/wireless/reg.c | 156
-rw-r--r-- net/wireless/reg.h | 18
-rw-r--r-- net/wireless/scan.c | 162
-rw-r--r-- net/wireless/sme.c | 48
-rw-r--r-- net/wireless/trace.h | 66
-rw-r--r-- net/wireless/util.c | 209
-rw-r--r-- net/wireless/wext-compat.c | 40
-rw-r--r-- net/wireless/wext-compat.h | 2
-rw-r--r-- net/wireless/wext-sme.c | 12
-rw-r--r-- net/xfrm/xfrm_output.c | 5
-rw-r--r-- net/xfrm/xfrm_policy.c | 56
-rw-r--r-- net/xfrm/xfrm_proc.c | 3
-rw-r--r-- net/xfrm/xfrm_state.c | 37
-rw-r--r-- net/xfrm/xfrm_user.c | 53
345 files changed, 13684 insertions(+), 6769 deletions(-)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 3c32bd257b73..9012b1c922b6 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -63,7 +63,7 @@ bool vlan_do_receive(struct sk_buff **skbp)
 }
 
 /* Must be invoked with rcu_read_lock. */
-struct net_device *__vlan_find_dev_deep(struct net_device *dev,
+struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
 					__be16 vlan_proto, u16 vlan_id)
 {
 	struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);
@@ -81,13 +81,13 @@ struct net_device *__vlan_find_dev_deep(struct net_device *dev,
 
 		upper_dev = netdev_master_upper_dev_get_rcu(dev);
 		if (upper_dev)
-			return __vlan_find_dev_deep(upper_dev,
+			return __vlan_find_dev_deep_rcu(upper_dev,
 						    vlan_proto, vlan_id);
 	}
 
 	return NULL;
 }
-EXPORT_SYMBOL(__vlan_find_dev_deep);
+EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);
 
 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
 {
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 019efb79708f..ad2ac3c00398 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -643,9 +643,9 @@ static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
 	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
 	netdev_features_t old_features = features;
 
-	features &= real_dev->vlan_features;
+	features = netdev_intersect_features(features, real_dev->vlan_features);
 	features |= NETIF_F_RXCSUM;
-	features &= real_dev->features;
+	features = netdev_intersect_features(features, real_dev->features);
 
 	features |= old_features & NETIF_F_SOFT_FEATURES;
 	features |= NETIF_F_LLTX;
@@ -671,38 +671,36 @@ static void vlan_ethtool_get_drvinfo(struct net_device *dev,
 
 static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
+	struct vlan_pcpu_stats *p;
+	u32 rx_errors = 0, tx_dropped = 0;
+	int i;
 
-	if (vlan_dev_priv(dev)->vlan_pcpu_stats) {
-		struct vlan_pcpu_stats *p;
-		u32 rx_errors = 0, tx_dropped = 0;
-		int i;
-
-		for_each_possible_cpu(i) {
-			u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
-			unsigned int start;
-
-			p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
-			do {
-				start = u64_stats_fetch_begin_irq(&p->syncp);
-				rxpackets = p->rx_packets;
-				rxbytes = p->rx_bytes;
-				rxmulticast = p->rx_multicast;
-				txpackets = p->tx_packets;
-				txbytes = p->tx_bytes;
-			} while (u64_stats_fetch_retry_irq(&p->syncp, start));
-
-			stats->rx_packets += rxpackets;
-			stats->rx_bytes += rxbytes;
-			stats->multicast += rxmulticast;
-			stats->tx_packets += txpackets;
-			stats->tx_bytes += txbytes;
-			/* rx_errors & tx_dropped are u32 */
-			rx_errors += p->rx_errors;
-			tx_dropped += p->tx_dropped;
-		}
-		stats->rx_errors = rx_errors;
-		stats->tx_dropped = tx_dropped;
+	for_each_possible_cpu(i) {
+		u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
+		unsigned int start;
+
+		p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
+		do {
+			start = u64_stats_fetch_begin_irq(&p->syncp);
+			rxpackets = p->rx_packets;
+			rxbytes = p->rx_bytes;
+			rxmulticast = p->rx_multicast;
+			txpackets = p->tx_packets;
+			txbytes = p->tx_bytes;
+		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
+
+		stats->rx_packets += rxpackets;
+		stats->rx_bytes += rxbytes;
+		stats->multicast += rxmulticast;
+		stats->tx_packets += txpackets;
+		stats->tx_bytes += txbytes;
+		/* rx_errors & tx_dropped are u32 */
+		rx_errors += p->rx_errors;
+		tx_dropped += p->tx_dropped;
 	}
+	stats->rx_errors = rx_errors;
+	stats->tx_dropped = tx_dropped;
+
 	return stats;
 }
 
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 786ee2f83d5f..01a1082e02b3 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1669,7 +1669,7 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
 		goto out;
 	}
 
-	if (sk->sk_no_check == 1)
+	if (sk->sk_no_check_tx)
 		ddp->deh_sum = 0;
 	else
 		ddp->deh_sum = atalk_checksum(skb, len + sizeof(*ddp));
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 1281049c135f..d8e5d0c2ebbc 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -263,17 +263,11 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
 			goto out;
 		}
 	}
-/*
- * Not supported yet
- *
- * #ifndef CONFIG_SINGLE_SIGITF
- */
+
 	vcc->qos.txtp.max_pcr = SELECT_TOP_PCR(vcc->qos.txtp);
 	vcc->qos.txtp.pcr = 0;
 	vcc->qos.txtp.min_pcr = 0;
-/*
- * #endif
- */
+
 	error = vcc_connect(sock, vcc->itf, vcc->vpi, vcc->vci);
 	if (!error)
 		sock->state = SS_CONNECTED;
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index b758881be108..a12e25efaf6f 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -245,6 +245,7 @@ static int batadv_algorithms_open(struct inode *inode, struct file *file)
 static int batadv_originators_open(struct inode *inode, struct file *file)
 {
 	struct net_device *net_dev = (struct net_device *)inode->i_private;
+
 	return single_open(file, batadv_orig_seq_print_text, net_dev);
 }
 
@@ -258,18 +259,21 @@ static int batadv_originators_hardif_open(struct inode *inode,
 					  struct file *file)
 {
 	struct net_device *net_dev = (struct net_device *)inode->i_private;
+
 	return single_open(file, batadv_orig_hardif_seq_print_text, net_dev);
 }
 
 static int batadv_gateways_open(struct inode *inode, struct file *file)
 {
 	struct net_device *net_dev = (struct net_device *)inode->i_private;
+
 	return single_open(file, batadv_gw_client_seq_print_text, net_dev);
 }
 
 static int batadv_transtable_global_open(struct inode *inode, struct file *file)
 {
 	struct net_device *net_dev = (struct net_device *)inode->i_private;
+
 	return single_open(file, batadv_tt_global_seq_print_text, net_dev);
 }
 
@@ -277,6 +281,7 @@ static int batadv_transtable_global_open(struct inode *inode, struct file *file)
 static int batadv_bla_claim_table_open(struct inode *inode, struct file *file)
 {
 	struct net_device *net_dev = (struct net_device *)inode->i_private;
+
 	return single_open(file, batadv_bla_claim_table_seq_print_text,
 			   net_dev);
 }
@@ -285,6 +290,7 @@ static int batadv_bla_backbone_table_open(struct inode *inode,
 					  struct file *file)
 {
 	struct net_device *net_dev = (struct net_device *)inode->i_private;
+
 	return single_open(file, batadv_bla_backbone_table_seq_print_text,
 			   net_dev);
 }
@@ -300,6 +306,7 @@ static int batadv_bla_backbone_table_open(struct inode *inode,
 static int batadv_dat_cache_open(struct inode *inode, struct file *file)
 {
 	struct net_device *net_dev = (struct net_device *)inode->i_private;
+
 	return single_open(file, batadv_dat_cache_seq_print_text, net_dev);
 }
 #endif
@@ -307,6 +314,7 @@ static int batadv_dat_cache_open(struct inode *inode, struct file *file)
 static int batadv_transtable_local_open(struct inode *inode, struct file *file)
 {
 	struct net_device *net_dev = (struct net_device *)inode->i_private;
+
 	return single_open(file, batadv_tt_local_seq_print_text, net_dev);
 }
 
@@ -319,6 +327,7 @@ struct batadv_debuginfo {
 static int batadv_nc_nodes_open(struct inode *inode, struct file *file)
 {
 	struct net_device *net_dev = (struct net_device *)inode->i_private;
+
 	return single_open(file, batadv_nc_nodes_seq_print_text, net_dev);
 }
 #endif
@@ -333,7 +342,7 @@ struct batadv_debuginfo batadv_debuginfo_##_name = { \
 		.llseek = seq_lseek,		\
 		.release = single_release,	\
 	}					\
-};
+}
 
 /* the following attributes are general and therefore they will be directly
  * placed in the BATADV_DEBUGFS_SUBDIR subdirectory of debugfs
@@ -395,7 +404,7 @@ struct batadv_debuginfo batadv_hardif_debuginfo_##_name = { \
 		.llseek = seq_lseek,		\
 		.release = single_release,	\
 	},					\
-};
+}
 static BATADV_HARDIF_DEBUGINFO(originators, S_IRUGO,
 			       batadv_originators_hardif_open);
 
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index aa5d4946d0d7..f2c066b21716 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -594,7 +594,7 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
 	if (!neigh_node)
 		goto free_orig;
 
-	tmp_skb = pskb_copy(skb, GFP_ATOMIC);
+	tmp_skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
 	if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, tmp_skb,
 						   cand[i].orig_node,
 						   packet_subtype)) {
@@ -662,6 +662,7 @@ static void batadv_dat_tvlv_container_update(struct batadv_priv *bat_priv)
 void batadv_dat_status_update(struct net_device *net_dev)
 {
 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
+
 	batadv_dat_tvlv_container_update(bat_priv);
 }
 
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 770dc890ceef..118b990bae25 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -24,7 +24,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2014.2.0"
+#define BATADV_SOURCE_VERSION "2014.3.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index a9546fe541eb..8d04d174669e 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -86,6 +86,7 @@ static void batadv_nc_tvlv_container_update(struct batadv_priv *bat_priv)
 void batadv_nc_status_update(struct net_device *net_dev)
 {
 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
+
 	batadv_nc_tvlv_container_update(bat_priv);
 }
 
91 92
@@ -1343,7 +1344,7 @@ static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv,
1343 struct ethhdr *ethhdr; 1344 struct ethhdr *ethhdr;
1344 1345
1345 /* Copy skb header to change the mac header */ 1346 /* Copy skb header to change the mac header */
1346 skb = pskb_copy(skb, GFP_ATOMIC); 1347 skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
1347 if (!skb) 1348 if (!skb)
1348 return; 1349 return;
1349 1350
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 744a59b85e15..e7ee65dc20bf 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -884,7 +884,7 @@ static void batadv_softif_init_early(struct net_device *dev)
 	/* generate random address */
 	eth_hw_addr_random(dev);
 
-	SET_ETHTOOL_OPS(dev, &batadv_ethtool_ops);
+	dev->ethtool_ops = &batadv_ethtool_ops;
 
 	memset(priv, 0, sizeof(*priv));
 }
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index 1ebb0d9e2ea5..fc47baa888c5 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -29,12 +29,14 @@
 static struct net_device *batadv_kobj_to_netdev(struct kobject *obj)
 {
 	struct device *dev = container_of(obj->parent, struct device, kobj);
+
 	return to_net_dev(dev);
 }
 
 static struct batadv_priv *batadv_kobj_to_batpriv(struct kobject *obj)
 {
 	struct net_device *net_dev = batadv_kobj_to_netdev(obj);
+
 	return netdev_priv(net_dev);
 }
 
@@ -106,7 +108,7 @@ struct batadv_attribute batadv_attr_vlan_##_name = { \
 	       .mode = _mode },			\
 	.show	= _show,			\
 	.store	= _store,			\
-};
+}
 
 /* Use this, if you have customized show and store functions */
 #define BATADV_ATTR(_name, _mode, _show, _store)	\
@@ -115,7 +117,7 @@ struct batadv_attribute batadv_attr_##_name = { \
 	       .mode = _mode },			\
 	.show	= _show,			\
 	.store	= _store,			\
-};
+}
 
 #define BATADV_ATTR_SIF_STORE_BOOL(_name, _post_func)			\
 ssize_t batadv_store_##_name(struct kobject *kobj,			\
@@ -124,6 +126,7 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
 {									\
 	struct net_device *net_dev = batadv_kobj_to_netdev(kobj);	\
 	struct batadv_priv *bat_priv = netdev_priv(net_dev);		\
+									\
 	return __batadv_store_bool_attr(buff, count, _post_func, attr,	\
 					&bat_priv->_name, net_dev);	\
 }
@@ -133,6 +136,7 @@ ssize_t batadv_show_##_name(struct kobject *kobj, \
 			    struct attribute *attr, char *buff)		\
 {									\
 	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);	\
+									\
 	return sprintf(buff, "%s\n",					\
 		       atomic_read(&bat_priv->_name) == 0 ?		\
 		       "disabled" : "enabled");				\
@@ -155,6 +159,7 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
 {									\
 	struct net_device *net_dev = batadv_kobj_to_netdev(kobj);	\
 	struct batadv_priv *bat_priv = netdev_priv(net_dev);		\
+									\
 	return __batadv_store_uint_attr(buff, count, _min, _max,	\
 					_post_func, attr,		\
 					&bat_priv->_name, net_dev);	\
@@ -165,6 +170,7 @@ ssize_t batadv_show_##_name(struct kobject *kobj, \
 			    struct attribute *attr, char *buff)		\
 {									\
 	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);	\
+									\
 	return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name));	\
 }									\
 
@@ -188,6 +194,7 @@ ssize_t batadv_store_vlan_##_name(struct kobject *kobj, \
 	size_t res = __batadv_store_bool_attr(buff, count, _post_func,	\
 					      attr, &vlan->_name,	\
 					      bat_priv->soft_iface);	\
+									\
 	batadv_softif_vlan_free_ref(vlan);				\
 	return res;							\
 }
@@ -202,6 +209,7 @@ ssize_t batadv_show_vlan_##_name(struct kobject *kobj, \
 	size_t res = sprintf(buff, "%s\n",				\
 			     atomic_read(&vlan->_name) == 0 ?		\
 			     "disabled" : "enabled");			\
+									\
 	batadv_softif_vlan_free_ref(vlan);				\
 	return res;							\
 }
@@ -324,12 +332,14 @@ static ssize_t batadv_show_bat_algo(struct kobject *kobj,
 				    struct attribute *attr, char *buff)
 {
 	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
+
 	return sprintf(buff, "%s\n", bat_priv->bat_algo_ops->name);
 }
 
 static void batadv_post_gw_reselect(struct net_device *net_dev)
 {
 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
+
 	batadv_gw_reselect(bat_priv);
 }
 
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 73492b91105a..8796ffa08b43 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -420,12 +420,18 @@ static int conn_send(struct l2cap_conn *conn,
 	return 0;
 }
 
-static void get_dest_bdaddr(struct in6_addr *ip6_daddr,
-			    bdaddr_t *addr, u8 *addr_type)
+static u8 get_addr_type_from_eui64(u8 byte)
 {
-	u8 *eui64;
+	/* Is universal(0) or local(1) bit, */
+	if (byte & 0x02)
+		return ADDR_LE_DEV_RANDOM;
 
-	eui64 = ip6_daddr->s6_addr + 8;
+	return ADDR_LE_DEV_PUBLIC;
+}
+
+static void copy_to_bdaddr(struct in6_addr *ip6_daddr, bdaddr_t *addr)
+{
+	u8 *eui64 = ip6_daddr->s6_addr + 8;
 
 	addr->b[0] = eui64[7];
 	addr->b[1] = eui64[6];
@@ -433,16 +439,19 @@ static void get_dest_bdaddr(struct in6_addr *ip6_daddr,
 	addr->b[3] = eui64[2];
 	addr->b[4] = eui64[1];
 	addr->b[5] = eui64[0];
+}
 
-	addr->b[5] ^= 2;
+static void convert_dest_bdaddr(struct in6_addr *ip6_daddr,
+				bdaddr_t *addr, u8 *addr_type)
+{
+	copy_to_bdaddr(ip6_daddr, addr);
 
-	/* Set universal/local bit to 0 */
-	if (addr->b[5] & 1) {
-		addr->b[5] &= ~1;
-		*addr_type = ADDR_LE_DEV_PUBLIC;
-	} else {
-		*addr_type = ADDR_LE_DEV_RANDOM;
-	}
+	/* We need to toggle the U/L bit that we got from IPv6 address
+	 * so that we get the proper address and type of the BD address.
+	 */
+	addr->b[5] ^= 0x02;
+
+	*addr_type = get_addr_type_from_eui64(addr->b[5]);
 }
 
 static int header_create(struct sk_buff *skb, struct net_device *netdev,
@@ -473,9 +482,11 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev,
 	/* Get destination BT device from skb.
 	 * If there is no such peer then discard the packet.
 	 */
-	get_dest_bdaddr(&hdr->daddr, &addr, &addr_type);
+	convert_dest_bdaddr(&hdr->daddr, &addr, &addr_type);
 
-	BT_DBG("dest addr %pMR type %d", &addr, addr_type);
+	BT_DBG("dest addr %pMR type %s IP %pI6c", &addr,
+	       addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
+	       &hdr->daddr);
 
 	read_lock_irqsave(&devices_lock, flags);
 	peer = peer_lookup_ba(dev, &addr, addr_type);
@@ -556,7 +567,7 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
 	} else {
 		unsigned long flags;
 
-		get_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
+		convert_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
 		eui64_addr = lowpan_cb(skb)->addr.s6_addr + 8;
 		dev = lowpan_dev(netdev);
 
@@ -564,8 +575,10 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
 		peer = peer_lookup_ba(dev, &addr, addr_type);
 		read_unlock_irqrestore(&devices_lock, flags);
 
-		BT_DBG("xmit from %s to %pMR (%pI6c) peer %p", netdev->name,
-		       &addr, &lowpan_cb(skb)->addr, peer);
+		BT_DBG("xmit %s to %pMR type %s IP %pI6c peer %p",
+		       netdev->name, &addr,
+		       addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
+		       &lowpan_cb(skb)->addr, peer);
 
 		if (peer && peer->conn)
 			err = send_pkt(peer->conn, netdev->dev_addr,
@@ -620,13 +633,13 @@ static void set_addr(u8 *eui, u8 *addr, u8 addr_type)
 	eui[6] = addr[1];
 	eui[7] = addr[0];
 
-	eui[0] ^= 2;
-
-	/* Universal/local bit set, RFC 4291 */
+	/* Universal/local bit set, BT 6lowpan draft ch. 3.2.1 */
 	if (addr_type == ADDR_LE_DEV_PUBLIC)
-		eui[0] |= 1;
+		eui[0] &= ~0x02;
 	else
-		eui[0] &= ~1;
+		eui[0] |= 0x02;
+
+	BT_DBG("type %d addr %*phC", addr_type, 8, eui);
 }
 
 static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
@@ -634,7 +647,6 @@ static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
 {
 	netdev->addr_assign_type = NET_ADDR_PERM;
 	set_addr(netdev->dev_addr, addr->b, addr_type);
-	netdev->dev_addr[0] ^= 2;
 }
 
 static void ifup(struct net_device *netdev)
@@ -684,13 +696,6 @@ static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev)
 
 	memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
 	       EUI64_ADDR_LEN);
-	peer->eui64_addr[0] ^= 2; /* second bit-flip (Universe/Local)
-				   * is done according RFC2464
-				   */
-
-	raw_dump_inline(__func__, "peer IPv6 address",
-			(unsigned char *)&peer->peer_addr, 16);
-	raw_dump_inline(__func__, "peer EUI64 address", peer->eui64_addr, 8);
 
 	write_lock_irqsave(&devices_lock, flags);
 	INIT_LIST_HEAD(&peer->list);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 521fd4f3985e..8671bc79a35b 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -28,6 +28,7 @@
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
 
 #include "smp.h"
 #include "a2mp.h"
@@ -367,9 +368,23 @@ static void le_conn_timeout(struct work_struct *work)
 {
 	struct hci_conn *conn = container_of(work, struct hci_conn,
 					     le_conn_timeout.work);
+	struct hci_dev *hdev = conn->hdev;
 
 	BT_DBG("");
 
+	/* We could end up here due to having done directed advertising,
+	 * so clean up the state if necessary. This should however only
+	 * happen with broken hardware or if low duty cycle was used
+	 * (which doesn't have a timeout of its own).
+	 */
+	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
+		u8 enable = 0x00;
+		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
+			     &enable);
+		hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
+		return;
+	}
+
 	hci_le_create_connection_cancel(conn);
 }
 
@@ -393,6 +408,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
 	conn->io_capability = hdev->io_capability;
 	conn->remote_auth = 0xff;
 	conn->key_type = 0xff;
+	conn->tx_power = HCI_TX_POWER_INVALID;
+	conn->max_tx_power = HCI_TX_POWER_INVALID;
 
 	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
@@ -401,6 +418,10 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
 	case ACL_LINK:
 		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
 		break;
+	case LE_LINK:
+		/* conn->src should reflect the local identity address */
+		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
+		break;
 	case SCO_LINK:
 		if (lmp_esco_capable(hdev))
 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
@@ -545,6 +566,11 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
 	 * favor of connection establishment, we should restart it.
 	 */
 	hci_update_background_scan(hdev);
+
+	/* Re-enable advertising in case this was a failed connection
+	 * attempt as a peripheral.
+	 */
+	mgmt_reenable_advertising(hdev);
 }
 
 static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
@@ -605,6 +631,45 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
 	conn->state = BT_CONNECT;
 }
 
+static void hci_req_directed_advertising(struct hci_request *req,
+					 struct hci_conn *conn)
+{
+	struct hci_dev *hdev = req->hdev;
+	struct hci_cp_le_set_adv_param cp;
+	u8 own_addr_type;
+	u8 enable;
+
+	enable = 0x00;
+	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+
+	/* Clear the HCI_ADVERTISING bit temporarily so that the
+	 * hci_update_random_address knows that it's safe to go ahead
+	 * and write a new random address. The flag will be set back on
+	 * as soon as the SET_ADV_ENABLE HCI command completes.
+	 */
+	clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+
+	/* Set require_privacy to false so that the remote device has a
+	 * chance of identifying us.
+	 */
+	if (hci_update_random_address(req, false, &own_addr_type) < 0)
+		return;
+
+	memset(&cp, 0, sizeof(cp));
+	cp.type = LE_ADV_DIRECT_IND;
+	cp.own_address_type = own_addr_type;
+	cp.direct_addr_type = conn->dst_type;
+	bacpy(&cp.direct_addr, &conn->dst);
+	cp.channel_map = hdev->le_adv_channel_map;
+
+	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
+
+	enable = 0x01;
+	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+
+	conn->state = BT_CONNECT;
+}
+
 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 				u8 dst_type, u8 sec_level, u8 auth_type)
 {
@@ -614,9 +679,6 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 	struct hci_request req;
 	int err;
 
-	if (test_bit(HCI_ADVERTISING, &hdev->flags))
-		return ERR_PTR(-ENOTSUPP);
-
 	/* Some devices send ATT messages as soon as the physical link is
 	 * established. To be able to handle these ATT messages, the user-
 	 * space first establishes the connection and then starts the pairing
@@ -664,13 +726,20 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 		return ERR_PTR(-ENOMEM);
 
 	conn->dst_type = dst_type;
-
-	conn->out = true;
-	conn->link_mode |= HCI_LM_MASTER;
 	conn->sec_level = BT_SECURITY_LOW;
 	conn->pending_sec_level = sec_level;
 	conn->auth_type = auth_type;
 
+	hci_req_init(&req, hdev);
+
+	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
+		hci_req_directed_advertising(&req, conn);
+		goto create_conn;
+	}
+
+	conn->out = true;
+	conn->link_mode |= HCI_LM_MASTER;
+
 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
 	if (params) {
 		conn->le_conn_min_interval = params->conn_min_interval;
@@ -680,8 +749,6 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 		conn->le_conn_max_interval = hdev->le_conn_max_interval;
 	}
 
-	hci_req_init(&req, hdev);
-
 	/* If controller is scanning, we stop it since some controllers are
 	 * not able to scan and connect at the same time. Also set the
 	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
@@ -695,6 +762,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 
 	hci_req_add_le_create_conn(&req, conn);
 
+create_conn:
 	err = hci_req_run(&req, create_le_conn_complete);
 	if (err) {
 		hci_conn_del(conn);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 1c6ffaa8902f..0a43cce9a914 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -34,6 +34,7 @@
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
 
 #include "smp.h"
 
@@ -579,6 +580,62 @@ static int sniff_max_interval_get(void *data, u64 *val)
 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
 			sniff_max_interval_set, "%llu\n");
 
+static int conn_info_min_age_set(void *data, u64 val)
+{
+	struct hci_dev *hdev = data;
+
+	if (val == 0 || val > hdev->conn_info_max_age)
+		return -EINVAL;
+
+	hci_dev_lock(hdev);
+	hdev->conn_info_min_age = val;
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+static int conn_info_min_age_get(void *data, u64 *val)
+{
+	struct hci_dev *hdev = data;
+
+	hci_dev_lock(hdev);
+	*val = hdev->conn_info_min_age;
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
+			conn_info_min_age_set, "%llu\n");
+
+static int conn_info_max_age_set(void *data, u64 val)
+{
+	struct hci_dev *hdev = data;
+
+	if (val == 0 || val < hdev->conn_info_min_age)
+		return -EINVAL;
+
+	hci_dev_lock(hdev);
+	hdev->conn_info_max_age = val;
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+static int conn_info_max_age_get(void *data, u64 *val)
+{
+	struct hci_dev *hdev = data;
+
+	hci_dev_lock(hdev);
+	*val = hdev->conn_info_max_age;
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
+			conn_info_max_age_set, "%llu\n");
+
 static int identity_show(struct seq_file *f, void *p)
 {
 	struct hci_dev *hdev = f->private;
@@ -955,14 +1012,9 @@ static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
 	if (count < 3)
 		return -EINVAL;
 
-	buf = kzalloc(count, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	if (copy_from_user(buf, data, count)) {
-		err = -EFAULT;
-		goto done;
-	}
+	buf = memdup_user(data, count);
+	if (IS_ERR(buf))
+		return PTR_ERR(buf);
 
 	if (memcmp(buf, "add", 3) == 0) {
 		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
@@ -1759,6 +1811,11 @@ static int __hci_init(struct hci_dev *hdev)
 			    &blacklist_fops);
 	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
 
+	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
+			    &conn_info_min_age_fops);
+	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
+			    &conn_info_max_age_fops);
+
 	if (lmp_bredr_capable(hdev)) {
 		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
 				    hdev, &inquiry_cache_fops);
@@ -1828,6 +1885,9 @@ static int __hci_init(struct hci_dev *hdev)
 				    &lowpan_debugfs_fops);
 		debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
 				    &le_auto_conn_fops);
+		debugfs_create_u16("discov_interleaved_timeout", 0644,
+				   hdev->debugfs,
+				   &hdev->discov_interleaved_timeout);
 	}
 
 	return 0;
@@ -2033,12 +2093,11 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
 
 	hci_remove_remote_oob_data(hdev, &data->bdaddr);
 
-	if (ssp)
-		*ssp = data->ssp_mode;
+	*ssp = data->ssp_mode;
 
 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
 	if (ie) {
-		if (ie->data.ssp_mode && ssp)
+		if (ie->data.ssp_mode)
 			*ssp = true;
 
 		if (ie->name_state == NAME_NEEDED &&
@@ -3791,6 +3850,9 @@ struct hci_dev *hci_alloc_dev(void)
 	hdev->le_conn_max_interval = 0x0038;
 
 	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
+	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
+	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
+	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
 
 	mutex_init(&hdev->lock);
 	mutex_init(&hdev->req_lock);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 15010a230b6d..1096e4cd1283 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -991,10 +991,25 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
 	if (!sent)
 		return;
 
+	if (status)
+		return;
+
 	hci_dev_lock(hdev);
 
-	if (!status)
-		mgmt_advertising(hdev, *sent);
+	/* If we're doing connection initation as peripheral. Set a
+	 * timeout in case something goes wrong.
+	 */
+	if (*sent) {
+		struct hci_conn *conn;
+
+		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+		if (conn)
+			queue_delayed_work(hdev->workqueue,
+					   &conn->le_conn_timeout,
+					   HCI_LE_CONN_TIMEOUT);
+	}
+
+	mgmt_advertising(hdev, *sent);
 
 	hci_dev_unlock(hdev);
 }
@@ -1018,6 +1033,33 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
 	hci_dev_unlock(hdev);
 }
 
+static bool has_pending_adv_report(struct hci_dev *hdev)
+{
+	struct discovery_state *d = &hdev->discovery;
+
+	return bacmp(&d->last_adv_addr, BDADDR_ANY);
+}
+
+static void clear_pending_adv_report(struct hci_dev *hdev)
+{
+	struct discovery_state *d = &hdev->discovery;
+
+	bacpy(&d->last_adv_addr, BDADDR_ANY);
+	d->last_adv_data_len = 0;
+}
+
+static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
+				     u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
+{
+	struct discovery_state *d = &hdev->discovery;
+
+	bacpy(&d->last_adv_addr, bdaddr);
+	d->last_adv_addr_type = bdaddr_type;
+	d->last_adv_rssi = rssi;
+	memcpy(d->last_adv_data, data, len);
+	d->last_adv_data_len = len;
+}
+
 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
 				      struct sk_buff *skb)
 {
@@ -1036,9 +1078,25 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
 	switch (cp->enable) {
 	case LE_SCAN_ENABLE:
 		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
+		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
+			clear_pending_adv_report(hdev);
 		break;
 
 	case LE_SCAN_DISABLE:
+		/* We do this here instead of when setting DISCOVERY_STOPPED
+		 * since the latter would potentially require waiting for
+		 * inquiry to stop too.
+		 */
+		if (has_pending_adv_report(hdev)) {
+			struct discovery_state *d = &hdev->discovery;
+
+			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
+					  d->last_adv_addr_type, NULL,
+					  d->last_adv_rssi, 0, 1,
+					  d->last_adv_data,
+					  d->last_adv_data_len, NULL, 0);
+		}
+
 		/* Cancel this timer so that we don't try to disable scanning
 		 * when it's already disabled.
 		 */
@@ -1187,6 +1245,59 @@ static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
 	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
 }
 
+static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_rp_read_rssi *rp = (void *) skb->data;
+	struct hci_conn *conn;
+
+	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+	if (rp->status)
+		return;
+
+	hci_dev_lock(hdev);
+
+	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
+	if (conn)
+		conn->rssi = rp->rssi;
+
+	hci_dev_unlock(hdev);
+}
+
+static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_cp_read_tx_power *sent;
+	struct hci_rp_read_tx_power *rp = (void *) skb->data;
+	struct hci_conn *conn;
+
+	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+	if (rp->status)
+		return;
+
+	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
+	if (!sent)
+		return;
+
+	hci_dev_lock(hdev);
+
+	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
+	if (!conn)
+		goto unlock;
+
+	switch (sent->type) {
+	case 0x00:
+		conn->tx_power = rp->tx_power;
+		break;
+	case 0x01:
+		conn->max_tx_power = rp->tx_power;
+		break;
+	}
+
+unlock:
+	hci_dev_unlock(hdev);
+}
+
 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
 {
 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
@@ -1342,6 +1453,7 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
 	 * is requested.
 	 */
 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
+	    conn->pending_sec_level != BT_SECURITY_FIPS &&
 	    conn->pending_sec_level != BT_SECURITY_HIGH &&
 	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
 		return 0;
@@ -1827,7 +1939,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
 		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
 				  info->dev_class, 0, !name_known, ssp, NULL,
-				  0);
+				  0, NULL, 0);
 	}
 
 	hci_dev_unlock(hdev);
@@ -2579,6 +2691,14 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2579 hci_cc_write_remote_amp_assoc(hdev, skb); 2691 hci_cc_write_remote_amp_assoc(hdev, skb);
2580 break; 2692 break;
2581 2693
2694 case HCI_OP_READ_RSSI:
2695 hci_cc_read_rssi(hdev, skb);
2696 break;
2697
2698 case HCI_OP_READ_TX_POWER:
2699 hci_cc_read_tx_power(hdev, skb);
2700 break;
2701
2582 default: 2702 default:
2583 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); 2703 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2584 break; 2704 break;
@@ -2957,7 +3077,8 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2957 } 3077 }
2958 3078
2959 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && 3079 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2960 conn->pending_sec_level == BT_SECURITY_HIGH) { 3080 (conn->pending_sec_level == BT_SECURITY_HIGH ||
3081 conn->pending_sec_level == BT_SECURITY_FIPS)) {
2961 BT_DBG("%s ignoring key unauthenticated for high security", 3082 BT_DBG("%s ignoring key unauthenticated for high security",
2962 hdev->name); 3083 hdev->name);
2963 goto not_found; 3084 goto not_found;
@@ -3102,7 +3223,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3102 false, &ssp); 3223 false, &ssp);
3103 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 3224 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3104 info->dev_class, info->rssi, 3225 info->dev_class, info->rssi,
3105 !name_known, ssp, NULL, 0); 3226 !name_known, ssp, NULL, 0, NULL, 0);
3106 } 3227 }
3107 } else { 3228 } else {
3108 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); 3229 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
@@ -3120,7 +3241,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3120 false, &ssp); 3241 false, &ssp);
3121 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 3242 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3122 info->dev_class, info->rssi, 3243 info->dev_class, info->rssi,
3123 !name_known, ssp, NULL, 0); 3244 !name_known, ssp, NULL, 0, NULL, 0);
3124 } 3245 }
3125 } 3246 }
3126 3247
@@ -3309,7 +3430,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3309 eir_len = eir_get_length(info->data, sizeof(info->data)); 3430 eir_len = eir_get_length(info->data, sizeof(info->data));
3310 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 3431 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3311 info->dev_class, info->rssi, !name_known, 3432 info->dev_class, info->rssi, !name_known,
3312 ssp, info->data, eir_len); 3433 ssp, info->data, eir_len, NULL, 0);
3313 } 3434 }
3314 3435
3315 hci_dev_unlock(hdev); 3436 hci_dev_unlock(hdev);
@@ -3367,24 +3488,20 @@ unlock:
3367 3488
3368static u8 hci_get_auth_req(struct hci_conn *conn) 3489static u8 hci_get_auth_req(struct hci_conn *conn)
3369{ 3490{
3370 /* If remote requests dedicated bonding follow that lead */
3371 if (conn->remote_auth == HCI_AT_DEDICATED_BONDING ||
3372 conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) {
3373 /* If both remote and local IO capabilities allow MITM
3374 * protection then require it, otherwise don't */
3375 if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT ||
3376 conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)
3377 return HCI_AT_DEDICATED_BONDING;
3378 else
3379 return HCI_AT_DEDICATED_BONDING_MITM;
3380 }
3381
3382 /* If remote requests no-bonding follow that lead */ 3491 /* If remote requests no-bonding follow that lead */
3383 if (conn->remote_auth == HCI_AT_NO_BONDING || 3492 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3384 conn->remote_auth == HCI_AT_NO_BONDING_MITM) 3493 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3385 return conn->remote_auth | (conn->auth_type & 0x01); 3494 return conn->remote_auth | (conn->auth_type & 0x01);
3386 3495
3387 return conn->auth_type; 3496 /* If both remote and local have enough IO capabilities, require
3497 * MITM protection
3498 */
3499 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3500 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3501 return conn->remote_auth | 0x01;
3502
3503 /* No MITM protection possible so ignore remote requirement */
3504 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3388} 3505}
3389 3506
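The rewritten hci_get_auth_req() above drops the dedicated-bonding special case: MITM protection is now required whenever both sides have usable IO capabilities, and the remote MITM bit is stripped when it is not achievable. A standalone sketch of that decision, using simplified constants assumed for illustration rather than the kernel headers:

#include <stdio.h>

#define IO_NO_INPUT_OUTPUT 0x03  /* assumed value, for illustration only */
#define AUTH_NO_BONDING    0x00  /* bit 0 of the auth req carries MITM */

static unsigned char auth_req(unsigned char remote_auth, unsigned char local_auth,
			      unsigned char remote_cap, unsigned char local_cap)
{
	/* If the remote requests no-bonding, follow that lead */
	if ((remote_auth & ~0x01) == AUTH_NO_BONDING)
		return remote_auth | (local_auth & 0x01);

	/* Both sides can support MITM protection: require it */
	if (remote_cap != IO_NO_INPUT_OUTPUT && local_cap != IO_NO_INPUT_OUTPUT)
		return remote_auth | 0x01;

	/* No MITM possible, so drop the remote MITM bit */
	return (remote_auth & ~0x01) | (local_auth & 0x01);
}

int main(void)
{
	/* NoInputNoOutput on our side: the MITM bit gets stripped -> 0x02 */
	printf("0x%02x\n", auth_req(0x03, 0x02, 0x01, IO_NO_INPUT_OUTPUT));
	return 0;
}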
3390static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 3507static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -3414,8 +3531,21 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3414 * to DisplayYesNo as it is not supported by BT spec. */ 3531 * to DisplayYesNo as it is not supported by BT spec. */
3415 cp.capability = (conn->io_capability == 0x04) ? 3532 cp.capability = (conn->io_capability == 0x04) ?
3416 HCI_IO_DISPLAY_YESNO : conn->io_capability; 3533 HCI_IO_DISPLAY_YESNO : conn->io_capability;
3417 conn->auth_type = hci_get_auth_req(conn); 3534
3418 cp.authentication = conn->auth_type; 3535 /* If we are initiators, there is no remote information yet */
3536 if (conn->remote_auth == 0xff) {
3537 cp.authentication = conn->auth_type;
3538
 3539 /* Request MITM protection if our IO caps allow it,
 3540 * except for the no-bonding case.
3541 */
3542 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
3543 cp.authentication != HCI_AT_NO_BONDING)
3544 cp.authentication |= 0x01;
3545 } else {
3546 conn->auth_type = hci_get_auth_req(conn);
3547 cp.authentication = conn->auth_type;
3548 }
3419 3549
3420 if (hci_find_remote_oob_data(hdev, &conn->dst) && 3550 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3421 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags))) 3551 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
@@ -3483,12 +3613,9 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3483 rem_mitm = (conn->remote_auth & 0x01); 3613 rem_mitm = (conn->remote_auth & 0x01);
3484 3614
3485 /* If we require MITM but the remote device can't provide that 3615 /* If we require MITM but the remote device can't provide that
3486 * (it has NoInputNoOutput) then reject the confirmation 3616 * (it has NoInputNoOutput) then reject the confirmation request
3487 * request. The only exception is when we're dedicated bonding 3617 */
3488 * initiators (connect_cfm_cb set) since then we always have the MITM 3618 if (loc_mitm && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
3489 * bit set. */
3490 if (!conn->connect_cfm_cb && loc_mitm &&
3491 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
3492 BT_DBG("Rejecting request: remote device can't provide MITM"); 3619 BT_DBG("Rejecting request: remote device can't provide MITM");
3493 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, 3620 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3494 sizeof(ev->bdaddr), &ev->bdaddr); 3621 sizeof(ev->bdaddr), &ev->bdaddr);
@@ -3846,17 +3973,6 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3846 3973
3847 conn->dst_type = ev->bdaddr_type; 3974 conn->dst_type = ev->bdaddr_type;
3848 3975
3849 /* The advertising parameters for own address type
3850 * define which source address and source address
3851 * type this connections has.
3852 */
3853 if (bacmp(&conn->src, BDADDR_ANY)) {
3854 conn->src_type = ADDR_LE_DEV_PUBLIC;
3855 } else {
3856 bacpy(&conn->src, &hdev->static_addr);
3857 conn->src_type = ADDR_LE_DEV_RANDOM;
3858 }
3859
3860 if (ev->role == LE_CONN_ROLE_MASTER) { 3976 if (ev->role == LE_CONN_ROLE_MASTER) {
3861 conn->out = true; 3977 conn->out = true;
3862 conn->link_mode |= HCI_LM_MASTER; 3978 conn->link_mode |= HCI_LM_MASTER;
@@ -3881,27 +3997,24 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3881 &conn->init_addr, 3997 &conn->init_addr,
3882 &conn->init_addr_type); 3998 &conn->init_addr_type);
3883 } 3999 }
3884 } else {
3885 /* Set the responder (our side) address type based on
3886 * the advertising address type.
3887 */
3888 conn->resp_addr_type = hdev->adv_addr_type;
3889 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
3890 bacpy(&conn->resp_addr, &hdev->random_addr);
3891 else
3892 bacpy(&conn->resp_addr, &hdev->bdaddr);
3893
3894 conn->init_addr_type = ev->bdaddr_type;
3895 bacpy(&conn->init_addr, &ev->bdaddr);
3896 } 4000 }
3897 } else { 4001 } else {
3898 cancel_delayed_work(&conn->le_conn_timeout); 4002 cancel_delayed_work(&conn->le_conn_timeout);
3899 } 4003 }
3900 4004
3901 /* Ensure that the hci_conn contains the identity address type 4005 if (!conn->out) {
3902 * regardless of which address the connection was made with. 4006 /* Set the responder (our side) address type based on
3903 */ 4007 * the advertising address type.
3904 hci_copy_identity_address(hdev, &conn->src, &conn->src_type); 4008 */
4009 conn->resp_addr_type = hdev->adv_addr_type;
4010 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
4011 bacpy(&conn->resp_addr, &hdev->random_addr);
4012 else
4013 bacpy(&conn->resp_addr, &hdev->bdaddr);
4014
4015 conn->init_addr_type = ev->bdaddr_type;
4016 bacpy(&conn->init_addr, &ev->bdaddr);
4017 }
3905 4018
3906 /* Lookup the identity address from the stored connection 4019 /* Lookup the identity address from the stored connection
3907 * address and address type. 4020 * address and address type.
@@ -3981,25 +4094,97 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
3981 } 4094 }
3982} 4095}
3983 4096
4097static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4098 u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
4099{
4100 struct discovery_state *d = &hdev->discovery;
4101 bool match;
4102
4103 /* Passive scanning shouldn't trigger any device found events */
4104 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4105 if (type == LE_ADV_IND || type == LE_ADV_DIRECT_IND)
4106 check_pending_le_conn(hdev, bdaddr, bdaddr_type);
4107 return;
4108 }
4109
 4110 /* If there's nothing pending, either store the data from this
 4111 * event for later merging or, if the data should not be stored,
 4112 * send an immediate device found event.
4113 */
4114 if (!has_pending_adv_report(hdev)) {
4115 /* If the report will trigger a SCAN_REQ store it for
4116 * later merging.
4117 */
4118 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4119 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4120 rssi, data, len);
4121 return;
4122 }
4123
4124 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4125 rssi, 0, 1, data, len, NULL, 0);
4126 return;
4127 }
4128
4129 /* Check if the pending report is for the same device as the new one */
4130 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4131 bdaddr_type == d->last_adv_addr_type);
4132
4133 /* If the pending data doesn't match this report or this isn't a
4134 * scan response (e.g. we got a duplicate ADV_IND) then force
4135 * sending of the pending data.
4136 */
4137 if (type != LE_ADV_SCAN_RSP || !match) {
4138 /* Send out whatever is in the cache, but skip duplicates */
4139 if (!match)
4140 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4141 d->last_adv_addr_type, NULL,
4142 d->last_adv_rssi, 0, 1,
4143 d->last_adv_data,
4144 d->last_adv_data_len, NULL, 0);
4145
4146 /* If the new report will trigger a SCAN_REQ store it for
4147 * later merging.
4148 */
4149 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4150 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4151 rssi, data, len);
4152 return;
4153 }
4154
4155 /* The advertising reports cannot be merged, so clear
4156 * the pending report and send out a device found event.
4157 */
4158 clear_pending_adv_report(hdev);
4159 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4160 rssi, 0, 1, data, len, NULL, 0);
4161 return;
4162 }
4163
4164 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4165 * the new event is a SCAN_RSP. We can therefore proceed with
4166 * sending a merged device found event.
4167 */
4168 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4169 d->last_adv_addr_type, NULL, rssi, 0, 1, data, len,
4170 d->last_adv_data, d->last_adv_data_len);
4171 clear_pending_adv_report(hdev);
4172}
4173
3984static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) 4174static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3985{ 4175{
3986 u8 num_reports = skb->data[0]; 4176 u8 num_reports = skb->data[0];
3987 void *ptr = &skb->data[1]; 4177 void *ptr = &skb->data[1];
3988 s8 rssi;
3989 4178
3990 hci_dev_lock(hdev); 4179 hci_dev_lock(hdev);
3991 4180
3992 while (num_reports--) { 4181 while (num_reports--) {
3993 struct hci_ev_le_advertising_info *ev = ptr; 4182 struct hci_ev_le_advertising_info *ev = ptr;
3994 4183 s8 rssi;
3995 if (ev->evt_type == LE_ADV_IND ||
3996 ev->evt_type == LE_ADV_DIRECT_IND)
3997 check_pending_le_conn(hdev, &ev->bdaddr,
3998 ev->bdaddr_type);
3999 4184
4000 rssi = ev->data[ev->length]; 4185 rssi = ev->data[ev->length];
4001 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type, 4186 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4002 NULL, rssi, 0, 1, ev->data, ev->length); 4187 ev->bdaddr_type, rssi, ev->data, ev->length);
4003 4188
4004 ptr += sizeof(*ev) + ev->length + 1; 4189 ptr += sizeof(*ev) + ev->length + 1;
4005 } 4190 }
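process_adv_report() above is effectively a small merge state machine: reports that may be followed by a SCAN_RSP (ADV_IND, ADV_SCAN_IND) are cached, and the matching scan response is merged into a single device found event. A self-contained toy model of that flow for a single device (event names and output strings are illustrative assumptions, not kernel code):

#include <stdbool.h>
#include <stdio.h>

enum adv { ADV_IND, ADV_SCAN_IND, ADV_NONCONN_IND, SCAN_RSP };

/* A report is cached only when it may be followed by a SCAN_RSP. */
static bool may_get_scan_rsp(enum adv t)
{
	return t == ADV_IND || t == ADV_SCAN_IND;
}

int main(void)
{
	enum adv stream[] = { ADV_IND, SCAN_RSP, ADV_NONCONN_IND, ADV_IND, ADV_IND };
	bool pending = false;

	for (unsigned i = 0; i < sizeof(stream) / sizeof(stream[0]); i++) {
		enum adv t = stream[i];

		if (!pending) {
			if (may_get_scan_rsp(t)) {
				pending = true;              /* wait for SCAN_RSP */
				puts("cache report");
			} else {
				puts("report device immediately");
			}
		} else if (t == SCAN_RSP) {
			pending = false;                     /* merge ADV + SCAN_RSP */
			puts("report merged ADV + SCAN_RSP");
		} else if (may_get_scan_rsp(t)) {
			puts("replace cached report");       /* e.g. duplicate ADV_IND */
		} else {
			pending = false;                     /* cannot merge */
			puts("drop cache, report device immediately");
		}
	}
	return 0;
}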
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index b9a418e578e0..80d25c150a65 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -143,7 +143,7 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
143 143
144 if (!skb_copy) { 144 if (!skb_copy) {
145 /* Create a private copy with headroom */ 145 /* Create a private copy with headroom */
146 skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC); 146 skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
147 if (!skb_copy) 147 if (!skb_copy)
148 continue; 148 continue;
149 149
@@ -247,8 +247,8 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
247 struct hci_mon_hdr *hdr; 247 struct hci_mon_hdr *hdr;
248 248
249 /* Create a private copy with headroom */ 249 /* Create a private copy with headroom */
250 skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, 250 skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE,
251 GFP_ATOMIC); 251 GFP_ATOMIC, true);
252 if (!skb_copy) 252 if (!skb_copy)
253 continue; 253 continue;
254 254
@@ -524,16 +524,7 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
524 case HCISETRAW: 524 case HCISETRAW:
525 if (!capable(CAP_NET_ADMIN)) 525 if (!capable(CAP_NET_ADMIN))
526 return -EPERM; 526 return -EPERM;
527 527 return -EOPNOTSUPP;
528 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
529 return -EPERM;
530
531 if (arg)
532 set_bit(HCI_RAW, &hdev->flags);
533 else
534 clear_bit(HCI_RAW, &hdev->flags);
535
536 return 0;
537 528
538 case HCIGETCONNINFO: 529 case HCIGETCONNINFO:
539 return hci_get_conn_info(hdev, (void __user *) arg); 530 return hci_get_conn_info(hdev, (void __user *) arg);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index dc4d301d3a72..6eabbe05fe54 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -471,8 +471,14 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
471 chan->max_tx = L2CAP_DEFAULT_MAX_TX; 471 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
472 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW; 472 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
473 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; 473 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
474 chan->remote_max_tx = chan->max_tx;
475 chan->remote_tx_win = chan->tx_win;
474 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW; 476 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
475 chan->sec_level = BT_SECURITY_LOW; 477 chan->sec_level = BT_SECURITY_LOW;
478 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
479 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
480 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
481 chan->conf_state = 0;
476 482
477 set_bit(FLAG_FORCE_ACTIVE, &chan->flags); 483 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
478} 484}
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index ef5e5b04f34f..ade3fb4c23bc 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1180,13 +1180,16 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
1180 /* Check for backlog size */ 1180 /* Check for backlog size */
1181 if (sk_acceptq_is_full(parent)) { 1181 if (sk_acceptq_is_full(parent)) {
1182 BT_DBG("backlog full %d", parent->sk_ack_backlog); 1182 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1183 release_sock(parent);
1183 return NULL; 1184 return NULL;
1184 } 1185 }
1185 1186
1186 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, 1187 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
1187 GFP_ATOMIC); 1188 GFP_ATOMIC);
1188 if (!sk) 1189 if (!sk) {
1190 release_sock(parent);
1189 return NULL; 1191 return NULL;
1192 }
1190 1193
1191 bt_sock_reclassify_lock(sk, BTPROTO_L2CAP); 1194 bt_sock_reclassify_lock(sk, BTPROTO_L2CAP);
1192 1195
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index b3fbc73516c4..941ad7530eda 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -58,6 +58,7 @@ int bt_to_errno(__u16 code)
58 return EIO; 58 return EIO;
59 59
60 case 0x04: 60 case 0x04:
61 case 0x3c:
61 return EHOSTDOWN; 62 return EHOSTDOWN;
62 63
63 case 0x05: 64 case 0x05:
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index d2d4e0d5aed0..0fce54412ffd 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -29,12 +29,13 @@
29 29
30#include <net/bluetooth/bluetooth.h> 30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h> 31#include <net/bluetooth/hci_core.h>
32#include <net/bluetooth/l2cap.h>
32#include <net/bluetooth/mgmt.h> 33#include <net/bluetooth/mgmt.h>
33 34
34#include "smp.h" 35#include "smp.h"
35 36
36#define MGMT_VERSION 1 37#define MGMT_VERSION 1
37#define MGMT_REVISION 5 38#define MGMT_REVISION 6
38 39
39static const u16 mgmt_commands[] = { 40static const u16 mgmt_commands[] = {
40 MGMT_OP_READ_INDEX_LIST, 41 MGMT_OP_READ_INDEX_LIST,
@@ -83,6 +84,7 @@ static const u16 mgmt_commands[] = {
83 MGMT_OP_SET_DEBUG_KEYS, 84 MGMT_OP_SET_DEBUG_KEYS,
84 MGMT_OP_SET_PRIVACY, 85 MGMT_OP_SET_PRIVACY,
85 MGMT_OP_LOAD_IRKS, 86 MGMT_OP_LOAD_IRKS,
87 MGMT_OP_GET_CONN_INFO,
86}; 88};
87 89
88static const u16 mgmt_events[] = { 90static const u16 mgmt_events[] = {
@@ -2850,10 +2852,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2850 } 2852 }
2851 2853
2852 sec_level = BT_SECURITY_MEDIUM; 2854 sec_level = BT_SECURITY_MEDIUM;
2853 if (cp->io_cap == 0x03) 2855 auth_type = HCI_AT_DEDICATED_BONDING;
2854 auth_type = HCI_AT_DEDICATED_BONDING;
2855 else
2856 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2857 2856
2858 if (cp->addr.type == BDADDR_BREDR) { 2857 if (cp->addr.type == BDADDR_BREDR) {
2859 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level, 2858 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
@@ -3351,6 +3350,8 @@ static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3351 3350
3352static void start_discovery_complete(struct hci_dev *hdev, u8 status) 3351static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3353{ 3352{
3353 unsigned long timeout = 0;
3354
3354 BT_DBG("status %d", status); 3355 BT_DBG("status %d", status);
3355 3356
3356 if (status) { 3357 if (status) {
@@ -3366,13 +3367,11 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3366 3367
3367 switch (hdev->discovery.type) { 3368 switch (hdev->discovery.type) {
3368 case DISCOV_TYPE_LE: 3369 case DISCOV_TYPE_LE:
3369 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, 3370 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3370 DISCOV_LE_TIMEOUT);
3371 break; 3371 break;
3372 3372
3373 case DISCOV_TYPE_INTERLEAVED: 3373 case DISCOV_TYPE_INTERLEAVED:
3374 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, 3374 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3375 DISCOV_INTERLEAVED_TIMEOUT);
3376 break; 3375 break;
3377 3376
3378 case DISCOV_TYPE_BREDR: 3377 case DISCOV_TYPE_BREDR:
@@ -3381,6 +3380,11 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3381 default: 3380 default:
3382 BT_ERR("Invalid discovery type %d", hdev->discovery.type); 3381 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3383 } 3382 }
3383
3384 if (!timeout)
3385 return;
3386
3387 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
3384} 3388}
3385 3389
3386static int start_discovery(struct sock *sk, struct hci_dev *hdev, 3390static int start_discovery(struct sock *sk, struct hci_dev *hdev,
@@ -4530,7 +4534,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4530 4534
4531 for (i = 0; i < key_count; i++) { 4535 for (i = 0; i < key_count; i++) {
4532 struct mgmt_ltk_info *key = &cp->keys[i]; 4536 struct mgmt_ltk_info *key = &cp->keys[i];
4533 u8 type, addr_type; 4537 u8 type, addr_type, authenticated;
4534 4538
4535 if (key->addr.type == BDADDR_LE_PUBLIC) 4539 if (key->addr.type == BDADDR_LE_PUBLIC)
4536 addr_type = ADDR_LE_DEV_PUBLIC; 4540 addr_type = ADDR_LE_DEV_PUBLIC;
@@ -4542,8 +4546,19 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4542 else 4546 else
4543 type = HCI_SMP_LTK_SLAVE; 4547 type = HCI_SMP_LTK_SLAVE;
4544 4548
4549 switch (key->type) {
4550 case MGMT_LTK_UNAUTHENTICATED:
4551 authenticated = 0x00;
4552 break;
4553 case MGMT_LTK_AUTHENTICATED:
4554 authenticated = 0x01;
4555 break;
4556 default:
4557 continue;
4558 }
4559
4545 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type, 4560 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4546 key->type, key->val, key->enc_size, key->ediv, 4561 authenticated, key->val, key->enc_size, key->ediv,
4547 key->rand); 4562 key->rand);
4548 } 4563 }
4549 4564
@@ -4555,6 +4570,218 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4555 return err; 4570 return err;
4556} 4571}
4557 4572
4573struct cmd_conn_lookup {
4574 struct hci_conn *conn;
4575 bool valid_tx_power;
4576 u8 mgmt_status;
4577};
4578
4579static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
4580{
4581 struct cmd_conn_lookup *match = data;
4582 struct mgmt_cp_get_conn_info *cp;
4583 struct mgmt_rp_get_conn_info rp;
4584 struct hci_conn *conn = cmd->user_data;
4585
4586 if (conn != match->conn)
4587 return;
4588
4589 cp = (struct mgmt_cp_get_conn_info *) cmd->param;
4590
4591 memset(&rp, 0, sizeof(rp));
4592 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4593 rp.addr.type = cp->addr.type;
4594
4595 if (!match->mgmt_status) {
4596 rp.rssi = conn->rssi;
4597
4598 if (match->valid_tx_power) {
4599 rp.tx_power = conn->tx_power;
4600 rp.max_tx_power = conn->max_tx_power;
4601 } else {
4602 rp.tx_power = HCI_TX_POWER_INVALID;
4603 rp.max_tx_power = HCI_TX_POWER_INVALID;
4604 }
4605 }
4606
4607 cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
4608 match->mgmt_status, &rp, sizeof(rp));
4609
4610 hci_conn_drop(conn);
4611
4612 mgmt_pending_remove(cmd);
4613}
4614
4615static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
4616{
4617 struct hci_cp_read_rssi *cp;
4618 struct hci_conn *conn;
4619 struct cmd_conn_lookup match;
4620 u16 handle;
4621
4622 BT_DBG("status 0x%02x", status);
4623
4624 hci_dev_lock(hdev);
4625
 4626 /* TX power data is valid only if the request completed successfully;
 4627 * otherwise we assume it's not valid. For now we assume that either
 4628 * both or neither of the current and max values are valid, to keep
 4629 * the code simple.
4630 */
4631 match.valid_tx_power = !status;
4632
 4633 /* The commands sent in the request are either Read RSSI or Read
 4634 * Transmit Power Level, so we check which one was sent last to get the
 4635 * connection handle. Both commands have the handle as their first
 4636 * parameter, so it's safe to cast the data to the same command struct.
 4637 *
 4638 * The first command sent is always Read RSSI, and we fail only if it
 4639 * fails. Otherwise we simply override the error to indicate success,
 4640 * since we already recorded whether the TX power value is valid.
4641 */
4642 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
4643 if (!cp) {
4644 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
4645 status = 0;
4646 }
4647
4648 if (!cp) {
4649 BT_ERR("invalid sent_cmd in response");
4650 goto unlock;
4651 }
4652
4653 handle = __le16_to_cpu(cp->handle);
4654 conn = hci_conn_hash_lookup_handle(hdev, handle);
4655 if (!conn) {
4656 BT_ERR("unknown handle (%d) in response", handle);
4657 goto unlock;
4658 }
4659
4660 match.conn = conn;
4661 match.mgmt_status = mgmt_status(status);
4662
 4663 /* The cache refresh is complete; now reply to the mgmt request for
 4664 * the given connection only.
4665 */
4666 mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
4667 get_conn_info_complete, &match);
4668
4669unlock:
4670 hci_dev_unlock(hdev);
4671}
4672
4673static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
4674 u16 len)
4675{
4676 struct mgmt_cp_get_conn_info *cp = data;
4677 struct mgmt_rp_get_conn_info rp;
4678 struct hci_conn *conn;
4679 unsigned long conn_info_age;
4680 int err = 0;
4681
4682 BT_DBG("%s", hdev->name);
4683
4684 memset(&rp, 0, sizeof(rp));
4685 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4686 rp.addr.type = cp->addr.type;
4687
4688 if (!bdaddr_type_is_valid(cp->addr.type))
4689 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4690 MGMT_STATUS_INVALID_PARAMS,
4691 &rp, sizeof(rp));
4692
4693 hci_dev_lock(hdev);
4694
4695 if (!hdev_is_powered(hdev)) {
4696 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4697 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
4698 goto unlock;
4699 }
4700
4701 if (cp->addr.type == BDADDR_BREDR)
4702 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
4703 &cp->addr.bdaddr);
4704 else
4705 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
4706
4707 if (!conn || conn->state != BT_CONNECTED) {
4708 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4709 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
4710 goto unlock;
4711 }
4712
 4713 /* To keep clients from guessing when to poll again, calculate the
 4714 * conn info age as a random value between the min/max set in hdev.
4715 */
4716 conn_info_age = hdev->conn_info_min_age +
4717 prandom_u32_max(hdev->conn_info_max_age -
4718 hdev->conn_info_min_age);
4719
4720 /* Query controller to refresh cached values if they are too old or were
4721 * never read.
4722 */
4723 if (time_after(jiffies, conn->conn_info_timestamp +
4724 msecs_to_jiffies(conn_info_age)) ||
4725 !conn->conn_info_timestamp) {
4726 struct hci_request req;
4727 struct hci_cp_read_tx_power req_txp_cp;
4728 struct hci_cp_read_rssi req_rssi_cp;
4729 struct pending_cmd *cmd;
4730
4731 hci_req_init(&req, hdev);
4732 req_rssi_cp.handle = cpu_to_le16(conn->handle);
4733 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
4734 &req_rssi_cp);
4735
 4736 /* For LE links the TX power does not change, so we don't need to
 4737 * query for it once the value is known.
4738 */
4739 if (!bdaddr_type_is_le(cp->addr.type) ||
4740 conn->tx_power == HCI_TX_POWER_INVALID) {
4741 req_txp_cp.handle = cpu_to_le16(conn->handle);
4742 req_txp_cp.type = 0x00;
4743 hci_req_add(&req, HCI_OP_READ_TX_POWER,
4744 sizeof(req_txp_cp), &req_txp_cp);
4745 }
4746
4747 /* Max TX power needs to be read only once per connection */
4748 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
4749 req_txp_cp.handle = cpu_to_le16(conn->handle);
4750 req_txp_cp.type = 0x01;
4751 hci_req_add(&req, HCI_OP_READ_TX_POWER,
4752 sizeof(req_txp_cp), &req_txp_cp);
4753 }
4754
4755 err = hci_req_run(&req, conn_info_refresh_complete);
4756 if (err < 0)
4757 goto unlock;
4758
4759 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
4760 data, len);
4761 if (!cmd) {
4762 err = -ENOMEM;
4763 goto unlock;
4764 }
4765
4766 hci_conn_hold(conn);
4767 cmd->user_data = conn;
4768
4769 conn->conn_info_timestamp = jiffies;
4770 } else {
4771 /* Cache is valid, just reply with values cached in hci_conn */
4772 rp.rssi = conn->rssi;
4773 rp.tx_power = conn->tx_power;
4774 rp.max_tx_power = conn->max_tx_power;
4775
4776 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4777 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4778 }
4779
4780unlock:
4781 hci_dev_unlock(hdev);
4782 return err;
4783}
4784
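The freshness check in get_conn_info() above reduces to: refresh when the cached values were never read, or when they are older than a random age drawn between hdev's min/max. A minimal standalone sketch of that check (names and time handling are assumptions for illustration, not the kernel's jiffies arithmetic):

#include <stdbool.h>
#include <stdlib.h>
#include <time.h>

struct conn_info_cache {
	time_t timestamp;   /* 0 means the values were never read */
	int rssi;
};

/* Refresh when the cache is empty or older than a randomized age,
 * so a client cannot learn the exact polling interval. */
static bool needs_refresh(const struct conn_info_cache *c,
			  unsigned int min_age, unsigned int max_age)
{
	unsigned int age = min_age;

	if (max_age > min_age)
		age += (unsigned int)rand() % (max_age - min_age);

	if (!c->timestamp)
		return true;

	return time(NULL) > c->timestamp + (time_t)age;
}

int main(void)
{
	struct conn_info_cache c = { .timestamp = 0 };

	srand((unsigned int)time(NULL));
	return needs_refresh(&c, 1, 3) ? 0 : 1;   /* empty cache: refresh */
}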
4558static const struct mgmt_handler { 4785static const struct mgmt_handler {
4559 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data, 4786 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
4560 u16 data_len); 4787 u16 data_len);
@@ -4610,6 +4837,7 @@ static const struct mgmt_handler {
4610 { set_debug_keys, false, MGMT_SETTING_SIZE }, 4837 { set_debug_keys, false, MGMT_SETTING_SIZE },
4611 { set_privacy, false, MGMT_SET_PRIVACY_SIZE }, 4838 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
4612 { load_irks, true, MGMT_LOAD_IRKS_SIZE }, 4839 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
4840 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
4613}; 4841};
4614 4842
4615 4843
@@ -5005,6 +5233,14 @@ void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
5005 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL); 5233 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
5006} 5234}
5007 5235
5236static u8 mgmt_ltk_type(struct smp_ltk *ltk)
5237{
5238 if (ltk->authenticated)
5239 return MGMT_LTK_AUTHENTICATED;
5240
5241 return MGMT_LTK_UNAUTHENTICATED;
5242}
5243
5008void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent) 5244void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
5009{ 5245{
5010 struct mgmt_ev_new_long_term_key ev; 5246 struct mgmt_ev_new_long_term_key ev;
@@ -5030,7 +5266,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
5030 5266
5031 bacpy(&ev.key.addr.bdaddr, &key->bdaddr); 5267 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5032 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type); 5268 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
5033 ev.key.type = key->authenticated; 5269 ev.key.type = mgmt_ltk_type(key);
5034 ev.key.enc_size = key->enc_size; 5270 ev.key.enc_size = key->enc_size;
5035 ev.key.ediv = key->ediv; 5271 ev.key.ediv = key->ediv;
5036 ev.key.rand = key->rand; 5272 ev.key.rand = key->rand;
@@ -5668,8 +5904,9 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
5668} 5904}
5669 5905
5670void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 5906void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5671 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8 5907 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
5672 ssp, u8 *eir, u16 eir_len) 5908 u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp,
5909 u8 scan_rsp_len)
5673{ 5910{
5674 char buf[512]; 5911 char buf[512];
5675 struct mgmt_ev_device_found *ev = (void *) buf; 5912 struct mgmt_ev_device_found *ev = (void *) buf;
@@ -5679,8 +5916,10 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5679 if (!hci_discovery_active(hdev)) 5916 if (!hci_discovery_active(hdev))
5680 return; 5917 return;
5681 5918
5682 /* Leave 5 bytes for a potential CoD field */ 5919 /* Make sure that the buffer is big enough. The 5 extra bytes
5683 if (sizeof(*ev) + eir_len + 5 > sizeof(buf)) 5920 * are for the potential CoD field.
5921 */
5922 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
5684 return; 5923 return;
5685 5924
5686 memset(buf, 0, sizeof(buf)); 5925 memset(buf, 0, sizeof(buf));
@@ -5707,8 +5946,11 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5707 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, 5946 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
5708 dev_class, 3); 5947 dev_class, 3);
5709 5948
5710 ev->eir_len = cpu_to_le16(eir_len); 5949 if (scan_rsp_len > 0)
5711 ev_size = sizeof(*ev) + eir_len; 5950 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
5951
5952 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
5953 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
5712 5954
5713 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL); 5955 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
5714} 5956}
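The mgmt_device_found() change above appends the scan response after the EIR data in the same event buffer, with the size check now covering both plus the 5 bytes reserved for a potential CoD field. A small standalone sketch of that packing (buffer size and helper name are assumptions):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BUF_SIZE    512
#define COD_RESERVE 5   /* room for a potential Class of Device field */

/* Returns the total payload length, or -1 if it will not fit. */
static int pack_eir(uint8_t *buf, const uint8_t *eir, uint16_t eir_len,
		    const uint8_t *scan_rsp, uint8_t scan_rsp_len)
{
	if ((size_t)eir_len + scan_rsp_len + COD_RESERVE > BUF_SIZE)
		return -1;

	memcpy(buf, eir, eir_len);
	if (scan_rsp_len > 0)
		memcpy(buf + eir_len, scan_rsp, scan_rsp_len);

	return eir_len + scan_rsp_len;
}

int main(void)
{
	uint8_t buf[BUF_SIZE];
	uint8_t eir[] = { 0x02, 0x01, 0x06 };       /* Flags AD structure */
	uint8_t rsp[] = { 0x03, 0x09, 'h', 'i' };   /* Complete Local Name */

	printf("%d\n", pack_eir(buf, eir, sizeof(eir), rsp, sizeof(rsp)));
	return 0;
}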
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index cf620260affa..754b6fe4f742 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -307,7 +307,7 @@ struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio)
307 setup_timer(&d->timer, rfcomm_dlc_timeout, (unsigned long)d); 307 setup_timer(&d->timer, rfcomm_dlc_timeout, (unsigned long)d);
308 308
309 skb_queue_head_init(&d->tx_queue); 309 skb_queue_head_init(&d->tx_queue);
310 spin_lock_init(&d->lock); 310 mutex_init(&d->lock);
311 atomic_set(&d->refcnt, 1); 311 atomic_set(&d->refcnt, 1);
312 312
313 rfcomm_dlc_clear_state(d); 313 rfcomm_dlc_clear_state(d);
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 403ec09f480a..8e385a0ae60e 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -70,7 +70,7 @@ struct rfcomm_dev {
70}; 70};
71 71
72static LIST_HEAD(rfcomm_dev_list); 72static LIST_HEAD(rfcomm_dev_list);
73static DEFINE_SPINLOCK(rfcomm_dev_lock); 73static DEFINE_MUTEX(rfcomm_dev_lock);
74 74
75static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb); 75static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb);
76static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err); 76static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err);
@@ -96,9 +96,9 @@ static void rfcomm_dev_destruct(struct tty_port *port)
96 if (dev->tty_dev) 96 if (dev->tty_dev)
97 tty_unregister_device(rfcomm_tty_driver, dev->id); 97 tty_unregister_device(rfcomm_tty_driver, dev->id);
98 98
99 spin_lock(&rfcomm_dev_lock); 99 mutex_lock(&rfcomm_dev_lock);
100 list_del(&dev->list); 100 list_del(&dev->list);
101 spin_unlock(&rfcomm_dev_lock); 101 mutex_unlock(&rfcomm_dev_lock);
102 102
103 kfree(dev); 103 kfree(dev);
104 104
@@ -161,14 +161,14 @@ static struct rfcomm_dev *rfcomm_dev_get(int id)
161{ 161{
162 struct rfcomm_dev *dev; 162 struct rfcomm_dev *dev;
163 163
164 spin_lock(&rfcomm_dev_lock); 164 mutex_lock(&rfcomm_dev_lock);
165 165
166 dev = __rfcomm_dev_lookup(id); 166 dev = __rfcomm_dev_lookup(id);
167 167
168 if (dev && !tty_port_get(&dev->port)) 168 if (dev && !tty_port_get(&dev->port))
169 dev = NULL; 169 dev = NULL;
170 170
171 spin_unlock(&rfcomm_dev_lock); 171 mutex_unlock(&rfcomm_dev_lock);
172 172
173 return dev; 173 return dev;
174} 174}
@@ -224,7 +224,7 @@ static struct rfcomm_dev *__rfcomm_dev_add(struct rfcomm_dev_req *req,
224 if (!dev) 224 if (!dev)
225 return ERR_PTR(-ENOMEM); 225 return ERR_PTR(-ENOMEM);
226 226
227 spin_lock(&rfcomm_dev_lock); 227 mutex_lock(&rfcomm_dev_lock);
228 228
229 if (req->dev_id < 0) { 229 if (req->dev_id < 0) {
230 dev->id = 0; 230 dev->id = 0;
@@ -305,11 +305,11 @@ static struct rfcomm_dev *__rfcomm_dev_add(struct rfcomm_dev_req *req,
305 holds reference to this module. */ 305 holds reference to this module. */
306 __module_get(THIS_MODULE); 306 __module_get(THIS_MODULE);
307 307
308 spin_unlock(&rfcomm_dev_lock); 308 mutex_unlock(&rfcomm_dev_lock);
309 return dev; 309 return dev;
310 310
311out: 311out:
312 spin_unlock(&rfcomm_dev_lock); 312 mutex_unlock(&rfcomm_dev_lock);
313 kfree(dev); 313 kfree(dev);
314 return ERR_PTR(err); 314 return ERR_PTR(err);
315} 315}
@@ -524,7 +524,7 @@ static int rfcomm_get_dev_list(void __user *arg)
524 524
525 di = dl->dev_info; 525 di = dl->dev_info;
526 526
527 spin_lock(&rfcomm_dev_lock); 527 mutex_lock(&rfcomm_dev_lock);
528 528
529 list_for_each_entry(dev, &rfcomm_dev_list, list) { 529 list_for_each_entry(dev, &rfcomm_dev_list, list) {
530 if (!tty_port_get(&dev->port)) 530 if (!tty_port_get(&dev->port))
@@ -540,7 +540,7 @@ static int rfcomm_get_dev_list(void __user *arg)
540 break; 540 break;
541 } 541 }
542 542
543 spin_unlock(&rfcomm_dev_lock); 543 mutex_unlock(&rfcomm_dev_lock);
544 544
545 dl->dev_num = n; 545 dl->dev_num = n;
546 size = sizeof(*dl) + n * sizeof(*di); 546 size = sizeof(*dl) + n * sizeof(*di);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index dfb4e1161c10..3d1cc164557d 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -35,6 +35,33 @@
35 35
36#define AUTH_REQ_MASK 0x07 36#define AUTH_REQ_MASK 0x07
37 37
38#define SMP_FLAG_TK_VALID 1
39#define SMP_FLAG_CFM_PENDING 2
40#define SMP_FLAG_MITM_AUTH 3
41#define SMP_FLAG_COMPLETE 4
42#define SMP_FLAG_INITIATOR 5
43
44struct smp_chan {
45 struct l2cap_conn *conn;
46 u8 preq[7]; /* SMP Pairing Request */
47 u8 prsp[7]; /* SMP Pairing Response */
48 u8 prnd[16]; /* SMP Pairing Random (local) */
49 u8 rrnd[16]; /* SMP Pairing Random (remote) */
50 u8 pcnf[16]; /* SMP Pairing Confirm */
51 u8 tk[16]; /* SMP Temporary Key */
52 u8 enc_key_size;
53 u8 remote_key_dist;
54 bdaddr_t id_addr;
55 u8 id_addr_type;
56 u8 irk[16];
57 struct smp_csrk *csrk;
58 struct smp_csrk *slave_csrk;
59 struct smp_ltk *ltk;
60 struct smp_ltk *slave_ltk;
61 struct smp_irk *remote_irk;
62 unsigned long flags;
63};
64
38static inline void swap128(const u8 src[16], u8 dst[16]) 65static inline void swap128(const u8 src[16], u8 dst[16])
39{ 66{
40 int i; 67 int i;
@@ -369,7 +396,7 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
369 396
370 /* Initialize key for JUST WORKS */ 397 /* Initialize key for JUST WORKS */
371 memset(smp->tk, 0, sizeof(smp->tk)); 398 memset(smp->tk, 0, sizeof(smp->tk));
372 clear_bit(SMP_FLAG_TK_VALID, &smp->smp_flags); 399 clear_bit(SMP_FLAG_TK_VALID, &smp->flags);
373 400
374 BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io); 401 BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io);
375 402
@@ -388,19 +415,18 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
388 method = JUST_WORKS; 415 method = JUST_WORKS;
389 416
390 /* Don't confirm locally initiated pairing attempts */ 417 /* Don't confirm locally initiated pairing attempts */
391 if (method == JUST_CFM && test_bit(SMP_FLAG_INITIATOR, 418 if (method == JUST_CFM && test_bit(SMP_FLAG_INITIATOR, &smp->flags))
392 &smp->smp_flags))
393 method = JUST_WORKS; 419 method = JUST_WORKS;
394 420
395 /* If Just Works, Continue with Zero TK */ 421 /* If Just Works, Continue with Zero TK */
396 if (method == JUST_WORKS) { 422 if (method == JUST_WORKS) {
397 set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags); 423 set_bit(SMP_FLAG_TK_VALID, &smp->flags);
398 return 0; 424 return 0;
399 } 425 }
400 426
401 /* Not Just Works/Confirm results in MITM Authentication */ 427 /* Not Just Works/Confirm results in MITM Authentication */
402 if (method != JUST_CFM) 428 if (method != JUST_CFM)
403 set_bit(SMP_FLAG_MITM_AUTH, &smp->smp_flags); 429 set_bit(SMP_FLAG_MITM_AUTH, &smp->flags);
404 430
 405 /* If both devices have Keyboard-Display I/O, the master 431 /* If both devices have Keyboard-Display I/O, the master
406 * Confirms and the slave Enters the passkey. 432 * Confirms and the slave Enters the passkey.
@@ -419,7 +445,7 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
419 passkey %= 1000000; 445 passkey %= 1000000;
420 put_unaligned_le32(passkey, smp->tk); 446 put_unaligned_le32(passkey, smp->tk);
421 BT_DBG("PassKey: %d", passkey); 447 BT_DBG("PassKey: %d", passkey);
422 set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags); 448 set_bit(SMP_FLAG_TK_VALID, &smp->flags);
423 } 449 }
424 450
425 hci_dev_lock(hcon->hdev); 451 hci_dev_lock(hcon->hdev);
@@ -441,15 +467,13 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
441 return ret; 467 return ret;
442} 468}
443 469
444static void confirm_work(struct work_struct *work) 470static u8 smp_confirm(struct smp_chan *smp)
445{ 471{
446 struct smp_chan *smp = container_of(work, struct smp_chan, confirm);
447 struct l2cap_conn *conn = smp->conn; 472 struct l2cap_conn *conn = smp->conn;
448 struct hci_dev *hdev = conn->hcon->hdev; 473 struct hci_dev *hdev = conn->hcon->hdev;
449 struct crypto_blkcipher *tfm = hdev->tfm_aes; 474 struct crypto_blkcipher *tfm = hdev->tfm_aes;
450 struct smp_cmd_pairing_confirm cp; 475 struct smp_cmd_pairing_confirm cp;
451 int ret; 476 int ret;
452 u8 reason;
453 477
454 BT_DBG("conn %p", conn); 478 BT_DBG("conn %p", conn);
455 479
@@ -463,35 +487,27 @@ static void confirm_work(struct work_struct *work)
463 487
464 hci_dev_unlock(hdev); 488 hci_dev_unlock(hdev);
465 489
466 if (ret) { 490 if (ret)
467 reason = SMP_UNSPECIFIED; 491 return SMP_UNSPECIFIED;
468 goto error;
469 }
470 492
471 clear_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags); 493 clear_bit(SMP_FLAG_CFM_PENDING, &smp->flags);
472 494
473 smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp); 495 smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
474 496
475 return; 497 return 0;
476
477error:
478 smp_failure(conn, reason);
479} 498}
480 499
481static void random_work(struct work_struct *work) 500static u8 smp_random(struct smp_chan *smp)
482{ 501{
483 struct smp_chan *smp = container_of(work, struct smp_chan, random);
484 struct l2cap_conn *conn = smp->conn; 502 struct l2cap_conn *conn = smp->conn;
485 struct hci_conn *hcon = conn->hcon; 503 struct hci_conn *hcon = conn->hcon;
486 struct hci_dev *hdev = hcon->hdev; 504 struct hci_dev *hdev = hcon->hdev;
487 struct crypto_blkcipher *tfm = hdev->tfm_aes; 505 struct crypto_blkcipher *tfm = hdev->tfm_aes;
488 u8 reason, confirm[16]; 506 u8 confirm[16];
489 int ret; 507 int ret;
490 508
491 if (IS_ERR_OR_NULL(tfm)) { 509 if (IS_ERR_OR_NULL(tfm))
492 reason = SMP_UNSPECIFIED; 510 return SMP_UNSPECIFIED;
493 goto error;
494 }
495 511
496 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave"); 512 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
497 513
@@ -504,15 +520,12 @@ static void random_work(struct work_struct *work)
504 520
505 hci_dev_unlock(hdev); 521 hci_dev_unlock(hdev);
506 522
507 if (ret) { 523 if (ret)
508 reason = SMP_UNSPECIFIED; 524 return SMP_UNSPECIFIED;
509 goto error;
510 }
511 525
512 if (memcmp(smp->pcnf, confirm, sizeof(smp->pcnf)) != 0) { 526 if (memcmp(smp->pcnf, confirm, sizeof(smp->pcnf)) != 0) {
513 BT_ERR("Pairing failed (confirmation values mismatch)"); 527 BT_ERR("Pairing failed (confirmation values mismatch)");
514 reason = SMP_CONFIRM_FAILED; 528 return SMP_CONFIRM_FAILED;
515 goto error;
516 } 529 }
517 530
518 if (hcon->out) { 531 if (hcon->out) {
@@ -525,10 +538,8 @@ static void random_work(struct work_struct *work)
525 memset(stk + smp->enc_key_size, 0, 538 memset(stk + smp->enc_key_size, 0,
526 SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size); 539 SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
527 540
528 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags)) { 541 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
529 reason = SMP_UNSPECIFIED; 542 return SMP_UNSPECIFIED;
530 goto error;
531 }
532 543
533 hci_le_start_enc(hcon, ediv, rand, stk); 544 hci_le_start_enc(hcon, ediv, rand, stk);
534 hcon->enc_key_size = smp->enc_key_size; 545 hcon->enc_key_size = smp->enc_key_size;
@@ -550,10 +561,7 @@ static void random_work(struct work_struct *work)
550 ediv, rand); 561 ediv, rand);
551 } 562 }
552 563
553 return; 564 return 0;
554
555error:
556 smp_failure(conn, reason);
557} 565}
558 566
559static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) 567static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
@@ -564,9 +572,6 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
564 if (!smp) 572 if (!smp)
565 return NULL; 573 return NULL;
566 574
567 INIT_WORK(&smp->confirm, confirm_work);
568 INIT_WORK(&smp->random, random_work);
569
570 smp->conn = conn; 575 smp->conn = conn;
571 conn->smp_chan = smp; 576 conn->smp_chan = smp;
572 conn->hcon->smp_conn = conn; 577 conn->hcon->smp_conn = conn;
@@ -583,7 +588,7 @@ void smp_chan_destroy(struct l2cap_conn *conn)
583 588
584 BUG_ON(!smp); 589 BUG_ON(!smp);
585 590
586 complete = test_bit(SMP_FLAG_COMPLETE, &smp->smp_flags); 591 complete = test_bit(SMP_FLAG_COMPLETE, &smp->flags);
587 mgmt_smp_complete(conn->hcon, complete); 592 mgmt_smp_complete(conn->hcon, complete);
588 593
589 kfree(smp->csrk); 594 kfree(smp->csrk);
@@ -634,7 +639,7 @@ int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
634 put_unaligned_le32(value, smp->tk); 639 put_unaligned_le32(value, smp->tk);
635 /* Fall Through */ 640 /* Fall Through */
636 case MGMT_OP_USER_CONFIRM_REPLY: 641 case MGMT_OP_USER_CONFIRM_REPLY:
637 set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags); 642 set_bit(SMP_FLAG_TK_VALID, &smp->flags);
638 break; 643 break;
639 case MGMT_OP_USER_PASSKEY_NEG_REPLY: 644 case MGMT_OP_USER_PASSKEY_NEG_REPLY:
640 case MGMT_OP_USER_CONFIRM_NEG_REPLY: 645 case MGMT_OP_USER_CONFIRM_NEG_REPLY:
@@ -646,8 +651,11 @@ int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
646 } 651 }
647 652
648 /* If it is our turn to send Pairing Confirm, do so now */ 653 /* If it is our turn to send Pairing Confirm, do so now */
649 if (test_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags)) 654 if (test_bit(SMP_FLAG_CFM_PENDING, &smp->flags)) {
650 queue_work(hcon->hdev->workqueue, &smp->confirm); 655 u8 rsp = smp_confirm(smp);
656 if (rsp)
657 smp_failure(conn, rsp);
658 }
651 659
652 return 0; 660 return 0;
653} 661}
@@ -656,14 +664,13 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
656{ 664{
657 struct smp_cmd_pairing rsp, *req = (void *) skb->data; 665 struct smp_cmd_pairing rsp, *req = (void *) skb->data;
658 struct smp_chan *smp; 666 struct smp_chan *smp;
659 u8 key_size; 667 u8 key_size, auth;
660 u8 auth = SMP_AUTH_NONE;
661 int ret; 668 int ret;
662 669
663 BT_DBG("conn %p", conn); 670 BT_DBG("conn %p", conn);
664 671
665 if (skb->len < sizeof(*req)) 672 if (skb->len < sizeof(*req))
666 return SMP_UNSPECIFIED; 673 return SMP_INVALID_PARAMS;
667 674
668 if (conn->hcon->link_mode & HCI_LM_MASTER) 675 if (conn->hcon->link_mode & HCI_LM_MASTER)
669 return SMP_CMD_NOTSUPP; 676 return SMP_CMD_NOTSUPP;
@@ -681,8 +688,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
681 skb_pull(skb, sizeof(*req)); 688 skb_pull(skb, sizeof(*req));
682 689
683 /* We didn't start the pairing, so match remote */ 690 /* We didn't start the pairing, so match remote */
684 if (req->auth_req & SMP_AUTH_BONDING) 691 auth = req->auth_req;
685 auth = req->auth_req;
686 692
687 conn->hcon->pending_sec_level = authreq_to_seclevel(auth); 693 conn->hcon->pending_sec_level = authreq_to_seclevel(auth);
688 694
@@ -704,7 +710,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
704 if (ret) 710 if (ret)
705 return SMP_UNSPECIFIED; 711 return SMP_UNSPECIFIED;
706 712
707 clear_bit(SMP_FLAG_INITIATOR, &smp->smp_flags); 713 clear_bit(SMP_FLAG_INITIATOR, &smp->flags);
708 714
709 return 0; 715 return 0;
710} 716}
@@ -713,14 +719,13 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
713{ 719{
714 struct smp_cmd_pairing *req, *rsp = (void *) skb->data; 720 struct smp_cmd_pairing *req, *rsp = (void *) skb->data;
715 struct smp_chan *smp = conn->smp_chan; 721 struct smp_chan *smp = conn->smp_chan;
716 struct hci_dev *hdev = conn->hcon->hdev;
717 u8 key_size, auth = SMP_AUTH_NONE; 722 u8 key_size, auth = SMP_AUTH_NONE;
718 int ret; 723 int ret;
719 724
720 BT_DBG("conn %p", conn); 725 BT_DBG("conn %p", conn);
721 726
722 if (skb->len < sizeof(*rsp)) 727 if (skb->len < sizeof(*rsp))
723 return SMP_UNSPECIFIED; 728 return SMP_INVALID_PARAMS;
724 729
725 if (!(conn->hcon->link_mode & HCI_LM_MASTER)) 730 if (!(conn->hcon->link_mode & HCI_LM_MASTER))
726 return SMP_CMD_NOTSUPP; 731 return SMP_CMD_NOTSUPP;
@@ -753,11 +758,11 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
753 if (ret) 758 if (ret)
754 return SMP_UNSPECIFIED; 759 return SMP_UNSPECIFIED;
755 760
756 set_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags); 761 set_bit(SMP_FLAG_CFM_PENDING, &smp->flags);
757 762
758 /* Can't compose response until we have been confirmed */ 763 /* Can't compose response until we have been confirmed */
759 if (test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags)) 764 if (test_bit(SMP_FLAG_TK_VALID, &smp->flags))
760 queue_work(hdev->workqueue, &smp->confirm); 765 return smp_confirm(smp);
761 766
762 return 0; 767 return 0;
763} 768}
@@ -765,12 +770,11 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
765static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb) 770static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
766{ 771{
767 struct smp_chan *smp = conn->smp_chan; 772 struct smp_chan *smp = conn->smp_chan;
768 struct hci_dev *hdev = conn->hcon->hdev;
769 773
770 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave"); 774 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
771 775
772 if (skb->len < sizeof(smp->pcnf)) 776 if (skb->len < sizeof(smp->pcnf))
773 return SMP_UNSPECIFIED; 777 return SMP_INVALID_PARAMS;
774 778
775 memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf)); 779 memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf));
776 skb_pull(skb, sizeof(smp->pcnf)); 780 skb_pull(skb, sizeof(smp->pcnf));
@@ -778,10 +782,10 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
778 if (conn->hcon->out) 782 if (conn->hcon->out)
779 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), 783 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
780 smp->prnd); 784 smp->prnd);
781 else if (test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags)) 785 else if (test_bit(SMP_FLAG_TK_VALID, &smp->flags))
782 queue_work(hdev->workqueue, &smp->confirm); 786 return smp_confirm(smp);
783 else 787 else
784 set_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags); 788 set_bit(SMP_FLAG_CFM_PENDING, &smp->flags);
785 789
786 return 0; 790 return 0;
787} 791}
@@ -789,19 +793,16 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
789static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb) 793static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
790{ 794{
791 struct smp_chan *smp = conn->smp_chan; 795 struct smp_chan *smp = conn->smp_chan;
792 struct hci_dev *hdev = conn->hcon->hdev;
793 796
794 BT_DBG("conn %p", conn); 797 BT_DBG("conn %p", conn);
795 798
796 if (skb->len < sizeof(smp->rrnd)) 799 if (skb->len < sizeof(smp->rrnd))
797 return SMP_UNSPECIFIED; 800 return SMP_INVALID_PARAMS;
798 801
799 memcpy(smp->rrnd, skb->data, sizeof(smp->rrnd)); 802 memcpy(smp->rrnd, skb->data, sizeof(smp->rrnd));
800 skb_pull(skb, sizeof(smp->rrnd)); 803 skb_pull(skb, sizeof(smp->rrnd));
801 804
802 queue_work(hdev->workqueue, &smp->random); 805 return smp_random(smp);
803
804 return 0;
805} 806}
806 807
807static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level) 808static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
@@ -836,7 +837,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
836 BT_DBG("conn %p", conn); 837 BT_DBG("conn %p", conn);
837 838
838 if (skb->len < sizeof(*rp)) 839 if (skb->len < sizeof(*rp))
839 return SMP_UNSPECIFIED; 840 return SMP_INVALID_PARAMS;
840 841
841 if (!(conn->hcon->link_mode & HCI_LM_MASTER)) 842 if (!(conn->hcon->link_mode & HCI_LM_MASTER))
842 return SMP_CMD_NOTSUPP; 843 return SMP_CMD_NOTSUPP;
@@ -861,7 +862,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
861 862
862 smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp); 863 smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
863 864
864 clear_bit(SMP_FLAG_INITIATOR, &smp->smp_flags); 865 clear_bit(SMP_FLAG_INITIATOR, &smp->flags);
865 866
866 return 0; 867 return 0;
867} 868}
@@ -908,10 +909,11 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
908 909
909 authreq = seclevel_to_authreq(sec_level); 910 authreq = seclevel_to_authreq(sec_level);
910 911
911 /* hcon->auth_type is set by pair_device in mgmt.c. If the MITM 912 /* Require MITM if IO Capability allows or the security level
912 * flag is set we should also set it for the SMP request. 913 * requires it.
913 */ 914 */
914 if ((hcon->auth_type & 0x01)) 915 if (hcon->io_capability != HCI_IO_NO_INPUT_OUTPUT ||
916 sec_level > BT_SECURITY_MEDIUM)
915 authreq |= SMP_AUTH_MITM; 917 authreq |= SMP_AUTH_MITM;
916 918
917 if (hcon->link_mode & HCI_LM_MASTER) { 919 if (hcon->link_mode & HCI_LM_MASTER) {
@@ -928,7 +930,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
928 smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp); 930 smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
929 } 931 }
930 932
931 set_bit(SMP_FLAG_INITIATOR, &smp->smp_flags); 933 set_bit(SMP_FLAG_INITIATOR, &smp->flags);
932 934
933done: 935done:
934 hcon->pending_sec_level = sec_level; 936 hcon->pending_sec_level = sec_level;
@@ -944,7 +946,7 @@ static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
944 BT_DBG("conn %p", conn); 946 BT_DBG("conn %p", conn);
945 947
946 if (skb->len < sizeof(*rp)) 948 if (skb->len < sizeof(*rp))
947 return SMP_UNSPECIFIED; 949 return SMP_INVALID_PARAMS;
948 950
949 /* Ignore this PDU if it wasn't requested */ 951 /* Ignore this PDU if it wasn't requested */
950 if (!(smp->remote_key_dist & SMP_DIST_ENC_KEY)) 952 if (!(smp->remote_key_dist & SMP_DIST_ENC_KEY))
@@ -969,7 +971,7 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
969 BT_DBG("conn %p", conn); 971 BT_DBG("conn %p", conn);
970 972
971 if (skb->len < sizeof(*rp)) 973 if (skb->len < sizeof(*rp))
972 return SMP_UNSPECIFIED; 974 return SMP_INVALID_PARAMS;
973 975
974 /* Ignore this PDU if it wasn't requested */ 976 /* Ignore this PDU if it wasn't requested */
975 if (!(smp->remote_key_dist & SMP_DIST_ENC_KEY)) 977 if (!(smp->remote_key_dist & SMP_DIST_ENC_KEY))
@@ -1001,7 +1003,7 @@ static int smp_cmd_ident_info(struct l2cap_conn *conn, struct sk_buff *skb)
1001 BT_DBG(""); 1003 BT_DBG("");
1002 1004
1003 if (skb->len < sizeof(*info)) 1005 if (skb->len < sizeof(*info))
1004 return SMP_UNSPECIFIED; 1006 return SMP_INVALID_PARAMS;
1005 1007
1006 /* Ignore this PDU if it wasn't requested */ 1008 /* Ignore this PDU if it wasn't requested */
1007 if (!(smp->remote_key_dist & SMP_DIST_ID_KEY)) 1009 if (!(smp->remote_key_dist & SMP_DIST_ID_KEY))
@@ -1025,7 +1027,7 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
1025 BT_DBG(""); 1027 BT_DBG("");
1026 1028
1027 if (skb->len < sizeof(*info)) 1029 if (skb->len < sizeof(*info))
1028 return SMP_UNSPECIFIED; 1030 return SMP_INVALID_PARAMS;
1029 1031
1030 /* Ignore this PDU if it wasn't requested */ 1032 /* Ignore this PDU if it wasn't requested */
1031 if (!(smp->remote_key_dist & SMP_DIST_ID_KEY)) 1033 if (!(smp->remote_key_dist & SMP_DIST_ID_KEY))
@@ -1075,7 +1077,7 @@ static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb)
1075 BT_DBG("conn %p", conn); 1077 BT_DBG("conn %p", conn);
1076 1078
1077 if (skb->len < sizeof(*rp)) 1079 if (skb->len < sizeof(*rp))
1078 return SMP_UNSPECIFIED; 1080 return SMP_INVALID_PARAMS;
1079 1081
1080 /* Ignore this PDU if it wasn't requested */ 1082 /* Ignore this PDU if it wasn't requested */
1081 if (!(smp->remote_key_dist & SMP_DIST_SIGN)) 1083 if (!(smp->remote_key_dist & SMP_DIST_SIGN))
@@ -1358,7 +1360,7 @@ int smp_distribute_keys(struct l2cap_conn *conn)
1358 1360
1359 clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags); 1361 clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags);
1360 cancel_delayed_work_sync(&conn->security_timer); 1362 cancel_delayed_work_sync(&conn->security_timer);
1361 set_bit(SMP_FLAG_COMPLETE, &smp->smp_flags); 1363 set_bit(SMP_FLAG_COMPLETE, &smp->flags);
1362 smp_notify_keys(conn); 1364 smp_notify_keys(conn);
1363 1365
1364 smp_chan_destroy(conn); 1366 smp_chan_destroy(conn);
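A recurring pattern in the smp.c changes above: the confirm/random work handlers become synchronous helpers that return an SMP reason code (0 on success), and each caller maps a non-zero code to smp_failure(). A toy standalone illustration of the pattern (reason values as in smp.h; everything else is assumed):

#include <stdio.h>

#define SMP_CONFIRM_FAILED 0x04
#define SMP_UNSPECIFIED    0x08

/* Synchronous check that reports failure via a reason code. */
static unsigned char smp_random_check(int crypto_ok, int values_match)
{
	if (!crypto_ok)
		return SMP_UNSPECIFIED;
	if (!values_match)
		return SMP_CONFIRM_FAILED;
	return 0;
}

static void smp_failure(unsigned char reason)
{
	printf("pairing failed, reason 0x%02x\n", reason);
}

int main(void)
{
	unsigned char reason = smp_random_check(1, 0);

	/* The caller, not the helper, decides how to act on failure */
	if (reason)
		smp_failure(reason);
	return 0;
}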
diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
index 1277147a9150..5a8dc36460a1 100644
--- a/net/bluetooth/smp.h
+++ b/net/bluetooth/smp.h
@@ -111,39 +111,11 @@ struct smp_cmd_security_req {
 #define SMP_CMD_NOTSUPP			0x07
 #define SMP_UNSPECIFIED			0x08
 #define SMP_REPEATED_ATTEMPTS		0x09
+#define SMP_INVALID_PARAMS		0x0a
 
 #define SMP_MIN_ENC_KEY_SIZE		7
 #define SMP_MAX_ENC_KEY_SIZE		16
 
-#define SMP_FLAG_TK_VALID	1
-#define SMP_FLAG_CFM_PENDING	2
-#define SMP_FLAG_MITM_AUTH	3
-#define SMP_FLAG_COMPLETE	4
-#define SMP_FLAG_INITIATOR	5
-
-struct smp_chan {
-	struct l2cap_conn	*conn;
-	u8		preq[7]; /* SMP Pairing Request */
-	u8		prsp[7]; /* SMP Pairing Response */
-	u8		prnd[16]; /* SMP Pairing Random (local) */
-	u8		rrnd[16]; /* SMP Pairing Random (remote) */
-	u8		pcnf[16]; /* SMP Pairing Confirm */
-	u8		tk[16]; /* SMP Temporary Key */
-	u8		enc_key_size;
-	u8		remote_key_dist;
-	bdaddr_t	id_addr;
-	u8		id_addr_type;
-	u8		irk[16];
-	struct smp_csrk	*csrk;
-	struct smp_csrk	*slave_csrk;
-	struct smp_ltk	*ltk;
-	struct smp_ltk	*slave_ltk;
-	struct smp_irk	*remote_irk;
-	unsigned long	smp_flags;
-	struct work_struct confirm;
-	struct work_struct random;
-};
-
 /* SMP Commands */
 bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level);
 int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
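
The struct smp_chan definition and the SMP_FLAG_* constants disappear from the header here; judging by the smp->smp_flags to smp->flags rename in the smp.c hunk above, they presumably move into net/bluetooth/smp.c as private definitions, roughly (a sketch of the assumed result, not the verbatim tree):

	/* now local to net/bluetooth/smp.c */
	struct smp_chan {
		struct l2cap_conn *conn;
		/* ... the fields removed above, unchanged ... */
		unsigned long flags;	/* formerly smp_flags */
	};
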
diff --git a/net/bridge/Makefile b/net/bridge/Makefile
index e85498b2f166..8590b942bffa 100644
--- a/net/bridge/Makefile
+++ b/net/bridge/Makefile
@@ -5,7 +5,7 @@
 obj-$(CONFIG_BRIDGE) += bridge.o
 
 bridge-y	:= br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \
-			br_ioctl.o br_notify.o br_stp.o br_stp_bpdu.o \
+			br_ioctl.o br_stp.o br_stp_bpdu.o \
 			br_stp_if.o br_stp_timer.o br_netlink.o
 
 bridge-$(CONFIG_SYSFS) += br_sysfs_if.o br_sysfs_br.o
@@ -16,4 +16,4 @@ bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o
 
 bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o
 
-obj-$(CONFIG_BRIDGE_NF_EBTABLES) += netfilter/
+obj-$(CONFIG_NETFILTER) += netfilter/
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 19311aafcf5a..1a755a1e5410 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -22,6 +22,104 @@
 
 #include "br_private.h"
 
+/*
+ * Handle changes in state of network devices enslaved to a bridge.
+ *
+ * Note: don't care about up/down if bridge itself is down, because
+ * port state is checked when bridge is brought up.
+ */
+static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct net_bridge_port *p;
+	struct net_bridge *br;
+	bool changed_addr;
+	int err;
+
+	/* register of bridge completed, add sysfs entries */
+	if ((dev->priv_flags & IFF_EBRIDGE) && event == NETDEV_REGISTER) {
+		br_sysfs_addbr(dev);
+		return NOTIFY_DONE;
+	}
+
+	/* not a port of a bridge */
+	p = br_port_get_rtnl(dev);
+	if (!p)
+		return NOTIFY_DONE;
+
+	br = p->br;
+
+	switch (event) {
+	case NETDEV_CHANGEMTU:
+		dev_set_mtu(br->dev, br_min_mtu(br));
+		break;
+
+	case NETDEV_CHANGEADDR:
+		spin_lock_bh(&br->lock);
+		br_fdb_changeaddr(p, dev->dev_addr);
+		changed_addr = br_stp_recalculate_bridge_id(br);
+		spin_unlock_bh(&br->lock);
+
+		if (changed_addr)
+			call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
+
+		break;
+
+	case NETDEV_CHANGE:
+		br_port_carrier_check(p);
+		break;
+
+	case NETDEV_FEAT_CHANGE:
+		netdev_update_features(br->dev);
+		break;
+
+	case NETDEV_DOWN:
+		spin_lock_bh(&br->lock);
+		if (br->dev->flags & IFF_UP)
+			br_stp_disable_port(p);
+		spin_unlock_bh(&br->lock);
+		break;
+
+	case NETDEV_UP:
+		if (netif_running(br->dev) && netif_oper_up(dev)) {
+			spin_lock_bh(&br->lock);
+			br_stp_enable_port(p);
+			spin_unlock_bh(&br->lock);
+		}
+		break;
+
+	case NETDEV_UNREGISTER:
+		br_del_if(br, dev);
+		break;
+
+	case NETDEV_CHANGENAME:
+		err = br_sysfs_renameif(p);
+		if (err)
+			return notifier_from_errno(err);
+		break;
+
+	case NETDEV_PRE_TYPE_CHANGE:
+		/* Forbid underlaying device to change its type. */
+		return NOTIFY_BAD;
+
+	case NETDEV_RESEND_IGMP:
+		/* Propagate to master device */
+		call_netdevice_notifiers(event, br->dev);
+		break;
+	}
+
+	/* Events that may cause spanning tree to refresh */
+	if (event == NETDEV_CHANGEADDR || event == NETDEV_UP ||
+	    event == NETDEV_CHANGE || event == NETDEV_DOWN)
+		br_ifinfo_notify(RTM_NEWLINK, p);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block br_device_notifier = {
+	.notifier_call = br_device_event
+};
+
 static void __net_exit br_net_exit(struct net *net)
 {
 	struct net_device *dev;
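
With br_notify.c deleted (see below), br_device_event() and its notifier block become static to br.c. Presumably the module init in this same file continues to register it; a sketch under that assumption:

	static int __init br_init(void)
	{
		int err;

		/* ... earlier init steps elided ... */
		err = register_netdevice_notifier(&br_device_notifier);
		if (err)
			return err;	/* the real code unwinds prior steps */
		/* ... */
		return 0;
	}
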
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 3e2da2cb72db..568cccd39a3d 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -112,6 +112,12 @@ static void br_dev_set_multicast_list(struct net_device *dev)
 {
 }
 
+static void br_dev_change_rx_flags(struct net_device *dev, int change)
+{
+	if (change & IFF_PROMISC)
+		br_manage_promisc(netdev_priv(dev));
+}
+
 static int br_dev_stop(struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);
@@ -309,6 +315,7 @@ static const struct net_device_ops br_netdev_ops = {
 	.ndo_get_stats64	 = br_get_stats64,
 	.ndo_set_mac_address	 = br_set_mac_address,
 	.ndo_set_rx_mode	 = br_dev_set_multicast_list,
+	.ndo_change_rx_flags	 = br_dev_change_rx_flags,
 	.ndo_change_mtu		 = br_change_mtu,
 	.ndo_do_ioctl		 = br_dev_ioctl,
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -348,14 +355,15 @@ void br_dev_setup(struct net_device *dev)
 
 	dev->netdev_ops = &br_netdev_ops;
 	dev->destructor = br_dev_free;
-	SET_ETHTOOL_OPS(dev, &br_ethtool_ops);
+	dev->ethtool_ops = &br_ethtool_ops;
 	SET_NETDEV_DEVTYPE(dev, &br_type);
 	dev->tx_queue_len = 0;
 	dev->priv_flags = IFF_EBRIDGE;
 
 	dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
-			NETIF_F_HW_VLAN_CTAG_TX;
-	dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX;
+			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
+	dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
+			   NETIF_F_HW_VLAN_STAG_TX;
 	dev->vlan_features = COMMON_FEATURES;
 
 	br->dev = dev;
@@ -370,6 +378,7 @@ void br_dev_setup(struct net_device *dev)
 
 	br->stp_enabled = BR_NO_STP;
 	br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
+	br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
 
 	br->designated_root = br->bridge_id;
 	br->bridge_max_age = br->max_age = 20 * HZ;
@@ -380,4 +389,5 @@ void br_dev_setup(struct net_device *dev)
 	br_netfilter_rtable_init(br);
 	br_stp_timer_init(br);
 	br_multicast_init(br);
+	br_vlan_init(br);
 }
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 474d36f93342..b524c36c1273 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -85,8 +85,58 @@ static void fdb_rcu_free(struct rcu_head *head)
 	kmem_cache_free(br_fdb_cache, ent);
 }
 
+/* When a static FDB entry is added, the mac address from the entry is
+ * added to the bridge private HW address list and all required ports
+ * are then updated with the new information.
+ * Called under RTNL.
+ */
+static void fdb_add_hw(struct net_bridge *br, const unsigned char *addr)
+{
+	int err;
+	struct net_bridge_port *p, *tmp;
+
+	ASSERT_RTNL();
+
+	list_for_each_entry(p, &br->port_list, list) {
+		if (!br_promisc_port(p)) {
+			err = dev_uc_add(p->dev, addr);
+			if (err)
+				goto undo;
+		}
+	}
+
+	return;
+undo:
+	list_for_each_entry(tmp, &br->port_list, list) {
+		if (tmp == p)
+			break;
+		if (!br_promisc_port(tmp))
+			dev_uc_del(tmp->dev, addr);
+	}
+}
+
+/* When a static FDB entry is deleted, the HW address from that entry is
+ * also removed from the bridge private HW address list and updates all
+ * the ports with needed information.
+ * Called under RTNL.
+ */
+static void fdb_del_hw(struct net_bridge *br, const unsigned char *addr)
+{
+	struct net_bridge_port *p;
+
+	ASSERT_RTNL();
+
+	list_for_each_entry(p, &br->port_list, list) {
+		if (!br_promisc_port(p))
+			dev_uc_del(p->dev, addr);
+	}
+}
+
 static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
 {
+	if (f->is_static)
+		fdb_del_hw(br, f->addr.addr);
+
 	hlist_del_rcu(&f->hlist);
 	fdb_notify(br, f, RTM_DELNEIGH);
 	call_rcu(&f->rcu, fdb_rcu_free);
@@ -466,6 +516,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 		return -ENOMEM;
 
 	fdb->is_local = fdb->is_static = 1;
+	fdb_add_hw(br, addr);
 	fdb_notify(br, fdb, RTM_NEWNEIGH);
 	return 0;
 }
@@ -571,6 +622,8 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
 
 	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr))
 		goto nla_put_failure;
+	if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
+		goto nla_put_failure;
 	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
 	ci.ndm_confirmed = 0;
 	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
@@ -592,6 +645,7 @@ static inline size_t fdb_nlmsg_size(void)
 {
 	return NLMSG_ALIGN(sizeof(struct ndmsg))
 		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(u32)) /* NDA_MASTER */
 		+ nla_total_size(sizeof(u16)) /* NDA_VLAN */
 		+ nla_total_size(sizeof(struct nda_cacheinfo));
 }
@@ -684,13 +738,25 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
 	}
 
 	if (fdb_to_nud(fdb) != state) {
-		if (state & NUD_PERMANENT)
-			fdb->is_local = fdb->is_static = 1;
-		else if (state & NUD_NOARP) {
-			fdb->is_local = 0;
-			fdb->is_static = 1;
-		} else
-			fdb->is_local = fdb->is_static = 0;
+		if (state & NUD_PERMANENT) {
+			fdb->is_local = 1;
+			if (!fdb->is_static) {
+				fdb->is_static = 1;
+				fdb_add_hw(br, addr);
+			}
+		} else if (state & NUD_NOARP) {
+			fdb->is_local = 0;
+			if (!fdb->is_static) {
+				fdb->is_static = 1;
+				fdb_add_hw(br, addr);
+			}
+		} else {
+			fdb->is_local = 0;
+			if (fdb->is_static) {
+				fdb->is_static = 0;
+				fdb_del_hw(br, addr);
+			}
+		}
 
 		modified = true;
 	}
@@ -880,3 +946,59 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
 out:
 	return err;
 }
+
+int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
+{
+	struct net_bridge_fdb_entry *fdb, *tmp;
+	int i;
+	int err;
+
+	ASSERT_RTNL();
+
+	for (i = 0; i < BR_HASH_SIZE; i++) {
+		hlist_for_each_entry(fdb, &br->hash[i], hlist) {
+			/* We only care for static entries */
+			if (!fdb->is_static)
+				continue;
+
+			err = dev_uc_add(p->dev, fdb->addr.addr);
+			if (err)
+				goto rollback;
+		}
+	}
+	return 0;
+
+rollback:
+	for (i = 0; i < BR_HASH_SIZE; i++) {
+		hlist_for_each_entry(tmp, &br->hash[i], hlist) {
+			/* If we reached the fdb that failed, we can stop */
+			if (tmp == fdb)
+				break;
+
+			/* We only care for static entries */
+			if (!tmp->is_static)
+				continue;
+
+			dev_uc_del(p->dev, tmp->addr.addr);
+		}
+	}
+	return err;
+}
+
+void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
+{
+	struct net_bridge_fdb_entry *fdb;
+	int i;
+
+	ASSERT_RTNL();
+
+	for (i = 0; i < BR_HASH_SIZE; i++) {
+		hlist_for_each_entry_rcu(fdb, &br->hash[i], hlist) {
+			/* We only care for static entries */
+			if (!fdb->is_static)
+				continue;
+
+			dev_uc_del(p->dev, fdb->addr.addr);
+		}
+	}
+}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 5262b8617eb9..3eca3fdf8fe1 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -85,6 +85,111 @@ void br_port_carrier_check(struct net_bridge_port *p)
 	spin_unlock_bh(&br->lock);
 }
 
+static void br_port_set_promisc(struct net_bridge_port *p)
+{
+	int err = 0;
+
+	if (br_promisc_port(p))
+		return;
+
+	err = dev_set_promiscuity(p->dev, 1);
+	if (err)
+		return;
+
+	br_fdb_unsync_static(p->br, p);
+	p->flags |= BR_PROMISC;
+}
+
+static void br_port_clear_promisc(struct net_bridge_port *p)
+{
+	int err;
+
+	/* Check if the port is already non-promisc or if it doesn't
+	 * support UNICAST filtering.  Without unicast filtering support
+	 * we'll end up re-enabling promisc mode anyway, so just check for
+	 * it here.
+	 */
+	if (!br_promisc_port(p) || !(p->dev->priv_flags & IFF_UNICAST_FLT))
+		return;
+
+	/* Since we'll be clearing the promisc mode, program the port
+	 * first so that we don't have interruption in traffic.
+	 */
+	err = br_fdb_sync_static(p->br, p);
+	if (err)
+		return;
+
+	dev_set_promiscuity(p->dev, -1);
+	p->flags &= ~BR_PROMISC;
+}
+
+/* When a port is added or removed or when certain port flags
+ * change, this function is called to automatically manage
+ * promiscuity setting of all the bridge ports.  We are always called
+ * under RTNL so can skip using rcu primitives.
+ */
+void br_manage_promisc(struct net_bridge *br)
+{
+	struct net_bridge_port *p;
+	bool set_all = false;
+
+	/* If vlan filtering is disabled or bridge interface is placed
+	 * into promiscuous mode, place all ports in promiscuous mode.
+	 */
+	if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br))
+		set_all = true;
+
+	list_for_each_entry(p, &br->port_list, list) {
+		if (set_all) {
+			br_port_set_promisc(p);
+		} else {
+			/* If the number of auto-ports is <= 1, then all other
+			 * ports will have their output configuration
+			 * statically specified through fdbs.  Since ingress
+			 * on the auto-port becomes forwarding/egress to other
+			 * ports and egress configuration is statically known,
+			 * we can say that ingress configuration of the
+			 * auto-port is also statically known.
+			 * This lets us disable promiscuous mode and write
+			 * this config to hw.
+			 */
+			if (br->auto_cnt == 0 ||
+			    (br->auto_cnt == 1 && br_auto_port(p)))
+				br_port_clear_promisc(p);
+			else
+				br_port_set_promisc(p);
+		}
+	}
+}
+
+static void nbp_update_port_count(struct net_bridge *br)
+{
+	struct net_bridge_port *p;
+	u32 cnt = 0;
+
+	list_for_each_entry(p, &br->port_list, list) {
+		if (br_auto_port(p))
+			cnt++;
+	}
+	if (br->auto_cnt != cnt) {
+		br->auto_cnt = cnt;
+		br_manage_promisc(br);
+	}
+}
+
+static void nbp_delete_promisc(struct net_bridge_port *p)
+{
+	/* If port is currently promiscuous, unset promiscuity.
+	 * Otherwise, it is a static port so remove all addresses
+	 * from it.
+	 */
+	dev_set_allmulti(p->dev, -1);
+	if (br_promisc_port(p))
+		dev_set_promiscuity(p->dev, -1);
+	else
+		br_fdb_unsync_static(p->br, p);
+}
+
 static void release_nbp(struct kobject *kobj)
 {
 	struct net_bridge_port *p
@@ -133,7 +238,7 @@ static void del_nbp(struct net_bridge_port *p)
 
 	sysfs_remove_link(br->ifobj, p->dev->name);
 
-	dev_set_promiscuity(dev, -1);
+	nbp_delete_promisc(p);
 
 	spin_lock_bh(&br->lock);
 	br_stp_disable_port(p);
@@ -141,10 +246,11 @@ static void del_nbp(struct net_bridge_port *p)
 
 	br_ifinfo_notify(RTM_DELLINK, p);
 
+	list_del_rcu(&p->list);
+
 	nbp_vlan_flush(p);
 	br_fdb_delete_by_port(br, p, 1);
-
-	list_del_rcu(&p->list);
+	nbp_update_port_count(br);
 
 	dev->priv_flags &= ~IFF_BRIDGE_PORT;
 
@@ -353,7 +459,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 
 	call_netdevice_notifiers(NETDEV_JOIN, dev);
 
-	err = dev_set_promiscuity(dev, 1);
+	err = dev_set_allmulti(dev, 1);
 	if (err)
 		goto put_back;
 
@@ -384,6 +490,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 
 	list_add_rcu(&p->list, &br->port_list);
 
+	nbp_update_port_count(br);
+
 	netdev_update_features(br->dev);
 
 	if (br->dev->needed_headroom < dev->needed_headroom)
@@ -421,7 +529,7 @@ err2:
 	kobject_put(&p->kobj);
 	p = NULL;	/* kobject_put frees */
err1:
-	dev_set_promiscuity(dev, -1);
+	dev_set_allmulti(dev, -1);
put_back:
 	dev_put(dev);
 	kfree(p);
@@ -455,3 +563,11 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
 
 	return 0;
 }
+
+void br_port_flags_change(struct net_bridge_port *p, unsigned long mask)
+{
+	struct net_bridge *br = p->br;
+
+	if (mask & BR_AUTO_MASK)
+		nbp_update_port_count(br);
+}
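
The long comment inside br_manage_promisc() is dense, so a worked example may help (port names illustrative):

	/* Bridge with eth0 (BR_LEARNING|BR_FLOOD, i.e. an 'auto' port) and
	 * eth1 (both flags cleared via netlink, i.e. static):
	 *   auto_cnt == 1
	 *   eth1: egress fully described by static fdb entries -> clear promisc
	 *   eth0: the lone auto port, so its ingress is implied -> clear promisc
	 * Add a second auto port and auto_cnt == 2: every port goes back to
	 * promiscuous mode, since flooded traffic may now need to ingress on
	 * more than one port whose addresses are not statically known.
	 * A port without IFF_UNICAST_FLT stays promiscuous regardless, as
	 * br_port_clear_promisc() bails out early for it.
	 */
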
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 04d6348fd530..366c43649079 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -177,6 +177,8 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
 	p = br_port_get_rcu(skb->dev);
 
 	if (unlikely(is_link_local_ether_addr(dest))) {
+		u16 fwd_mask = p->br->group_fwd_mask_required;
+
 		/*
 		 * See IEEE 802.1D Table 7-10 Reserved addresses
 		 *
@@ -194,7 +196,8 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
 		case 0x00:	/* Bridge Group Address */
 			/* If STP is turned off,
 			   then must forward to keep loop detection */
-			if (p->br->stp_enabled == BR_NO_STP)
+			if (p->br->stp_enabled == BR_NO_STP ||
+			    fwd_mask & (1u << dest[5]))
 				goto forward;
 			break;
 
@@ -203,7 +206,8 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
 
 		default:
 			/* Allow selective forwarding for most other protocols */
-			if (p->br->group_fwd_mask & (1u << dest[5]))
+			fwd_mask |= p->br->group_fwd_mask;
+			if (fwd_mask & (1u << dest[5]))
 				goto forward;
 		}
 
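
Both branches rely on the same bit trick: for a reserved group address 01-80-C2-00-00-XX, the last byte selects bit XX of a 16-bit mask, and is_link_local_ether_addr() has already constrained XX to 0x00-0x0f. A self-contained restatement (illustrative, not kernel code):

	#include <stdbool.h>
	#include <stdint.h>

	static bool group_fwd_allowed(uint16_t fwd_mask, uint8_t last_byte)
	{
		return fwd_mask & (1u << last_byte);	/* last_byte <= 0x0f */
	}

	/* BR_GROUPFWD_8021AD == 0xB801 sets bits 0, 11, 12, 13 and 15, i.e.
	 * the Nearest Customer Bridge addresses ending -00, -0B, -0C, -0D
	 * and -0F named in the br_private.h hunk below. */
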
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index b7b1914dfa25..5df05269d17a 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -418,13 +418,13 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
 
 	ip.proto = entry->addr.proto;
 	if (ip.proto == htons(ETH_P_IP)) {
-		if (timer_pending(&br->ip4_querier.timer))
+		if (timer_pending(&br->ip4_other_query.timer))
 			return -EBUSY;
 
 		ip.u.ip4 = entry->addr.u.ip4;
 #if IS_ENABLED(CONFIG_IPV6)
 	} else {
-		if (timer_pending(&br->ip6_querier.timer))
+		if (timer_pending(&br->ip6_other_query.timer))
 			return -EBUSY;
 
 		ip.u.ip6 = entry->addr.u.ip6;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 7b757b5dc773..cd3cf394c477 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -11,6 +11,7 @@
  */
 
 #include <linux/err.h>
+#include <linux/export.h>
 #include <linux/if_ether.h>
 #include <linux/igmp.h>
 #include <linux/jhash.h>
@@ -35,7 +36,7 @@
 #include "br_private.h"
 
 static void br_multicast_start_querier(struct net_bridge *br,
-				       struct bridge_mcast_query *query);
+				       struct bridge_mcast_own_query *query);
 unsigned int br_mdb_rehash_seq;
 
 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
@@ -761,7 +762,7 @@ static void br_multicast_local_router_expired(unsigned long data)
 }
 
 static void br_multicast_querier_expired(struct net_bridge *br,
-					 struct bridge_mcast_query *query)
+					 struct bridge_mcast_own_query *query)
 {
 	spin_lock(&br->multicast_lock);
 	if (!netif_running(br->dev) || br->multicast_disabled)
@@ -777,7 +778,7 @@ static void br_ip4_multicast_querier_expired(unsigned long data)
 {
 	struct net_bridge *br = (void *)data;
 
-	br_multicast_querier_expired(br, &br->ip4_query);
+	br_multicast_querier_expired(br, &br->ip4_own_query);
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -785,10 +786,22 @@ static void br_ip6_multicast_querier_expired(unsigned long data)
 {
 	struct net_bridge *br = (void *)data;
 
-	br_multicast_querier_expired(br, &br->ip6_query);
+	br_multicast_querier_expired(br, &br->ip6_own_query);
 }
 #endif
 
+static void br_multicast_select_own_querier(struct net_bridge *br,
+					    struct br_ip *ip,
+					    struct sk_buff *skb)
+{
+	if (ip->proto == htons(ETH_P_IP))
+		br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
+#if IS_ENABLED(CONFIG_IPV6)
+	else
+		br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
+#endif
+}
+
 static void __br_multicast_send_query(struct net_bridge *br,
 				      struct net_bridge_port *port,
 				      struct br_ip *ip)
@@ -804,17 +817,19 @@ static void __br_multicast_send_query(struct net_bridge *br,
 		skb->dev = port->dev;
 		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
 			dev_queue_xmit);
-	} else
+	} else {
+		br_multicast_select_own_querier(br, ip, skb);
 		netif_rx(skb);
+	}
 }
 
 static void br_multicast_send_query(struct net_bridge *br,
 				    struct net_bridge_port *port,
-				    struct bridge_mcast_query *query)
+				    struct bridge_mcast_own_query *own_query)
 {
 	unsigned long time;
 	struct br_ip br_group;
-	struct bridge_mcast_querier *querier = NULL;
+	struct bridge_mcast_other_query *other_query = NULL;
 
 	if (!netif_running(br->dev) || br->multicast_disabled ||
 	    !br->multicast_querier)
@@ -822,31 +837,32 @@ static void br_multicast_send_query(struct net_bridge *br,
 
 	memset(&br_group.u, 0, sizeof(br_group.u));
 
-	if (port ? (query == &port->ip4_query) :
-		   (query == &br->ip4_query)) {
-		querier = &br->ip4_querier;
+	if (port ? (own_query == &port->ip4_own_query) :
+		   (own_query == &br->ip4_own_query)) {
+		other_query = &br->ip4_other_query;
 		br_group.proto = htons(ETH_P_IP);
 #if IS_ENABLED(CONFIG_IPV6)
 	} else {
-		querier = &br->ip6_querier;
+		other_query = &br->ip6_other_query;
 		br_group.proto = htons(ETH_P_IPV6);
 #endif
 	}
 
-	if (!querier || timer_pending(&querier->timer))
+	if (!other_query || timer_pending(&other_query->timer))
 		return;
 
 	__br_multicast_send_query(br, port, &br_group);
 
 	time = jiffies;
-	time += query->startup_sent < br->multicast_startup_query_count ?
+	time += own_query->startup_sent < br->multicast_startup_query_count ?
 		br->multicast_startup_query_interval :
 		br->multicast_query_interval;
-	mod_timer(&query->timer, time);
+	mod_timer(&own_query->timer, time);
 }
 
-static void br_multicast_port_query_expired(struct net_bridge_port *port,
-					    struct bridge_mcast_query *query)
+static void
+br_multicast_port_query_expired(struct net_bridge_port *port,
+				struct bridge_mcast_own_query *query)
 {
 	struct net_bridge *br = port->br;
 
@@ -868,7 +884,7 @@ static void br_ip4_multicast_port_query_expired(unsigned long data)
 {
 	struct net_bridge_port *port = (void *)data;
 
-	br_multicast_port_query_expired(port, &port->ip4_query);
+	br_multicast_port_query_expired(port, &port->ip4_own_query);
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -876,7 +892,7 @@ static void br_ip6_multicast_port_query_expired(unsigned long data)
 {
 	struct net_bridge_port *port = (void *)data;
 
-	br_multicast_port_query_expired(port, &port->ip6_query);
+	br_multicast_port_query_expired(port, &port->ip6_own_query);
 }
 #endif
 
@@ -886,11 +902,11 @@ void br_multicast_add_port(struct net_bridge_port *port)
 
 	setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
 		    (unsigned long)port);
-	setup_timer(&port->ip4_query.timer, br_ip4_multicast_port_query_expired,
-		    (unsigned long)port);
+	setup_timer(&port->ip4_own_query.timer,
+		    br_ip4_multicast_port_query_expired, (unsigned long)port);
 #if IS_ENABLED(CONFIG_IPV6)
-	setup_timer(&port->ip6_query.timer, br_ip6_multicast_port_query_expired,
-		    (unsigned long)port);
+	setup_timer(&port->ip6_own_query.timer,
+		    br_ip6_multicast_port_query_expired, (unsigned long)port);
 #endif
 }
 
@@ -899,7 +915,7 @@ void br_multicast_del_port(struct net_bridge_port *port)
 	del_timer_sync(&port->multicast_router_timer);
 }
 
-static void br_multicast_enable(struct bridge_mcast_query *query)
+static void br_multicast_enable(struct bridge_mcast_own_query *query)
 {
 	query->startup_sent = 0;
 
@@ -916,9 +932,9 @@ void br_multicast_enable_port(struct net_bridge_port *port)
 	if (br->multicast_disabled || !netif_running(br->dev))
 		goto out;
 
-	br_multicast_enable(&port->ip4_query);
+	br_multicast_enable(&port->ip4_own_query);
 #if IS_ENABLED(CONFIG_IPV6)
-	br_multicast_enable(&port->ip6_query);
+	br_multicast_enable(&port->ip6_own_query);
 #endif
 
 out:
@@ -938,9 +954,9 @@ void br_multicast_disable_port(struct net_bridge_port *port)
 	if (!hlist_unhashed(&port->rlist))
 		hlist_del_init_rcu(&port->rlist);
 	del_timer(&port->multicast_router_timer);
-	del_timer(&port->ip4_query.timer);
+	del_timer(&port->ip4_own_query.timer);
 #if IS_ENABLED(CONFIG_IPV6)
-	del_timer(&port->ip6_query.timer);
+	del_timer(&port->ip6_own_query.timer);
 #endif
 	spin_unlock(&br->multicast_lock);
 }
@@ -1064,15 +1080,80 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 }
 #endif
 
+static bool br_ip4_multicast_select_querier(struct net_bridge *br,
+					    struct net_bridge_port *port,
+					    __be32 saddr)
+{
+	if (!timer_pending(&br->ip4_own_query.timer) &&
+	    !timer_pending(&br->ip4_other_query.timer))
+		goto update;
+
+	if (!br->ip4_querier.addr.u.ip4)
+		goto update;
+
+	if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
+		goto update;
+
+	return false;
+
+update:
+	br->ip4_querier.addr.u.ip4 = saddr;
+
+	/* update protected by general multicast_lock by caller */
+	rcu_assign_pointer(br->ip4_querier.port, port);
+
+	return true;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static bool br_ip6_multicast_select_querier(struct net_bridge *br,
+					    struct net_bridge_port *port,
+					    struct in6_addr *saddr)
+{
+	if (!timer_pending(&br->ip6_own_query.timer) &&
+	    !timer_pending(&br->ip6_other_query.timer))
+		goto update;
+
+	if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
+		goto update;
+
+	return false;
+
+update:
+	br->ip6_querier.addr.u.ip6 = *saddr;
+
+	/* update protected by general multicast_lock by caller */
+	rcu_assign_pointer(br->ip6_querier.port, port);
+
+	return true;
+}
+#endif
+
+static bool br_multicast_select_querier(struct net_bridge *br,
+					struct net_bridge_port *port,
+					struct br_ip *saddr)
+{
+	switch (saddr->proto) {
+	case htons(ETH_P_IP):
+		return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
+#if IS_ENABLED(CONFIG_IPV6)
+	case htons(ETH_P_IPV6):
+		return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
+#endif
+	}
+
+	return false;
+}
+
 static void
-br_multicast_update_querier_timer(struct net_bridge *br,
-				  struct bridge_mcast_querier *querier,
-				  unsigned long max_delay)
+br_multicast_update_query_timer(struct net_bridge *br,
+				struct bridge_mcast_other_query *query,
+				unsigned long max_delay)
 {
-	if (!timer_pending(&querier->timer))
-		querier->delay_time = jiffies + max_delay;
+	if (!timer_pending(&query->timer))
+		query->delay_time = jiffies + max_delay;
 
-	mod_timer(&querier->timer, jiffies + br->multicast_querier_interval);
+	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
 }
 
 /*
@@ -1125,16 +1206,14 @@ timer:
 
 static void br_multicast_query_received(struct net_bridge *br,
 					struct net_bridge_port *port,
-					struct bridge_mcast_querier *querier,
-					int saddr,
-					bool is_general_query,
+					struct bridge_mcast_other_query *query,
+					struct br_ip *saddr,
 					unsigned long max_delay)
 {
-	if (saddr && is_general_query)
-		br_multicast_update_querier_timer(br, querier, max_delay);
-	else if (timer_pending(&querier->timer))
+	if (!br_multicast_select_querier(br, port, saddr))
 		return;
 
+	br_multicast_update_query_timer(br, query, max_delay);
 	br_multicast_mark_router(br, port);
 }
 
@@ -1149,6 +1228,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
 	struct igmpv3_query *ih3;
 	struct net_bridge_port_group *p;
 	struct net_bridge_port_group __rcu **pp;
+	struct br_ip saddr;
 	unsigned long max_delay;
 	unsigned long now = jiffies;
 	__be32 group;
@@ -1190,11 +1270,14 @@
 		goto out;
 	}
 
-	br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr,
-				    !group, max_delay);
+	if (!group) {
+		saddr.proto = htons(ETH_P_IP);
+		saddr.u.ip4 = iph->saddr;
 
-	if (!group)
+		br_multicast_query_received(br, port, &br->ip4_other_query,
+					    &saddr, max_delay);
 		goto out;
+	}
 
 	mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
 	if (!mp)
@@ -1234,6 +1317,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
 	struct mld2_query *mld2q;
 	struct net_bridge_port_group *p;
 	struct net_bridge_port_group __rcu **pp;
+	struct br_ip saddr;
 	unsigned long max_delay;
 	unsigned long now = jiffies;
 	const struct in6_addr *group = NULL;
@@ -1282,12 +1366,14 @@
 		goto out;
 	}
 
-	br_multicast_query_received(br, port, &br->ip6_querier,
-				    !ipv6_addr_any(&ip6h->saddr),
-				    is_general_query, max_delay);
+	if (is_general_query) {
+		saddr.proto = htons(ETH_P_IPV6);
+		saddr.u.ip6 = ip6h->saddr;
 
-	if (!group)
+		br_multicast_query_received(br, port, &br->ip6_other_query,
+					    &saddr, max_delay);
 		goto out;
+	}
 
 	mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
 	if (!mp)
@@ -1315,11 +1401,12 @@ out:
 }
 #endif
 
-static void br_multicast_leave_group(struct net_bridge *br,
-				     struct net_bridge_port *port,
-				     struct br_ip *group,
-				     struct bridge_mcast_querier *querier,
-				     struct bridge_mcast_query *query)
+static void
+br_multicast_leave_group(struct net_bridge *br,
+			 struct net_bridge_port *port,
+			 struct br_ip *group,
+			 struct bridge_mcast_other_query *other_query,
+			 struct bridge_mcast_own_query *own_query)
 {
 	struct net_bridge_mdb_htable *mdb;
 	struct net_bridge_mdb_entry *mp;
@@ -1330,7 +1417,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
 	spin_lock(&br->multicast_lock);
 	if (!netif_running(br->dev) ||
 	    (port && port->state == BR_STATE_DISABLED) ||
-	    timer_pending(&querier->timer))
+	    timer_pending(&other_query->timer))
 		goto out;
 
 	mdb = mlock_dereference(br->mdb, br);
@@ -1344,7 +1431,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
 	time = jiffies + br->multicast_last_member_count *
 		br->multicast_last_member_interval;
 
-	mod_timer(&query->timer, time);
+	mod_timer(&own_query->timer, time);
 
 	for (p = mlock_dereference(mp->ports, br);
 	     p != NULL;
@@ -1425,17 +1512,19 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
 					 __u16 vid)
 {
 	struct br_ip br_group;
-	struct bridge_mcast_query *query = port ? &port->ip4_query :
-						  &br->ip4_query;
+	struct bridge_mcast_own_query *own_query;
 
 	if (ipv4_is_local_multicast(group))
 		return;
 
+	own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
+
 	br_group.u.ip4 = group;
 	br_group.proto = htons(ETH_P_IP);
 	br_group.vid = vid;
 
-	br_multicast_leave_group(br, port, &br_group, &br->ip4_querier, query);
+	br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
+				 own_query);
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -1445,18 +1534,19 @@
 					 __u16 vid)
 {
 	struct br_ip br_group;
-	struct bridge_mcast_query *query = port ? &port->ip6_query :
-						  &br->ip6_query;
-
+	struct bridge_mcast_own_query *own_query;
 
 	if (ipv6_addr_is_ll_all_nodes(group))
 		return;
 
+	own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
+
 	br_group.u.ip6 = *group;
 	br_group.proto = htons(ETH_P_IPV6);
 	br_group.vid = vid;
 
-	br_multicast_leave_group(br, port, &br_group, &br->ip6_querier, query);
+	br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
+				 own_query);
 }
 #endif
 
@@ -1723,12 +1813,14 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
 }
 
 static void br_multicast_query_expired(struct net_bridge *br,
-				       struct bridge_mcast_query *query)
+				       struct bridge_mcast_own_query *query,
+				       struct bridge_mcast_querier *querier)
 {
 	spin_lock(&br->multicast_lock);
 	if (query->startup_sent < br->multicast_startup_query_count)
 		query->startup_sent++;
 
+	rcu_assign_pointer(querier, NULL);
 	br_multicast_send_query(br, NULL, query);
 	spin_unlock(&br->multicast_lock);
 }
@@ -1737,7 +1829,7 @@ static void br_ip4_multicast_query_expired(unsigned long data)
 {
 	struct net_bridge *br = (void *)data;
 
-	br_multicast_query_expired(br, &br->ip4_query);
+	br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -1745,7 +1837,7 @@ static void br_ip6_multicast_query_expired(unsigned long data)
 {
 	struct net_bridge *br = (void *)data;
 
-	br_multicast_query_expired(br, &br->ip6_query);
+	br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
 }
 #endif
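
br_multicast_query_received() now delegates the decision to br_multicast_select_querier() (added above), which implements the lowest-source-address-wins rule familiar from IGMPv2/MLD querier election. A worked example as a comment (addresses illustrative):

	/* Stored IPv4 querier: 192.0.2.9.  A query arrives from 192.0.2.1:
	 *   ntohl(192.0.2.1) <= ntohl(192.0.2.9) -> update; .1 is the querier.
	 * A later query from 192.0.2.200 fails the comparison -> return false,
	 * and the other-query timer is left untouched.
	 * When neither the own-query nor the other-query timer is pending,
	 * any source wins immediately (first querier after a quiet period).
	 */
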
 
@@ -1767,28 +1859,30 @@ void br_multicast_init(struct net_bridge *br)
 	br->multicast_querier_interval = 255 * HZ;
 	br->multicast_membership_interval = 260 * HZ;
 
-	br->ip4_querier.delay_time = 0;
+	br->ip4_other_query.delay_time = 0;
+	br->ip4_querier.port = NULL;
 #if IS_ENABLED(CONFIG_IPV6)
-	br->ip6_querier.delay_time = 0;
+	br->ip6_other_query.delay_time = 0;
+	br->ip6_querier.port = NULL;
 #endif
 
 	spin_lock_init(&br->multicast_lock);
 	setup_timer(&br->multicast_router_timer,
 		    br_multicast_local_router_expired, 0);
-	setup_timer(&br->ip4_querier.timer, br_ip4_multicast_querier_expired,
-		    (unsigned long)br);
-	setup_timer(&br->ip4_query.timer, br_ip4_multicast_query_expired,
+	setup_timer(&br->ip4_other_query.timer,
+		    br_ip4_multicast_querier_expired, (unsigned long)br);
+	setup_timer(&br->ip4_own_query.timer, br_ip4_multicast_query_expired,
 		    (unsigned long)br);
 #if IS_ENABLED(CONFIG_IPV6)
-	setup_timer(&br->ip6_querier.timer, br_ip6_multicast_querier_expired,
-		    (unsigned long)br);
-	setup_timer(&br->ip6_query.timer, br_ip6_multicast_query_expired,
+	setup_timer(&br->ip6_other_query.timer,
+		    br_ip6_multicast_querier_expired, (unsigned long)br);
+	setup_timer(&br->ip6_own_query.timer, br_ip6_multicast_query_expired,
 		    (unsigned long)br);
 #endif
 }
 
 static void __br_multicast_open(struct net_bridge *br,
-				struct bridge_mcast_query *query)
+				struct bridge_mcast_own_query *query)
 {
 	query->startup_sent = 0;
 
@@ -1800,9 +1894,9 @@ static void __br_multicast_open(struct net_bridge *br,
 
 void br_multicast_open(struct net_bridge *br)
 {
-	__br_multicast_open(br, &br->ip4_query);
+	__br_multicast_open(br, &br->ip4_own_query);
 #if IS_ENABLED(CONFIG_IPV6)
-	__br_multicast_open(br, &br->ip6_query);
+	__br_multicast_open(br, &br->ip6_own_query);
 #endif
 }
 
@@ -1815,11 +1909,11 @@ void br_multicast_stop(struct net_bridge *br)
 	int i;
 
 	del_timer_sync(&br->multicast_router_timer);
-	del_timer_sync(&br->ip4_querier.timer);
-	del_timer_sync(&br->ip4_query.timer);
+	del_timer_sync(&br->ip4_other_query.timer);
+	del_timer_sync(&br->ip4_own_query.timer);
 #if IS_ENABLED(CONFIG_IPV6)
-	del_timer_sync(&br->ip6_querier.timer);
-	del_timer_sync(&br->ip6_query.timer);
+	del_timer_sync(&br->ip6_other_query.timer);
+	del_timer_sync(&br->ip6_own_query.timer);
 #endif
 
 	spin_lock_bh(&br->multicast_lock);
@@ -1923,7 +2017,7 @@ unlock:
 }
 
 static void br_multicast_start_querier(struct net_bridge *br,
-				       struct bridge_mcast_query *query)
+				       struct bridge_mcast_own_query *query)
 {
 	struct net_bridge_port *port;
 
@@ -1934,11 +2028,11 @@ static void br_multicast_start_querier(struct net_bridge *br,
 		    port->state == BR_STATE_BLOCKING)
 			continue;
 
-		if (query == &br->ip4_query)
-			br_multicast_enable(&port->ip4_query);
+		if (query == &br->ip4_own_query)
+			br_multicast_enable(&port->ip4_own_query);
 #if IS_ENABLED(CONFIG_IPV6)
 		else
-			br_multicast_enable(&port->ip6_query);
+			br_multicast_enable(&port->ip6_own_query);
 #endif
 	}
 }
@@ -1974,9 +2068,9 @@ rollback:
 			goto rollback;
 	}
 
-	br_multicast_start_querier(br, &br->ip4_query);
+	br_multicast_start_querier(br, &br->ip4_own_query);
 #if IS_ENABLED(CONFIG_IPV6)
-	br_multicast_start_querier(br, &br->ip6_query);
+	br_multicast_start_querier(br, &br->ip6_own_query);
 #endif
 
 unlock:
@@ -2001,16 +2095,16 @@ int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
 
 	max_delay = br->multicast_query_response_interval;
 
-	if (!timer_pending(&br->ip4_querier.timer))
-		br->ip4_querier.delay_time = jiffies + max_delay;
+	if (!timer_pending(&br->ip4_other_query.timer))
+		br->ip4_other_query.delay_time = jiffies + max_delay;
 
-	br_multicast_start_querier(br, &br->ip4_query);
+	br_multicast_start_querier(br, &br->ip4_own_query);
 
 #if IS_ENABLED(CONFIG_IPV6)
-	if (!timer_pending(&br->ip6_querier.timer))
-		br->ip6_querier.delay_time = jiffies + max_delay;
+	if (!timer_pending(&br->ip6_other_query.timer))
+		br->ip6_other_query.delay_time = jiffies + max_delay;
 
-	br_multicast_start_querier(br, &br->ip6_query);
+	br_multicast_start_querier(br, &br->ip6_own_query);
 #endif
 
 unlock:
@@ -2061,3 +2155,107 @@ unlock:
 
 	return err;
 }
+
+/**
+ * br_multicast_list_adjacent - Returns snooped multicast addresses
+ * @dev:	The bridge port adjacent to which to retrieve addresses
+ * @br_ip_list:	The list to store found, snooped multicast IP addresses in
+ *
+ * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
+ * snooping feature on all bridge ports of dev's bridge device, excluding
+ * the addresses from dev itself.
+ *
+ * Returns the number of items added to br_ip_list.
+ *
+ * Notes:
+ * - br_ip_list needs to be initialized by caller
+ * - br_ip_list might contain duplicates in the end
+ *   (needs to be taken care of by caller)
+ * - br_ip_list needs to be freed by caller
+ */
+int br_multicast_list_adjacent(struct net_device *dev,
+			       struct list_head *br_ip_list)
+{
+	struct net_bridge *br;
+	struct net_bridge_port *port;
+	struct net_bridge_port_group *group;
+	struct br_ip_list *entry;
+	int count = 0;
+
+	rcu_read_lock();
+	if (!br_ip_list || !br_port_exists(dev))
+		goto unlock;
+
+	port = br_port_get_rcu(dev);
+	if (!port || !port->br)
+		goto unlock;
+
+	br = port->br;
+
+	list_for_each_entry_rcu(port, &br->port_list, list) {
+		if (!port->dev || port->dev == dev)
+			continue;
+
+		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
+			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+			if (!entry)
+				goto unlock;
+
+			entry->addr = group->addr;
+			list_add(&entry->list, br_ip_list);
+			count++;
+		}
+	}
+
+unlock:
+	rcu_read_unlock();
+	return count;
+}
+EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
+
+/**
+ * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
+ * @dev: The bridge port adjacent to which to check for a querier
+ * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
+ *
+ * Checks whether the given interface has a bridge on top and if so returns
+ * true if a selected querier is behind one of the other ports of this
+ * bridge. Otherwise returns false.
+ */
+bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
+{
+	struct net_bridge *br;
+	struct net_bridge_port *port;
+	bool ret = false;
+
+	rcu_read_lock();
+	if (!br_port_exists(dev))
+		goto unlock;
+
+	port = br_port_get_rcu(dev);
+	if (!port || !port->br)
+		goto unlock;
+
+	br = port->br;
+
+	switch (proto) {
+	case ETH_P_IP:
+		if (!timer_pending(&br->ip4_other_query.timer) ||
+		    rcu_dereference(br->ip4_querier.port) == port)
+			goto unlock;
+		break;
+	case ETH_P_IPV6:
+		if (!timer_pending(&br->ip6_other_query.timer) ||
+		    rcu_dereference(br->ip6_querier.port) == port)
+			goto unlock;
+		break;
+	default:
+		goto unlock;
+	}
+
+	ret = true;
+unlock:
+	rcu_read_unlock();
+	return ret;
+}
+EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
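
A hedged usage sketch for the two new exports, written from the perspective of an out-of-bridge consumer such as a mesh routing module; the function and variable names here are illustrative, not part of the patch:

	static void example_walk_adjacent(struct net_device *port_dev)
	{
		LIST_HEAD(ip_list);	/* caller-initialized, as the kernel-doc requires */
		struct br_ip_list *entry, *tmp;
		int count;

		count = br_multicast_list_adjacent(port_dev, &ip_list);
		pr_debug("%d snooped entries (duplicates possible)\n", count);

		if (br_multicast_has_querier_adjacent(port_dev, ETH_P_IP))
			pr_debug("IGMP querier behind another bridge port\n");

		/* caller-freed, as the kernel-doc requires */
		list_for_each_entry_safe(entry, tmp, &ip_list, list) {
			list_del(&entry->list);
			kfree(entry);
		}
	}
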
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 2acf7fa1fec6..a615264cf01a 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -535,7 +535,7 @@ static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct
 	if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb))
 		return br;
 
-	vlan = __vlan_find_dev_deep(br, skb->vlan_proto,
+	vlan = __vlan_find_dev_deep_rcu(br, skb->vlan_proto,
 				    vlan_tx_tag_get(skb) & VLAN_VID_MASK);
 
 	return vlan ? vlan : br;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index e8844d975b32..26edb518b839 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -328,6 +328,7 @@ static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
 static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
 {
 	int err;
+	unsigned long old_flags = p->flags;
 
 	br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
 	br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
@@ -353,6 +354,8 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
 		if (err)
 			return err;
 	}
+
+	br_port_flags_change(p, old_flags ^ p->flags);
 	return 0;
 }
 
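br_port_flags_change() receives the XOR of the old and new flag words, i.e. precisely the bits that toggled. A worked example using the values from the br_private.h hunk below:

	/* old_flags = BR_LEARNING | BR_FLOOD = 0x60, new flags = BR_FLOOD = 0x40
	 * mask = old_flags ^ p->flags       = 0x20  (BR_LEARNING toggled)
	 * mask & BR_AUTO_MASK (0x60) is non-zero, so nbp_update_port_count()
	 * recounts the auto ports and may switch ports out of promisc mode.
	 */
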
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
deleted file mode 100644
index 2998dd1769a0..000000000000
--- a/net/bridge/br_notify.c
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- *	Device event handling
- *	Linux ethernet bridge
- *
- *	Authors:
- *	Lennert Buytenhek		<buytenh@gnu.org>
- *
- *	This program is free software; you can redistribute it and/or
- *	modify it under the terms of the GNU General Public License
- *	as published by the Free Software Foundation; either version
- *	2 of the License, or (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/rtnetlink.h>
-#include <net/net_namespace.h>
-
-#include "br_private.h"
-
-static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr);
-
-struct notifier_block br_device_notifier = {
-	.notifier_call = br_device_event
-};
-
-/*
- * Handle changes in state of network devices enslaved to a bridge.
- *
- * Note: don't care about up/down if bridge itself is down, because
- * port state is checked when bridge is brought up.
- */
-static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
-{
-	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-	struct net_bridge_port *p;
-	struct net_bridge *br;
-	bool changed_addr;
-	int err;
-
-	/* register of bridge completed, add sysfs entries */
-	if ((dev->priv_flags & IFF_EBRIDGE) && event == NETDEV_REGISTER) {
-		br_sysfs_addbr(dev);
-		return NOTIFY_DONE;
-	}
-
-	/* not a port of a bridge */
-	p = br_port_get_rtnl(dev);
-	if (!p)
-		return NOTIFY_DONE;
-
-	br = p->br;
-
-	switch (event) {
-	case NETDEV_CHANGEMTU:
-		dev_set_mtu(br->dev, br_min_mtu(br));
-		break;
-
-	case NETDEV_CHANGEADDR:
-		spin_lock_bh(&br->lock);
-		br_fdb_changeaddr(p, dev->dev_addr);
-		changed_addr = br_stp_recalculate_bridge_id(br);
-		spin_unlock_bh(&br->lock);
-
-		if (changed_addr)
-			call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
-
-		break;
-
-	case NETDEV_CHANGE:
-		br_port_carrier_check(p);
-		break;
-
-	case NETDEV_FEAT_CHANGE:
-		netdev_update_features(br->dev);
-		break;
-
-	case NETDEV_DOWN:
-		spin_lock_bh(&br->lock);
-		if (br->dev->flags & IFF_UP)
-			br_stp_disable_port(p);
-		spin_unlock_bh(&br->lock);
-		break;
-
-	case NETDEV_UP:
-		if (netif_running(br->dev) && netif_oper_up(dev)) {
-			spin_lock_bh(&br->lock);
-			br_stp_enable_port(p);
-			spin_unlock_bh(&br->lock);
-		}
-		break;
-
-	case NETDEV_UNREGISTER:
-		br_del_if(br, dev);
-		break;
-
-	case NETDEV_CHANGENAME:
-		err = br_sysfs_renameif(p);
-		if (err)
-			return notifier_from_errno(err);
-		break;
-
-	case NETDEV_PRE_TYPE_CHANGE:
-		/* Forbid underlaying device to change its type. */
-		return NOTIFY_BAD;
-
-	case NETDEV_RESEND_IGMP:
-		/* Propagate to master device */
-		call_netdevice_notifiers(event, br->dev);
-		break;
-	}
-
-	/* Events that may cause spanning tree to refresh */
-	if (event == NETDEV_CHANGEADDR || event == NETDEV_UP ||
-	    event == NETDEV_CHANGE || event == NETDEV_DOWN)
-		br_ifinfo_notify(RTM_NEWLINK, p);
-
-	return NOTIFY_DONE;
-}
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 59d3a85c5873..23caf5b0309e 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -35,6 +35,8 @@
 #define BR_GROUPFWD_DEFAULT	0
 /* Don't allow forwarding control protocols like STP and LLDP */
 #define BR_GROUPFWD_RESTRICTED 0x4007u
+/* The Nearest Customer Bridge Group Address, 01-80-C2-00-00-[00,0B,0C,0D,0F] */
+#define BR_GROUPFWD_8021AD	0xB801u
 
 /* Path to usermode spanning tree program */
 #define BR_STP_PROG	"/sbin/bridge-stp"
@@ -54,30 +56,24 @@ struct mac_addr
 	unsigned char	addr[ETH_ALEN];
 };
 
-struct br_ip
-{
-	union {
-		__be32	ip4;
-#if IS_ENABLED(CONFIG_IPV6)
-		struct in6_addr ip6;
-#endif
-	} u;
-	__be16		proto;
-	__u16		vid;
-};
-
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 /* our own querier */
-struct bridge_mcast_query {
+struct bridge_mcast_own_query {
 	struct timer_list	timer;
 	u32			startup_sent;
 };
 
 /* other querier */
-struct bridge_mcast_querier {
+struct bridge_mcast_other_query {
 	struct timer_list	timer;
 	unsigned long		delay_time;
 };
+
+/* selected querier */
+struct bridge_mcast_querier {
+	struct br_ip addr;
+	struct net_bridge_port __rcu	*port;
+};
 #endif
 
 struct net_port_vlans {
@@ -174,11 +170,13 @@ struct net_bridge_port
 #define BR_ADMIN_COST		0x00000010
 #define BR_LEARNING		0x00000020
 #define BR_FLOOD		0x00000040
+#define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING)
+#define BR_PROMISC		0x00000080
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
-	struct bridge_mcast_query	ip4_query;
+	struct bridge_mcast_own_query	ip4_own_query;
 #if IS_ENABLED(CONFIG_IPV6)
-	struct bridge_mcast_query	ip6_query;
+	struct bridge_mcast_own_query	ip6_own_query;
 #endif /* IS_ENABLED(CONFIG_IPV6) */
 	unsigned char			multicast_router;
 	struct timer_list		multicast_router_timer;
@@ -198,6 +196,9 @@ struct net_bridge_port
 #endif
 };
 
+#define br_auto_port(p) ((p)->flags & BR_AUTO_MASK)
+#define br_promisc_port(p) ((p)->flags & BR_PROMISC)
+
 #define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT)
 
 static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev)
@@ -227,6 +228,7 @@ struct net_bridge
 	bool				nf_call_arptables;
 #endif
 	u16				group_fwd_mask;
+	u16				group_fwd_mask_required;
 
 	/* STP */
 	bridge_id			designated_root;
@@ -241,6 +243,7 @@ struct net_bridge
 	unsigned long			bridge_forward_delay;
 
 	u8				group_addr[ETH_ALEN];
+	bool				group_addr_set;
 	u16				root_port;
 
 	enum {
@@ -277,11 +280,13 @@ struct net_bridge
 	struct hlist_head		router_list;
 
 	struct timer_list		multicast_router_timer;
+	struct bridge_mcast_other_query	ip4_other_query;
+	struct bridge_mcast_own_query	ip4_own_query;
 	struct bridge_mcast_querier	ip4_querier;
-	struct bridge_mcast_query	ip4_query;
 #if IS_ENABLED(CONFIG_IPV6)
+	struct bridge_mcast_other_query	ip6_other_query;
+	struct bridge_mcast_own_query	ip6_own_query;
 	struct bridge_mcast_querier	ip6_querier;
-	struct bridge_mcast_query	ip6_query;
#endif /* IS_ENABLED(CONFIG_IPV6) */
 #endif
 
@@ -290,8 +295,10 @@ struct net_bridge
 	struct timer_list		topology_change_timer;
 	struct timer_list		gc_timer;
 	struct kobject			*ifobj;
+	u32				auto_cnt;
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
 	u8				vlan_enabled;
+	__be16				vlan_proto;
 	struct net_port_vlans __rcu	*vlan_info;
 #endif
 };
@@ -327,8 +334,6 @@ struct br_input_skb_cb {
327#define br_debug(br, format, args...) \ 334#define br_debug(br, format, args...) \
328 pr_debug("%s: " format, (br)->dev->name, ##args) 335 pr_debug("%s: " format, (br)->dev->name, ##args)
329 336
330extern struct notifier_block br_device_notifier;
331
332/* called under bridge lock */ 337/* called under bridge lock */
333static inline int br_is_root_bridge(const struct net_bridge *br) 338static inline int br_is_root_bridge(const struct net_bridge *br)
334{ 339{
@@ -395,6 +400,8 @@ int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], struct net_device *dev,
395 const unsigned char *addr, u16 nlh_flags); 400 const unsigned char *addr, u16 nlh_flags);
396int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, 401int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
397 struct net_device *dev, int idx); 402 struct net_device *dev, int idx);
403int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p);
404void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p);
398 405
399/* br_forward.c */ 406/* br_forward.c */
400void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb); 407void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb);
@@ -415,6 +422,8 @@ int br_del_if(struct net_bridge *br, struct net_device *dev);
415int br_min_mtu(const struct net_bridge *br); 422int br_min_mtu(const struct net_bridge *br);
416netdev_features_t br_features_recompute(struct net_bridge *br, 423netdev_features_t br_features_recompute(struct net_bridge *br,
417 netdev_features_t features); 424 netdev_features_t features);
425void br_port_flags_change(struct net_bridge_port *port, unsigned long mask);
426void br_manage_promisc(struct net_bridge *br);
418 427
419/* br_input.c */ 428/* br_input.c */
420int br_handle_frame_finish(struct sk_buff *skb); 429int br_handle_frame_finish(struct sk_buff *skb);
@@ -485,7 +494,7 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
485 494
486static inline bool 495static inline bool
487__br_multicast_querier_exists(struct net_bridge *br, 496__br_multicast_querier_exists(struct net_bridge *br,
488 struct bridge_mcast_querier *querier) 497 struct bridge_mcast_other_query *querier)
489{ 498{
490 return time_is_before_jiffies(querier->delay_time) && 499 return time_is_before_jiffies(querier->delay_time) &&
491 (br->multicast_querier || timer_pending(&querier->timer)); 500 (br->multicast_querier || timer_pending(&querier->timer));
@@ -496,10 +505,10 @@ static inline bool br_multicast_querier_exists(struct net_bridge *br,
496{ 505{
497 switch (eth->h_proto) { 506 switch (eth->h_proto) {
498 case (htons(ETH_P_IP)): 507 case (htons(ETH_P_IP)):
499 return __br_multicast_querier_exists(br, &br->ip4_querier); 508 return __br_multicast_querier_exists(br, &br->ip4_other_query);
500#if IS_ENABLED(CONFIG_IPV6) 509#if IS_ENABLED(CONFIG_IPV6)
501 case (htons(ETH_P_IPV6)): 510 case (htons(ETH_P_IPV6)):
502 return __br_multicast_querier_exists(br, &br->ip6_querier); 511 return __br_multicast_querier_exists(br, &br->ip6_other_query);
503#endif 512#endif
504 default: 513 default:
505 return false; 514 return false;
@@ -589,7 +598,10 @@ int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
589int br_vlan_delete(struct net_bridge *br, u16 vid); 598int br_vlan_delete(struct net_bridge *br, u16 vid);
590void br_vlan_flush(struct net_bridge *br); 599void br_vlan_flush(struct net_bridge *br);
591bool br_vlan_find(struct net_bridge *br, u16 vid); 600bool br_vlan_find(struct net_bridge *br, u16 vid);
601void br_recalculate_fwd_mask(struct net_bridge *br);
592int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val); 602int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
603int br_vlan_set_proto(struct net_bridge *br, unsigned long val);
604void br_vlan_init(struct net_bridge *br);
593int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags); 605int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
594int nbp_vlan_delete(struct net_bridge_port *port, u16 vid); 606int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
595void nbp_vlan_flush(struct net_bridge_port *port); 607void nbp_vlan_flush(struct net_bridge_port *port);
@@ -633,6 +645,10 @@ static inline u16 br_get_pvid(const struct net_port_vlans *v)
633 return v->pvid ?: VLAN_N_VID; 645 return v->pvid ?: VLAN_N_VID;
634} 646}
635 647
648static inline int br_vlan_enabled(struct net_bridge *br)
649{
650 return br->vlan_enabled;
651}
636#else 652#else
637static inline bool br_allowed_ingress(struct net_bridge *br, 653static inline bool br_allowed_ingress(struct net_bridge *br,
638 struct net_port_vlans *v, 654 struct net_port_vlans *v,
@@ -681,6 +697,14 @@ static inline bool br_vlan_find(struct net_bridge *br, u16 vid)
681 return false; 697 return false;
682} 698}
683 699
700static inline void br_recalculate_fwd_mask(struct net_bridge *br)
701{
702}
703
704static inline void br_vlan_init(struct net_bridge *br)
705{
706}
707
684static inline int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags) 708static inline int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
685{ 709{
686 return -EOPNOTSUPP; 710 return -EOPNOTSUPP;
@@ -719,6 +743,11 @@ static inline u16 br_get_pvid(const struct net_port_vlans *v)
719{ 743{
720 return VLAN_N_VID; /* Returns invalid vid */ 744 return VLAN_N_VID; /* Returns invalid vid */
721} 745}
746
747static inline int br_vlan_enabled(struct net_bridge *br)
748{
749 return 0;
750}
722#endif 751#endif
723 752
724/* br_netfilter.c */ 753/* br_netfilter.c */
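
The br_vlan_enabled()/br_vlan_init() pairs added above follow the usual kernel idiom of supplying empty static-inline stubs when the config option is off, so callers never need an #ifdef of their own. A user-space-compilable sketch of the same idiom (FEATURE_X and the function names are hypothetical stand-ins):

/* Sketch of the CONFIG-gated inline-stub idiom used for br_vlan_enabled(). */
#include <stdio.h>

#ifdef FEATURE_X
static inline int feature_enabled(void) { return 1; }
static inline void feature_init(void)  { printf("feature initialised\n"); }
#else
static inline int feature_enabled(void) { return 0; }
static inline void feature_init(void)  { }	/* compiles away entirely */
#endif

int main(void)
{
	feature_init();			/* safe to call unconditionally */
	printf("enabled: %d\n", feature_enabled());
	return 0;
}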
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 8dac65552f19..c9e2572b15f4 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -312,10 +312,19 @@ static ssize_t group_addr_store(struct device *d,
312 new_addr[5] == 3) /* 802.1X PAE address */ 312 new_addr[5] == 3) /* 802.1X PAE address */
313 return -EINVAL; 313 return -EINVAL;
314 314
315 if (!rtnl_trylock())
316 return restart_syscall();
317
315 spin_lock_bh(&br->lock); 318 spin_lock_bh(&br->lock);
316 for (i = 0; i < 6; i++) 319 for (i = 0; i < 6; i++)
317 br->group_addr[i] = new_addr[i]; 320 br->group_addr[i] = new_addr[i];
318 spin_unlock_bh(&br->lock); 321 spin_unlock_bh(&br->lock);
322
323 br->group_addr_set = true;
324 br_recalculate_fwd_mask(br);
325
326 rtnl_unlock();
327
319 return len; 328 return len;
320} 329}
321 330
@@ -700,6 +709,22 @@ static ssize_t vlan_filtering_store(struct device *d,
700 return store_bridge_parm(d, buf, len, br_vlan_filter_toggle); 709 return store_bridge_parm(d, buf, len, br_vlan_filter_toggle);
701} 710}
702static DEVICE_ATTR_RW(vlan_filtering); 711static DEVICE_ATTR_RW(vlan_filtering);
712
713static ssize_t vlan_protocol_show(struct device *d,
714 struct device_attribute *attr,
715 char *buf)
716{
717 struct net_bridge *br = to_bridge(d);
718 return sprintf(buf, "%#06x\n", ntohs(br->vlan_proto));
719}
720
721static ssize_t vlan_protocol_store(struct device *d,
722 struct device_attribute *attr,
723 const char *buf, size_t len)
724{
725 return store_bridge_parm(d, buf, len, br_vlan_set_proto);
726}
727static DEVICE_ATTR_RW(vlan_protocol);
703#endif 728#endif
704 729
705static struct attribute *bridge_attrs[] = { 730static struct attribute *bridge_attrs[] = {
@@ -745,6 +770,7 @@ static struct attribute *bridge_attrs[] = {
745#endif 770#endif
746#ifdef CONFIG_BRIDGE_VLAN_FILTERING 771#ifdef CONFIG_BRIDGE_VLAN_FILTERING
747 &dev_attr_vlan_filtering.attr, 772 &dev_attr_vlan_filtering.attr,
773 &dev_attr_vlan_protocol.attr,
748#endif 774#endif
749 NULL 775 NULL
750}; 776};
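
group_addr_store() above (and vlan_protocol_store() via store_bridge_parm) relies on the rtnl_trylock()/restart_syscall() idiom: rather than block inside a sysfs write while holding sysfs locks, which can deadlock against paths that take RTNL first, the handler backs out and lets the write(2) syscall be restarted. A skeleton of the pattern (the store handler and its attribute are hypothetical):

/* Skeleton of the rtnl_trylock()/restart_syscall() idiom used by the
 * sysfs store handlers above; my_store is illustrative only.
 */
#include <linux/rtnetlink.h>
#include <linux/device.h>
#include <linux/sched.h>

static ssize_t my_store(struct device *d, struct device_attribute *attr,
			const char *buf, size_t len)
{
	if (!rtnl_trylock())
		return restart_syscall();	/* syscall retried transparently */

	/* ... mutate state that must be protected by RTNL ... */

	rtnl_unlock();
	return len;
}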
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index dd595bd7fa82..e561cd59b8a6 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -41,20 +41,30 @@ static ssize_t show_##_name(struct net_bridge_port *p, char *buf) \
41} \ 41} \
42static int store_##_name(struct net_bridge_port *p, unsigned long v) \ 42static int store_##_name(struct net_bridge_port *p, unsigned long v) \
43{ \ 43{ \
44 unsigned long flags = p->flags; \ 44 return store_flag(p, v, _mask); \
45 if (v) \
46 flags |= _mask; \
47 else \
48 flags &= ~_mask; \
49 if (flags != p->flags) { \
50 p->flags = flags; \
51 br_ifinfo_notify(RTM_NEWLINK, p); \
52 } \
53 return 0; \
54} \ 45} \
55static BRPORT_ATTR(_name, S_IRUGO | S_IWUSR, \ 46static BRPORT_ATTR(_name, S_IRUGO | S_IWUSR, \
56 show_##_name, store_##_name) 47 show_##_name, store_##_name)
57 48
49static int store_flag(struct net_bridge_port *p, unsigned long v,
50 unsigned long mask)
51{
52 unsigned long flags;
53
54 flags = p->flags;
55
56 if (v)
57 flags |= mask;
58 else
59 flags &= ~mask;
60
61 if (flags != p->flags) {
62 p->flags = flags;
63 br_port_flags_change(p, mask);
64 br_ifinfo_notify(RTM_NEWLINK, p);
65 }
66 return 0;
67}
58 68
59static ssize_t show_path_cost(struct net_bridge_port *p, char *buf) 69static ssize_t show_path_cost(struct net_bridge_port *p, char *buf)
60{ 70{
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 5fee2feaf292..2b2774fe0703 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -60,7 +60,7 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
60 * that ever changes this code will allow tagged 60 * that ever changes this code will allow tagged
61 * traffic to enter the bridge. 61 * traffic to enter the bridge.
62 */ 62 */
63 err = vlan_vid_add(dev, htons(ETH_P_8021Q), vid); 63 err = vlan_vid_add(dev, br->vlan_proto, vid);
64 if (err) 64 if (err)
65 return err; 65 return err;
66 } 66 }
@@ -80,7 +80,7 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
80 80
81out_filt: 81out_filt:
82 if (p) 82 if (p)
83 vlan_vid_del(dev, htons(ETH_P_8021Q), vid); 83 vlan_vid_del(dev, br->vlan_proto, vid);
84 return err; 84 return err;
85} 85}
86 86
@@ -92,8 +92,10 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
92 __vlan_delete_pvid(v, vid); 92 __vlan_delete_pvid(v, vid);
93 clear_bit(vid, v->untagged_bitmap); 93 clear_bit(vid, v->untagged_bitmap);
94 94
95 if (v->port_idx) 95 if (v->port_idx) {
96 vlan_vid_del(v->parent.port->dev, htons(ETH_P_8021Q), vid); 96 struct net_bridge_port *p = v->parent.port;
97 vlan_vid_del(p->dev, p->br->vlan_proto, vid);
98 }
97 99
98 clear_bit(vid, v->vlan_bitmap); 100 clear_bit(vid, v->vlan_bitmap);
99 v->num_vlans--; 101 v->num_vlans--;
@@ -158,7 +160,8 @@ out:
158bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, 160bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
159 struct sk_buff *skb, u16 *vid) 161 struct sk_buff *skb, u16 *vid)
160{ 162{
161 int err; 163 bool tagged;
164 __be16 proto;
162 165
163 /* If VLAN filtering is disabled on the bridge, all packets are 166 /* If VLAN filtering is disabled on the bridge, all packets are
164 * permitted. 167 * permitted.
@@ -172,19 +175,41 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
172 if (!v) 175 if (!v)
173 goto drop; 176 goto drop;
174 177
178 proto = br->vlan_proto;
179
175 /* If vlan tx offload is disabled on bridge device and frame was 180 /* If vlan tx offload is disabled on bridge device and frame was
176 * sent from vlan device on the bridge device, it does not have 181 * sent from vlan device on the bridge device, it does not have
177 * HW accelerated vlan tag. 182 * HW accelerated vlan tag.
178 */ 183 */
179 if (unlikely(!vlan_tx_tag_present(skb) && 184 if (unlikely(!vlan_tx_tag_present(skb) &&
180 (skb->protocol == htons(ETH_P_8021Q) || 185 skb->protocol == proto)) {
181 skb->protocol == htons(ETH_P_8021AD)))) {
182 skb = vlan_untag(skb); 186 skb = vlan_untag(skb);
183 if (unlikely(!skb)) 187 if (unlikely(!skb))
184 return false; 188 return false;
185 } 189 }
186 190
187 err = br_vlan_get_tag(skb, vid); 191 if (!br_vlan_get_tag(skb, vid)) {
192 /* Tagged frame */
193 if (skb->vlan_proto != proto) {
 194			/* Protocol mismatch, empty out vlan_tci for new tag */
195 skb_push(skb, ETH_HLEN);
196 skb = __vlan_put_tag(skb, skb->vlan_proto,
197 vlan_tx_tag_get(skb));
198 if (unlikely(!skb))
199 return false;
200
201 skb_pull(skb, ETH_HLEN);
202 skb_reset_mac_len(skb);
203 *vid = 0;
204 tagged = false;
205 } else {
206 tagged = true;
207 }
208 } else {
209 /* Untagged frame */
210 tagged = false;
211 }
212
188 if (!*vid) { 213 if (!*vid) {
189 u16 pvid = br_get_pvid(v); 214 u16 pvid = br_get_pvid(v);
190 215
@@ -199,9 +224,9 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
199 * ingress frame is considered to belong to this vlan. 224 * ingress frame is considered to belong to this vlan.
200 */ 225 */
201 *vid = pvid; 226 *vid = pvid;
202 if (likely(err)) 227 if (likely(!tagged))
203 /* Untagged Frame. */ 228 /* Untagged Frame. */
204 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid); 229 __vlan_hwaccel_put_tag(skb, proto, pvid);
205 else 230 else
206 /* Priority-tagged Frame. 231 /* Priority-tagged Frame.
207 * At this point, We know that skb->vlan_tci had 232 * At this point, We know that skb->vlan_tci had
@@ -254,7 +279,9 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
254 if (!v) 279 if (!v)
255 return false; 280 return false;
256 281
257 br_vlan_get_tag(skb, vid); 282 if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
283 *vid = 0;
284
258 if (!*vid) { 285 if (!*vid) {
259 *vid = br_get_pvid(v); 286 *vid = br_get_pvid(v);
260 if (*vid == VLAN_N_VID) 287 if (*vid == VLAN_N_VID)
@@ -351,6 +378,33 @@ out:
351 return found; 378 return found;
352} 379}
353 380
381/* Must be protected by RTNL. */
382static void recalculate_group_addr(struct net_bridge *br)
383{
384 if (br->group_addr_set)
385 return;
386
387 spin_lock_bh(&br->lock);
388 if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
389 /* Bridge Group Address */
390 br->group_addr[5] = 0x00;
391 } else { /* vlan_enabled && ETH_P_8021AD */
392 /* Provider Bridge Group Address */
393 br->group_addr[5] = 0x08;
394 }
395 spin_unlock_bh(&br->lock);
396}
397
398/* Must be protected by RTNL. */
399void br_recalculate_fwd_mask(struct net_bridge *br)
400{
401 if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
402 br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
403 else /* vlan_enabled && ETH_P_8021AD */
404 br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
405 ~(1u << br->group_addr[5]);
406}
407
354int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val) 408int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
355{ 409{
356 if (!rtnl_trylock()) 410 if (!rtnl_trylock())
@@ -360,12 +414,88 @@ int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
360 goto unlock; 414 goto unlock;
361 415
362 br->vlan_enabled = val; 416 br->vlan_enabled = val;
417 br_manage_promisc(br);
418 recalculate_group_addr(br);
419 br_recalculate_fwd_mask(br);
363 420
364unlock: 421unlock:
365 rtnl_unlock(); 422 rtnl_unlock();
366 return 0; 423 return 0;
367} 424}
368 425
426int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
427{
428 int err = 0;
429 struct net_bridge_port *p;
430 struct net_port_vlans *pv;
431 __be16 proto, oldproto;
432 u16 vid, errvid;
433
434 if (val != ETH_P_8021Q && val != ETH_P_8021AD)
435 return -EPROTONOSUPPORT;
436
437 if (!rtnl_trylock())
438 return restart_syscall();
439
440 proto = htons(val);
441 if (br->vlan_proto == proto)
442 goto unlock;
443
444 /* Add VLANs for the new proto to the device filter. */
445 list_for_each_entry(p, &br->port_list, list) {
446 pv = rtnl_dereference(p->vlan_info);
447 if (!pv)
448 continue;
449
450 for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
451 err = vlan_vid_add(p->dev, proto, vid);
452 if (err)
453 goto err_filt;
454 }
455 }
456
457 oldproto = br->vlan_proto;
458 br->vlan_proto = proto;
459
460 recalculate_group_addr(br);
461 br_recalculate_fwd_mask(br);
462
463 /* Delete VLANs for the old proto from the device filter. */
464 list_for_each_entry(p, &br->port_list, list) {
465 pv = rtnl_dereference(p->vlan_info);
466 if (!pv)
467 continue;
468
469 for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
470 vlan_vid_del(p->dev, oldproto, vid);
471 }
472
473unlock:
474 rtnl_unlock();
475 return err;
476
477err_filt:
478 errvid = vid;
479 for_each_set_bit(vid, pv->vlan_bitmap, errvid)
480 vlan_vid_del(p->dev, proto, vid);
481
482 list_for_each_entry_continue_reverse(p, &br->port_list, list) {
483 pv = rtnl_dereference(p->vlan_info);
484 if (!pv)
485 continue;
486
487 for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
488 vlan_vid_del(p->dev, proto, vid);
489 }
490
491 goto unlock;
492}
493
494void br_vlan_init(struct net_bridge *br)
495{
496 br->vlan_proto = htons(ETH_P_8021Q);
497}
498
369/* Must be protected by RTNL. 499/* Must be protected by RTNL.
370 * Must be called with vid in range from 1 to 4094 inclusive. 500 * Must be called with vid in range from 1 to 4094 inclusive.
371 */ 501 */
@@ -432,7 +562,7 @@ void nbp_vlan_flush(struct net_bridge_port *port)
432 return; 562 return;
433 563
434 for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) 564 for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
435 vlan_vid_del(port->dev, htons(ETH_P_8021Q), vid); 565 vlan_vid_del(port->dev, port->br->vlan_proto, vid);
436 566
437 __vlan_flush(pv); 567 __vlan_flush(pv);
438} 568}
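
The interaction between recalculate_group_addr() and br_recalculate_fwd_mask() above is easiest to see with concrete numbers: BR_GROUPFWD_8021AD (0xB801) sets the bits for the group addresses ending in 00, 0B, 0C, 0D and 0F, and the bit matching the bridge's own group address is then cleared so those frames are still trapped locally. A small user-space check of the arithmetic:

/* Worked example of the br_recalculate_fwd_mask() arithmetic above. */
#include <stdio.h>

#define BR_GROUPFWD_8021AD 0xB801u

static unsigned int fwd_mask(unsigned char last_octet)
{
	return BR_GROUPFWD_8021AD & ~(1u << last_octet);
}

int main(void)
{
	/* default Provider Bridge Group Address ...:08 - bit 8 is not in
	 * the 0xB801 set, so the mask is unchanged: prints 0xB801 */
	printf("0x08 -> 0x%04X\n", fwd_mask(0x08));

	/* admin-set group address ...:0B - bit 11 is cleared: prints 0xB001 */
	printf("0x0B -> 0x%04X\n", fwd_mask(0x0B));
	return 0;
}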
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index 5ca74a0e595f..629dc77874a9 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -2,14 +2,23 @@
2# Bridge netfilter configuration 2# Bridge netfilter configuration
3# 3#
4# 4#
5config NF_TABLES_BRIDGE 5menuconfig NF_TABLES_BRIDGE
6 depends on NF_TABLES 6 depends on BRIDGE && NETFILTER && NF_TABLES
7 tristate "Ethernet Bridge nf_tables support" 7 tristate "Ethernet Bridge nf_tables support"
8 8
9if NF_TABLES_BRIDGE
10
11config NFT_BRIDGE_META
12 tristate "Netfilter nf_table bridge meta support"
13 depends on NFT_META
14 help
 15	  Add support for the bridge-dedicated meta key.
16
17endif # NF_TABLES_BRIDGE
18
9menuconfig BRIDGE_NF_EBTABLES 19menuconfig BRIDGE_NF_EBTABLES
10 tristate "Ethernet Bridge tables (ebtables) support" 20 tristate "Ethernet Bridge tables (ebtables) support"
11 depends on BRIDGE && NETFILTER 21 depends on BRIDGE && NETFILTER && NETFILTER_XTABLES
12 select NETFILTER_XTABLES
13 help 22 help
14 ebtables is a general, extensible frame/packet identification 23 ebtables is a general, extensible frame/packet identification
15 framework. Say 'Y' or 'M' here if you want to do Ethernet 24 framework. Say 'Y' or 'M' here if you want to do Ethernet
diff --git a/net/bridge/netfilter/Makefile b/net/bridge/netfilter/Makefile
index ea7629f58b3d..6f2f3943d66f 100644
--- a/net/bridge/netfilter/Makefile
+++ b/net/bridge/netfilter/Makefile
@@ -3,6 +3,7 @@
3# 3#
4 4
5obj-$(CONFIG_NF_TABLES_BRIDGE) += nf_tables_bridge.o 5obj-$(CONFIG_NF_TABLES_BRIDGE) += nf_tables_bridge.o
6obj-$(CONFIG_NFT_BRIDGE_META) += nft_meta_bridge.o
6 7
7obj-$(CONFIG_BRIDGE_NF_EBTABLES) += ebtables.o 8obj-$(CONFIG_BRIDGE_NF_EBTABLES) += ebtables.o
8 9
diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c
new file mode 100644
index 000000000000..4f02109d708f
--- /dev/null
+++ b/net/bridge/netfilter/nft_meta_bridge.c
@@ -0,0 +1,139 @@
1/*
2 * Copyright (c) 2014 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 */
9
10#include <linux/kernel.h>
11#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/netlink.h>
14#include <linux/netfilter.h>
15#include <linux/netfilter/nf_tables.h>
16#include <net/netfilter/nf_tables.h>
17#include <net/netfilter/nft_meta.h>
18
19#include "../br_private.h"
20
21static void nft_meta_bridge_get_eval(const struct nft_expr *expr,
22 struct nft_data data[NFT_REG_MAX + 1],
23 const struct nft_pktinfo *pkt)
24{
25 const struct nft_meta *priv = nft_expr_priv(expr);
26 const struct net_device *in = pkt->in, *out = pkt->out;
27 struct nft_data *dest = &data[priv->dreg];
28 const struct net_bridge_port *p;
29
30 switch (priv->key) {
31 case NFT_META_BRI_IIFNAME:
32 if (in == NULL || (p = br_port_get_rcu(in)) == NULL)
33 goto err;
34 break;
35 case NFT_META_BRI_OIFNAME:
36 if (out == NULL || (p = br_port_get_rcu(out)) == NULL)
37 goto err;
38 break;
39 default:
40 goto out;
41 }
42
43 strncpy((char *)dest->data, p->br->dev->name, sizeof(dest->data));
44 return;
45out:
46 return nft_meta_get_eval(expr, data, pkt);
47err:
48 data[NFT_REG_VERDICT].verdict = NFT_BREAK;
49}
50
51static int nft_meta_bridge_get_init(const struct nft_ctx *ctx,
52 const struct nft_expr *expr,
53 const struct nlattr * const tb[])
54{
55 struct nft_meta *priv = nft_expr_priv(expr);
56 int err;
57
58 priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
59 switch (priv->key) {
60 case NFT_META_BRI_IIFNAME:
61 case NFT_META_BRI_OIFNAME:
62 break;
63 default:
64 return nft_meta_get_init(ctx, expr, tb);
65 }
66
67 priv->dreg = ntohl(nla_get_be32(tb[NFTA_META_DREG]));
68 err = nft_validate_output_register(priv->dreg);
69 if (err < 0)
70 return err;
71
72 err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
73 if (err < 0)
74 return err;
75
76 return 0;
77}
78
79static struct nft_expr_type nft_meta_bridge_type;
80static const struct nft_expr_ops nft_meta_bridge_get_ops = {
81 .type = &nft_meta_bridge_type,
82 .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
83 .eval = nft_meta_bridge_get_eval,
84 .init = nft_meta_bridge_get_init,
85 .dump = nft_meta_get_dump,
86};
87
88static const struct nft_expr_ops nft_meta_bridge_set_ops = {
89 .type = &nft_meta_bridge_type,
90 .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
91 .eval = nft_meta_set_eval,
92 .init = nft_meta_set_init,
93 .dump = nft_meta_set_dump,
94};
95
96static const struct nft_expr_ops *
97nft_meta_bridge_select_ops(const struct nft_ctx *ctx,
98 const struct nlattr * const tb[])
99{
100 if (tb[NFTA_META_KEY] == NULL)
101 return ERR_PTR(-EINVAL);
102
103 if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG])
104 return ERR_PTR(-EINVAL);
105
106 if (tb[NFTA_META_DREG])
107 return &nft_meta_bridge_get_ops;
108
109 if (tb[NFTA_META_SREG])
110 return &nft_meta_bridge_set_ops;
111
112 return ERR_PTR(-EINVAL);
113}
114
115static struct nft_expr_type nft_meta_bridge_type __read_mostly = {
116 .family = NFPROTO_BRIDGE,
117 .name = "meta",
118 .select_ops = &nft_meta_bridge_select_ops,
119 .policy = nft_meta_policy,
120 .maxattr = NFTA_META_MAX,
121 .owner = THIS_MODULE,
122};
123
124static int __init nft_meta_bridge_module_init(void)
125{
126 return nft_register_expr(&nft_meta_bridge_type);
127}
128
129static void __exit nft_meta_bridge_module_exit(void)
130{
131 nft_unregister_expr(&nft_meta_bridge_type);
132}
133
134module_init(nft_meta_bridge_module_init);
135module_exit(nft_meta_bridge_module_exit);
136
137MODULE_LICENSE("GPL");
138MODULE_AUTHOR("Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>");
139MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "meta");
diff --git a/net/can/af_can.c b/net/can/af_can.c
index a27f8aad9e99..ce82337521f6 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -338,6 +338,29 @@ static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
338} 338}
339 339
340/** 340/**
341 * effhash - hash function for 29 bit CAN identifier reduction
342 * @can_id: 29 bit CAN identifier
343 *
344 * Description:
345 * To reduce the linear traversal in one linked list of _single_ EFF CAN
346 * frame subscriptions the 29 bit identifier is mapped to 10 bits.
347 * (see CAN_EFF_RCV_HASH_BITS definition)
348 *
349 * Return:
 350 * Hash value from 0x000 - 0x3FF (enforced by the CAN_EFF_RCV_HASH_BITS mask)
351 */
352static unsigned int effhash(canid_t can_id)
353{
354 unsigned int hash;
355
356 hash = can_id;
357 hash ^= can_id >> CAN_EFF_RCV_HASH_BITS;
358 hash ^= can_id >> (2 * CAN_EFF_RCV_HASH_BITS);
359
360 return hash & ((1 << CAN_EFF_RCV_HASH_BITS) - 1);
361}
362
363/**
341 * find_rcv_list - determine optimal filterlist inside device filter struct 364 * find_rcv_list - determine optimal filterlist inside device filter struct
342 * @can_id: pointer to CAN identifier of a given can_filter 365 * @can_id: pointer to CAN identifier of a given can_filter
343 * @mask: pointer to CAN mask of a given can_filter 366 * @mask: pointer to CAN mask of a given can_filter
@@ -400,10 +423,8 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
400 !(*can_id & CAN_RTR_FLAG)) { 423 !(*can_id & CAN_RTR_FLAG)) {
401 424
402 if (*can_id & CAN_EFF_FLAG) { 425 if (*can_id & CAN_EFF_FLAG) {
403 if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) { 426 if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS))
404 /* RFC: a future use-case for hash-tables? */ 427 return &d->rx_eff[effhash(*can_id)];
405 return &d->rx[RX_EFF];
406 }
407 } else { 428 } else {
408 if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS)) 429 if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
409 return &d->rx_sff[*can_id]; 430 return &d->rx_sff[*can_id];
@@ -632,7 +653,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
632 return matches; 653 return matches;
633 654
634 if (can_id & CAN_EFF_FLAG) { 655 if (can_id & CAN_EFF_FLAG) {
635 hlist_for_each_entry_rcu(r, &d->rx[RX_EFF], list) { 656 hlist_for_each_entry_rcu(r, &d->rx_eff[effhash(can_id)], list) {
636 if (r->can_id == can_id) { 657 if (r->can_id == can_id) {
637 deliver(skb, r); 658 deliver(skb, r);
638 matches++; 659 matches++;
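
effhash() above folds the 29-bit identifier down to CAN_EFF_RCV_HASH_BITS (10) bits by XOR-ing three 10-bit slices of the ID. A stand-alone user-space sketch of the same computation, with one worked value:

/* User-space re-implementation of effhash() above, for illustration. */
#include <stdio.h>

typedef unsigned int canid_t;

#define CAN_EFF_RCV_HASH_BITS 10

static unsigned int effhash(canid_t can_id)
{
	unsigned int hash = can_id;

	hash ^= can_id >> CAN_EFF_RCV_HASH_BITS;
	hash ^= can_id >> (2 * CAN_EFF_RCV_HASH_BITS);

	return hash & ((1 << CAN_EFF_RCV_HASH_BITS) - 1);
}

int main(void)
{
	/* 0x12345678 ^ 0x00048D15 ^ 0x00000123 = 0x1230DA4E; the low
	 * 10 bits give bucket 0x24E */
	printf("effhash(0x12345678) = 0x%03X\n", effhash(0x12345678));
	return 0;
}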
diff --git a/net/can/af_can.h b/net/can/af_can.h
index 6de58b40535c..fca0fe9fc45a 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -59,12 +59,17 @@ struct receiver {
59 char *ident; 59 char *ident;
60}; 60};
61 61
62enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_EFF, RX_MAX }; 62#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
63#define CAN_EFF_RCV_HASH_BITS 10
64#define CAN_EFF_RCV_ARRAY_SZ (1 << CAN_EFF_RCV_HASH_BITS)
65
66enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_MAX };
63 67
64/* per device receive filters linked at dev->ml_priv */ 68/* per device receive filters linked at dev->ml_priv */
65struct dev_rcv_lists { 69struct dev_rcv_lists {
66 struct hlist_head rx[RX_MAX]; 70 struct hlist_head rx[RX_MAX];
67 struct hlist_head rx_sff[0x800]; 71 struct hlist_head rx_sff[CAN_SFF_RCV_ARRAY_SZ];
72 struct hlist_head rx_eff[CAN_EFF_RCV_ARRAY_SZ];
68 int remove_on_zero_entries; 73 int remove_on_zero_entries;
69 int entries; 74 int entries;
70}; 75};
diff --git a/net/can/proc.c b/net/can/proc.c
index b543470c8f8b..1a19b985a868 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -80,7 +80,6 @@ static const char rx_list_name[][8] = {
80 [RX_ALL] = "rx_all", 80 [RX_ALL] = "rx_all",
81 [RX_FIL] = "rx_fil", 81 [RX_FIL] = "rx_fil",
82 [RX_INV] = "rx_inv", 82 [RX_INV] = "rx_inv",
83 [RX_EFF] = "rx_eff",
84}; 83};
85 84
86/* 85/*
@@ -389,25 +388,26 @@ static const struct file_operations can_rcvlist_proc_fops = {
389 .release = single_release, 388 .release = single_release,
390}; 389};
391 390
392static inline void can_rcvlist_sff_proc_show_one(struct seq_file *m, 391static inline void can_rcvlist_proc_show_array(struct seq_file *m,
393 struct net_device *dev, 392 struct net_device *dev,
394 struct dev_rcv_lists *d) 393 struct hlist_head *rcv_array,
394 unsigned int rcv_array_sz)
395{ 395{
396 int i; 396 unsigned int i;
397 int all_empty = 1; 397 int all_empty = 1;
398 398
399 /* check whether at least one list is non-empty */ 399 /* check whether at least one list is non-empty */
400 for (i = 0; i < 0x800; i++) 400 for (i = 0; i < rcv_array_sz; i++)
401 if (!hlist_empty(&d->rx_sff[i])) { 401 if (!hlist_empty(&rcv_array[i])) {
402 all_empty = 0; 402 all_empty = 0;
403 break; 403 break;
404 } 404 }
405 405
406 if (!all_empty) { 406 if (!all_empty) {
407 can_print_recv_banner(m); 407 can_print_recv_banner(m);
408 for (i = 0; i < 0x800; i++) { 408 for (i = 0; i < rcv_array_sz; i++) {
409 if (!hlist_empty(&d->rx_sff[i])) 409 if (!hlist_empty(&rcv_array[i]))
410 can_print_rcvlist(m, &d->rx_sff[i], dev); 410 can_print_rcvlist(m, &rcv_array[i], dev);
411 } 411 }
412 } else 412 } else
413 seq_printf(m, " (%s: no entry)\n", DNAME(dev)); 413 seq_printf(m, " (%s: no entry)\n", DNAME(dev));
@@ -425,12 +425,15 @@ static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
425 425
426 /* sff receive list for 'all' CAN devices (dev == NULL) */ 426 /* sff receive list for 'all' CAN devices (dev == NULL) */
427 d = &can_rx_alldev_list; 427 d = &can_rx_alldev_list;
428 can_rcvlist_sff_proc_show_one(m, NULL, d); 428 can_rcvlist_proc_show_array(m, NULL, d->rx_sff, ARRAY_SIZE(d->rx_sff));
429 429
430 /* sff receive list for registered CAN devices */ 430 /* sff receive list for registered CAN devices */
431 for_each_netdev_rcu(&init_net, dev) { 431 for_each_netdev_rcu(&init_net, dev) {
432 if (dev->type == ARPHRD_CAN && dev->ml_priv) 432 if (dev->type == ARPHRD_CAN && dev->ml_priv) {
433 can_rcvlist_sff_proc_show_one(m, dev, dev->ml_priv); 433 d = dev->ml_priv;
434 can_rcvlist_proc_show_array(m, dev, d->rx_sff,
435 ARRAY_SIZE(d->rx_sff));
436 }
434 } 437 }
435 438
436 rcu_read_unlock(); 439 rcu_read_unlock();
@@ -452,6 +455,49 @@ static const struct file_operations can_rcvlist_sff_proc_fops = {
452 .release = single_release, 455 .release = single_release,
453}; 456};
454 457
458
459static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v)
460{
461 struct net_device *dev;
462 struct dev_rcv_lists *d;
463
464 /* RX_EFF */
465 seq_puts(m, "\nreceive list 'rx_eff':\n");
466
467 rcu_read_lock();
468
469 /* eff receive list for 'all' CAN devices (dev == NULL) */
470 d = &can_rx_alldev_list;
471 can_rcvlist_proc_show_array(m, NULL, d->rx_eff, ARRAY_SIZE(d->rx_eff));
472
473 /* eff receive list for registered CAN devices */
474 for_each_netdev_rcu(&init_net, dev) {
475 if (dev->type == ARPHRD_CAN && dev->ml_priv) {
476 d = dev->ml_priv;
477 can_rcvlist_proc_show_array(m, dev, d->rx_eff,
478 ARRAY_SIZE(d->rx_eff));
479 }
480 }
481
482 rcu_read_unlock();
483
484 seq_putc(m, '\n');
485 return 0;
486}
487
488static int can_rcvlist_eff_proc_open(struct inode *inode, struct file *file)
489{
490 return single_open(file, can_rcvlist_eff_proc_show, NULL);
491}
492
493static const struct file_operations can_rcvlist_eff_proc_fops = {
494 .owner = THIS_MODULE,
495 .open = can_rcvlist_eff_proc_open,
496 .read = seq_read,
497 .llseek = seq_lseek,
498 .release = single_release,
499};
500
455/* 501/*
456 * proc utility functions 502 * proc utility functions
457 */ 503 */
@@ -491,8 +537,8 @@ void can_init_proc(void)
491 &can_rcvlist_proc_fops, (void *)RX_FIL); 537 &can_rcvlist_proc_fops, (void *)RX_FIL);
492 pde_rcvlist_inv = proc_create_data(CAN_PROC_RCVLIST_INV, 0644, can_dir, 538 pde_rcvlist_inv = proc_create_data(CAN_PROC_RCVLIST_INV, 0644, can_dir,
493 &can_rcvlist_proc_fops, (void *)RX_INV); 539 &can_rcvlist_proc_fops, (void *)RX_INV);
494 pde_rcvlist_eff = proc_create_data(CAN_PROC_RCVLIST_EFF, 0644, can_dir, 540 pde_rcvlist_eff = proc_create(CAN_PROC_RCVLIST_EFF, 0644, can_dir,
495 &can_rcvlist_proc_fops, (void *)RX_EFF); 541 &can_rcvlist_eff_proc_fops);
496 pde_rcvlist_sff = proc_create(CAN_PROC_RCVLIST_SFF, 0644, can_dir, 542 pde_rcvlist_sff = proc_create(CAN_PROC_RCVLIST_SFF, 0644, can_dir,
497 &can_rcvlist_sff_proc_fops); 543 &can_rcvlist_sff_proc_fops);
498} 544}
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index b0dfce77656a..05be0c181695 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -2491,7 +2491,7 @@ EXPORT_SYMBOL(ceph_osdc_sync);
2491 * Call all pending notify callbacks - for use after a watch is 2491 * Call all pending notify callbacks - for use after a watch is
2492 * unregistered, to make sure no more callbacks for it will be invoked 2492 * unregistered, to make sure no more callbacks for it will be invoked
2493 */ 2493 */
2494extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc) 2494void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
2495{ 2495{
2496 flush_workqueue(osdc->notify_wq); 2496 flush_workqueue(osdc->notify_wq);
2497} 2497}
diff --git a/net/core/Makefile b/net/core/Makefile
index 826b925aa453..71093d94ad2b 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
9 9
10obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \ 10obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
11 neighbour.o rtnetlink.o utils.o link_watch.o filter.o \ 11 neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
12 sock_diag.o dev_ioctl.o 12 sock_diag.o dev_ioctl.o tso.o
13 13
14obj-$(CONFIG_XFRM) += flow.o 14obj-$(CONFIG_XFRM) += flow.o
15obj-y += net-sysfs.o 15obj-y += net-sysfs.o
diff --git a/net/core/datagram.c b/net/core/datagram.c
index a16ed7bbe376..6b1c04ca1d50 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -739,11 +739,15 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
739 __sum16 sum; 739 __sum16 sum;
740 740
741 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); 741 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
742 if (likely(!sum)) { 742 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && !sum &&
743 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) 743 !skb->csum_complete_sw)
744 netdev_rx_csum_fault(skb->dev); 744 netdev_rx_csum_fault(skb->dev);
745 skb->ip_summed = CHECKSUM_UNNECESSARY; 745
746 } 746 /* Save checksum complete for later use */
747 skb->csum = sum;
748 skb->ip_summed = CHECKSUM_COMPLETE;
749 skb->csum_complete_sw = 1;
750
747 return sum; 751 return sum;
748} 752}
749EXPORT_SYMBOL(__skb_checksum_complete_head); 753EXPORT_SYMBOL(__skb_checksum_complete_head);
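
The datagram.c change above makes __skb_checksum_complete_head() cache its result: the folded checksum is written back into skb->csum and csum_complete_sw marks it as software-computed, so later consumers can reuse it and a hardware checksum fault is not reported for a sum the software itself produced. A simplified user-space sketch of the compute-once idea (not a line-for-line model of the kernel code; struct pkt and compute_csum are illustrative stand-ins):

/* Skeleton of the compute-once/cache pattern introduced above. */
#include <stdio.h>

struct pkt {
	unsigned short csum;			/* cached result */
	unsigned int csum_complete_sw:1;	/* software already validated */
};

static unsigned short compute_csum(void)
{
	printf("expensive checksum pass\n");
	return 0;				/* 0 == valid */
}

static unsigned short checksum_complete(struct pkt *p)
{
	if (p->csum_complete_sw)		/* reuse the cached sum */
		return p->csum;

	p->csum = compute_csum();
	p->csum_complete_sw = 1;
	return p->csum;
}

int main(void)
{
	struct pkt p = { 0 };

	checksum_complete(&p);			/* computes */
	checksum_complete(&p);			/* cached, no second pass */
	return 0;
}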
diff --git a/net/core/dev.c b/net/core/dev.c
index a30bef1882f5..ab6c491bd2d3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1661,6 +1661,29 @@ bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
1661} 1661}
1662EXPORT_SYMBOL_GPL(is_skb_forwardable); 1662EXPORT_SYMBOL_GPL(is_skb_forwardable);
1663 1663
1664int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1665{
1666 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1667 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1668 atomic_long_inc(&dev->rx_dropped);
1669 kfree_skb(skb);
1670 return NET_RX_DROP;
1671 }
1672 }
1673
1674 if (unlikely(!is_skb_forwardable(dev, skb))) {
1675 atomic_long_inc(&dev->rx_dropped);
1676 kfree_skb(skb);
1677 return NET_RX_DROP;
1678 }
1679
1680 skb_scrub_packet(skb, true);
1681 skb->protocol = eth_type_trans(skb, dev);
1682
1683 return 0;
1684}
1685EXPORT_SYMBOL_GPL(__dev_forward_skb);
1686
1664/** 1687/**
1665 * dev_forward_skb - loopback an skb to another netif 1688 * dev_forward_skb - loopback an skb to another netif
1666 * 1689 *
@@ -1681,24 +1704,7 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable);
1681 */ 1704 */
1682int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 1705int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1683{ 1706{
1684 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { 1707 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
1685 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1686 atomic_long_inc(&dev->rx_dropped);
1687 kfree_skb(skb);
1688 return NET_RX_DROP;
1689 }
1690 }
1691
1692 if (unlikely(!is_skb_forwardable(dev, skb))) {
1693 atomic_long_inc(&dev->rx_dropped);
1694 kfree_skb(skb);
1695 return NET_RX_DROP;
1696 }
1697
1698 skb_scrub_packet(skb, true);
1699 skb->protocol = eth_type_trans(skb, dev);
1700
1701 return netif_rx_internal(skb);
1702} 1708}
1703EXPORT_SYMBOL_GPL(dev_forward_skb); 1709EXPORT_SYMBOL_GPL(dev_forward_skb);
1704 1710
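
The rewritten dev_forward_skb() uses the GCC "?:" extension (the Elvis operator): a ?: b evaluates to a when a is non-zero, otherwise b, and evaluates a only once. Here that reads as "return the error from __dev_forward_skb() if there was one, otherwise the result of netif_rx_internal()". A tiny demonstration:

/* Demonstration of the GNU a ?: b extension used in dev_forward_skb(). */
#include <stdio.h>

static int may_fail(int err) { return err; }

int main(void)
{
	/* err ?: fallback - fallback is used only when err == 0 */
	printf("%d\n", may_fail(-22) ?: 100);	/* prints -22 */
	printf("%d\n", may_fail(0) ?: 100);	/* prints 100 */
	return 0;
}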
@@ -2507,13 +2513,39 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
2507 return 0; 2513 return 0;
2508} 2514}
2509 2515
2516/* If this is an MPLS offload request, verify that we are testing the
2517 * hardware MPLS features instead of the standard features for the netdev.
2518 */
2519#ifdef CONFIG_NET_MPLS_GSO
2520static netdev_features_t net_mpls_features(struct sk_buff *skb,
2521 netdev_features_t features,
2522 __be16 type)
2523{
2524 if (type == htons(ETH_P_MPLS_UC) || type == htons(ETH_P_MPLS_MC))
2525 features &= skb->dev->mpls_features;
2526
2527 return features;
2528}
2529#else
2530static netdev_features_t net_mpls_features(struct sk_buff *skb,
2531 netdev_features_t features,
2532 __be16 type)
2533{
2534 return features;
2535}
2536#endif
2537
2510static netdev_features_t harmonize_features(struct sk_buff *skb, 2538static netdev_features_t harmonize_features(struct sk_buff *skb,
2511 netdev_features_t features) 2539 netdev_features_t features)
2512{ 2540{
2513 int tmp; 2541 int tmp;
2542 __be16 type;
2543
2544 type = skb_network_protocol(skb, &tmp);
2545 features = net_mpls_features(skb, features, type);
2514 2546
2515 if (skb->ip_summed != CHECKSUM_NONE && 2547 if (skb->ip_summed != CHECKSUM_NONE &&
2516 !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) { 2548 !can_checksum_protocol(features, type)) {
2517 features &= ~NETIF_F_ALL_CSUM; 2549 features &= ~NETIF_F_ALL_CSUM;
2518 } else if (illegal_highdma(skb->dev, skb)) { 2550 } else if (illegal_highdma(skb->dev, skb)) {
2519 features &= ~NETIF_F_SG; 2551 features &= ~NETIF_F_SG;
@@ -5689,10 +5721,6 @@ static void rollback_registered_many(struct list_head *head)
5689 */ 5721 */
5690 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 5722 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5691 5723
5692 if (!dev->rtnl_link_ops ||
5693 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5694 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
5695
5696 /* 5724 /*
5697 * Flush the unicast and multicast chains 5725 * Flush the unicast and multicast chains
5698 */ 5726 */
@@ -5702,6 +5730,10 @@ static void rollback_registered_many(struct list_head *head)
5702 if (dev->netdev_ops->ndo_uninit) 5730 if (dev->netdev_ops->ndo_uninit)
5703 dev->netdev_ops->ndo_uninit(dev); 5731 dev->netdev_ops->ndo_uninit(dev);
5704 5732
5733 if (!dev->rtnl_link_ops ||
5734 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5735 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
5736
5705 /* Notifier chain MUST detach us all upper devices. */ 5737 /* Notifier chain MUST detach us all upper devices. */
5706 WARN_ON(netdev_has_any_upper_dev(dev)); 5738 WARN_ON(netdev_has_any_upper_dev(dev));
5707 5739
@@ -5927,10 +5959,7 @@ static void netdev_init_one_queue(struct net_device *dev,
5927 5959
5928static void netif_free_tx_queues(struct net_device *dev) 5960static void netif_free_tx_queues(struct net_device *dev)
5929{ 5961{
5930 if (is_vmalloc_addr(dev->_tx)) 5962 kvfree(dev->_tx);
5931 vfree(dev->_tx);
5932 else
5933 kfree(dev->_tx);
5934} 5963}
5935 5964
5936static int netif_alloc_netdev_queues(struct net_device *dev) 5965static int netif_alloc_netdev_queues(struct net_device *dev)
@@ -6404,10 +6433,7 @@ void netdev_freemem(struct net_device *dev)
6404{ 6433{
6405 char *addr = (char *)dev - dev->padded; 6434 char *addr = (char *)dev - dev->padded;
6406 6435
6407 if (is_vmalloc_addr(addr)) 6436 kvfree(addr);
6408 vfree(addr);
6409 else
6410 kfree(addr);
6411} 6437}
6412 6438
6413/** 6439/**
@@ -6512,11 +6538,6 @@ free_all:
6512 6538
6513free_pcpu: 6539free_pcpu:
6514 free_percpu(dev->pcpu_refcnt); 6540 free_percpu(dev->pcpu_refcnt);
6515 netif_free_tx_queues(dev);
6516#ifdef CONFIG_SYSFS
6517 kfree(dev->_rx);
6518#endif
6519
6520free_dev: 6541free_dev:
6521 netdev_freemem(dev); 6542 netdev_freemem(dev);
6522 return NULL; 6543 return NULL;
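
The two hunks above replace open-coded is_vmalloc_addr() branching with kvfree(), which frees memory no matter whether it came from kmalloc() or vmalloc(). A sketch of the allocation side of that idiom, as code like netif_alloc_netdev_queues() typically writes it (my_alloc_table is a hypothetical helper, not the exact code of this patch):

/* Sketch of the kmalloc-with-vmalloc-fallback idiom that pairs with
 * kvfree(); my_alloc_table is illustrative only. */
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

static void *my_alloc_table(size_t sz)
{
	void *p;

	/* try physically contiguous memory first, quietly */
	p = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
	if (!p)
		p = vzalloc(sz);	/* fall back to vmalloc space */
	return p;			/* either way, free with kvfree(p) */
}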
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 329d5794e7dc..b6b230600b97 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -225,6 +225,91 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
225} 225}
226EXPORT_SYMBOL(__hw_addr_unsync); 226EXPORT_SYMBOL(__hw_addr_unsync);
227 227
228/**
 229 * __hw_addr_sync_dev - Synchronize device's multicast list
 230 * @list: address list to synchronize
231 * @dev: device to sync
232 * @sync: function to call if address should be added
233 * @unsync: function to call if address should be removed
234 *
 235 * This function is intended to be called from the ndo_set_rx_mode
236 * function of devices that require explicit address add/remove
 237 * notifications. The unsync function may be NULL, in which case
238 * the addresses requiring removal will simply be removed without
239 * any notification to the device.
240 **/
241int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
242 struct net_device *dev,
243 int (*sync)(struct net_device *, const unsigned char *),
244 int (*unsync)(struct net_device *,
245 const unsigned char *))
246{
247 struct netdev_hw_addr *ha, *tmp;
248 int err;
249
250 /* first go through and flush out any stale entries */
251 list_for_each_entry_safe(ha, tmp, &list->list, list) {
252 if (!ha->sync_cnt || ha->refcount != 1)
253 continue;
254
255 /* if unsync is defined and fails defer unsyncing address */
256 if (unsync && unsync(dev, ha->addr))
257 continue;
258
259 ha->sync_cnt--;
260 __hw_addr_del_entry(list, ha, false, false);
261 }
262
263 /* go through and sync new entries to the list */
264 list_for_each_entry_safe(ha, tmp, &list->list, list) {
265 if (ha->sync_cnt)
266 continue;
267
268 err = sync(dev, ha->addr);
269 if (err)
270 return err;
271
272 ha->sync_cnt++;
273 ha->refcount++;
274 }
275
276 return 0;
277}
278EXPORT_SYMBOL(__hw_addr_sync_dev);
279
280/**
 281 * __hw_addr_unsync_dev - Remove synchronized addresses from device
 282 * @list: address list to remove synchronized addresses from
283 * @dev: device to sync
284 * @unsync: function to call if address should be removed
285 *
286 * Remove all addresses that were added to the device by __hw_addr_sync_dev().
287 * This function is intended to be called from the ndo_stop or ndo_open
288 * functions on devices that require explicit address add/remove
 289 * notifications. If the unsync function pointer is NULL, then this function
290 * can be used to just reset the sync_cnt for the addresses in the list.
291 **/
292void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
293 struct net_device *dev,
294 int (*unsync)(struct net_device *,
295 const unsigned char *))
296{
297 struct netdev_hw_addr *ha, *tmp;
298
299 list_for_each_entry_safe(ha, tmp, &list->list, list) {
300 if (!ha->sync_cnt)
301 continue;
302
303 /* if unsync is defined and fails defer unsyncing address */
304 if (unsync && unsync(dev, ha->addr))
305 continue;
306
307 ha->sync_cnt--;
308 __hw_addr_del_entry(list, ha, false, false);
309 }
310}
311EXPORT_SYMBOL(__hw_addr_unsync_dev);
312
228static void __hw_addr_flush(struct netdev_hw_addr_list *list) 313static void __hw_addr_flush(struct netdev_hw_addr_list *list)
229{ 314{
230 struct netdev_hw_addr *ha, *tmp; 315 struct netdev_hw_addr *ha, *tmp;
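
A driver with explicit address add/remove hooks would typically call the new helpers from its ndo_set_rx_mode and ndo_stop paths. A hedged sketch of that driver-side use, with hypothetical my_uc_sync/my_uc_unsync callbacks standing in for the device-specific filter programming:

/* Hypothetical driver-side use of __hw_addr_sync_dev()/__hw_addr_unsync_dev();
 * the my_* names are illustrative only. */
#include <linux/netdevice.h>

static int my_uc_sync(struct net_device *dev, const unsigned char *addr)
{
	/* program @addr into the hardware filter; return 0 on success */
	return 0;
}

static int my_uc_unsync(struct net_device *dev, const unsigned char *addr)
{
	/* remove @addr from the hardware filter */
	return 0;
}

static void my_set_rx_mode(struct net_device *dev)
{
	/* diff the current unicast list against what the HW already has;
	 * a real driver would also check the return value for errors */
	__hw_addr_sync_dev(&dev->uc, dev, my_uc_sync, my_uc_unsync);
}

static int my_stop(struct net_device *dev)
{
	/* drop everything we synced while the device was up */
	__hw_addr_unsync_dev(&dev->uc, dev, my_uc_unsync);
	return 0;
}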
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 640ba0e5831c..17cb912793fa 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -557,6 +557,23 @@ err_out:
557 return ret; 557 return ret;
558} 558}
559 559
560static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr,
561 struct ethtool_rxnfc *rx_rings,
562 u32 size)
563{
564 int i;
565
566 if (copy_from_user(indir, useraddr, size * sizeof(indir[0])))
567 return -EFAULT;
568
569 /* Validate ring indices */
570 for (i = 0; i < size; i++)
571 if (indir[i] >= rx_rings->data)
572 return -EINVAL;
573
574 return 0;
575}
576
560static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev, 577static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
561 void __user *useraddr) 578 void __user *useraddr)
562{ 579{
@@ -565,7 +582,7 @@ static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
565 int ret; 582 int ret;
566 583
567 if (!dev->ethtool_ops->get_rxfh_indir_size || 584 if (!dev->ethtool_ops->get_rxfh_indir_size ||
568 !dev->ethtool_ops->get_rxfh_indir) 585 !dev->ethtool_ops->get_rxfh)
569 return -EOPNOTSUPP; 586 return -EOPNOTSUPP;
570 dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev); 587 dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
571 if (dev_size == 0) 588 if (dev_size == 0)
@@ -591,7 +608,7 @@ static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
591 if (!indir) 608 if (!indir)
592 return -ENOMEM; 609 return -ENOMEM;
593 610
594 ret = dev->ethtool_ops->get_rxfh_indir(dev, indir); 611 ret = dev->ethtool_ops->get_rxfh(dev, indir, NULL);
595 if (ret) 612 if (ret)
596 goto out; 613 goto out;
597 614
@@ -613,8 +630,9 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
613 u32 *indir; 630 u32 *indir;
614 const struct ethtool_ops *ops = dev->ethtool_ops; 631 const struct ethtool_ops *ops = dev->ethtool_ops;
615 int ret; 632 int ret;
633 u32 ringidx_offset = offsetof(struct ethtool_rxfh_indir, ring_index[0]);
616 634
617 if (!ops->get_rxfh_indir_size || !ops->set_rxfh_indir || 635 if (!ops->get_rxfh_indir_size || !ops->set_rxfh ||
618 !ops->get_rxnfc) 636 !ops->get_rxnfc)
619 return -EOPNOTSUPP; 637 return -EOPNOTSUPP;
620 638
@@ -643,28 +661,184 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
643 for (i = 0; i < dev_size; i++) 661 for (i = 0; i < dev_size; i++)
644 indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data); 662 indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data);
645 } else { 663 } else {
646 if (copy_from_user(indir, 664 ret = ethtool_copy_validate_indir(indir,
647 useraddr + 665 useraddr + ringidx_offset,
648 offsetof(struct ethtool_rxfh_indir, 666 &rx_rings,
649 ring_index[0]), 667 dev_size);
650 dev_size * sizeof(indir[0]))) { 668 if (ret)
669 goto out;
670 }
671
672 ret = ops->set_rxfh(dev, indir, NULL);
673
674out:
675 kfree(indir);
676 return ret;
677}
678
679static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
680 void __user *useraddr)
681{
682 int ret;
683 const struct ethtool_ops *ops = dev->ethtool_ops;
684 u32 user_indir_size, user_key_size;
685 u32 dev_indir_size = 0, dev_key_size = 0;
686 struct ethtool_rxfh rxfh;
687 u32 total_size;
688 u32 indir_bytes;
689 u32 *indir = NULL;
690 u8 *hkey = NULL;
691 u8 *rss_config;
692
693 if (!(dev->ethtool_ops->get_rxfh_indir_size ||
694 dev->ethtool_ops->get_rxfh_key_size) ||
695 !dev->ethtool_ops->get_rxfh)
696 return -EOPNOTSUPP;
697
698 if (ops->get_rxfh_indir_size)
699 dev_indir_size = ops->get_rxfh_indir_size(dev);
700 if (ops->get_rxfh_key_size)
701 dev_key_size = ops->get_rxfh_key_size(dev);
702
703 if ((dev_key_size + dev_indir_size) == 0)
704 return -EOPNOTSUPP;
705
706 if (copy_from_user(&rxfh, useraddr, sizeof(rxfh)))
707 return -EFAULT;
708 user_indir_size = rxfh.indir_size;
709 user_key_size = rxfh.key_size;
710
711 /* Check that reserved fields are 0 for now */
712 if (rxfh.rss_context || rxfh.rsvd[0] || rxfh.rsvd[1])
713 return -EINVAL;
714
715 rxfh.indir_size = dev_indir_size;
716 rxfh.key_size = dev_key_size;
717 if (copy_to_user(useraddr, &rxfh, sizeof(rxfh)))
718 return -EFAULT;
719
720 /* If the user buffer size is 0, this is just a query for the
 721	 * device table size and key size. Otherwise, if the user size does
 722	 * not match the device table size or key size, it is an error.
723 */
724 if (!user_indir_size && !user_key_size)
725 return 0;
726
727 if ((user_indir_size && (user_indir_size != dev_indir_size)) ||
728 (user_key_size && (user_key_size != dev_key_size)))
729 return -EINVAL;
730
731 indir_bytes = user_indir_size * sizeof(indir[0]);
732 total_size = indir_bytes + user_key_size;
733 rss_config = kzalloc(total_size, GFP_USER);
734 if (!rss_config)
735 return -ENOMEM;
736
737 if (user_indir_size)
738 indir = (u32 *)rss_config;
739
740 if (user_key_size)
741 hkey = rss_config + indir_bytes;
742
743 ret = dev->ethtool_ops->get_rxfh(dev, indir, hkey);
744 if (!ret) {
745 if (copy_to_user(useraddr +
746 offsetof(struct ethtool_rxfh, rss_config[0]),
747 rss_config, total_size))
651 ret = -EFAULT; 748 ret = -EFAULT;
749 }
750
751 kfree(rss_config);
752
753 return ret;
754}
755
756static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
757 void __user *useraddr)
758{
759 int ret;
760 const struct ethtool_ops *ops = dev->ethtool_ops;
761 struct ethtool_rxnfc rx_rings;
762 struct ethtool_rxfh rxfh;
763 u32 dev_indir_size = 0, dev_key_size = 0, i;
764 u32 *indir = NULL, indir_bytes = 0;
765 u8 *hkey = NULL;
766 u8 *rss_config;
767 u32 rss_cfg_offset = offsetof(struct ethtool_rxfh, rss_config[0]);
768
769 if (!(ops->get_rxfh_indir_size || ops->get_rxfh_key_size) ||
770 !ops->get_rxnfc || !ops->set_rxfh)
771 return -EOPNOTSUPP;
772
773 if (ops->get_rxfh_indir_size)
774 dev_indir_size = ops->get_rxfh_indir_size(dev);
775 if (ops->get_rxfh_key_size)
776 dev_key_size = dev->ethtool_ops->get_rxfh_key_size(dev);
777 if ((dev_key_size + dev_indir_size) == 0)
778 return -EOPNOTSUPP;
779
780 if (copy_from_user(&rxfh, useraddr, sizeof(rxfh)))
781 return -EFAULT;
782
783 /* Check that reserved fields are 0 for now */
784 if (rxfh.rss_context || rxfh.rsvd[0] || rxfh.rsvd[1])
785 return -EINVAL;
786
787 /* If either indir or hash key is valid, proceed further.
788 * It is not valid to request that both be unchanged.
789 */
790 if ((rxfh.indir_size &&
791 rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE &&
792 rxfh.indir_size != dev_indir_size) ||
793 (rxfh.key_size && (rxfh.key_size != dev_key_size)) ||
794 (rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE &&
795 rxfh.key_size == 0))
796 return -EINVAL;
797
798 if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE)
799 indir_bytes = dev_indir_size * sizeof(indir[0]);
800
801 rss_config = kzalloc(indir_bytes + rxfh.key_size, GFP_USER);
802 if (!rss_config)
803 return -ENOMEM;
804
805 rx_rings.cmd = ETHTOOL_GRXRINGS;
806 ret = ops->get_rxnfc(dev, &rx_rings, NULL);
807 if (ret)
808 goto out;
809
810 /* rxfh.indir_size == 0 means reset the indir table to default.
811 * rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE means leave it unchanged.
812 */
813 if (rxfh.indir_size &&
814 rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) {
815 indir = (u32 *)rss_config;
816 ret = ethtool_copy_validate_indir(indir,
817 useraddr + rss_cfg_offset,
818 &rx_rings,
819 rxfh.indir_size);
820 if (ret)
652 goto out; 821 goto out;
653 } 822 } else if (rxfh.indir_size == 0) {
823 indir = (u32 *)rss_config;
824 for (i = 0; i < dev_indir_size; i++)
825 indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data);
826 }
654 827
655 /* Validate ring indices */ 828 if (rxfh.key_size) {
656 for (i = 0; i < dev_size; i++) { 829 hkey = rss_config + indir_bytes;
657 if (indir[i] >= rx_rings.data) { 830 if (copy_from_user(hkey,
658 ret = -EINVAL; 831 useraddr + rss_cfg_offset + indir_bytes,
659 goto out; 832 rxfh.key_size)) {
660 } 833 ret = -EFAULT;
834 goto out;
661 } 835 }
662 } 836 }
663 837
664 ret = ops->set_rxfh_indir(dev, indir); 838 ret = ops->set_rxfh(dev, indir, hkey);
665 839
666out: 840out:
667 kfree(indir); 841 kfree(rss_config);
668 return ret; 842 return ret;
669} 843}
670 844
@@ -1491,6 +1665,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1491 case ETHTOOL_GRXCLSRULE: 1665 case ETHTOOL_GRXCLSRULE:
1492 case ETHTOOL_GRXCLSRLALL: 1666 case ETHTOOL_GRXCLSRLALL:
1493 case ETHTOOL_GRXFHINDIR: 1667 case ETHTOOL_GRXFHINDIR:
1668 case ETHTOOL_GRSSH:
1494 case ETHTOOL_GFEATURES: 1669 case ETHTOOL_GFEATURES:
1495 case ETHTOOL_GCHANNELS: 1670 case ETHTOOL_GCHANNELS:
1496 case ETHTOOL_GET_TS_INFO: 1671 case ETHTOOL_GET_TS_INFO:
@@ -1628,6 +1803,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1628 case ETHTOOL_SRXFHINDIR: 1803 case ETHTOOL_SRXFHINDIR:
1629 rc = ethtool_set_rxfh_indir(dev, useraddr); 1804 rc = ethtool_set_rxfh_indir(dev, useraddr);
1630 break; 1805 break;
1806 case ETHTOOL_GRSSH:
1807 rc = ethtool_get_rxfh(dev, useraddr);
1808 break;
1809 case ETHTOOL_SRSSH:
1810 rc = ethtool_set_rxfh(dev, useraddr);
1811 break;
1631 case ETHTOOL_GFEATURES: 1812 case ETHTOOL_GFEATURES:
1632 rc = ethtool_get_features(dev, useraddr); 1813 rc = ethtool_get_features(dev, useraddr);
1633 break; 1814 break;
diff --git a/net/core/filter.c b/net/core/filter.c
index ab3c74e49f07..735fad897496 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -45,6 +45,27 @@
45#include <linux/seccomp.h> 45#include <linux/seccomp.h>
46#include <linux/if_vlan.h> 46#include <linux/if_vlan.h>
47 47
48/* Registers */
49#define BPF_R0 regs[BPF_REG_0]
50#define BPF_R1 regs[BPF_REG_1]
51#define BPF_R2 regs[BPF_REG_2]
52#define BPF_R3 regs[BPF_REG_3]
53#define BPF_R4 regs[BPF_REG_4]
54#define BPF_R5 regs[BPF_REG_5]
55#define BPF_R6 regs[BPF_REG_6]
56#define BPF_R7 regs[BPF_REG_7]
57#define BPF_R8 regs[BPF_REG_8]
58#define BPF_R9 regs[BPF_REG_9]
59#define BPF_R10 regs[BPF_REG_10]
60
61/* Named registers */
62#define DST regs[insn->dst_reg]
63#define SRC regs[insn->src_reg]
64#define FP regs[BPF_REG_FP]
65#define ARG1 regs[BPF_REG_ARG1]
66#define CTX regs[BPF_REG_CTX]
67#define IMM insn->imm
68
48/* No hurry in this branch 69/* No hurry in this branch
49 * 70 *
50 * Exported for the bpf jit load helper. 71 * Exported for the bpf jit load helper.
@@ -57,9 +78,9 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
57 ptr = skb_network_header(skb) + k - SKF_NET_OFF; 78 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
58 else if (k >= SKF_LL_OFF) 79 else if (k >= SKF_LL_OFF)
59 ptr = skb_mac_header(skb) + k - SKF_LL_OFF; 80 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
60
61 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb)) 81 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
62 return ptr; 82 return ptr;
83
63 return NULL; 84 return NULL;
64} 85}
65 86
@@ -68,6 +89,7 @@ static inline void *load_pointer(const struct sk_buff *skb, int k,
68{ 89{
69 if (k >= 0) 90 if (k >= 0)
70 return skb_header_pointer(skb, k, size, buffer); 91 return skb_header_pointer(skb, k, size, buffer);
92
71 return bpf_internal_load_pointer_neg_helper(skb, k, size); 93 return bpf_internal_load_pointer_neg_helper(skb, k, size);
72} 94}
73 95
@@ -122,13 +144,6 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
122 return 0; 144 return 0;
123} 145}
124 146
125/* Register mappings for user programs. */
126#define A_REG 0
127#define X_REG 7
128#define TMP_REG 8
129#define ARG2_REG 2
130#define ARG3_REG 3
131
132/** 147/**
133 * __sk_run_filter - run a filter on a given context 148 * __sk_run_filter - run a filter on a given context
134 * @ctx: buffer to run the filter on 149 * @ctx: buffer to run the filter on
@@ -138,442 +153,433 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
138 * keep, 0 for none. @ctx is the data we are operating on, @insn is the 153 * keep, 0 for none. @ctx is the data we are operating on, @insn is the
139 * array of filter instructions. 154 * array of filter instructions.
140 */ 155 */
141unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn) 156static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
142{ 157{
143 u64 stack[MAX_BPF_STACK / sizeof(u64)]; 158 u64 stack[MAX_BPF_STACK / sizeof(u64)];
144 u64 regs[MAX_BPF_REG], tmp; 159 u64 regs[MAX_BPF_REG], tmp;
145 void *ptr;
146 int off;
147
148#define K insn->imm
149#define A regs[insn->a_reg]
150#define X regs[insn->x_reg]
151#define R0 regs[0]
152
153#define CONT ({insn++; goto select_insn; })
154#define CONT_JMP ({insn++; goto select_insn; })
155
156 static const void *jumptable[256] = { 160 static const void *jumptable[256] = {
157 [0 ... 255] = &&default_label, 161 [0 ... 255] = &&default_label,
158 /* Now overwrite non-defaults ... */ 162 /* Now overwrite non-defaults ... */
159#define DL(A, B, C) [A|B|C] = &&A##_##B##_##C 163 /* 32 bit ALU operations */
160 DL(BPF_ALU, BPF_ADD, BPF_X), 164 [BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
161 DL(BPF_ALU, BPF_ADD, BPF_K), 165 [BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
162 DL(BPF_ALU, BPF_SUB, BPF_X), 166 [BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
163 DL(BPF_ALU, BPF_SUB, BPF_K), 167 [BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
164 DL(BPF_ALU, BPF_AND, BPF_X), 168 [BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
165 DL(BPF_ALU, BPF_AND, BPF_K), 169 [BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
166 DL(BPF_ALU, BPF_OR, BPF_X), 170 [BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
167 DL(BPF_ALU, BPF_OR, BPF_K), 171 [BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
168 DL(BPF_ALU, BPF_LSH, BPF_X), 172 [BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
169 DL(BPF_ALU, BPF_LSH, BPF_K), 173 [BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
170 DL(BPF_ALU, BPF_RSH, BPF_X), 174 [BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
171 DL(BPF_ALU, BPF_RSH, BPF_K), 175 [BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
172 DL(BPF_ALU, BPF_XOR, BPF_X), 176 [BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
173 DL(BPF_ALU, BPF_XOR, BPF_K), 177 [BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
174 DL(BPF_ALU, BPF_MUL, BPF_X), 178 [BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
175 DL(BPF_ALU, BPF_MUL, BPF_K), 179 [BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
176 DL(BPF_ALU, BPF_MOV, BPF_X), 180 [BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
177 DL(BPF_ALU, BPF_MOV, BPF_K), 181 [BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
178 DL(BPF_ALU, BPF_DIV, BPF_X), 182 [BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
179 DL(BPF_ALU, BPF_DIV, BPF_K), 183 [BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
180 DL(BPF_ALU, BPF_MOD, BPF_X), 184 [BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
181 DL(BPF_ALU, BPF_MOD, BPF_K), 185 [BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
182 DL(BPF_ALU, BPF_NEG, 0), 186 [BPF_ALU | BPF_NEG] = &&ALU_NEG,
183 DL(BPF_ALU, BPF_END, BPF_TO_BE), 187 [BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
184 DL(BPF_ALU, BPF_END, BPF_TO_LE), 188 [BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
185 DL(BPF_ALU64, BPF_ADD, BPF_X), 189 /* 64 bit ALU operations */
186 DL(BPF_ALU64, BPF_ADD, BPF_K), 190 [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
187 DL(BPF_ALU64, BPF_SUB, BPF_X), 191 [BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
188 DL(BPF_ALU64, BPF_SUB, BPF_K), 192 [BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
189 DL(BPF_ALU64, BPF_AND, BPF_X), 193 [BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
190 DL(BPF_ALU64, BPF_AND, BPF_K), 194 [BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
191 DL(BPF_ALU64, BPF_OR, BPF_X), 195 [BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
192 DL(BPF_ALU64, BPF_OR, BPF_K), 196 [BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
193 DL(BPF_ALU64, BPF_LSH, BPF_X), 197 [BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
194 DL(BPF_ALU64, BPF_LSH, BPF_K), 198 [BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
195 DL(BPF_ALU64, BPF_RSH, BPF_X), 199 [BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
196 DL(BPF_ALU64, BPF_RSH, BPF_K), 200 [BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
197 DL(BPF_ALU64, BPF_XOR, BPF_X), 201 [BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
198 DL(BPF_ALU64, BPF_XOR, BPF_K), 202 [BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
199 DL(BPF_ALU64, BPF_MUL, BPF_X), 203 [BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
200 DL(BPF_ALU64, BPF_MUL, BPF_K), 204 [BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
201 DL(BPF_ALU64, BPF_MOV, BPF_X), 205 [BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
202 DL(BPF_ALU64, BPF_MOV, BPF_K), 206 [BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
203 DL(BPF_ALU64, BPF_ARSH, BPF_X), 207 [BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
204 DL(BPF_ALU64, BPF_ARSH, BPF_K), 208 [BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
205 DL(BPF_ALU64, BPF_DIV, BPF_X), 209 [BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
206 DL(BPF_ALU64, BPF_DIV, BPF_K), 210 [BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
207 DL(BPF_ALU64, BPF_MOD, BPF_X), 211 [BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
208 DL(BPF_ALU64, BPF_MOD, BPF_K), 212 [BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
209 DL(BPF_ALU64, BPF_NEG, 0), 213 [BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
210 DL(BPF_JMP, BPF_CALL, 0), 214 [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
211 DL(BPF_JMP, BPF_JA, 0), 215 /* Call instruction */
212 DL(BPF_JMP, BPF_JEQ, BPF_X), 216 [BPF_JMP | BPF_CALL] = &&JMP_CALL,
213 DL(BPF_JMP, BPF_JEQ, BPF_K), 217 /* Jumps */
214 DL(BPF_JMP, BPF_JNE, BPF_X), 218 [BPF_JMP | BPF_JA] = &&JMP_JA,
215 DL(BPF_JMP, BPF_JNE, BPF_K), 219 [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
216 DL(BPF_JMP, BPF_JGT, BPF_X), 220 [BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
217 DL(BPF_JMP, BPF_JGT, BPF_K), 221 [BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
218 DL(BPF_JMP, BPF_JGE, BPF_X), 222 [BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
219 DL(BPF_JMP, BPF_JGE, BPF_K), 223 [BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
220 DL(BPF_JMP, BPF_JSGT, BPF_X), 224 [BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
221 DL(BPF_JMP, BPF_JSGT, BPF_K), 225 [BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
222 DL(BPF_JMP, BPF_JSGE, BPF_X), 226 [BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
223 DL(BPF_JMP, BPF_JSGE, BPF_K), 227 [BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
224 DL(BPF_JMP, BPF_JSET, BPF_X), 228 [BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
225 DL(BPF_JMP, BPF_JSET, BPF_K), 229 [BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
226 DL(BPF_JMP, BPF_EXIT, 0), 230 [BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
227 DL(BPF_STX, BPF_MEM, BPF_B), 231 [BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
228 DL(BPF_STX, BPF_MEM, BPF_H), 232 [BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
229 DL(BPF_STX, BPF_MEM, BPF_W), 233 /* Program return */
230 DL(BPF_STX, BPF_MEM, BPF_DW), 234 [BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
231 DL(BPF_STX, BPF_XADD, BPF_W), 235 /* Store instructions */
232 DL(BPF_STX, BPF_XADD, BPF_DW), 236 [BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
233 DL(BPF_ST, BPF_MEM, BPF_B), 237 [BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
234 DL(BPF_ST, BPF_MEM, BPF_H), 238 [BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
235 DL(BPF_ST, BPF_MEM, BPF_W), 239 [BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
236 DL(BPF_ST, BPF_MEM, BPF_DW), 240 [BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
237 DL(BPF_LDX, BPF_MEM, BPF_B), 241 [BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
238 DL(BPF_LDX, BPF_MEM, BPF_H), 242 [BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
239 DL(BPF_LDX, BPF_MEM, BPF_W), 243 [BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
240 DL(BPF_LDX, BPF_MEM, BPF_DW), 244 [BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
241 DL(BPF_LD, BPF_ABS, BPF_W), 245 [BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
242 DL(BPF_LD, BPF_ABS, BPF_H), 246 /* Load instructions */
243 DL(BPF_LD, BPF_ABS, BPF_B), 247 [BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
244 DL(BPF_LD, BPF_IND, BPF_W), 248 [BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
245 DL(BPF_LD, BPF_IND, BPF_H), 249 [BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
246 DL(BPF_LD, BPF_IND, BPF_B), 250 [BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
247#undef DL 251 [BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
252 [BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
253 [BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
254 [BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
255 [BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
256 [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
248 }; 257 };
258 void *ptr;
259 int off;
260
261#define CONT ({ insn++; goto select_insn; })
262#define CONT_JMP ({ insn++; goto select_insn; })
249 263
250 regs[FP_REG] = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; 264 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
251 regs[ARG1_REG] = (u64) (unsigned long) ctx; 265 ARG1 = (u64) (unsigned long) ctx;
252 regs[A_REG] = 0; 266
253 regs[X_REG] = 0; 267 /* Registers used in classic BPF programs need to be reset first. */
268 regs[BPF_REG_A] = 0;
269 regs[BPF_REG_X] = 0;
254 270
255select_insn: 271select_insn:
256 goto *jumptable[insn->code]; 272 goto *jumptable[insn->code];
257 273
258 /* ALU */ 274 /* ALU */
259#define ALU(OPCODE, OP) \ 275#define ALU(OPCODE, OP) \
260 BPF_ALU64_##OPCODE##_BPF_X: \ 276 ALU64_##OPCODE##_X: \
261 A = A OP X; \ 277 DST = DST OP SRC; \
262 CONT; \ 278 CONT; \
263 BPF_ALU_##OPCODE##_BPF_X: \ 279 ALU_##OPCODE##_X: \
264 A = (u32) A OP (u32) X; \ 280 DST = (u32) DST OP (u32) SRC; \
265 CONT; \ 281 CONT; \
266 BPF_ALU64_##OPCODE##_BPF_K: \ 282 ALU64_##OPCODE##_K: \
267 A = A OP K; \ 283 DST = DST OP IMM; \
268 CONT; \ 284 CONT; \
269 BPF_ALU_##OPCODE##_BPF_K: \ 285 ALU_##OPCODE##_K: \
270 A = (u32) A OP (u32) K; \ 286 DST = (u32) DST OP (u32) IMM; \
271 CONT; 287 CONT;
272 288
273 ALU(BPF_ADD, +) 289 ALU(ADD, +)
274 ALU(BPF_SUB, -) 290 ALU(SUB, -)
275 ALU(BPF_AND, &) 291 ALU(AND, &)
276 ALU(BPF_OR, |) 292 ALU(OR, |)
277 ALU(BPF_LSH, <<) 293 ALU(LSH, <<)
278 ALU(BPF_RSH, >>) 294 ALU(RSH, >>)
279 ALU(BPF_XOR, ^) 295 ALU(XOR, ^)
280 ALU(BPF_MUL, *) 296 ALU(MUL, *)
281#undef ALU 297#undef ALU
282 BPF_ALU_BPF_NEG_0: 298 ALU_NEG:
283 A = (u32) -A; 299 DST = (u32) -DST;
284 CONT; 300 CONT;
285 BPF_ALU64_BPF_NEG_0: 301 ALU64_NEG:
286 A = -A; 302 DST = -DST;
287 CONT; 303 CONT;
288 BPF_ALU_BPF_MOV_BPF_X: 304 ALU_MOV_X:
289 A = (u32) X; 305 DST = (u32) SRC;
290 CONT; 306 CONT;
291 BPF_ALU_BPF_MOV_BPF_K: 307 ALU_MOV_K:
292 A = (u32) K; 308 DST = (u32) IMM;
293 CONT; 309 CONT;
294 BPF_ALU64_BPF_MOV_BPF_X: 310 ALU64_MOV_X:
295 A = X; 311 DST = SRC;
296 CONT; 312 CONT;
297 BPF_ALU64_BPF_MOV_BPF_K: 313 ALU64_MOV_K:
298 A = K; 314 DST = IMM;
299 CONT; 315 CONT;
300 BPF_ALU64_BPF_ARSH_BPF_X: 316 ALU64_ARSH_X:
301 (*(s64 *) &A) >>= X; 317 (*(s64 *) &DST) >>= SRC;
302 CONT; 318 CONT;
303 BPF_ALU64_BPF_ARSH_BPF_K: 319 ALU64_ARSH_K:
304 (*(s64 *) &A) >>= K; 320 (*(s64 *) &DST) >>= IMM;
305 CONT; 321 CONT;
306 BPF_ALU64_BPF_MOD_BPF_X: 322 ALU64_MOD_X:
307 if (unlikely(X == 0)) 323 if (unlikely(SRC == 0))
308 return 0; 324 return 0;
309 tmp = A; 325 tmp = DST;
310 A = do_div(tmp, X); 326 DST = do_div(tmp, SRC);
311 CONT; 327 CONT;
312 BPF_ALU_BPF_MOD_BPF_X: 328 ALU_MOD_X:
313 if (unlikely(X == 0)) 329 if (unlikely(SRC == 0))
314 return 0; 330 return 0;
315 tmp = (u32) A; 331 tmp = (u32) DST;
316 A = do_div(tmp, (u32) X); 332 DST = do_div(tmp, (u32) SRC);
317 CONT; 333 CONT;
318 BPF_ALU64_BPF_MOD_BPF_K: 334 ALU64_MOD_K:
319 tmp = A; 335 tmp = DST;
320 A = do_div(tmp, K); 336 DST = do_div(tmp, IMM);
321 CONT; 337 CONT;
322 BPF_ALU_BPF_MOD_BPF_K: 338 ALU_MOD_K:
323 tmp = (u32) A; 339 tmp = (u32) DST;
324 A = do_div(tmp, (u32) K); 340 DST = do_div(tmp, (u32) IMM);
325 CONT; 341 CONT;
326 BPF_ALU64_BPF_DIV_BPF_X: 342 ALU64_DIV_X:
327 if (unlikely(X == 0)) 343 if (unlikely(SRC == 0))
328 return 0; 344 return 0;
329 do_div(A, X); 345 do_div(DST, SRC);
330 CONT; 346 CONT;
331 BPF_ALU_BPF_DIV_BPF_X: 347 ALU_DIV_X:
332 if (unlikely(X == 0)) 348 if (unlikely(SRC == 0))
333 return 0; 349 return 0;
334 tmp = (u32) A; 350 tmp = (u32) DST;
335 do_div(tmp, (u32) X); 351 do_div(tmp, (u32) SRC);
336 A = (u32) tmp; 352 DST = (u32) tmp;
337 CONT; 353 CONT;
338 BPF_ALU64_BPF_DIV_BPF_K: 354 ALU64_DIV_K:
339 do_div(A, K); 355 do_div(DST, IMM);
340 CONT; 356 CONT;
341 BPF_ALU_BPF_DIV_BPF_K: 357 ALU_DIV_K:
342 tmp = (u32) A; 358 tmp = (u32) DST;
343 do_div(tmp, (u32) K); 359 do_div(tmp, (u32) IMM);
344 A = (u32) tmp; 360 DST = (u32) tmp;
345 CONT; 361 CONT;
346 BPF_ALU_BPF_END_BPF_TO_BE: 362 ALU_END_TO_BE:
347 switch (K) { 363 switch (IMM) {
348 case 16: 364 case 16:
349 A = (__force u16) cpu_to_be16(A); 365 DST = (__force u16) cpu_to_be16(DST);
350 break; 366 break;
351 case 32: 367 case 32:
352 A = (__force u32) cpu_to_be32(A); 368 DST = (__force u32) cpu_to_be32(DST);
353 break; 369 break;
354 case 64: 370 case 64:
355 A = (__force u64) cpu_to_be64(A); 371 DST = (__force u64) cpu_to_be64(DST);
356 break; 372 break;
357 } 373 }
358 CONT; 374 CONT;
359 BPF_ALU_BPF_END_BPF_TO_LE: 375 ALU_END_TO_LE:
360 switch (K) { 376 switch (IMM) {
361 case 16: 377 case 16:
362 A = (__force u16) cpu_to_le16(A); 378 DST = (__force u16) cpu_to_le16(DST);
363 break; 379 break;
364 case 32: 380 case 32:
365 A = (__force u32) cpu_to_le32(A); 381 DST = (__force u32) cpu_to_le32(DST);
366 break; 382 break;
367 case 64: 383 case 64:
368 A = (__force u64) cpu_to_le64(A); 384 DST = (__force u64) cpu_to_le64(DST);
369 break; 385 break;
370 } 386 }
371 CONT; 387 CONT;
372 388
373 /* CALL */ 389 /* CALL */
374 BPF_JMP_BPF_CALL_0: 390 JMP_CALL:
375 /* Function call scratches R1-R5 registers, preserves R6-R9, 391 /* Function call scratches BPF_R1-BPF_R5 registers,
376 * and stores return value into R0. 392 * preserves BPF_R6-BPF_R9, and stores return value
393 * into BPF_R0.
377 */ 394 */
378 R0 = (__bpf_call_base + insn->imm)(regs[1], regs[2], regs[3], 395 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
379 regs[4], regs[5]); 396 BPF_R4, BPF_R5);
380 CONT; 397 CONT;
381 398
382 /* JMP */ 399 /* JMP */
383 BPF_JMP_BPF_JA_0: 400 JMP_JA:
384 insn += insn->off; 401 insn += insn->off;
385 CONT; 402 CONT;
386 BPF_JMP_BPF_JEQ_BPF_X: 403 JMP_JEQ_X:
387 if (A == X) { 404 if (DST == SRC) {
388 insn += insn->off; 405 insn += insn->off;
389 CONT_JMP; 406 CONT_JMP;
390 } 407 }
391 CONT; 408 CONT;
392 BPF_JMP_BPF_JEQ_BPF_K: 409 JMP_JEQ_K:
393 if (A == K) { 410 if (DST == IMM) {
394 insn += insn->off; 411 insn += insn->off;
395 CONT_JMP; 412 CONT_JMP;
396 } 413 }
397 CONT; 414 CONT;
398 BPF_JMP_BPF_JNE_BPF_X: 415 JMP_JNE_X:
399 if (A != X) { 416 if (DST != SRC) {
400 insn += insn->off; 417 insn += insn->off;
401 CONT_JMP; 418 CONT_JMP;
402 } 419 }
403 CONT; 420 CONT;
404 BPF_JMP_BPF_JNE_BPF_K: 421 JMP_JNE_K:
405 if (A != K) { 422 if (DST != IMM) {
406 insn += insn->off; 423 insn += insn->off;
407 CONT_JMP; 424 CONT_JMP;
408 } 425 }
409 CONT; 426 CONT;
410 BPF_JMP_BPF_JGT_BPF_X: 427 JMP_JGT_X:
411 if (A > X) { 428 if (DST > SRC) {
412 insn += insn->off; 429 insn += insn->off;
413 CONT_JMP; 430 CONT_JMP;
414 } 431 }
415 CONT; 432 CONT;
416 BPF_JMP_BPF_JGT_BPF_K: 433 JMP_JGT_K:
417 if (A > K) { 434 if (DST > IMM) {
418 insn += insn->off; 435 insn += insn->off;
419 CONT_JMP; 436 CONT_JMP;
420 } 437 }
421 CONT; 438 CONT;
422 BPF_JMP_BPF_JGE_BPF_X: 439 JMP_JGE_X:
423 if (A >= X) { 440 if (DST >= SRC) {
424 insn += insn->off; 441 insn += insn->off;
425 CONT_JMP; 442 CONT_JMP;
426 } 443 }
427 CONT; 444 CONT;
428 BPF_JMP_BPF_JGE_BPF_K: 445 JMP_JGE_K:
429 if (A >= K) { 446 if (DST >= IMM) {
430 insn += insn->off; 447 insn += insn->off;
431 CONT_JMP; 448 CONT_JMP;
432 } 449 }
433 CONT; 450 CONT;
434 BPF_JMP_BPF_JSGT_BPF_X: 451 JMP_JSGT_X:
435 if (((s64)A) > ((s64)X)) { 452 if (((s64) DST) > ((s64) SRC)) {
436 insn += insn->off; 453 insn += insn->off;
437 CONT_JMP; 454 CONT_JMP;
438 } 455 }
439 CONT; 456 CONT;
440 BPF_JMP_BPF_JSGT_BPF_K: 457 JMP_JSGT_K:
441 if (((s64)A) > ((s64)K)) { 458 if (((s64) DST) > ((s64) IMM)) {
442 insn += insn->off; 459 insn += insn->off;
443 CONT_JMP; 460 CONT_JMP;
444 } 461 }
445 CONT; 462 CONT;
446 BPF_JMP_BPF_JSGE_BPF_X: 463 JMP_JSGE_X:
447 if (((s64)A) >= ((s64)X)) { 464 if (((s64) DST) >= ((s64) SRC)) {
448 insn += insn->off; 465 insn += insn->off;
449 CONT_JMP; 466 CONT_JMP;
450 } 467 }
451 CONT; 468 CONT;
452 BPF_JMP_BPF_JSGE_BPF_K: 469 JMP_JSGE_K:
453 if (((s64)A) >= ((s64)K)) { 470 if (((s64) DST) >= ((s64) IMM)) {
454 insn += insn->off; 471 insn += insn->off;
455 CONT_JMP; 472 CONT_JMP;
456 } 473 }
457 CONT; 474 CONT;
458 BPF_JMP_BPF_JSET_BPF_X: 475 JMP_JSET_X:
459 if (A & X) { 476 if (DST & SRC) {
460 insn += insn->off; 477 insn += insn->off;
461 CONT_JMP; 478 CONT_JMP;
462 } 479 }
463 CONT; 480 CONT;
464 BPF_JMP_BPF_JSET_BPF_K: 481 JMP_JSET_K:
465 if (A & K) { 482 if (DST & IMM) {
466 insn += insn->off; 483 insn += insn->off;
467 CONT_JMP; 484 CONT_JMP;
468 } 485 }
469 CONT; 486 CONT;
470 BPF_JMP_BPF_EXIT_0: 487 JMP_EXIT:
471 return R0; 488 return BPF_R0;
472 489
473 /* STX and ST and LDX */ 490 /* STX and ST and LDX */
474#define LDST(SIZEOP, SIZE) \ 491#define LDST(SIZEOP, SIZE) \
475 BPF_STX_BPF_MEM_##SIZEOP: \ 492 STX_MEM_##SIZEOP: \
476 *(SIZE *)(unsigned long) (A + insn->off) = X; \ 493 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
477 CONT; \ 494 CONT; \
478 BPF_ST_BPF_MEM_##SIZEOP: \ 495 ST_MEM_##SIZEOP: \
479 *(SIZE *)(unsigned long) (A + insn->off) = K; \ 496 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
480 CONT; \ 497 CONT; \
481 BPF_LDX_BPF_MEM_##SIZEOP: \ 498 LDX_MEM_##SIZEOP: \
482 A = *(SIZE *)(unsigned long) (X + insn->off); \ 499 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
483 CONT; 500 CONT;
484 501
485 LDST(BPF_B, u8) 502 LDST(B, u8)
486 LDST(BPF_H, u16) 503 LDST(H, u16)
487 LDST(BPF_W, u32) 504 LDST(W, u32)
488 LDST(BPF_DW, u64) 505 LDST(DW, u64)
489#undef LDST 506#undef LDST
490 BPF_STX_BPF_XADD_BPF_W: /* lock xadd *(u32 *)(A + insn->off) += X */ 507 STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
491 atomic_add((u32) X, (atomic_t *)(unsigned long) 508 atomic_add((u32) SRC, (atomic_t *)(unsigned long)
492 (A + insn->off)); 509 (DST + insn->off));
493 CONT; 510 CONT;
494 BPF_STX_BPF_XADD_BPF_DW: /* lock xadd *(u64 *)(A + insn->off) += X */ 511 STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
495 atomic64_add((u64) X, (atomic64_t *)(unsigned long) 512 atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
496 (A + insn->off)); 513 (DST + insn->off));
497 CONT; 514 CONT;
498 BPF_LD_BPF_ABS_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + K)) */ 515 LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
499 off = K; 516 off = IMM;
500load_word: 517load_word:
501 /* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns appear 518 /* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns
502 * only in programs where ctx == skb. All programs 519 * appear only in programs where ctx ==
503 * keep 'ctx' in regs[CTX_REG] == R6, sk_convert_filter() 520 * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
504 * saves it in R6, internal BPF verifier will check that 521 * == BPF_R6, sk_convert_filter() saves it in BPF_R6,
505 * R6 == ctx. 522 * internal BPF verifier will check that BPF_R6 ==
523 * ctx.
506 * 524 *
507 * BPF_ABS and BPF_IND are wrappers of function calls, so 525 * BPF_ABS and BPF_IND are wrappers of function calls,
508 * they scratch R1-R5 registers, preserve R6-R9, and store 526 * so they scratch BPF_R1-BPF_R5 registers, preserve
509 * return value into R0. 527 * BPF_R6-BPF_R9, and store return value into BPF_R0.
510 * 528 *
511 * Implicit input: 529 * Implicit input:
512 * ctx 530 * ctx == skb == BPF_R6 == CTX
513 * 531 *
514 * Explicit input: 532 * Explicit input:
515 * X == any register 533 * SRC == any register
516 * K == 32-bit immediate 534 * IMM == 32-bit immediate
517 * 535 *
518 * Output: 536 * Output:
519 * R0 - 8/16/32-bit skb data converted to cpu endianness 537 * BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
520 */ 538 */
521 ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp); 539
540 ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
522 if (likely(ptr != NULL)) { 541 if (likely(ptr != NULL)) {
523 R0 = get_unaligned_be32(ptr); 542 BPF_R0 = get_unaligned_be32(ptr);
524 CONT; 543 CONT;
525 } 544 }
545
526 return 0; 546 return 0;
527 BPF_LD_BPF_ABS_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + K)) */ 547 LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
528 off = K; 548 off = IMM;
529load_half: 549load_half:
530 ptr = load_pointer((struct sk_buff *) ctx, off, 2, &tmp); 550 ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
531 if (likely(ptr != NULL)) { 551 if (likely(ptr != NULL)) {
532 R0 = get_unaligned_be16(ptr); 552 BPF_R0 = get_unaligned_be16(ptr);
533 CONT; 553 CONT;
534 } 554 }
555
535 return 0; 556 return 0;
536 BPF_LD_BPF_ABS_BPF_B: /* R0 = *(u8 *) (ctx + K) */ 557 LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
537 off = K; 558 off = IMM;
538load_byte: 559load_byte:
539 ptr = load_pointer((struct sk_buff *) ctx, off, 1, &tmp); 560 ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
540 if (likely(ptr != NULL)) { 561 if (likely(ptr != NULL)) {
541 R0 = *(u8 *)ptr; 562 BPF_R0 = *(u8 *)ptr;
542 CONT; 563 CONT;
543 } 564 }
565
544 return 0; 566 return 0;
545 BPF_LD_BPF_IND_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + X + K)) */ 567 LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
546 off = K + X; 568 off = IMM + SRC;
547 goto load_word; 569 goto load_word;
548 BPF_LD_BPF_IND_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + X + K)) */ 570 LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
549 off = K + X; 571 off = IMM + SRC;
550 goto load_half; 572 goto load_half;
551 BPF_LD_BPF_IND_BPF_B: /* R0 = *(u8 *) (skb->data + X + K) */ 573 LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
552 off = K + X; 574 off = IMM + SRC;
553 goto load_byte; 575 goto load_byte;
554 576
555 default_label: 577 default_label:
556 /* If we ever reach this, we have a bug somewhere. */ 578 /* If we ever reach this, we have a bug somewhere. */
557 WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code); 579 WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
558 return 0; 580 return 0;
559#undef CONT_JMP
560#undef CONT
561
562#undef R0
563#undef X
564#undef A
565#undef K
566} 581}
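The dispatch above leans on the GCC labels-as-values extension: the jumptable stores each case label's address and every CONT re-enters through an indirect goto instead of a central switch. A self-contained sketch of the same pattern, with invented opcodes:

#include <stdio.h>

enum { OP_HALT, OP_INC, OP_DBL, OP_MAX };

static long run(const unsigned char *insn)
{
	/* Address-of-label (&&) in a static initializer, as in
	 * __sk_run_filter's jumptable above.
	 */
	static const void *jumptable[OP_MAX] = {
		[OP_HALT] = &&do_halt,
		[OP_INC]  = &&do_inc,
		[OP_DBL]  = &&do_dbl,
	};
	long acc = 0;

#define CONT ({ insn++; goto select_insn; })
select_insn:
	goto *jumptable[*insn];
do_inc:
	acc += 1;
	CONT;
do_dbl:
	acc *= 2;
	CONT;
do_halt:
	return acc;
#undef CONT
}

int main(void)
{
	static const unsigned char prog[] = { OP_INC, OP_INC, OP_DBL, OP_HALT };

	printf("%ld\n", run(prog));	/* (1 + 1) * 2 == 4 */
	return 0;
}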
567 582
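Concretely, the classic EtherType probe maps onto LD_ABS_H above:

	BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12)	/* classic: A = ntohs(ether_type) */

arrives with imm32 == 12, so load_half fetches the halfword at skb->data + 12 via load_pointer(), byte-swaps it with get_unaligned_be16() and leaves it in BPF_R0 (the classic A); a NULL pointer from a too-short packet ends the program with return 0.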
568u32 sk_run_filter_int_seccomp(const struct seccomp_data *ctx,
569 const struct sock_filter_int *insni)
570 __attribute__ ((alias ("__sk_run_filter")));
571
572u32 sk_run_filter_int_skb(const struct sk_buff *ctx,
573 const struct sock_filter_int *insni)
574 __attribute__ ((alias ("__sk_run_filter")));
575EXPORT_SYMBOL_GPL(sk_run_filter_int_skb);
576
577/* Helper to find the offset of pkt_type in sk_buff structure. We want 583/* Helper to find the offset of pkt_type in sk_buff structure. We want
578 * to make sure it's still a 3-bit field starting at a byte boundary; 584 * to make sure it's still a 3-bit field starting at a byte boundary;
579 * taken from arch/x86/net/bpf_jit_comp.c. 585 * taken from arch/x86/net/bpf_jit_comp.c.
@@ -598,16 +604,14 @@ static unsigned int pkt_type_offset(void)
598 return -1; 604 return -1;
599} 605}
600 606
601static u64 __skb_get_pay_offset(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) 607static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
602{ 608{
603 struct sk_buff *skb = (struct sk_buff *)(long) ctx; 609 return __skb_get_poff((struct sk_buff *)(unsigned long) ctx);
604
605 return __skb_get_poff(skb);
606} 610}
607 611
608static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) 612static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
609{ 613{
610 struct sk_buff *skb = (struct sk_buff *)(long) ctx; 614 struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
611 struct nlattr *nla; 615 struct nlattr *nla;
612 616
613 if (skb_is_nonlinear(skb)) 617 if (skb_is_nonlinear(skb))
@@ -616,19 +620,19 @@ static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
616 if (skb->len < sizeof(struct nlattr)) 620 if (skb->len < sizeof(struct nlattr))
617 return 0; 621 return 0;
618 622
619 if (A > skb->len - sizeof(struct nlattr)) 623 if (a > skb->len - sizeof(struct nlattr))
620 return 0; 624 return 0;
621 625
622 nla = nla_find((struct nlattr *) &skb->data[A], skb->len - A, X); 626 nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
623 if (nla) 627 if (nla)
624 return (void *) nla - (void *) skb->data; 628 return (void *) nla - (void *) skb->data;
625 629
626 return 0; 630 return 0;
627} 631}
628 632
629static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) 633static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
630{ 634{
631 struct sk_buff *skb = (struct sk_buff *)(long) ctx; 635 struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
632 struct nlattr *nla; 636 struct nlattr *nla;
633 637
634 if (skb_is_nonlinear(skb)) 638 if (skb_is_nonlinear(skb))
@@ -637,25 +641,31 @@ static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
637 if (skb->len < sizeof(struct nlattr)) 641 if (skb->len < sizeof(struct nlattr))
638 return 0; 642 return 0;
639 643
640 if (A > skb->len - sizeof(struct nlattr)) 644 if (a > skb->len - sizeof(struct nlattr))
641 return 0; 645 return 0;
642 646
643 nla = (struct nlattr *) &skb->data[A]; 647 nla = (struct nlattr *) &skb->data[a];
644 if (nla->nla_len > skb->len - A) 648 if (nla->nla_len > skb->len - a)
645 return 0; 649 return 0;
646 650
647 nla = nla_find_nested(nla, X); 651 nla = nla_find_nested(nla, x);
648 if (nla) 652 if (nla)
649 return (void *) nla - (void *) skb->data; 653 return (void *) nla - (void *) skb->data;
650 654
651 return 0; 655 return 0;
652} 656}
653 657
654static u64 __get_raw_cpu_id(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) 658static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
655{ 659{
656 return raw_smp_processor_id(); 660 return raw_smp_processor_id();
657} 661}
658 662
663/* Note that this only generates 32-bit random numbers */
664static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
665{
666 return prandom_u32();
667}
668
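All four helpers share the five-u64 signature even when arguments go unused; that is what lets JMP_CALL above invoke any of them blindly through the relative offset in insn->imm. Schematically, matching the emit and call sites in this patch:

	/* emit side (convert_bpf_extensions() below): */
	insn->code = BPF_JMP | BPF_CALL;
	insn->imm  = __get_random_u32 - __bpf_call_base;

	/* run time (JMP_CALL in __sk_run_filter() above): */
	BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
					       BPF_R4, BPF_R5);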
659static bool convert_bpf_extensions(struct sock_filter *fp, 669static bool convert_bpf_extensions(struct sock_filter *fp,
660 struct sock_filter_int **insnp) 670 struct sock_filter_int **insnp)
661{ 671{
@@ -665,126 +675,83 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
665 case SKF_AD_OFF + SKF_AD_PROTOCOL: 675 case SKF_AD_OFF + SKF_AD_PROTOCOL:
666 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); 676 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
667 677
668 insn->code = BPF_LDX | BPF_MEM | BPF_H; 678 /* A = *(u16 *) (CTX + offsetof(protocol)) */
669 insn->a_reg = A_REG; 679 *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
670 insn->x_reg = CTX_REG; 680 offsetof(struct sk_buff, protocol));
671 insn->off = offsetof(struct sk_buff, protocol);
672 insn++;
673
674 /* A = ntohs(A) [emitting a nop or swap16] */ 681 /* A = ntohs(A) [emitting a nop or swap16] */
675 insn->code = BPF_ALU | BPF_END | BPF_FROM_BE; 682 *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
676 insn->a_reg = A_REG;
677 insn->imm = 16;
678 break; 683 break;
679 684
680 case SKF_AD_OFF + SKF_AD_PKTTYPE: 685 case SKF_AD_OFF + SKF_AD_PKTTYPE:
681 insn->code = BPF_LDX | BPF_MEM | BPF_B; 686 *insn = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX,
682 insn->a_reg = A_REG; 687 pkt_type_offset());
683 insn->x_reg = CTX_REG;
684 insn->off = pkt_type_offset();
685 if (insn->off < 0) 688 if (insn->off < 0)
686 return false; 689 return false;
687 insn++; 690 insn++;
688 691 *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX);
689 insn->code = BPF_ALU | BPF_AND | BPF_K;
690 insn->a_reg = A_REG;
691 insn->imm = PKT_TYPE_MAX;
692#ifdef __BIG_ENDIAN_BITFIELD 692#ifdef __BIG_ENDIAN_BITFIELD
693 insn++; 693 insn++;
694 694 *insn = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 5);
695 insn->code = BPF_ALU | BPF_RSH | BPF_K;
696 insn->a_reg = A_REG;
697 insn->imm = 5;
698#endif 695#endif
699 break; 696 break;
700 697
701 case SKF_AD_OFF + SKF_AD_IFINDEX: 698 case SKF_AD_OFF + SKF_AD_IFINDEX:
702 case SKF_AD_OFF + SKF_AD_HATYPE: 699 case SKF_AD_OFF + SKF_AD_HATYPE:
703 if (FIELD_SIZEOF(struct sk_buff, dev) == 8)
704 insn->code = BPF_LDX | BPF_MEM | BPF_DW;
705 else
706 insn->code = BPF_LDX | BPF_MEM | BPF_W;
707 insn->a_reg = TMP_REG;
708 insn->x_reg = CTX_REG;
709 insn->off = offsetof(struct sk_buff, dev);
710 insn++;
711
712 insn->code = BPF_JMP | BPF_JNE | BPF_K;
713 insn->a_reg = TMP_REG;
714 insn->imm = 0;
715 insn->off = 1;
716 insn++;
717
718 insn->code = BPF_JMP | BPF_EXIT;
719 insn++;
720
721 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); 700 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
722 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2); 701 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
723 702 BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)) < 0);
724 insn->a_reg = A_REG; 703
725 insn->x_reg = TMP_REG; 704 *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
726 705 BPF_REG_TMP, BPF_REG_CTX,
727 if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) { 706 offsetof(struct sk_buff, dev));
728 insn->code = BPF_LDX | BPF_MEM | BPF_W; 707 /* if (tmp != 0) goto pc + 1 */
729 insn->off = offsetof(struct net_device, ifindex); 708 *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
730 } else { 709 *insn++ = BPF_EXIT_INSN();
731 insn->code = BPF_LDX | BPF_MEM | BPF_H; 710 if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
732 insn->off = offsetof(struct net_device, type); 711 *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
733 } 712 offsetof(struct net_device, ifindex));
713 else
714 *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
715 offsetof(struct net_device, type));
734 break; 716 break;
735 717
736 case SKF_AD_OFF + SKF_AD_MARK: 718 case SKF_AD_OFF + SKF_AD_MARK:
737 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); 719 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
738 720
739 insn->code = BPF_LDX | BPF_MEM | BPF_W; 721 *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
740 insn->a_reg = A_REG; 722 offsetof(struct sk_buff, mark));
741 insn->x_reg = CTX_REG;
742 insn->off = offsetof(struct sk_buff, mark);
743 break; 723 break;
744 724
745 case SKF_AD_OFF + SKF_AD_RXHASH: 725 case SKF_AD_OFF + SKF_AD_RXHASH:
746 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); 726 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
747 727
748 insn->code = BPF_LDX | BPF_MEM | BPF_W; 728 *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
749 insn->a_reg = A_REG; 729 offsetof(struct sk_buff, hash));
750 insn->x_reg = CTX_REG;
751 insn->off = offsetof(struct sk_buff, hash);
752 break; 730 break;
753 731
754 case SKF_AD_OFF + SKF_AD_QUEUE: 732 case SKF_AD_OFF + SKF_AD_QUEUE:
755 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); 733 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
756 734
757 insn->code = BPF_LDX | BPF_MEM | BPF_H; 735 *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
758 insn->a_reg = A_REG; 736 offsetof(struct sk_buff, queue_mapping));
759 insn->x_reg = CTX_REG;
760 insn->off = offsetof(struct sk_buff, queue_mapping);
761 break; 737 break;
762 738
763 case SKF_AD_OFF + SKF_AD_VLAN_TAG: 739 case SKF_AD_OFF + SKF_AD_VLAN_TAG:
764 case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT: 740 case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
765 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); 741 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
766
767 insn->code = BPF_LDX | BPF_MEM | BPF_H;
768 insn->a_reg = A_REG;
769 insn->x_reg = CTX_REG;
770 insn->off = offsetof(struct sk_buff, vlan_tci);
771 insn++;
772
773 BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000); 742 BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
774 743
744 /* A = *(u16 *) (CTX + offsetof(vlan_tci)) */
745 *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
746 offsetof(struct sk_buff, vlan_tci));
775 if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) { 747 if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
776 insn->code = BPF_ALU | BPF_AND | BPF_K; 748 *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A,
777 insn->a_reg = A_REG; 749 ~VLAN_TAG_PRESENT);
778 insn->imm = ~VLAN_TAG_PRESENT;
779 } else { 750 } else {
780 insn->code = BPF_ALU | BPF_RSH | BPF_K; 751 /* A >>= 12 */
781 insn->a_reg = A_REG; 752 *insn++ = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12);
782 insn->imm = 12; 753 /* A &= 1 */
783 insn++; 754 *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 1);
784
785 insn->code = BPF_ALU | BPF_AND | BPF_K;
786 insn->a_reg = A_REG;
787 insn->imm = 1;
788 } 755 }
789 break; 756 break;
790 757
@@ -792,46 +759,36 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
792 case SKF_AD_OFF + SKF_AD_NLATTR: 759 case SKF_AD_OFF + SKF_AD_NLATTR:
793 case SKF_AD_OFF + SKF_AD_NLATTR_NEST: 760 case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
794 case SKF_AD_OFF + SKF_AD_CPU: 761 case SKF_AD_OFF + SKF_AD_CPU:
795 /* arg1 = ctx */ 762 case SKF_AD_OFF + SKF_AD_RANDOM:
796 insn->code = BPF_ALU64 | BPF_MOV | BPF_X; 763 /* arg1 = CTX */
797 insn->a_reg = ARG1_REG; 764 *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
798 insn->x_reg = CTX_REG;
799 insn++;
800
801 /* arg2 = A */ 765 /* arg2 = A */
802 insn->code = BPF_ALU64 | BPF_MOV | BPF_X; 766 *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
803 insn->a_reg = ARG2_REG;
804 insn->x_reg = A_REG;
805 insn++;
806
807 /* arg3 = X */ 767 /* arg3 = X */
808 insn->code = BPF_ALU64 | BPF_MOV | BPF_X; 768 *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
809 insn->a_reg = ARG3_REG; 769 /* Emit call(arg1=CTX, arg2=A, arg3=X) */
810 insn->x_reg = X_REG;
811 insn++;
812
813 /* Emit call(ctx, arg2=A, arg3=X) */
814 insn->code = BPF_JMP | BPF_CALL;
815 switch (fp->k) { 770 switch (fp->k) {
816 case SKF_AD_OFF + SKF_AD_PAY_OFFSET: 771 case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
817 insn->imm = __skb_get_pay_offset - __bpf_call_base; 772 *insn = BPF_EMIT_CALL(__skb_get_pay_offset);
818 break; 773 break;
819 case SKF_AD_OFF + SKF_AD_NLATTR: 774 case SKF_AD_OFF + SKF_AD_NLATTR:
820 insn->imm = __skb_get_nlattr - __bpf_call_base; 775 *insn = BPF_EMIT_CALL(__skb_get_nlattr);
821 break; 776 break;
822 case SKF_AD_OFF + SKF_AD_NLATTR_NEST: 777 case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
823 insn->imm = __skb_get_nlattr_nest - __bpf_call_base; 778 *insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
824 break; 779 break;
825 case SKF_AD_OFF + SKF_AD_CPU: 780 case SKF_AD_OFF + SKF_AD_CPU:
826 insn->imm = __get_raw_cpu_id - __bpf_call_base; 781 *insn = BPF_EMIT_CALL(__get_raw_cpu_id);
782 break;
783 case SKF_AD_OFF + SKF_AD_RANDOM:
784 *insn = BPF_EMIT_CALL(__get_random_u32);
827 break; 785 break;
828 } 786 }
829 break; 787 break;
830 788
831 case SKF_AD_OFF + SKF_AD_ALU_XOR_X: 789 case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
832 insn->code = BPF_ALU | BPF_XOR | BPF_X; 790 /* A ^= X */
833 insn->a_reg = A_REG; 791 *insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
834 insn->x_reg = X_REG;
835 break; 792 break;
836 793
837 default: 794 default:
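A worked example of the block macros used in this switch: the classic ancillary load

	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT)

now expands to three internal instructions:

	A = *(u16 *) (CTX + offsetof(struct sk_buff, vlan_tci));
	A >>= 12;	/* VLAN_TAG_PRESENT == 0x1000, i.e. bit 12 */
	A &= 1;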
@@ -881,7 +838,7 @@ int sk_convert_filter(struct sock_filter *prog, int len,
881 u8 bpf_src; 838 u8 bpf_src;
882 839
883 BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK); 840 BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
884 BUILD_BUG_ON(FP_REG + 1 != MAX_BPF_REG); 841 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
885 842
886 if (len <= 0 || len >= BPF_MAXINSNS) 843 if (len <= 0 || len >= BPF_MAXINSNS)
887 return -EINVAL; 844 return -EINVAL;
@@ -896,11 +853,8 @@ do_pass:
896 new_insn = new_prog; 853 new_insn = new_prog;
897 fp = prog; 854 fp = prog;
898 855
899 if (new_insn) { 856 if (new_insn)
900 new_insn->code = BPF_ALU64 | BPF_MOV | BPF_X; 857 *new_insn = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
901 new_insn->a_reg = CTX_REG;
902 new_insn->x_reg = ARG1_REG;
903 }
904 new_insn++; 858 new_insn++;
905 859
906 for (i = 0; i < len; fp++, i++) { 860 for (i = 0; i < len; fp++, i++) {
@@ -948,17 +902,16 @@ do_pass:
948 convert_bpf_extensions(fp, &insn)) 902 convert_bpf_extensions(fp, &insn))
949 break; 903 break;
950 904
951 insn->code = fp->code; 905 *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
952 insn->a_reg = A_REG;
953 insn->x_reg = X_REG;
954 insn->imm = fp->k;
955 break; 906 break;
956 907
957 /* Jump opcodes map as-is, but offsets need adjustment. */ 908 /* Jump transformation cannot use BPF block macros
958 case BPF_JMP | BPF_JA: 909 * everywhere as offset calculation and target updates
959 target = i + fp->k + 1; 910 * require a bit more work than the rest, i.e. jump
960 insn->code = fp->code; 911 * opcodes map as-is, but offsets need adjustment.
961#define EMIT_JMP \ 912 */
913
914#define BPF_EMIT_JMP \
962 do { \ 915 do { \
963 if (target >= len || target < 0) \ 916 if (target >= len || target < 0) \
964 goto err; \ 917 goto err; \
@@ -967,7 +920,10 @@ do_pass:
967 insn->off -= insn - tmp_insns; \ 920 insn->off -= insn - tmp_insns; \
968 } while (0) 921 } while (0)
969 922
970 EMIT_JMP; 923 case BPF_JMP | BPF_JA:
924 target = i + fp->k + 1;
925 insn->code = fp->code;
926 BPF_EMIT_JMP;
971 break; 927 break;
972 928
973 case BPF_JMP | BPF_JEQ | BPF_K: 929 case BPF_JMP | BPF_JEQ | BPF_K:
@@ -983,17 +939,14 @@ do_pass:
983 * immediate into tmp register and use it 939 * immediate into tmp register and use it
984 * in compare insn. 940 * in compare insn.
985 */ 941 */
986 insn->code = BPF_ALU | BPF_MOV | BPF_K; 942 *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);
987 insn->a_reg = TMP_REG;
988 insn->imm = fp->k;
989 insn++;
990 943
991 insn->a_reg = A_REG; 944 insn->dst_reg = BPF_REG_A;
992 insn->x_reg = TMP_REG; 945 insn->src_reg = BPF_REG_TMP;
993 bpf_src = BPF_X; 946 bpf_src = BPF_X;
994 } else { 947 } else {
995 insn->a_reg = A_REG; 948 insn->dst_reg = BPF_REG_A;
996 insn->x_reg = X_REG; 949 insn->src_reg = BPF_REG_X;
997 insn->imm = fp->k; 950 insn->imm = fp->k;
998 bpf_src = BPF_SRC(fp->code); 951 bpf_src = BPF_SRC(fp->code);
999 } 952 }
@@ -1002,7 +955,7 @@ do_pass:
1002 if (fp->jf == 0) { 955 if (fp->jf == 0) {
1003 insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src; 956 insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
1004 target = i + fp->jt + 1; 957 target = i + fp->jt + 1;
1005 EMIT_JMP; 958 BPF_EMIT_JMP;
1006 break; 959 break;
1007 } 960 }
1008 961
@@ -1010,127 +963,94 @@ do_pass:
1010 if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) { 963 if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
1011 insn->code = BPF_JMP | BPF_JNE | bpf_src; 964 insn->code = BPF_JMP | BPF_JNE | bpf_src;
1012 target = i + fp->jf + 1; 965 target = i + fp->jf + 1;
1013 EMIT_JMP; 966 BPF_EMIT_JMP;
1014 break; 967 break;
1015 } 968 }
1016 969
1017 /* Other jumps are mapped into two insns: Jxx and JA. */ 970 /* Other jumps are mapped into two insns: Jxx and JA. */
1018 target = i + fp->jt + 1; 971 target = i + fp->jt + 1;
1019 insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src; 972 insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
1020 EMIT_JMP; 973 BPF_EMIT_JMP;
1021 insn++; 974 insn++;
1022 975
1023 insn->code = BPF_JMP | BPF_JA; 976 insn->code = BPF_JMP | BPF_JA;
1024 target = i + fp->jf + 1; 977 target = i + fp->jf + 1;
1025 EMIT_JMP; 978 BPF_EMIT_JMP;
1026 break; 979 break;
1027 980
1028 /* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */ 981 /* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
1029 case BPF_LDX | BPF_MSH | BPF_B: 982 case BPF_LDX | BPF_MSH | BPF_B:
1030 insn->code = BPF_ALU64 | BPF_MOV | BPF_X; 983 /* tmp = A */
1031 insn->a_reg = TMP_REG; 984 *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
1032 insn->x_reg = A_REG; 985 /* A = BPF_R0 = *(u8 *) (skb->data + K) */
1033 insn++; 986 *insn++ = BPF_LD_ABS(BPF_B, fp->k);
1034 987 /* A &= 0xf */
1035 insn->code = BPF_LD | BPF_ABS | BPF_B; 988 *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
1036 insn->a_reg = A_REG; 989 /* A <<= 2 */
1037 insn->imm = fp->k; 990 *insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
1038 insn++; 991 /* X = A */
1039 992 *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
1040 insn->code = BPF_ALU | BPF_AND | BPF_K; 993 /* A = tmp */
1041 insn->a_reg = A_REG; 994 *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
1042 insn->imm = 0xf;
1043 insn++;
1044
1045 insn->code = BPF_ALU | BPF_LSH | BPF_K;
1046 insn->a_reg = A_REG;
1047 insn->imm = 2;
1048 insn++;
1049
1050 insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
1051 insn->a_reg = X_REG;
1052 insn->x_reg = A_REG;
1053 insn++;
1054
1055 insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
1056 insn->a_reg = A_REG;
1057 insn->x_reg = TMP_REG;
1058 break; 995 break;
1059 996
1060 /* RET_K, RET_A are remapped into 2 insns. */ 997 /* RET_K, RET_A are remapped into 2 insns. */
1061 case BPF_RET | BPF_A: 998 case BPF_RET | BPF_A:
1062 case BPF_RET | BPF_K: 999 case BPF_RET | BPF_K:
1063 insn->code = BPF_ALU | BPF_MOV | 1000 *insn++ = BPF_MOV32_RAW(BPF_RVAL(fp->code) == BPF_K ?
1064 (BPF_RVAL(fp->code) == BPF_K ? 1001 BPF_K : BPF_X, BPF_REG_0,
1065 BPF_K : BPF_X); 1002 BPF_REG_A, fp->k);
1066 insn->a_reg = 0; 1003 *insn = BPF_EXIT_INSN();
1067 insn->x_reg = A_REG;
1068 insn->imm = fp->k;
1069 insn++;
1070
1071 insn->code = BPF_JMP | BPF_EXIT;
1072 break; 1004 break;
1073 1005
1074 /* Store to stack. */ 1006 /* Store to stack. */
1075 case BPF_ST: 1007 case BPF_ST:
1076 case BPF_STX: 1008 case BPF_STX:
1077 insn->code = BPF_STX | BPF_MEM | BPF_W; 1009 *insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
1078 insn->a_reg = FP_REG; 1010 BPF_ST ? BPF_REG_A : BPF_REG_X,
1079 insn->x_reg = fp->code == BPF_ST ? A_REG : X_REG; 1011 -(BPF_MEMWORDS - fp->k) * 4);
1080 insn->off = -(BPF_MEMWORDS - fp->k) * 4;
1081 break; 1012 break;
1082 1013
1083 /* Load from stack. */ 1014 /* Load from stack. */
1084 case BPF_LD | BPF_MEM: 1015 case BPF_LD | BPF_MEM:
1085 case BPF_LDX | BPF_MEM: 1016 case BPF_LDX | BPF_MEM:
1086 insn->code = BPF_LDX | BPF_MEM | BPF_W; 1017 *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
1087 insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ? 1018 BPF_REG_A : BPF_REG_X, BPF_REG_FP,
1088 A_REG : X_REG; 1019 -(BPF_MEMWORDS - fp->k) * 4);
1089 insn->x_reg = FP_REG;
1090 insn->off = -(BPF_MEMWORDS - fp->k) * 4;
1091 break; 1020 break;
1092 1021
1093 /* A = K or X = K */ 1022 /* A = K or X = K */
1094 case BPF_LD | BPF_IMM: 1023 case BPF_LD | BPF_IMM:
1095 case BPF_LDX | BPF_IMM: 1024 case BPF_LDX | BPF_IMM:
1096 insn->code = BPF_ALU | BPF_MOV | BPF_K; 1025 *insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
1097 insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ? 1026 BPF_REG_A : BPF_REG_X, fp->k);
1098 A_REG : X_REG;
1099 insn->imm = fp->k;
1100 break; 1027 break;
1101 1028
1102 /* X = A */ 1029 /* X = A */
1103 case BPF_MISC | BPF_TAX: 1030 case BPF_MISC | BPF_TAX:
1104 insn->code = BPF_ALU64 | BPF_MOV | BPF_X; 1031 *insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
1105 insn->a_reg = X_REG;
1106 insn->x_reg = A_REG;
1107 break; 1032 break;
1108 1033
1109 /* A = X */ 1034 /* A = X */
1110 case BPF_MISC | BPF_TXA: 1035 case BPF_MISC | BPF_TXA:
1111 insn->code = BPF_ALU64 | BPF_MOV | BPF_X; 1036 *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
1112 insn->a_reg = A_REG;
1113 insn->x_reg = X_REG;
1114 break; 1037 break;
1115 1038
1116 /* A = skb->len or X = skb->len */ 1039 /* A = skb->len or X = skb->len */
1117 case BPF_LD | BPF_W | BPF_LEN: 1040 case BPF_LD | BPF_W | BPF_LEN:
1118 case BPF_LDX | BPF_W | BPF_LEN: 1041 case BPF_LDX | BPF_W | BPF_LEN:
1119 insn->code = BPF_LDX | BPF_MEM | BPF_W; 1042 *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
1120 insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ? 1043 BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
1121 A_REG : X_REG; 1044 offsetof(struct sk_buff, len));
1122 insn->x_reg = CTX_REG;
1123 insn->off = offsetof(struct sk_buff, len);
1124 break; 1045 break;
1125 1046
1126 /* access seccomp_data fields */ 1047 /* Access seccomp_data fields. */
1127 case BPF_LDX | BPF_ABS | BPF_W: 1048 case BPF_LDX | BPF_ABS | BPF_W:
1128 insn->code = BPF_LDX | BPF_MEM | BPF_W; 1049 /* A = *(u32 *) (ctx + K) */
1129 insn->a_reg = A_REG; 1050 *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
1130 insn->x_reg = CTX_REG;
1131 insn->off = fp->k;
1132 break; 1051 break;
1133 1052
1053 /* Unknown instruction. */
1134 default: 1054 default:
1135 goto err; 1055 goto err;
1136 } 1056 }
@@ -1139,7 +1059,6 @@ do_pass:
1139 if (new_prog) 1059 if (new_prog)
1140 memcpy(new_insn, tmp_insns, 1060 memcpy(new_insn, tmp_insns,
1141 sizeof(*insn) * (insn - tmp_insns)); 1061 sizeof(*insn) * (insn - tmp_insns));
1142
1143 new_insn += insn - tmp_insns; 1062 new_insn += insn - tmp_insns;
1144 } 1063 }
1145 1064
@@ -1154,7 +1073,6 @@ do_pass:
1154 new_flen = new_insn - new_prog; 1073 new_flen = new_insn - new_prog;
1155 if (pass > 2) 1074 if (pass > 2)
1156 goto err; 1075 goto err;
1157
1158 goto do_pass; 1076 goto do_pass;
1159 } 1077 }
1160 1078
@@ -1178,44 +1096,46 @@ err:
1178 */ 1096 */
1179static int check_load_and_stores(struct sock_filter *filter, int flen) 1097static int check_load_and_stores(struct sock_filter *filter, int flen)
1180{ 1098{
1181 u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */ 1099 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
1182 int pc, ret = 0; 1100 int pc, ret = 0;
1183 1101
1184 BUILD_BUG_ON(BPF_MEMWORDS > 16); 1102 BUILD_BUG_ON(BPF_MEMWORDS > 16);
1103
1185 masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL); 1104 masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
1186 if (!masks) 1105 if (!masks)
1187 return -ENOMEM; 1106 return -ENOMEM;
1107
1188 memset(masks, 0xff, flen * sizeof(*masks)); 1108 memset(masks, 0xff, flen * sizeof(*masks));
1189 1109
1190 for (pc = 0; pc < flen; pc++) { 1110 for (pc = 0; pc < flen; pc++) {
1191 memvalid &= masks[pc]; 1111 memvalid &= masks[pc];
1192 1112
1193 switch (filter[pc].code) { 1113 switch (filter[pc].code) {
1194 case BPF_S_ST: 1114 case BPF_ST:
1195 case BPF_S_STX: 1115 case BPF_STX:
1196 memvalid |= (1 << filter[pc].k); 1116 memvalid |= (1 << filter[pc].k);
1197 break; 1117 break;
1198 case BPF_S_LD_MEM: 1118 case BPF_LD | BPF_MEM:
1199 case BPF_S_LDX_MEM: 1119 case BPF_LDX | BPF_MEM:
1200 if (!(memvalid & (1 << filter[pc].k))) { 1120 if (!(memvalid & (1 << filter[pc].k))) {
1201 ret = -EINVAL; 1121 ret = -EINVAL;
1202 goto error; 1122 goto error;
1203 } 1123 }
1204 break; 1124 break;
1205 case BPF_S_JMP_JA: 1125 case BPF_JMP | BPF_JA:
1206 /* a jump must set masks on target */ 1126 /* A jump must set masks on target */
1207 masks[pc + 1 + filter[pc].k] &= memvalid; 1127 masks[pc + 1 + filter[pc].k] &= memvalid;
1208 memvalid = ~0; 1128 memvalid = ~0;
1209 break; 1129 break;
1210 case BPF_S_JMP_JEQ_K: 1130 case BPF_JMP | BPF_JEQ | BPF_K:
1211 case BPF_S_JMP_JEQ_X: 1131 case BPF_JMP | BPF_JEQ | BPF_X:
1212 case BPF_S_JMP_JGE_K: 1132 case BPF_JMP | BPF_JGE | BPF_K:
1213 case BPF_S_JMP_JGE_X: 1133 case BPF_JMP | BPF_JGE | BPF_X:
1214 case BPF_S_JMP_JGT_K: 1134 case BPF_JMP | BPF_JGT | BPF_K:
1215 case BPF_S_JMP_JGT_X: 1135 case BPF_JMP | BPF_JGT | BPF_X:
1216 case BPF_S_JMP_JSET_X: 1136 case BPF_JMP | BPF_JSET | BPF_K:
1217 case BPF_S_JMP_JSET_K: 1137 case BPF_JMP | BPF_JSET | BPF_X:
1218 /* a jump must set masks on targets */ 1138 /* A jump must set masks on targets */
1219 masks[pc + 1 + filter[pc].jt] &= memvalid; 1139 masks[pc + 1 + filter[pc].jt] &= memvalid;
1220 masks[pc + 1 + filter[pc].jf] &= memvalid; 1140 masks[pc + 1 + filter[pc].jf] &= memvalid;
1221 memvalid = ~0; 1141 memvalid = ~0;
@@ -1227,6 +1147,72 @@ error:
1227 return ret; 1147 return ret;
1228} 1148}
1229 1149
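For instance, the smallest program this check rejects reads a scratch cell that nothing has stored to (BPF_STMT is the classic initializer macro from <linux/filter.h>):

	struct sock_filter bad_prog[] = {
		BPF_STMT(BPF_LD | BPF_MEM, 0),	/* A = M[0], never written: -EINVAL */
		BPF_STMT(BPF_RET | BPF_K, 0),
	};

Prepending BPF_STMT(BPF_ST, 0) makes it pass, since the store sets bit 0 of memvalid before the load tests it.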
1150static bool chk_code_allowed(u16 code_to_probe)
1151{
1152 static const bool codes[] = {
1153 /* 32 bit ALU operations */
1154 [BPF_ALU | BPF_ADD | BPF_K] = true,
1155 [BPF_ALU | BPF_ADD | BPF_X] = true,
1156 [BPF_ALU | BPF_SUB | BPF_K] = true,
1157 [BPF_ALU | BPF_SUB | BPF_X] = true,
1158 [BPF_ALU | BPF_MUL | BPF_K] = true,
1159 [BPF_ALU | BPF_MUL | BPF_X] = true,
1160 [BPF_ALU | BPF_DIV | BPF_K] = true,
1161 [BPF_ALU | BPF_DIV | BPF_X] = true,
1162 [BPF_ALU | BPF_MOD | BPF_K] = true,
1163 [BPF_ALU | BPF_MOD | BPF_X] = true,
1164 [BPF_ALU | BPF_AND | BPF_K] = true,
1165 [BPF_ALU | BPF_AND | BPF_X] = true,
1166 [BPF_ALU | BPF_OR | BPF_K] = true,
1167 [BPF_ALU | BPF_OR | BPF_X] = true,
1168 [BPF_ALU | BPF_XOR | BPF_K] = true,
1169 [BPF_ALU | BPF_XOR | BPF_X] = true,
1170 [BPF_ALU | BPF_LSH | BPF_K] = true,
1171 [BPF_ALU | BPF_LSH | BPF_X] = true,
1172 [BPF_ALU | BPF_RSH | BPF_K] = true,
1173 [BPF_ALU | BPF_RSH | BPF_X] = true,
1174 [BPF_ALU | BPF_NEG] = true,
1175 /* Load instructions */
1176 [BPF_LD | BPF_W | BPF_ABS] = true,
1177 [BPF_LD | BPF_H | BPF_ABS] = true,
1178 [BPF_LD | BPF_B | BPF_ABS] = true,
1179 [BPF_LD | BPF_W | BPF_LEN] = true,
1180 [BPF_LD | BPF_W | BPF_IND] = true,
1181 [BPF_LD | BPF_H | BPF_IND] = true,
1182 [BPF_LD | BPF_B | BPF_IND] = true,
1183 [BPF_LD | BPF_IMM] = true,
1184 [BPF_LD | BPF_MEM] = true,
1185 [BPF_LDX | BPF_W | BPF_LEN] = true,
1186 [BPF_LDX | BPF_B | BPF_MSH] = true,
1187 [BPF_LDX | BPF_IMM] = true,
1188 [BPF_LDX | BPF_MEM] = true,
1189 /* Store instructions */
1190 [BPF_ST] = true,
1191 [BPF_STX] = true,
1192 /* Misc instructions */
1193 [BPF_MISC | BPF_TAX] = true,
1194 [BPF_MISC | BPF_TXA] = true,
1195 /* Return instructions */
1196 [BPF_RET | BPF_K] = true,
1197 [BPF_RET | BPF_A] = true,
1198 /* Jump instructions */
1199 [BPF_JMP | BPF_JA] = true,
1200 [BPF_JMP | BPF_JEQ | BPF_K] = true,
1201 [BPF_JMP | BPF_JEQ | BPF_X] = true,
1202 [BPF_JMP | BPF_JGE | BPF_K] = true,
1203 [BPF_JMP | BPF_JGE | BPF_X] = true,
1204 [BPF_JMP | BPF_JGT | BPF_K] = true,
1205 [BPF_JMP | BPF_JGT | BPF_X] = true,
1206 [BPF_JMP | BPF_JSET | BPF_K] = true,
1207 [BPF_JMP | BPF_JSET | BPF_X] = true,
1208 };
1209
1210 if (code_to_probe >= ARRAY_SIZE(codes))
1211 return false;
1212
1213 return codes[code_to_probe];
1214}
1215
1230/** 1216/**
1231 * sk_chk_filter - verify socket filter code 1217 * sk_chk_filter - verify socket filter code
1232 * @filter: filter to verify 1218 * @filter: filter to verify
@@ -1243,153 +1229,76 @@ error:
1243 */ 1229 */
1244int sk_chk_filter(struct sock_filter *filter, unsigned int flen) 1230int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
1245{ 1231{
1246 /*
1247 * Valid instructions are initialized to non-0.
1248 * Invalid instructions are initialized to 0.
1249 */
1250 static const u8 codes[] = {
1251 [BPF_ALU|BPF_ADD|BPF_K] = BPF_S_ALU_ADD_K,
1252 [BPF_ALU|BPF_ADD|BPF_X] = BPF_S_ALU_ADD_X,
1253 [BPF_ALU|BPF_SUB|BPF_K] = BPF_S_ALU_SUB_K,
1254 [BPF_ALU|BPF_SUB|BPF_X] = BPF_S_ALU_SUB_X,
1255 [BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K,
1256 [BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X,
1257 [BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X,
1258 [BPF_ALU|BPF_MOD|BPF_K] = BPF_S_ALU_MOD_K,
1259 [BPF_ALU|BPF_MOD|BPF_X] = BPF_S_ALU_MOD_X,
1260 [BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K,
1261 [BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X,
1262 [BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K,
1263 [BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X,
1264 [BPF_ALU|BPF_XOR|BPF_K] = BPF_S_ALU_XOR_K,
1265 [BPF_ALU|BPF_XOR|BPF_X] = BPF_S_ALU_XOR_X,
1266 [BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K,
1267 [BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X,
1268 [BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K,
1269 [BPF_ALU|BPF_RSH|BPF_X] = BPF_S_ALU_RSH_X,
1270 [BPF_ALU|BPF_NEG] = BPF_S_ALU_NEG,
1271 [BPF_LD|BPF_W|BPF_ABS] = BPF_S_LD_W_ABS,
1272 [BPF_LD|BPF_H|BPF_ABS] = BPF_S_LD_H_ABS,
1273 [BPF_LD|BPF_B|BPF_ABS] = BPF_S_LD_B_ABS,
1274 [BPF_LD|BPF_W|BPF_LEN] = BPF_S_LD_W_LEN,
1275 [BPF_LD|BPF_W|BPF_IND] = BPF_S_LD_W_IND,
1276 [BPF_LD|BPF_H|BPF_IND] = BPF_S_LD_H_IND,
1277 [BPF_LD|BPF_B|BPF_IND] = BPF_S_LD_B_IND,
1278 [BPF_LD|BPF_IMM] = BPF_S_LD_IMM,
1279 [BPF_LDX|BPF_W|BPF_LEN] = BPF_S_LDX_W_LEN,
1280 [BPF_LDX|BPF_B|BPF_MSH] = BPF_S_LDX_B_MSH,
1281 [BPF_LDX|BPF_IMM] = BPF_S_LDX_IMM,
1282 [BPF_MISC|BPF_TAX] = BPF_S_MISC_TAX,
1283 [BPF_MISC|BPF_TXA] = BPF_S_MISC_TXA,
1284 [BPF_RET|BPF_K] = BPF_S_RET_K,
1285 [BPF_RET|BPF_A] = BPF_S_RET_A,
1286 [BPF_ALU|BPF_DIV|BPF_K] = BPF_S_ALU_DIV_K,
1287 [BPF_LD|BPF_MEM] = BPF_S_LD_MEM,
1288 [BPF_LDX|BPF_MEM] = BPF_S_LDX_MEM,
1289 [BPF_ST] = BPF_S_ST,
1290 [BPF_STX] = BPF_S_STX,
1291 [BPF_JMP|BPF_JA] = BPF_S_JMP_JA,
1292 [BPF_JMP|BPF_JEQ|BPF_K] = BPF_S_JMP_JEQ_K,
1293 [BPF_JMP|BPF_JEQ|BPF_X] = BPF_S_JMP_JEQ_X,
1294 [BPF_JMP|BPF_JGE|BPF_K] = BPF_S_JMP_JGE_K,
1295 [BPF_JMP|BPF_JGE|BPF_X] = BPF_S_JMP_JGE_X,
1296 [BPF_JMP|BPF_JGT|BPF_K] = BPF_S_JMP_JGT_K,
1297 [BPF_JMP|BPF_JGT|BPF_X] = BPF_S_JMP_JGT_X,
1298 [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
1299 [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
1300 };
1301 int pc;
1302 bool anc_found; 1232 bool anc_found;
1233 int pc;
1303 1234
1304 if (flen == 0 || flen > BPF_MAXINSNS) 1235 if (flen == 0 || flen > BPF_MAXINSNS)
1305 return -EINVAL; 1236 return -EINVAL;
1306 1237
1307 /* check the filter code now */ 1238 /* Check the filter code now */
1308 for (pc = 0; pc < flen; pc++) { 1239 for (pc = 0; pc < flen; pc++) {
1309 struct sock_filter *ftest = &filter[pc]; 1240 struct sock_filter *ftest = &filter[pc];
1310 u16 code = ftest->code;
1311 1241
1312 if (code >= ARRAY_SIZE(codes)) 1242 /* May we actually operate on this code? */
1313 return -EINVAL; 1243 if (!chk_code_allowed(ftest->code))
1314 code = codes[code];
1315 if (!code)
1316 return -EINVAL; 1244 return -EINVAL;
1245
1317 /* Some instructions need special checks */ 1246 /* Some instructions need special checks */
1318 switch (code) { 1247 switch (ftest->code) {
1319 case BPF_S_ALU_DIV_K: 1248 case BPF_ALU | BPF_DIV | BPF_K:
1320 case BPF_S_ALU_MOD_K: 1249 case BPF_ALU | BPF_MOD | BPF_K:
1321 /* check for division by zero */ 1250 /* Check for division by zero */
1322 if (ftest->k == 0) 1251 if (ftest->k == 0)
1323 return -EINVAL; 1252 return -EINVAL;
1324 break; 1253 break;
1325 case BPF_S_LD_MEM: 1254 case BPF_LD | BPF_MEM:
1326 case BPF_S_LDX_MEM: 1255 case BPF_LDX | BPF_MEM:
1327 case BPF_S_ST: 1256 case BPF_ST:
1328 case BPF_S_STX: 1257 case BPF_STX:
1329 /* check for invalid memory addresses */ 1258 /* Check for invalid memory addresses */
1330 if (ftest->k >= BPF_MEMWORDS) 1259 if (ftest->k >= BPF_MEMWORDS)
1331 return -EINVAL; 1260 return -EINVAL;
1332 break; 1261 break;
1333 case BPF_S_JMP_JA: 1262 case BPF_JMP | BPF_JA:
1334 /* 1263 /* Note, the large ftest->k might cause loops.
1335 * Note, the large ftest->k might cause loops.
1336 * Compare this with conditional jumps below, 1264 * Compare this with conditional jumps below,
1337 * where offsets are limited. --ANK (981016) 1265 * where offsets are limited. --ANK (981016)
1338 */ 1266 */
1339 if (ftest->k >= (unsigned int)(flen-pc-1)) 1267 if (ftest->k >= (unsigned int)(flen - pc - 1))
1340 return -EINVAL; 1268 return -EINVAL;
1341 break; 1269 break;
1342 case BPF_S_JMP_JEQ_K: 1270 case BPF_JMP | BPF_JEQ | BPF_K:
1343 case BPF_S_JMP_JEQ_X: 1271 case BPF_JMP | BPF_JEQ | BPF_X:
1344 case BPF_S_JMP_JGE_K: 1272 case BPF_JMP | BPF_JGE | BPF_K:
1345 case BPF_S_JMP_JGE_X: 1273 case BPF_JMP | BPF_JGE | BPF_X:
1346 case BPF_S_JMP_JGT_K: 1274 case BPF_JMP | BPF_JGT | BPF_K:
1347 case BPF_S_JMP_JGT_X: 1275 case BPF_JMP | BPF_JGT | BPF_X:
1348 case BPF_S_JMP_JSET_X: 1276 case BPF_JMP | BPF_JSET | BPF_K:
1349 case BPF_S_JMP_JSET_K: 1277 case BPF_JMP | BPF_JSET | BPF_X:
1350 /* for conditionals both must be safe */ 1278 /* Both conditionals must be safe */
1351 if (pc + ftest->jt + 1 >= flen || 1279 if (pc + ftest->jt + 1 >= flen ||
1352 pc + ftest->jf + 1 >= flen) 1280 pc + ftest->jf + 1 >= flen)
1353 return -EINVAL; 1281 return -EINVAL;
1354 break; 1282 break;
1355 case BPF_S_LD_W_ABS: 1283 case BPF_LD | BPF_W | BPF_ABS:
1356 case BPF_S_LD_H_ABS: 1284 case BPF_LD | BPF_H | BPF_ABS:
1357 case BPF_S_LD_B_ABS: 1285 case BPF_LD | BPF_B | BPF_ABS:
1358 anc_found = false; 1286 anc_found = false;
1359#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \ 1287 if (bpf_anc_helper(ftest) & BPF_ANC)
1360 code = BPF_S_ANC_##CODE; \ 1288 anc_found = true;
1361 anc_found = true; \ 1289 /* Ancillary operation unknown or unsupported */
1362 break
1363 switch (ftest->k) {
1364 ANCILLARY(PROTOCOL);
1365 ANCILLARY(PKTTYPE);
1366 ANCILLARY(IFINDEX);
1367 ANCILLARY(NLATTR);
1368 ANCILLARY(NLATTR_NEST);
1369 ANCILLARY(MARK);
1370 ANCILLARY(QUEUE);
1371 ANCILLARY(HATYPE);
1372 ANCILLARY(RXHASH);
1373 ANCILLARY(CPU);
1374 ANCILLARY(ALU_XOR_X);
1375 ANCILLARY(VLAN_TAG);
1376 ANCILLARY(VLAN_TAG_PRESENT);
1377 ANCILLARY(PAY_OFFSET);
1378 }
1379
1380 /* ancillary operation unknown or unsupported */
1381 if (anc_found == false && ftest->k >= SKF_AD_OFF) 1290 if (anc_found == false && ftest->k >= SKF_AD_OFF)
1382 return -EINVAL; 1291 return -EINVAL;
1383 } 1292 }
1384 ftest->code = code;
1385 } 1293 }
1386 1294
1387 /* last instruction must be a RET code */ 1295 /* Last instruction must be a RET code */
1388 switch (filter[flen - 1].code) { 1296 switch (filter[flen - 1].code) {
1389 case BPF_S_RET_K: 1297 case BPF_RET | BPF_K:
1390 case BPF_S_RET_A: 1298 case BPF_RET | BPF_A:
1391 return check_load_and_stores(filter, flen); 1299 return check_load_and_stores(filter, flen);
1392 } 1300 }
1301
1393 return -EINVAL; 1302 return -EINVAL;
1394} 1303}
1395EXPORT_SYMBOL(sk_chk_filter); 1304EXPORT_SYMBOL(sk_chk_filter);
@@ -1434,7 +1343,7 @@ static void sk_filter_release_rcu(struct rcu_head *rcu)
1434 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); 1343 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
1435 1344
1436 sk_release_orig_filter(fp); 1345 sk_release_orig_filter(fp);
1437 bpf_jit_free(fp); 1346 sk_filter_free(fp);
1438} 1347}
1439 1348
1440/** 1349/**
@@ -1472,7 +1381,7 @@ static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
1472 1381
1473 fp_new = sock_kmalloc(sk, len, GFP_KERNEL); 1382 fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
1474 if (fp_new) { 1383 if (fp_new) {
1475 memcpy(fp_new, fp, sizeof(struct sk_filter)); 1384 *fp_new = *fp;
1476 /* As we're keeping orig_prog in fp_new along, 1385 /* As we're keeping orig_prog in fp_new along,
1477 * we need to make sure we're not evicting it 1386 * we need to make sure we're not evicting it
1478 * from the old fp. 1387 * from the old fp.
@@ -1489,7 +1398,7 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
1489{ 1398{
1490 struct sock_filter *old_prog; 1399 struct sock_filter *old_prog;
1491 struct sk_filter *old_fp; 1400 struct sk_filter *old_fp;
1492 int i, err, new_len, old_len = fp->len; 1401 int err, new_len, old_len = fp->len;
1493 1402
1494 /* We are free to overwrite insns et al right here as it 1403 /* We are free to overwrite insns et al right here as it
1495 * won't be used at this point in time anymore internally 1404 * won't be used at this point in time anymore internally
@@ -1499,13 +1408,6 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
1499 BUILD_BUG_ON(sizeof(struct sock_filter) != 1408 BUILD_BUG_ON(sizeof(struct sock_filter) !=
1500 sizeof(struct sock_filter_int)); 1409 sizeof(struct sock_filter_int));
1501 1410
1502 /* For now, we need to unfiddle BPF_S_* identifiers in place.
1503 * This can sooner or later on be subject to removal, e.g. when
1504 * JITs have been converted.
1505 */
1506 for (i = 0; i < fp->len; i++)
1507 sk_decode_filter(&fp->insns[i], &fp->insns[i]);
1508
1509 /* Conversion cannot happen on overlapping memory areas, 1411 /* Conversion cannot happen on overlapping memory areas,
1510 * so we need to keep the user BPF around until the 2nd 1412 * so we need to keep the user BPF around until the 2nd
1511 * pass. At this time, the user BPF is stored in fp->insns. 1413 * pass. At this time, the user BPF is stored in fp->insns.
@@ -1534,7 +1436,6 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
1534 goto out_err_free; 1436 goto out_err_free;
1535 } 1437 }
1536 1438
1537 fp->bpf_func = sk_run_filter_int_skb;
1538 fp->len = new_len; 1439 fp->len = new_len;
1539 1440
1540 /* 2nd pass: remap sock_filter insns into sock_filter_int insns. */ 1441 /* 2nd pass: remap sock_filter insns into sock_filter_int insns. */
@@ -1547,6 +1448,8 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
1547 */ 1448 */
1548 goto out_err_free; 1449 goto out_err_free;
1549 1450
1451 sk_filter_select_runtime(fp);
1452
1550 kfree(old_prog); 1453 kfree(old_prog);
1551 return fp; 1454 return fp;
1552 1455
@@ -1561,6 +1464,33 @@ out_err:
1561 return ERR_PTR(err); 1464 return ERR_PTR(err);
1562} 1465}
1563 1466
1467void __weak bpf_int_jit_compile(struct sk_filter *prog)
1468{
1469}
1470
1471/**
1472 * sk_filter_select_runtime - select execution runtime for BPF program
1473 * @fp: sk_filter populated with internal BPF program
1474 *
1475 * try to JIT internal BPF program, if JIT is not available select interpreter
1476 * BPF program will be executed via SK_RUN_FILTER() macro
1477 */
1478void sk_filter_select_runtime(struct sk_filter *fp)
1479{
1480 fp->bpf_func = (void *) __sk_run_filter;
1481
1482 /* Probe if internal BPF can be JITed */
1483 bpf_int_jit_compile(fp);
1484}
1485EXPORT_SYMBOL_GPL(sk_filter_select_runtime);
1486
1487/* free internal BPF program */
1488void sk_filter_free(struct sk_filter *fp)
1489{
1490 bpf_jit_free(fp);
1491}
1492EXPORT_SYMBOL_GPL(sk_filter_free);
1493
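
The new sk_filter_select_runtime()/sk_filter_free() pair above leans on the __weak definition of bpf_int_jit_compile(): the generic build gets a no-op, and an architecture JIT can override the symbol at link time, replacing fp->bpf_func when compilation succeeds. A hedged sketch of that override pattern (the arch-side body is a hypothetical illustration, not code from this series):

	/* generic kernel (net/core/filter.c): weak no-op, so the
	 * interpreter selected by sk_filter_select_runtime() stays
	 */
	void __weak bpf_int_jit_compile(struct sk_filter *prog)
	{
	}

	/* an arch JIT (e.g. arch/x86/net/bpf_jit_comp.c) overrides the
	 * weak symbol; arch_jit_compile() here is a hypothetical helper
	 */
	void bpf_int_jit_compile(struct sk_filter *prog)
	{
		void *image = arch_jit_compile(prog);

		if (image)
			prog->bpf_func = image;
	}
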
1564static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp, 1494static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
1565 struct sock *sk) 1495 struct sock *sk)
1566{ 1496{
@@ -1603,7 +1533,7 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
1603 * a negative errno code is returned. On success the return is zero. 1533 * a negative errno code is returned. On success the return is zero.
1604 */ 1534 */
1605int sk_unattached_filter_create(struct sk_filter **pfp, 1535int sk_unattached_filter_create(struct sk_filter **pfp,
1606 struct sock_fprog *fprog) 1536 struct sock_fprog_kern *fprog)
1607{ 1537{
1608 unsigned int fsize = sk_filter_proglen(fprog); 1538 unsigned int fsize = sk_filter_proglen(fprog);
1609 struct sk_filter *fp; 1539 struct sk_filter *fp;
@@ -1724,83 +1654,6 @@ int sk_detach_filter(struct sock *sk)
1724} 1654}
1725EXPORT_SYMBOL_GPL(sk_detach_filter); 1655EXPORT_SYMBOL_GPL(sk_detach_filter);
1726 1656
1727void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
1728{
1729 static const u16 decodes[] = {
1730 [BPF_S_ALU_ADD_K] = BPF_ALU|BPF_ADD|BPF_K,
1731 [BPF_S_ALU_ADD_X] = BPF_ALU|BPF_ADD|BPF_X,
1732 [BPF_S_ALU_SUB_K] = BPF_ALU|BPF_SUB|BPF_K,
1733 [BPF_S_ALU_SUB_X] = BPF_ALU|BPF_SUB|BPF_X,
1734 [BPF_S_ALU_MUL_K] = BPF_ALU|BPF_MUL|BPF_K,
1735 [BPF_S_ALU_MUL_X] = BPF_ALU|BPF_MUL|BPF_X,
1736 [BPF_S_ALU_DIV_X] = BPF_ALU|BPF_DIV|BPF_X,
1737 [BPF_S_ALU_MOD_K] = BPF_ALU|BPF_MOD|BPF_K,
1738 [BPF_S_ALU_MOD_X] = BPF_ALU|BPF_MOD|BPF_X,
1739 [BPF_S_ALU_AND_K] = BPF_ALU|BPF_AND|BPF_K,
1740 [BPF_S_ALU_AND_X] = BPF_ALU|BPF_AND|BPF_X,
1741 [BPF_S_ALU_OR_K] = BPF_ALU|BPF_OR|BPF_K,
1742 [BPF_S_ALU_OR_X] = BPF_ALU|BPF_OR|BPF_X,
1743 [BPF_S_ALU_XOR_K] = BPF_ALU|BPF_XOR|BPF_K,
1744 [BPF_S_ALU_XOR_X] = BPF_ALU|BPF_XOR|BPF_X,
1745 [BPF_S_ALU_LSH_K] = BPF_ALU|BPF_LSH|BPF_K,
1746 [BPF_S_ALU_LSH_X] = BPF_ALU|BPF_LSH|BPF_X,
1747 [BPF_S_ALU_RSH_K] = BPF_ALU|BPF_RSH|BPF_K,
1748 [BPF_S_ALU_RSH_X] = BPF_ALU|BPF_RSH|BPF_X,
1749 [BPF_S_ALU_NEG] = BPF_ALU|BPF_NEG,
1750 [BPF_S_LD_W_ABS] = BPF_LD|BPF_W|BPF_ABS,
1751 [BPF_S_LD_H_ABS] = BPF_LD|BPF_H|BPF_ABS,
1752 [BPF_S_LD_B_ABS] = BPF_LD|BPF_B|BPF_ABS,
1753 [BPF_S_ANC_PROTOCOL] = BPF_LD|BPF_B|BPF_ABS,
1754 [BPF_S_ANC_PKTTYPE] = BPF_LD|BPF_B|BPF_ABS,
1755 [BPF_S_ANC_IFINDEX] = BPF_LD|BPF_B|BPF_ABS,
1756 [BPF_S_ANC_NLATTR] = BPF_LD|BPF_B|BPF_ABS,
1757 [BPF_S_ANC_NLATTR_NEST] = BPF_LD|BPF_B|BPF_ABS,
1758 [BPF_S_ANC_MARK] = BPF_LD|BPF_B|BPF_ABS,
1759 [BPF_S_ANC_QUEUE] = BPF_LD|BPF_B|BPF_ABS,
1760 [BPF_S_ANC_HATYPE] = BPF_LD|BPF_B|BPF_ABS,
1761 [BPF_S_ANC_RXHASH] = BPF_LD|BPF_B|BPF_ABS,
1762 [BPF_S_ANC_CPU] = BPF_LD|BPF_B|BPF_ABS,
1763 [BPF_S_ANC_ALU_XOR_X] = BPF_LD|BPF_B|BPF_ABS,
1764 [BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS,
1765 [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
1766 [BPF_S_ANC_PAY_OFFSET] = BPF_LD|BPF_B|BPF_ABS,
1767 [BPF_S_LD_W_LEN] = BPF_LD|BPF_W|BPF_LEN,
1768 [BPF_S_LD_W_IND] = BPF_LD|BPF_W|BPF_IND,
1769 [BPF_S_LD_H_IND] = BPF_LD|BPF_H|BPF_IND,
1770 [BPF_S_LD_B_IND] = BPF_LD|BPF_B|BPF_IND,
1771 [BPF_S_LD_IMM] = BPF_LD|BPF_IMM,
1772 [BPF_S_LDX_W_LEN] = BPF_LDX|BPF_W|BPF_LEN,
1773 [BPF_S_LDX_B_MSH] = BPF_LDX|BPF_B|BPF_MSH,
1774 [BPF_S_LDX_IMM] = BPF_LDX|BPF_IMM,
1775 [BPF_S_MISC_TAX] = BPF_MISC|BPF_TAX,
1776 [BPF_S_MISC_TXA] = BPF_MISC|BPF_TXA,
1777 [BPF_S_RET_K] = BPF_RET|BPF_K,
1778 [BPF_S_RET_A] = BPF_RET|BPF_A,
1779 [BPF_S_ALU_DIV_K] = BPF_ALU|BPF_DIV|BPF_K,
1780 [BPF_S_LD_MEM] = BPF_LD|BPF_MEM,
1781 [BPF_S_LDX_MEM] = BPF_LDX|BPF_MEM,
1782 [BPF_S_ST] = BPF_ST,
1783 [BPF_S_STX] = BPF_STX,
1784 [BPF_S_JMP_JA] = BPF_JMP|BPF_JA,
1785 [BPF_S_JMP_JEQ_K] = BPF_JMP|BPF_JEQ|BPF_K,
1786 [BPF_S_JMP_JEQ_X] = BPF_JMP|BPF_JEQ|BPF_X,
1787 [BPF_S_JMP_JGE_K] = BPF_JMP|BPF_JGE|BPF_K,
1788 [BPF_S_JMP_JGE_X] = BPF_JMP|BPF_JGE|BPF_X,
1789 [BPF_S_JMP_JGT_K] = BPF_JMP|BPF_JGT|BPF_K,
1790 [BPF_S_JMP_JGT_X] = BPF_JMP|BPF_JGT|BPF_X,
1791 [BPF_S_JMP_JSET_K] = BPF_JMP|BPF_JSET|BPF_K,
1792 [BPF_S_JMP_JSET_X] = BPF_JMP|BPF_JSET|BPF_X,
1793 };
1794 u16 code;
1795
1796 code = filt->code;
1797
1798 to->code = decodes[code];
1799 to->jt = filt->jt;
1800 to->jf = filt->jf;
1801 to->k = filt->k;
1802}
1803
1804int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, 1657int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
1805 unsigned int len) 1658 unsigned int len)
1806{ 1659{
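
With the private BPF_S_* opcode space gone, sk_chk_filter() now validates the user-supplied classic BPF opcodes directly through chk_code_allowed(). The helper body is not part of this hunk, but a dense bool table indexed by the raw opcode is the natural shape. A minimal userspace sketch of the idea, using the standard classic BPF encoding constants from <linux/filter.h> (only a few opcodes shown):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* standard classic BPF encoding values */
	#define BPF_LD		0x00
	#define BPF_ALU		0x04
	#define BPF_JMP		0x05
	#define BPF_RET		0x06
	#define BPF_W		0x00
	#define BPF_ABS		0x20
	#define BPF_ADD		0x00
	#define BPF_JEQ		0x10
	#define BPF_K		0x00

	static bool chk_code_allowed(uint16_t code_to_probe)
	{
		/* true only at indices that are legal opcodes; excerpt */
		static const bool codes[] = {
			[BPF_LD | BPF_W | BPF_ABS]	= true,
			[BPF_ALU | BPF_ADD | BPF_K]	= true,
			[BPF_JMP | BPF_JEQ | BPF_K]	= true,
			[BPF_RET | BPF_K]		= true,
		};

		if (code_to_probe >= sizeof(codes) / sizeof(codes[0]))
			return false;

		return codes[code_to_probe];
	}

	int main(void)
	{
		printf("%d\n", chk_code_allowed(BPF_RET | BPF_K));	/* 1 */
		printf("%d\n", chk_code_allowed(0x1234));		/* 0 */
		return 0;
	}
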
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 7c8ffd974961..85b62691f4f2 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -273,7 +273,7 @@ static void cleanup_net(struct work_struct *work)
273{ 273{
274 const struct pernet_operations *ops; 274 const struct pernet_operations *ops;
275 struct net *net, *tmp; 275 struct net *net, *tmp;
276 LIST_HEAD(net_kill_list); 276 struct list_head net_kill_list;
277 LIST_HEAD(net_exit_list); 277 LIST_HEAD(net_exit_list);
278 278
279 /* Atomically snapshot the list of namespaces to cleanup */ 279 /* Atomically snapshot the list of namespaces to cleanup */
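
The LIST_HEAD() to plain struct list_head change is safe here, presumably because net_kill_list is written by list_replace_init() before anything reads it, so the self-referencing initialization was dead weight. A simplified sketch of the primitive (after include/linux/list.h): every field of the destination head is overwritten, so it needs no prior initialization.

	struct list_head {
		struct list_head *next, *prev;
	};

	/* make 'new' take over 'old's chain, re-init 'old' as empty */
	static inline void list_replace_init(struct list_head *old,
					     struct list_head *new)
	{
		new->next = old->next;
		new->next->prev = new;
		new->prev = old->prev;
		new->prev->next = new;
		old->next = old;
		old->prev = old;
	}
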
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 0304f981f7ff..fc17a9d309ac 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -573,7 +573,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
573 is_zero_ether_addr(pkt_dev->src_mac) ? 573 is_zero_ether_addr(pkt_dev->src_mac) ?
574 pkt_dev->odev->dev_addr : pkt_dev->src_mac); 574 pkt_dev->odev->dev_addr : pkt_dev->src_mac);
575 575
576 seq_printf(seq, "dst_mac: "); 576 seq_puts(seq, "dst_mac: ");
577 seq_printf(seq, "%pM\n", pkt_dev->dst_mac); 577 seq_printf(seq, "%pM\n", pkt_dev->dst_mac);
578 578
579 seq_printf(seq, 579 seq_printf(seq,
@@ -588,7 +588,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
588 588
589 if (pkt_dev->nr_labels) { 589 if (pkt_dev->nr_labels) {
590 unsigned int i; 590 unsigned int i;
591 seq_printf(seq, " mpls: "); 591 seq_puts(seq, " mpls: ");
592 for (i = 0; i < pkt_dev->nr_labels; i++) 592 for (i = 0; i < pkt_dev->nr_labels; i++)
593 seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]), 593 seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]),
594 i == pkt_dev->nr_labels-1 ? "\n" : ", "); 594 i == pkt_dev->nr_labels-1 ? "\n" : ", ");
@@ -613,67 +613,67 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
613 if (pkt_dev->node >= 0) 613 if (pkt_dev->node >= 0)
614 seq_printf(seq, " node: %d\n", pkt_dev->node); 614 seq_printf(seq, " node: %d\n", pkt_dev->node);
615 615
616 seq_printf(seq, " Flags: "); 616 seq_puts(seq, " Flags: ");
617 617
618 if (pkt_dev->flags & F_IPV6) 618 if (pkt_dev->flags & F_IPV6)
619 seq_printf(seq, "IPV6 "); 619 seq_puts(seq, "IPV6 ");
620 620
621 if (pkt_dev->flags & F_IPSRC_RND) 621 if (pkt_dev->flags & F_IPSRC_RND)
622 seq_printf(seq, "IPSRC_RND "); 622 seq_puts(seq, "IPSRC_RND ");
623 623
624 if (pkt_dev->flags & F_IPDST_RND) 624 if (pkt_dev->flags & F_IPDST_RND)
625 seq_printf(seq, "IPDST_RND "); 625 seq_puts(seq, "IPDST_RND ");
626 626
627 if (pkt_dev->flags & F_TXSIZE_RND) 627 if (pkt_dev->flags & F_TXSIZE_RND)
628 seq_printf(seq, "TXSIZE_RND "); 628 seq_puts(seq, "TXSIZE_RND ");
629 629
630 if (pkt_dev->flags & F_UDPSRC_RND) 630 if (pkt_dev->flags & F_UDPSRC_RND)
631 seq_printf(seq, "UDPSRC_RND "); 631 seq_puts(seq, "UDPSRC_RND ");
632 632
633 if (pkt_dev->flags & F_UDPDST_RND) 633 if (pkt_dev->flags & F_UDPDST_RND)
634 seq_printf(seq, "UDPDST_RND "); 634 seq_puts(seq, "UDPDST_RND ");
635 635
636 if (pkt_dev->flags & F_UDPCSUM) 636 if (pkt_dev->flags & F_UDPCSUM)
637 seq_printf(seq, "UDPCSUM "); 637 seq_puts(seq, "UDPCSUM ");
638 638
639 if (pkt_dev->flags & F_MPLS_RND) 639 if (pkt_dev->flags & F_MPLS_RND)
640 seq_printf(seq, "MPLS_RND "); 640 seq_puts(seq, "MPLS_RND ");
641 641
642 if (pkt_dev->flags & F_QUEUE_MAP_RND) 642 if (pkt_dev->flags & F_QUEUE_MAP_RND)
643 seq_printf(seq, "QUEUE_MAP_RND "); 643 seq_puts(seq, "QUEUE_MAP_RND ");
644 644
645 if (pkt_dev->flags & F_QUEUE_MAP_CPU) 645 if (pkt_dev->flags & F_QUEUE_MAP_CPU)
646 seq_printf(seq, "QUEUE_MAP_CPU "); 646 seq_puts(seq, "QUEUE_MAP_CPU ");
647 647
648 if (pkt_dev->cflows) { 648 if (pkt_dev->cflows) {
649 if (pkt_dev->flags & F_FLOW_SEQ) 649 if (pkt_dev->flags & F_FLOW_SEQ)
650 seq_printf(seq, "FLOW_SEQ "); /*in sequence flows*/ 650 seq_puts(seq, "FLOW_SEQ "); /*in sequence flows*/
651 else 651 else
652 seq_printf(seq, "FLOW_RND "); 652 seq_puts(seq, "FLOW_RND ");
653 } 653 }
654 654
655#ifdef CONFIG_XFRM 655#ifdef CONFIG_XFRM
656 if (pkt_dev->flags & F_IPSEC_ON) { 656 if (pkt_dev->flags & F_IPSEC_ON) {
657 seq_printf(seq, "IPSEC "); 657 seq_puts(seq, "IPSEC ");
658 if (pkt_dev->spi) 658 if (pkt_dev->spi)
659 seq_printf(seq, "spi:%u", pkt_dev->spi); 659 seq_printf(seq, "spi:%u", pkt_dev->spi);
660 } 660 }
661#endif 661#endif
662 662
663 if (pkt_dev->flags & F_MACSRC_RND) 663 if (pkt_dev->flags & F_MACSRC_RND)
664 seq_printf(seq, "MACSRC_RND "); 664 seq_puts(seq, "MACSRC_RND ");
665 665
666 if (pkt_dev->flags & F_MACDST_RND) 666 if (pkt_dev->flags & F_MACDST_RND)
667 seq_printf(seq, "MACDST_RND "); 667 seq_puts(seq, "MACDST_RND ");
668 668
669 if (pkt_dev->flags & F_VID_RND) 669 if (pkt_dev->flags & F_VID_RND)
670 seq_printf(seq, "VID_RND "); 670 seq_puts(seq, "VID_RND ");
671 671
672 if (pkt_dev->flags & F_SVID_RND) 672 if (pkt_dev->flags & F_SVID_RND)
673 seq_printf(seq, "SVID_RND "); 673 seq_puts(seq, "SVID_RND ");
674 674
675 if (pkt_dev->flags & F_NODE) 675 if (pkt_dev->flags & F_NODE)
676 seq_printf(seq, "NODE_ALLOC "); 676 seq_puts(seq, "NODE_ALLOC ");
677 677
678 seq_puts(seq, "\n"); 678 seq_puts(seq, "\n");
679 679
@@ -716,7 +716,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
716 if (pkt_dev->result[0]) 716 if (pkt_dev->result[0])
717 seq_printf(seq, "Result: %s\n", pkt_dev->result); 717 seq_printf(seq, "Result: %s\n", pkt_dev->result);
718 else 718 else
719 seq_printf(seq, "Result: Idle\n"); 719 seq_puts(seq, "Result: Idle\n");
720 720
721 return 0; 721 return 0;
722} 722}
@@ -1735,14 +1735,14 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
1735 1735
1736 BUG_ON(!t); 1736 BUG_ON(!t);
1737 1737
1738 seq_printf(seq, "Running: "); 1738 seq_puts(seq, "Running: ");
1739 1739
1740 if_lock(t); 1740 if_lock(t);
1741 list_for_each_entry(pkt_dev, &t->if_list, list) 1741 list_for_each_entry(pkt_dev, &t->if_list, list)
1742 if (pkt_dev->running) 1742 if (pkt_dev->running)
1743 seq_printf(seq, "%s ", pkt_dev->odevname); 1743 seq_printf(seq, "%s ", pkt_dev->odevname);
1744 1744
1745 seq_printf(seq, "\nStopped: "); 1745 seq_puts(seq, "\nStopped: ");
1746 1746
1747 list_for_each_entry(pkt_dev, &t->if_list, list) 1747 list_for_each_entry(pkt_dev, &t->if_list, list)
1748 if (!pkt_dev->running) 1748 if (!pkt_dev->running)
@@ -1751,7 +1751,7 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
1751 if (t->result[0]) 1751 if (t->result[0])
1752 seq_printf(seq, "\nResult: %s\n", t->result); 1752 seq_printf(seq, "\nResult: %s\n", t->result);
1753 else 1753 else
1754 seq_printf(seq, "\nResult: NA\n"); 1754 seq_puts(seq, "\nResult: NA\n");
1755 1755
1756 if_unlock(t); 1756 if_unlock(t);
1757 1757
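
The pktgen churn above is one mechanical rule applied throughout: seq_printf() with a constant format string and no arguments is just a slower seq_puts(), and also risks a stray '%' in the string being parsed as a conversion specifier. The pattern, as a sketch:

	/* before: the constant string is scanned for '%' at runtime */
	seq_printf(seq, "Result: Idle\n");

	/* after: straight copy, no format parsing */
	seq_puts(seq, "Result: Idle\n");

	/* seq_printf() stays where formatting is actually needed */
	seq_printf(seq, "Result: %s\n", pkt_dev->result);
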
diff --git a/net/core/ptp_classifier.c b/net/core/ptp_classifier.c
index eaba0f68f860..d3027a73fd4b 100644
--- a/net/core/ptp_classifier.c
+++ b/net/core/ptp_classifier.c
@@ -88,7 +88,7 @@ EXPORT_SYMBOL_GPL(ptp_classify_raw);
88 88
89void __init ptp_classifier_init(void) 89void __init ptp_classifier_init(void)
90{ 90{
91 static struct sock_filter ptp_filter[] = { 91 static struct sock_filter ptp_filter[] __initdata = {
92 { 0x28, 0, 0, 0x0000000c }, 92 { 0x28, 0, 0, 0x0000000c },
93 { 0x15, 0, 12, 0x00000800 }, 93 { 0x15, 0, 12, 0x00000800 },
94 { 0x30, 0, 0, 0x00000017 }, 94 { 0x30, 0, 0, 0x00000017 },
@@ -133,7 +133,7 @@ void __init ptp_classifier_init(void)
133 { 0x16, 0, 0, 0x00000000 }, 133 { 0x16, 0, 0, 0x00000000 },
134 { 0x06, 0, 0, 0x00000000 }, 134 { 0x06, 0, 0, 0x00000000 },
135 }; 135 };
136 struct sock_fprog ptp_prog = { 136 struct sock_fprog_kern ptp_prog = {
137 .len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter, 137 .len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter,
138 }; 138 };
139 139
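
struct sock_fprog_kern mirrors the UAPI struct sock_fprog except that .filter is a plain kernel pointer, which lets in-kernel users such as the PTP classifier hand a static program to the filter core without __user annotations. A hedged kernel-side sketch of the resulting API use (the demo_* names are hypothetical; the shape mirrors the ptp_classifier code above):

	static struct sock_filter demo_insns[] __initdata = {
		{ 0x06, 0, 0, 0x00000000 },	/* BPF_RET|BPF_K: drop all */
	};

	static struct sock_fprog_kern demo_prog = {
		.len	= ARRAY_SIZE(demo_insns),
		.filter	= demo_insns,
	};

	static struct sk_filter *demo_fp;

	static int __init demo_init(void)
	{
		/* validates, converts to internal BPF and picks JIT or
		 * interpreter; the program is copied, so __initdata is fine
		 */
		return sk_unattached_filter_create(&demo_fp, &demo_prog);
	}
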
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d57d7bc22182..233b5ae87583 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -798,8 +798,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
798 size += num_vfs * 798 size += num_vfs *
799 (nla_total_size(sizeof(struct ifla_vf_mac)) + 799 (nla_total_size(sizeof(struct ifla_vf_mac)) +
800 nla_total_size(sizeof(struct ifla_vf_vlan)) + 800 nla_total_size(sizeof(struct ifla_vf_vlan)) +
801 nla_total_size(sizeof(struct ifla_vf_tx_rate)) + 801 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
802 nla_total_size(sizeof(struct ifla_vf_spoofchk))); 802 nla_total_size(sizeof(struct ifla_vf_rate)));
803 return size; 803 return size;
804 } else 804 } else
805 return 0; 805 return 0;
@@ -1065,6 +1065,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1065 struct ifla_vf_info ivi; 1065 struct ifla_vf_info ivi;
1066 struct ifla_vf_mac vf_mac; 1066 struct ifla_vf_mac vf_mac;
1067 struct ifla_vf_vlan vf_vlan; 1067 struct ifla_vf_vlan vf_vlan;
1068 struct ifla_vf_rate vf_rate;
1068 struct ifla_vf_tx_rate vf_tx_rate; 1069 struct ifla_vf_tx_rate vf_tx_rate;
1069 struct ifla_vf_spoofchk vf_spoofchk; 1070 struct ifla_vf_spoofchk vf_spoofchk;
1070 struct ifla_vf_link_state vf_linkstate; 1071 struct ifla_vf_link_state vf_linkstate;
@@ -1085,6 +1086,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1085 break; 1086 break;
1086 vf_mac.vf = 1087 vf_mac.vf =
1087 vf_vlan.vf = 1088 vf_vlan.vf =
1089 vf_rate.vf =
1088 vf_tx_rate.vf = 1090 vf_tx_rate.vf =
1089 vf_spoofchk.vf = 1091 vf_spoofchk.vf =
1090 vf_linkstate.vf = ivi.vf; 1092 vf_linkstate.vf = ivi.vf;
@@ -1092,7 +1094,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1092 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac)); 1094 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
1093 vf_vlan.vlan = ivi.vlan; 1095 vf_vlan.vlan = ivi.vlan;
1094 vf_vlan.qos = ivi.qos; 1096 vf_vlan.qos = ivi.qos;
1095 vf_tx_rate.rate = ivi.tx_rate; 1097 vf_tx_rate.rate = ivi.max_tx_rate;
1098 vf_rate.min_tx_rate = ivi.min_tx_rate;
1099 vf_rate.max_tx_rate = ivi.max_tx_rate;
1096 vf_spoofchk.setting = ivi.spoofchk; 1100 vf_spoofchk.setting = ivi.spoofchk;
1097 vf_linkstate.link_state = ivi.linkstate; 1101 vf_linkstate.link_state = ivi.linkstate;
1098 vf = nla_nest_start(skb, IFLA_VF_INFO); 1102 vf = nla_nest_start(skb, IFLA_VF_INFO);
@@ -1102,6 +1106,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1102 } 1106 }
1103 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) || 1107 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1104 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) || 1108 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1109 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1110 &vf_rate) ||
1105 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), 1111 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1106 &vf_tx_rate) || 1112 &vf_tx_rate) ||
1107 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk), 1113 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
@@ -1208,6 +1214,8 @@ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
1208 .len = sizeof(struct ifla_vf_tx_rate) }, 1214 .len = sizeof(struct ifla_vf_tx_rate) },
1209 [IFLA_VF_SPOOFCHK] = { .type = NLA_BINARY, 1215 [IFLA_VF_SPOOFCHK] = { .type = NLA_BINARY,
1210 .len = sizeof(struct ifla_vf_spoofchk) }, 1216 .len = sizeof(struct ifla_vf_spoofchk) },
1217 [IFLA_VF_RATE] = { .type = NLA_BINARY,
1218 .len = sizeof(struct ifla_vf_rate) },
1211 [IFLA_VF_LINK_STATE] = { .type = NLA_BINARY, 1219 [IFLA_VF_LINK_STATE] = { .type = NLA_BINARY,
1212 .len = sizeof(struct ifla_vf_link_state) }, 1220 .len = sizeof(struct ifla_vf_link_state) },
1213}; 1221};
@@ -1369,11 +1377,29 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
1369 } 1377 }
1370 case IFLA_VF_TX_RATE: { 1378 case IFLA_VF_TX_RATE: {
1371 struct ifla_vf_tx_rate *ivt; 1379 struct ifla_vf_tx_rate *ivt;
1380 struct ifla_vf_info ivf;
1372 ivt = nla_data(vf); 1381 ivt = nla_data(vf);
1373 err = -EOPNOTSUPP; 1382 err = -EOPNOTSUPP;
1374 if (ops->ndo_set_vf_tx_rate) 1383 if (ops->ndo_get_vf_config)
1375 err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, 1384 err = ops->ndo_get_vf_config(dev, ivt->vf,
1376 ivt->rate); 1385 &ivf);
1386 if (err)
1387 break;
1388 err = -EOPNOTSUPP;
1389 if (ops->ndo_set_vf_rate)
1390 err = ops->ndo_set_vf_rate(dev, ivt->vf,
1391 ivf.min_tx_rate,
1392 ivt->rate);
1393 break;
1394 }
1395 case IFLA_VF_RATE: {
1396 struct ifla_vf_rate *ivt;
1397 ivt = nla_data(vf);
1398 err = -EOPNOTSUPP;
1399 if (ops->ndo_set_vf_rate)
1400 err = ops->ndo_set_vf_rate(dev, ivt->vf,
1401 ivt->min_tx_rate,
1402 ivt->max_tx_rate);
1377 break; 1403 break;
1378 } 1404 }
1379 case IFLA_VF_SPOOFCHK: { 1405 case IFLA_VF_SPOOFCHK: {
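
IFLA_VF_RATE supersedes IFLA_VF_TX_RATE by carrying both bandwidth bounds; note how the legacy setter above first fetches the current configuration via ndo_get_vf_config() so that an old-style max-only request does not clobber a previously configured minimum. The new UAPI attribute payload, as added to include/uapi/linux/if_link.h by this series (the zero semantics in the comments follow the usual convention and should be treated as an assumption here):

	struct ifla_vf_rate {
		__u32 vf;
		__u32 min_tx_rate;	/* Mbps; 0 means no guarantee */
		__u32 max_tx_rate;	/* Mbps; 0 means no limit */
	};
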
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 897da56f3aff..ba71212f0251 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -85,31 +85,6 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
85#endif 85#endif
86 86
87#ifdef CONFIG_INET 87#ifdef CONFIG_INET
88__u32 secure_ip_id(__be32 daddr)
89{
90 u32 hash[MD5_DIGEST_WORDS];
91
92 net_secret_init();
93 hash[0] = (__force __u32) daddr;
94 hash[1] = net_secret[13];
95 hash[2] = net_secret[14];
96 hash[3] = net_secret[15];
97
98 md5_transform(hash, net_secret);
99
100 return hash[0];
101}
102
103__u32 secure_ipv6_id(const __be32 daddr[4])
104{
105 __u32 hash[4];
106
107 net_secret_init();
108 memcpy(hash, daddr, 16);
109 md5_transform(hash, net_secret);
110
111 return hash[0];
112}
113 88
114__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, 89__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
115 __be16 sport, __be16 dport) 90 __be16 sport, __be16 dport)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 9433047b2453..bf92824af3f7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -694,7 +694,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
694#endif 694#endif
695 memcpy(new->cb, old->cb, sizeof(old->cb)); 695 memcpy(new->cb, old->cb, sizeof(old->cb));
696 new->csum = old->csum; 696 new->csum = old->csum;
697 new->local_df = old->local_df; 697 new->ignore_df = old->ignore_df;
698 new->pkt_type = old->pkt_type; 698 new->pkt_type = old->pkt_type;
699 new->ip_summed = old->ip_summed; 699 new->ip_summed = old->ip_summed;
700 skb_copy_queue_mapping(new, old); 700 skb_copy_queue_mapping(new, old);
@@ -951,10 +951,13 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
951EXPORT_SYMBOL(skb_copy); 951EXPORT_SYMBOL(skb_copy);
952 952
953/** 953/**
954 * __pskb_copy - create copy of an sk_buff with private head. 954 * __pskb_copy_fclone - create copy of an sk_buff with private head.
955 * @skb: buffer to copy 955 * @skb: buffer to copy
956 * @headroom: headroom of new skb 956 * @headroom: headroom of new skb
957 * @gfp_mask: allocation priority 957 * @gfp_mask: allocation priority
958 * @fclone: if true allocate the copy of the skb from the fclone
959 * cache instead of the head cache; it is recommended to set this
960 * to true for the cases where the copy will likely be cloned
958 * 961 *
959 * Make a copy of both an &sk_buff and part of its data, located 962 * Make a copy of both an &sk_buff and part of its data, located
960 * in header. Fragmented data remain shared. This is used when 963 * in header. Fragmented data remain shared. This is used when
@@ -964,11 +967,12 @@ EXPORT_SYMBOL(skb_copy);
964 * The returned buffer has a reference count of 1. 967 * The returned buffer has a reference count of 1.
965 */ 968 */
966 969
967struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask) 970struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
971 gfp_t gfp_mask, bool fclone)
968{ 972{
969 unsigned int size = skb_headlen(skb) + headroom; 973 unsigned int size = skb_headlen(skb) + headroom;
970 struct sk_buff *n = __alloc_skb(size, gfp_mask, 974 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
971 skb_alloc_rx_flag(skb), NUMA_NO_NODE); 975 struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
972 976
973 if (!n) 977 if (!n)
974 goto out; 978 goto out;
@@ -1008,7 +1012,7 @@ struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
1008out: 1012out:
1009 return n; 1013 return n;
1010} 1014}
1011EXPORT_SYMBOL(__pskb_copy); 1015EXPORT_SYMBOL(__pskb_copy_fclone);
1012 1016
1013/** 1017/**
1014 * pskb_expand_head - reallocate header of &sk_buff 1018 * pskb_expand_head - reallocate header of &sk_buff
@@ -2886,7 +2890,8 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
2886 if (unlikely(!proto)) 2890 if (unlikely(!proto))
2887 return ERR_PTR(-EINVAL); 2891 return ERR_PTR(-EINVAL);
2888 2892
2889 csum = !!can_checksum_protocol(features, proto); 2893 csum = !head_skb->encap_hdr_csum &&
2894 !!can_checksum_protocol(features, proto);
2890 2895
2891 headroom = skb_headroom(head_skb); 2896 headroom = skb_headroom(head_skb);
2892 pos = skb_headlen(head_skb); 2897 pos = skb_headlen(head_skb);
@@ -2984,6 +2989,8 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
2984 nskb->csum = skb_copy_and_csum_bits(head_skb, offset, 2989 nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
2985 skb_put(nskb, len), 2990 skb_put(nskb, len),
2986 len, 0); 2991 len, 0);
2992 SKB_GSO_CB(nskb)->csum_start =
2993 skb_headroom(nskb) + offset;
2987 continue; 2994 continue;
2988 } 2995 }
2989 2996
@@ -3053,6 +3060,8 @@ perform_csum_check:
3053 nskb->csum = skb_checksum(nskb, doffset, 3060 nskb->csum = skb_checksum(nskb, doffset,
3054 nskb->len - doffset, 0); 3061 nskb->len - doffset, 0);
3055 nskb->ip_summed = CHECKSUM_NONE; 3062 nskb->ip_summed = CHECKSUM_NONE;
3063 SKB_GSO_CB(nskb)->csum_start =
3064 skb_headroom(nskb) + doffset;
3056 } 3065 }
3057 } while ((offset += len) < head_skb->len); 3066 } while ((offset += len) < head_skb->len);
3058 3067
@@ -3914,7 +3923,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
3914 skb->tstamp.tv64 = 0; 3923 skb->tstamp.tv64 = 0;
3915 skb->pkt_type = PACKET_HOST; 3924 skb->pkt_type = PACKET_HOST;
3916 skb->skb_iif = 0; 3925 skb->skb_iif = 0;
3917 skb->local_df = 0; 3926 skb->ignore_df = 0;
3918 skb_dst_drop(skb); 3927 skb_dst_drop(skb);
3919 skb->mark = 0; 3928 skb->mark = 0;
3920 secpath_reset(skb); 3929 secpath_reset(skb);
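
Renaming __pskb_copy() to __pskb_copy_fclone() lets callers that expect the copy to be cloned again shortly (retransmit-style paths) allocate from the fclone cache. Existing callers presumably keep working through a header-side wrapper along these lines (the wrapper is not part of this hunk):

	static inline struct sk_buff *__pskb_copy(struct sk_buff *skb,
						  int headroom, gfp_t gfp_mask)
	{
		return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
	}
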
diff --git a/net/core/sock.c b/net/core/sock.c
index 664ee4295b6f..026e01f70274 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -784,7 +784,7 @@ set_rcvbuf:
784 break; 784 break;
785 785
786 case SO_NO_CHECK: 786 case SO_NO_CHECK:
787 sk->sk_no_check = valbool; 787 sk->sk_no_check_tx = valbool;
788 break; 788 break;
789 789
790 case SO_PRIORITY: 790 case SO_PRIORITY:
@@ -1064,7 +1064,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
1064 break; 1064 break;
1065 1065
1066 case SO_NO_CHECK: 1066 case SO_NO_CHECK:
1067 v.val = sk->sk_no_check; 1067 v.val = sk->sk_no_check_tx;
1068 break; 1068 break;
1069 1069
1070 case SO_PRIORITY: 1070 case SO_PRIORITY:
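
Splitting sk_no_check into a TX-only flag does not change the userspace contract: SO_NO_CHECK still only disables checksum generation on transmit for a UDP socket. A minimal userspace sketch:

	#include <sys/socket.h>

	#ifndef SO_NO_CHECK
	#define SO_NO_CHECK 11		/* asm-generic/socket.h */
	#endif

	static int udp_disable_tx_csum(int fd)
	{
		int one = 1;

		return setsockopt(fd, SOL_SOCKET, SO_NO_CHECK,
				  &one, sizeof(one));
	}
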
diff --git a/net/core/tso.c b/net/core/tso.c
new file mode 100644
index 000000000000..8c3203c585b0
--- /dev/null
+++ b/net/core/tso.c
@@ -0,0 +1,77 @@
1#include <linux/export.h>
2#include <net/ip.h>
3#include <net/tso.h>
4
5/* Calculate expected number of TX descriptors */
6int tso_count_descs(struct sk_buff *skb)
7{
8 /* The Marvell Way */
9 return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags;
10}
11EXPORT_SYMBOL(tso_count_descs);
12
13void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
14 int size, bool is_last)
15{
16 struct iphdr *iph;
17 struct tcphdr *tcph;
18 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
19 int mac_hdr_len = skb_network_offset(skb);
20
21 memcpy(hdr, skb->data, hdr_len);
22 iph = (struct iphdr *)(hdr + mac_hdr_len);
23 iph->id = htons(tso->ip_id);
24 iph->tot_len = htons(size + hdr_len - mac_hdr_len);
25 tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
26 tcph->seq = htonl(tso->tcp_seq);
27 tso->ip_id++;
28
29 if (!is_last) {
 30 /* Clear all special flags for all but the last packet */
31 tcph->psh = 0;
32 tcph->fin = 0;
33 tcph->rst = 0;
34 }
35}
36EXPORT_SYMBOL(tso_build_hdr);
37
38void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size)
39{
40 tso->tcp_seq += size;
41 tso->size -= size;
42 tso->data += size;
43
44 if ((tso->size == 0) &&
45 (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
46 skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
47
48 /* Move to next segment */
49 tso->size = frag->size;
50 tso->data = page_address(frag->page.p) + frag->page_offset;
51 tso->next_frag_idx++;
52 }
53}
54EXPORT_SYMBOL(tso_build_data);
55
56void tso_start(struct sk_buff *skb, struct tso_t *tso)
57{
58 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
59
60 tso->ip_id = ntohs(ip_hdr(skb)->id);
61 tso->tcp_seq = ntohl(tcp_hdr(skb)->seq);
62 tso->next_frag_idx = 0;
63
64 /* Build first data */
65 tso->size = skb_headlen(skb) - hdr_len;
66 tso->data = skb->data + hdr_len;
67 if ((tso->size == 0) &&
68 (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
69 skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
70
71 /* Move to next segment */
72 tso->size = frag->size;
73 tso->data = page_address(frag->page.p) + frag->page_offset;
74 tso->next_frag_idx++;
75 }
76}
77EXPORT_SYMBOL(tso_start);
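
These helpers factor out the header/payload bookkeeping a driver-side software TSO loop needs: tso_start() primes the cursor, tso_build_hdr() replicates and patches the protocol headers per segment, and tso_build_data() advances through the linear area and frags. A hedged sketch of the intended calling pattern in an ndo_start_xmit() path (the demo_queue_* descriptor calls are hypothetical placeholders):

	static void demo_xmit_tso(struct sk_buff *skb)
	{
		int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		int total_len = skb->len - hdr_len;
		struct tso_t tso;

		tso_start(skb, &tso);

		while (total_len > 0) {
			char hdr[128];	/* real drivers use a DMA-able area */
			int data_left = min_t(int, skb_shinfo(skb)->gso_size,
					      total_len);

			total_len -= data_left;

			/* one replicated header per segment */
			tso_build_hdr(skb, hdr, &tso, data_left,
				      total_len == 0);
			demo_queue_hdr_desc(hdr, hdr_len);

			/* then as many payload descriptors as needed */
			while (data_left > 0) {
				int size = min_t(int, tso.size, data_left);

				demo_queue_data_desc(tso.data, size);
				tso_build_data(skb, &tso, size);
				data_left -= size;
			}
		}
	}
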
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 22b5d818b200..6ca645c4b48e 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -1024,7 +1024,6 @@ static struct inet_protosw dccp_v4_protosw = {
1024 .protocol = IPPROTO_DCCP, 1024 .protocol = IPPROTO_DCCP,
1025 .prot = &dccp_v4_prot, 1025 .prot = &dccp_v4_prot,
1026 .ops = &inet_dccp_ops, 1026 .ops = &inet_dccp_ops,
1027 .no_check = 0,
1028 .flags = INET_PROTOSW_ICSK, 1027 .flags = INET_PROTOSW_ICSK,
1029}; 1028};
1030 1029
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index eb892b4f4814..de2c1e719305 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1084,14 +1084,15 @@ EXPORT_SYMBOL_GPL(dccp_shutdown);
1084 1084
1085static inline int dccp_mib_init(void) 1085static inline int dccp_mib_init(void)
1086{ 1086{
1087 return snmp_mib_init((void __percpu **)dccp_statistics, 1087 dccp_statistics = alloc_percpu(struct dccp_mib);
1088 sizeof(struct dccp_mib), 1088 if (!dccp_statistics)
1089 __alignof__(struct dccp_mib)); 1089 return -ENOMEM;
1090 return 0;
1090} 1091}
1091 1092
1092static inline void dccp_mib_exit(void) 1093static inline void dccp_mib_exit(void)
1093{ 1094{
1094 snmp_mib_free((void __percpu **)dccp_statistics); 1095 free_percpu(dccp_statistics);
1095} 1096}
1096 1097
1097static int thash_entries; 1098static int thash_entries;
diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c
index 607ab71b5a0c..53731e45403c 100644
--- a/net/dccp/sysctl.c
+++ b/net/dccp/sysctl.c
@@ -20,6 +20,7 @@
20 20
21/* Boundary values */ 21/* Boundary values */
22static int zero = 0, 22static int zero = 0,
23 one = 1,
23 u8_max = 0xFF; 24 u8_max = 0xFF;
24static unsigned long seqw_min = DCCPF_SEQ_WMIN, 25static unsigned long seqw_min = DCCPF_SEQ_WMIN,
25 seqw_max = 0xFFFFFFFF; /* maximum on 32 bit */ 26 seqw_max = 0xFFFFFFFF; /* maximum on 32 bit */
@@ -58,7 +59,7 @@ static struct ctl_table dccp_default_table[] = {
58 .maxlen = sizeof(sysctl_dccp_request_retries), 59 .maxlen = sizeof(sysctl_dccp_request_retries),
59 .mode = 0644, 60 .mode = 0644,
60 .proc_handler = proc_dointvec_minmax, 61 .proc_handler = proc_dointvec_minmax,
61 .extra1 = &zero, 62 .extra1 = &one,
62 .extra2 = &u8_max, 63 .extra2 = &u8_max,
63 }, 64 },
64 { 65 {
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 16f0b223102e..1cd46a345cb0 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -280,7 +280,7 @@ static ktime_t dccp_timestamp_seed;
280 */ 280 */
281u32 dccp_timestamp(void) 281u32 dccp_timestamp(void)
282{ 282{
283 s64 delta = ktime_us_delta(ktime_get_real(), dccp_timestamp_seed); 283 u64 delta = (u64)ktime_us_delta(ktime_get_real(), dccp_timestamp_seed);
284 284
285 do_div(delta, 10); 285 do_div(delta, 10);
286 return delta; 286 return delta;
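
The cast matters because do_div() divides an unsigned 64-bit dividend in place and returns the remainder; its contract does not cover a signed s64, which on 32-bit architectures can yield garbage. The corrected idiom, as a sketch:

	u64 delta = (u64)ktime_us_delta(ktime_get_real(),
					dccp_timestamp_seed);
	u32 rem;

	rem = do_div(delta, 10);	/* delta /= 10; rem in [0, 9] */
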
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 4c04848953bd..ae011b46c071 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -481,7 +481,7 @@ static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gf
481 481
482 sk->sk_backlog_rcv = dn_nsp_backlog_rcv; 482 sk->sk_backlog_rcv = dn_nsp_backlog_rcv;
483 sk->sk_destruct = dn_destruct; 483 sk->sk_destruct = dn_destruct;
484 sk->sk_no_check = 1; 484 sk->sk_no_check_tx = 1;
485 sk->sk_family = PF_DECnet; 485 sk->sk_family = PF_DECnet;
486 sk->sk_protocol = 0; 486 sk->sk_protocol = 0;
487 sk->sk_allocation = gfp; 487 sk->sk_allocation = gfp;
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
index 6853d22ebc07..9acec61f5433 100644
--- a/net/dns_resolver/dns_query.c
+++ b/net/dns_resolver/dns_query.c
@@ -93,8 +93,8 @@ int dns_query(const char *type, const char *name, size_t namelen,
93 } 93 }
94 94
95 if (!namelen) 95 if (!namelen)
96 namelen = strlen(name); 96 namelen = strnlen(name, 256);
97 if (namelen < 3) 97 if (namelen < 3 || namelen > 255)
98 return -EINVAL; 98 return -EINVAL;
99 desclen += namelen + 1; 99 desclen += namelen + 1;
100 100
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 02c0e1716f64..64c5af0a10dd 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -346,7 +346,7 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
346 return slave_dev; 346 return slave_dev;
347 347
348 slave_dev->features = master->vlan_features; 348 slave_dev->features = master->vlan_features;
349 SET_ETHTOOL_OPS(slave_dev, &dsa_slave_ethtool_ops); 349 slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
350 eth_hw_addr_inherit(slave_dev, master); 350 eth_hw_addr_inherit(slave_dev, master);
351 slave_dev->tx_queue_len = 0; 351 slave_dev->tx_queue_len = 0;
352 352
diff --git a/net/ieee802154/6lowpan_rtnl.c b/net/ieee802154/6lowpan_rtnl.c
index 0f5a69ed746d..fe6bd7a71081 100644
--- a/net/ieee802154/6lowpan_rtnl.c
+++ b/net/ieee802154/6lowpan_rtnl.c
@@ -92,6 +92,7 @@ static int lowpan_header_create(struct sk_buff *skb,
92 const u8 *saddr = _saddr; 92 const u8 *saddr = _saddr;
93 const u8 *daddr = _daddr; 93 const u8 *daddr = _daddr;
94 struct ieee802154_addr sa, da; 94 struct ieee802154_addr sa, da;
95 struct ieee802154_mac_cb *cb = mac_cb_init(skb);
95 96
96 /* TODO: 97 /* TODO:
97 * if this package isn't ipv6 one, where should it be routed? 98 * if this package isn't ipv6 one, where should it be routed?
@@ -115,8 +116,7 @@ static int lowpan_header_create(struct sk_buff *skb,
115 * from MAC subif of the 'dev' and 'real_dev' network devices, but 116 * from MAC subif of the 'dev' and 'real_dev' network devices, but
116 * this isn't implemented in mainline yet, so currently we assign 0xff 117 * this isn't implemented in mainline yet, so currently we assign 0xff
117 */ 118 */
118 mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA; 119 cb->type = IEEE802154_FC_TYPE_DATA;
119 mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
120 120
121 /* prepare wpan address data */ 121 /* prepare wpan address data */
122 sa.mode = IEEE802154_ADDR_LONG; 122 sa.mode = IEEE802154_ADDR_LONG;
@@ -135,11 +135,10 @@ static int lowpan_header_create(struct sk_buff *skb,
135 } else { 135 } else {
136 da.mode = IEEE802154_ADDR_LONG; 136 da.mode = IEEE802154_ADDR_LONG;
137 da.extended_addr = ieee802154_devaddr_from_raw(daddr); 137 da.extended_addr = ieee802154_devaddr_from_raw(daddr);
138
139 /* request acknowledgment */
140 mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
141 } 138 }
142 139
140 cb->ackreq = !lowpan_is_addr_broadcast(daddr);
141
143 return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev, 142 return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
144 type, (void *)&da, (void *)&sa, 0); 143 type, (void *)&da, (void *)&sa, 0);
145} 144}
@@ -221,139 +220,149 @@ static int lowpan_set_address(struct net_device *dev, void *p)
221 return 0; 220 return 0;
222} 221}
223 222
224static int 223static struct sk_buff*
225lowpan_fragment_xmit(struct sk_buff *skb, u8 *head, 224lowpan_alloc_frag(struct sk_buff *skb, int size,
226 int mlen, int plen, int offset, int type) 225 const struct ieee802154_hdr *master_hdr)
227{ 226{
227 struct net_device *real_dev = lowpan_dev_info(skb->dev)->real_dev;
228 struct sk_buff *frag; 228 struct sk_buff *frag;
229 int hlen; 229 int rc;
230 230
231 hlen = (type == LOWPAN_DISPATCH_FRAG1) ? 231 frag = alloc_skb(real_dev->hard_header_len +
232 LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE; 232 real_dev->needed_tailroom + size,
233 233 GFP_ATOMIC);
234 raw_dump_inline(__func__, "6lowpan fragment header", head, hlen); 234
235 if (likely(frag)) {
236 frag->dev = real_dev;
237 frag->priority = skb->priority;
238 skb_reserve(frag, real_dev->hard_header_len);
239 skb_reset_network_header(frag);
240 *mac_cb(frag) = *mac_cb(skb);
241
242 rc = dev_hard_header(frag, real_dev, 0, &master_hdr->dest,
243 &master_hdr->source, size);
244 if (rc < 0) {
245 kfree_skb(frag);
246 return ERR_PTR(-rc);
247 }
248 } else {
249 frag = ERR_PTR(ENOMEM);
250 }
235 251
236 frag = netdev_alloc_skb(skb->dev, 252 return frag;
237 hlen + mlen + plen + IEEE802154_MFR_SIZE); 253}
238 if (!frag)
239 return -ENOMEM;
240 254
241 frag->priority = skb->priority; 255static int
256lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
257 u8 *frag_hdr, int frag_hdrlen,
258 int offset, int len)
259{
260 struct sk_buff *frag;
242 261
243 /* copy header, MFR and payload */ 262 raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen);
244 skb_put(frag, mlen);
245 skb_copy_to_linear_data(frag, skb_mac_header(skb), mlen);
246 263
247 skb_put(frag, hlen); 264 frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr);
248 skb_copy_to_linear_data_offset(frag, mlen, head, hlen); 265 if (IS_ERR(frag))
266 return -PTR_ERR(frag);
249 267
250 skb_put(frag, plen); 268 memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen);
251 skb_copy_to_linear_data_offset(frag, mlen + hlen, 269 memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len);
252 skb_network_header(skb) + offset, plen);
253 270
254 raw_dump_table(__func__, " raw fragment dump", frag->data, frag->len); 271 raw_dump_table(__func__, " fragment dump", frag->data, frag->len);
255 272
256 return dev_queue_xmit(frag); 273 return dev_queue_xmit(frag);
257} 274}
258 275
259static int 276static int
260lowpan_skb_fragmentation(struct sk_buff *skb, struct net_device *dev) 277lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev,
278 const struct ieee802154_hdr *wpan_hdr)
261{ 279{
262 int err; 280 u16 dgram_size, dgram_offset;
263 u16 dgram_offset, dgram_size, payload_length, header_length, 281 __be16 frag_tag;
264 lowpan_size, frag_plen, offset; 282 u8 frag_hdr[5];
265 __be16 tag; 283 int frag_cap, frag_len, payload_cap, rc;
266 u8 head[5]; 284 int skb_unprocessed, skb_offset;
267 285
268 header_length = skb->mac_len;
269 payload_length = skb->len - header_length;
270 tag = lowpan_dev_info(dev)->fragment_tag++;
271 lowpan_size = skb_network_header_len(skb);
272 dgram_size = lowpan_uncompress_size(skb, &dgram_offset) - 286 dgram_size = lowpan_uncompress_size(skb, &dgram_offset) -
273 header_length; 287 skb->mac_len;
288 frag_tag = lowpan_dev_info(dev)->fragment_tag++;
274 289
275 /* first fragment header */ 290 frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
276 head[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x7); 291 frag_hdr[1] = dgram_size & 0xff;
277 head[1] = dgram_size & 0xff; 292 memcpy(frag_hdr + 2, &frag_tag, sizeof(frag_tag));
278 memcpy(head + 2, &tag, sizeof(tag));
279 293
280 /* calc the nearest payload length(divided to 8) for first fragment 294 payload_cap = ieee802154_max_payload(wpan_hdr);
281 * which fits into a IEEE802154_MTU
282 */
283 frag_plen = round_down(IEEE802154_MTU - header_length -
284 LOWPAN_FRAG1_HEAD_SIZE - lowpan_size -
285 IEEE802154_MFR_SIZE, 8);
286
287 err = lowpan_fragment_xmit(skb, head, header_length,
288 frag_plen + lowpan_size, 0,
289 LOWPAN_DISPATCH_FRAG1);
290 if (err) {
291 pr_debug("%s unable to send FRAG1 packet (tag: %d)",
292 __func__, tag);
293 goto exit;
294 }
295 295
296 offset = lowpan_size + frag_plen; 296 frag_len = round_down(payload_cap - LOWPAN_FRAG1_HEAD_SIZE -
297 dgram_offset += frag_plen; 297 skb_network_header_len(skb), 8);
298 298
299 /* next fragment header */ 299 skb_offset = skb_network_header_len(skb);
300 head[0] &= ~LOWPAN_DISPATCH_FRAG1; 300 skb_unprocessed = skb->len - skb->mac_len - skb_offset;
301 head[0] |= LOWPAN_DISPATCH_FRAGN;
302 301
303 frag_plen = round_down(IEEE802154_MTU - header_length - 302 rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
304 LOWPAN_FRAGN_HEAD_SIZE - IEEE802154_MFR_SIZE, 8); 303 LOWPAN_FRAG1_HEAD_SIZE, 0,
304 frag_len + skb_network_header_len(skb));
305 if (rc) {
306 pr_debug("%s unable to send FRAG1 packet (tag: %d)",
307 __func__, frag_tag);
308 goto err;
309 }
305 310
306 while (payload_length - offset > 0) { 311 frag_hdr[0] &= ~LOWPAN_DISPATCH_FRAG1;
307 int len = frag_plen; 312 frag_hdr[0] |= LOWPAN_DISPATCH_FRAGN;
313 frag_cap = round_down(payload_cap - LOWPAN_FRAGN_HEAD_SIZE, 8);
308 314
309 head[4] = dgram_offset >> 3; 315 do {
316 dgram_offset += frag_len;
317 skb_offset += frag_len;
318 skb_unprocessed -= frag_len;
319 frag_len = min(frag_cap, skb_unprocessed);
310 320
311 if (payload_length - offset < len) 321 frag_hdr[4] = dgram_offset >> 3;
312 len = payload_length - offset;
313 322
314 err = lowpan_fragment_xmit(skb, head, header_length, len, 323 rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
315 offset, LOWPAN_DISPATCH_FRAGN); 324 LOWPAN_FRAGN_HEAD_SIZE, skb_offset,
316 if (err) { 325 frag_len);
326 if (rc) {
317 pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n", 327 pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
318 __func__, tag, offset); 328 __func__, frag_tag, skb_offset);
319 goto exit; 329 goto err;
320 } 330 }
331 } while (skb_unprocessed > frag_cap);
321 332
322 offset += len; 333 consume_skb(skb);
323 dgram_offset += len; 334 return NET_XMIT_SUCCESS;
324 }
325 335
326exit: 336err:
327 return err; 337 kfree_skb(skb);
338 return rc;
328} 339}
329 340
330static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev) 341static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
331{ 342{
332 int err = -1; 343 struct ieee802154_hdr wpan_hdr;
344 int max_single;
333 345
334 pr_debug("package xmit\n"); 346 pr_debug("package xmit\n");
335 347
336 skb->dev = lowpan_dev_info(dev)->real_dev; 348 if (ieee802154_hdr_peek(skb, &wpan_hdr) < 0) {
337 if (skb->dev == NULL) { 349 kfree_skb(skb);
338 pr_debug("ERROR: no real wpan device found\n"); 350 return NET_XMIT_DROP;
339 goto error;
340 } 351 }
341 352
342 /* Send directly if less than the MTU minus the 2 checksum bytes. */ 353 max_single = ieee802154_max_payload(&wpan_hdr);
343 if (skb->len <= IEEE802154_MTU - IEEE802154_MFR_SIZE) {
344 err = dev_queue_xmit(skb);
345 goto out;
346 }
347 354
348 pr_debug("frame is too big, fragmentation is needed\n"); 355 if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
349 err = lowpan_skb_fragmentation(skb, dev); 356 skb->dev = lowpan_dev_info(dev)->real_dev;
350error: 357 return dev_queue_xmit(skb);
351 dev_kfree_skb(skb); 358 } else {
352out: 359 netdev_tx_t rc;
353 if (err) 360
354 pr_debug("ERROR: xmit failed\n"); 361 pr_debug("frame is too big, fragmentation is needed\n");
362 rc = lowpan_xmit_fragmented(skb, dev, &wpan_hdr);
355 363
356 return (err < 0) ? NET_XMIT_DROP : err; 364 return rc < 0 ? NET_XMIT_DROP : rc;
365 }
357} 366}
358 367
359static struct wpan_phy *lowpan_get_phy(const struct net_device *dev) 368static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
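
The rewritten fragmentation path emits the RFC 4944 fragment headers directly into frag_hdr[]: FRAG1 is 4 octets (dispatch plus the high bits of the 11-bit datagram size, the low size byte, then the 16-bit tag) and FRAGN adds a fifth octet holding the datagram offset in 8-octet units, which is why payload lengths are round_down()'d to 8 and the offset is stored as dgram_offset >> 3. A sketch of the two layouts as packing helpers, restating what the hunk above builds inline:

	static void lowpan_frag1_hdr(u8 hdr[4], u16 dgram_size, __be16 tag)
	{
		hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
		hdr[1] = dgram_size & 0xff;	/* low 8 of 11 size bits */
		memcpy(hdr + 2, &tag, sizeof(tag));
	}

	static void lowpan_fragn_hdr(u8 hdr[5], u16 dgram_size, __be16 tag,
				     u16 dgram_offset)
	{
		hdr[0] = LOWPAN_DISPATCH_FRAGN | ((dgram_size >> 8) & 0x07);
		hdr[1] = dgram_size & 0xff;
		memcpy(hdr + 2, &tag, sizeof(tag));
		hdr[4] = dgram_offset >> 3;	/* offset in 8-octet units */
	}
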
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 786437bc0c08..4f0ed8780194 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -21,6 +21,7 @@
21 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> 21 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
22 */ 22 */
23 23
24#include <linux/capability.h>
24#include <linux/net.h> 25#include <linux/net.h>
25#include <linux/module.h> 26#include <linux/module.h>
26#include <linux/if_arp.h> 27#include <linux/if_arp.h>
@@ -45,7 +46,12 @@ struct dgram_sock {
45 struct ieee802154_addr dst_addr; 46 struct ieee802154_addr dst_addr;
46 47
47 unsigned int bound:1; 48 unsigned int bound:1;
49 unsigned int connected:1;
48 unsigned int want_ack:1; 50 unsigned int want_ack:1;
51 unsigned int secen:1;
52 unsigned int secen_override:1;
53 unsigned int seclevel:3;
54 unsigned int seclevel_override:1;
49}; 55};
50 56
51static inline struct dgram_sock *dgram_sk(const struct sock *sk) 57static inline struct dgram_sock *dgram_sk(const struct sock *sk)
@@ -73,10 +79,7 @@ static int dgram_init(struct sock *sk)
73{ 79{
74 struct dgram_sock *ro = dgram_sk(sk); 80 struct dgram_sock *ro = dgram_sk(sk);
75 81
76 ro->dst_addr.mode = IEEE802154_ADDR_LONG;
77 ro->dst_addr.pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
78 ro->want_ack = 1; 82 ro->want_ack = 1;
79 memset(&ro->dst_addr.extended_addr, 0xff, IEEE802154_ADDR_LEN);
80 return 0; 83 return 0;
81} 84}
82 85
@@ -183,6 +186,7 @@ static int dgram_connect(struct sock *sk, struct sockaddr *uaddr,
183 } 186 }
184 187
185 ieee802154_addr_from_sa(&ro->dst_addr, &addr->addr); 188 ieee802154_addr_from_sa(&ro->dst_addr, &addr->addr);
189 ro->connected = 1;
186 190
187out: 191out:
188 release_sock(sk); 192 release_sock(sk);
@@ -194,10 +198,7 @@ static int dgram_disconnect(struct sock *sk, int flags)
194 struct dgram_sock *ro = dgram_sk(sk); 198 struct dgram_sock *ro = dgram_sk(sk);
195 199
196 lock_sock(sk); 200 lock_sock(sk);
197 201 ro->connected = 0;
198 ro->dst_addr.mode = IEEE802154_ADDR_LONG;
199 memset(&ro->dst_addr.extended_addr, 0xff, IEEE802154_ADDR_LEN);
200
201 release_sock(sk); 202 release_sock(sk);
202 203
203 return 0; 204 return 0;
@@ -209,7 +210,9 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
209 struct net_device *dev; 210 struct net_device *dev;
210 unsigned int mtu; 211 unsigned int mtu;
211 struct sk_buff *skb; 212 struct sk_buff *skb;
213 struct ieee802154_mac_cb *cb;
212 struct dgram_sock *ro = dgram_sk(sk); 214 struct dgram_sock *ro = dgram_sk(sk);
215 struct ieee802154_addr dst_addr;
213 int hlen, tlen; 216 int hlen, tlen;
214 int err; 217 int err;
215 218
@@ -218,6 +221,11 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
218 return -EOPNOTSUPP; 221 return -EOPNOTSUPP;
219 } 222 }
220 223
224 if (!ro->connected && !msg->msg_name)
225 return -EDESTADDRREQ;
226 else if (ro->connected && msg->msg_name)
227 return -EISCONN;
228
221 if (!ro->bound) 229 if (!ro->bound)
222 dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154); 230 dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
223 else 231 else
@@ -249,18 +257,28 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
249 257
250 skb_reset_network_header(skb); 258 skb_reset_network_header(skb);
251 259
252 mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA; 260 cb = mac_cb_init(skb);
253 if (ro->want_ack) 261 cb->type = IEEE802154_FC_TYPE_DATA;
254 mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ; 262 cb->ackreq = ro->want_ack;
263
264 if (msg->msg_name) {
265 DECLARE_SOCKADDR(struct sockaddr_ieee802154*, daddr, msg->msg_name);
255 266
256 mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev); 267 ieee802154_addr_from_sa(&dst_addr, &daddr->addr);
257 err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &ro->dst_addr, 268 } else {
258 ro->bound ? &ro->src_addr : NULL, size); 269 dst_addr = ro->dst_addr;
270 }
271
272 cb->secen = ro->secen;
273 cb->secen_override = ro->secen_override;
274 cb->seclevel = ro->seclevel;
275 cb->seclevel_override = ro->seclevel_override;
276
277 err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &dst_addr,
278 ro->bound ? &ro->src_addr : NULL, size);
259 if (err < 0) 279 if (err < 0)
260 goto out_skb; 280 goto out_skb;
261 281
262 skb_reset_mac_header(skb);
263
264 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); 282 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
265 if (err < 0) 283 if (err < 0)
266 goto out_skb; 284 goto out_skb;
@@ -419,6 +437,20 @@ static int dgram_getsockopt(struct sock *sk, int level, int optname,
419 case WPAN_WANTACK: 437 case WPAN_WANTACK:
420 val = ro->want_ack; 438 val = ro->want_ack;
421 break; 439 break;
440 case WPAN_SECURITY:
441 if (!ro->secen_override)
442 val = WPAN_SECURITY_DEFAULT;
443 else if (ro->secen)
444 val = WPAN_SECURITY_ON;
445 else
446 val = WPAN_SECURITY_OFF;
447 break;
448 case WPAN_SECURITY_LEVEL:
449 if (!ro->seclevel_override)
450 val = WPAN_SECURITY_LEVEL_DEFAULT;
451 else
452 val = ro->seclevel;
453 break;
422 default: 454 default:
423 return -ENOPROTOOPT; 455 return -ENOPROTOOPT;
424 } 456 }
@@ -434,6 +466,7 @@ static int dgram_setsockopt(struct sock *sk, int level, int optname,
434 char __user *optval, unsigned int optlen) 466 char __user *optval, unsigned int optlen)
435{ 467{
436 struct dgram_sock *ro = dgram_sk(sk); 468 struct dgram_sock *ro = dgram_sk(sk);
469 struct net *net = sock_net(sk);
437 int val; 470 int val;
438 int err = 0; 471 int err = 0;
439 472
@@ -449,6 +482,47 @@ static int dgram_setsockopt(struct sock *sk, int level, int optname,
449 case WPAN_WANTACK: 482 case WPAN_WANTACK:
450 ro->want_ack = !!val; 483 ro->want_ack = !!val;
451 break; 484 break;
485 case WPAN_SECURITY:
486 if (!ns_capable(net->user_ns, CAP_NET_ADMIN) &&
487 !ns_capable(net->user_ns, CAP_NET_RAW)) {
488 err = -EPERM;
489 break;
490 }
491
492 switch (val) {
493 case WPAN_SECURITY_DEFAULT:
494 ro->secen_override = 0;
495 break;
496 case WPAN_SECURITY_ON:
497 ro->secen_override = 1;
498 ro->secen = 1;
499 break;
500 case WPAN_SECURITY_OFF:
501 ro->secen_override = 1;
502 ro->secen = 0;
503 break;
504 default:
505 err = -EINVAL;
506 break;
507 }
508 break;
509 case WPAN_SECURITY_LEVEL:
510 if (!ns_capable(net->user_ns, CAP_NET_ADMIN) &&
511 !ns_capable(net->user_ns, CAP_NET_RAW)) {
512 err = -EPERM;
513 break;
514 }
515
516 if (val < WPAN_SECURITY_LEVEL_DEFAULT ||
517 val > IEEE802154_SCF_SECLEVEL_ENC_MIC128) {
518 err = -EINVAL;
519 } else if (val == WPAN_SECURITY_LEVEL_DEFAULT) {
520 ro->seclevel_override = 0;
521 } else {
522 ro->seclevel_override = 1;
523 ro->seclevel = val;
524 }
525 break;
452 default: 526 default:
453 err = -ENOPROTOOPT; 527 err = -ENOPROTOOPT;
454 break; 528 break;
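
The new WPAN_SECURITY and WPAN_SECURITY_LEVEL options gate link-layer security per socket, and as the hunk shows, setting either requires CAP_NET_ADMIN or CAP_NET_RAW. A hedged userspace sketch, assuming the usual SOL_IEEE802154 option level and the WPAN_* constants exported alongside af_ieee802154.h:

	int val = WPAN_SECURITY_ON;

	if (setsockopt(fd, SOL_IEEE802154, WPAN_SECURITY,
		       &val, sizeof(val)) < 0)
		perror("WPAN_SECURITY");

	val = 5;	/* IEEE802154_SCF_SECLEVEL_ENC_MIC32 */
	if (setsockopt(fd, SOL_IEEE802154, WPAN_SECURITY_LEVEL,
		       &val, sizeof(val)) < 0)
		perror("WPAN_SECURITY_LEVEL");
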
diff --git a/net/ieee802154/header_ops.c b/net/ieee802154/header_ops.c
index bed42a48408c..c09294e39ca6 100644
--- a/net/ieee802154/header_ops.c
+++ b/net/ieee802154/header_ops.c
@@ -195,15 +195,16 @@ ieee802154_hdr_get_sechdr(const u8 *buf, struct ieee802154_sechdr *hdr)
195 return pos; 195 return pos;
196} 196}
197 197
198static int ieee802154_sechdr_lengths[4] = {
199 [IEEE802154_SCF_KEY_IMPLICIT] = 5,
200 [IEEE802154_SCF_KEY_INDEX] = 6,
201 [IEEE802154_SCF_KEY_SHORT_INDEX] = 10,
202 [IEEE802154_SCF_KEY_HW_INDEX] = 14,
203};
204
198static int ieee802154_hdr_sechdr_len(u8 sc) 205static int ieee802154_hdr_sechdr_len(u8 sc)
199{ 206{
200 switch (IEEE802154_SCF_KEY_ID_MODE(sc)) { 207 return ieee802154_sechdr_lengths[IEEE802154_SCF_KEY_ID_MODE(sc)];
201 case IEEE802154_SCF_KEY_IMPLICIT: return 5;
202 case IEEE802154_SCF_KEY_INDEX: return 6;
203 case IEEE802154_SCF_KEY_SHORT_INDEX: return 10;
204 case IEEE802154_SCF_KEY_HW_INDEX: return 14;
205 default: return -EINVAL;
206 }
207} 208}
208 209
209static int ieee802154_hdr_minlen(const struct ieee802154_hdr *hdr) 210static int ieee802154_hdr_minlen(const struct ieee802154_hdr *hdr)
@@ -285,3 +286,40 @@ ieee802154_hdr_peek_addrs(const struct sk_buff *skb, struct ieee802154_hdr *hdr)
285 return pos; 286 return pos;
286} 287}
287EXPORT_SYMBOL_GPL(ieee802154_hdr_peek_addrs); 288EXPORT_SYMBOL_GPL(ieee802154_hdr_peek_addrs);
289
290int
291ieee802154_hdr_peek(const struct sk_buff *skb, struct ieee802154_hdr *hdr)
292{
293 const u8 *buf = skb_mac_header(skb);
294 int pos;
295
296 pos = ieee802154_hdr_peek_addrs(skb, hdr);
297 if (pos < 0)
298 return -EINVAL;
299
300 if (hdr->fc.security_enabled) {
301 u8 key_id_mode = IEEE802154_SCF_KEY_ID_MODE(*(buf + pos));
302 int want = pos + ieee802154_sechdr_lengths[key_id_mode];
303
304 if (buf + want > skb_tail_pointer(skb))
305 return -EINVAL;
306
307 pos += ieee802154_hdr_get_sechdr(buf + pos, &hdr->sec);
308 }
309
310 return pos;
311}
312EXPORT_SYMBOL_GPL(ieee802154_hdr_peek);
313
314int ieee802154_max_payload(const struct ieee802154_hdr *hdr)
315{
316 int hlen = ieee802154_hdr_minlen(hdr);
317
318 if (hdr->fc.security_enabled) {
319 hlen += ieee802154_sechdr_lengths[hdr->sec.key_id_mode] - 1;
320 hlen += ieee802154_sechdr_authtag_len(&hdr->sec);
321 }
322
323 return IEEE802154_MTU - hlen - IEEE802154_MFR_SIZE;
324}
325EXPORT_SYMBOL_GPL(ieee802154_max_payload);
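
As a worked example of the new helper: a data frame using short source and destination addresses within a single PAN has a minimal MAC header of 2 (frame control) + 1 (sequence) + 2 (destination PAN) + 2 (destination) + 2 (source) = 9 octets, so with IEEE802154_MTU = 127 and IEEE802154_MFR_SIZE = 2 (the FCS), ieee802154_max_payload() yields 127 - 9 - 2 = 116 octets. Enabling security shrinks this further by the auxiliary security header (per ieee802154_sechdr_lengths[], depending on the key id mode) and the authentication tag length.
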
diff --git a/net/ieee802154/ieee802154.h b/net/ieee802154/ieee802154.h
index 6693a5cf01ce..8b83a231299e 100644
--- a/net/ieee802154/ieee802154.h
+++ b/net/ieee802154/ieee802154.h
@@ -68,4 +68,23 @@ int ieee802154_list_iface(struct sk_buff *skb, struct genl_info *info);
68int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb); 68int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb);
69int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info); 69int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info);
70 70
71int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info);
72int ieee802154_llsec_setparams(struct sk_buff *skb, struct genl_info *info);
73int ieee802154_llsec_add_key(struct sk_buff *skb, struct genl_info *info);
74int ieee802154_llsec_del_key(struct sk_buff *skb, struct genl_info *info);
75int ieee802154_llsec_dump_keys(struct sk_buff *skb,
76 struct netlink_callback *cb);
77int ieee802154_llsec_add_dev(struct sk_buff *skb, struct genl_info *info);
78int ieee802154_llsec_del_dev(struct sk_buff *skb, struct genl_info *info);
79int ieee802154_llsec_dump_devs(struct sk_buff *skb,
80 struct netlink_callback *cb);
81int ieee802154_llsec_add_devkey(struct sk_buff *skb, struct genl_info *info);
82int ieee802154_llsec_del_devkey(struct sk_buff *skb, struct genl_info *info);
83int ieee802154_llsec_dump_devkeys(struct sk_buff *skb,
84 struct netlink_callback *cb);
85int ieee802154_llsec_add_seclevel(struct sk_buff *skb, struct genl_info *info);
86int ieee802154_llsec_del_seclevel(struct sk_buff *skb, struct genl_info *info);
87int ieee802154_llsec_dump_seclevels(struct sk_buff *skb,
88 struct netlink_callback *cb);
89
71#endif 90#endif
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c
index 04b20589d97a..26efcf4fd2ff 100644
--- a/net/ieee802154/netlink.c
+++ b/net/ieee802154/netlink.c
@@ -124,6 +124,26 @@ static const struct genl_ops ieee8021154_ops[] = {
 	IEEE802154_DUMP(IEEE802154_LIST_IFACE, ieee802154_list_iface,
 			ieee802154_dump_iface),
 	IEEE802154_OP(IEEE802154_SET_MACPARAMS, ieee802154_set_macparams),
+	IEEE802154_OP(IEEE802154_LLSEC_GETPARAMS, ieee802154_llsec_getparams),
+	IEEE802154_OP(IEEE802154_LLSEC_SETPARAMS, ieee802154_llsec_setparams),
+	IEEE802154_DUMP(IEEE802154_LLSEC_LIST_KEY, NULL,
+			ieee802154_llsec_dump_keys),
+	IEEE802154_OP(IEEE802154_LLSEC_ADD_KEY, ieee802154_llsec_add_key),
+	IEEE802154_OP(IEEE802154_LLSEC_DEL_KEY, ieee802154_llsec_del_key),
+	IEEE802154_DUMP(IEEE802154_LLSEC_LIST_DEV, NULL,
+			ieee802154_llsec_dump_devs),
+	IEEE802154_OP(IEEE802154_LLSEC_ADD_DEV, ieee802154_llsec_add_dev),
+	IEEE802154_OP(IEEE802154_LLSEC_DEL_DEV, ieee802154_llsec_del_dev),
+	IEEE802154_DUMP(IEEE802154_LLSEC_LIST_DEVKEY, NULL,
+			ieee802154_llsec_dump_devkeys),
+	IEEE802154_OP(IEEE802154_LLSEC_ADD_DEVKEY, ieee802154_llsec_add_devkey),
+	IEEE802154_OP(IEEE802154_LLSEC_DEL_DEVKEY, ieee802154_llsec_del_devkey),
+	IEEE802154_DUMP(IEEE802154_LLSEC_LIST_SECLEVEL, NULL,
+			ieee802154_llsec_dump_seclevels),
+	IEEE802154_OP(IEEE802154_LLSEC_ADD_SECLEVEL,
+		      ieee802154_llsec_add_seclevel),
+	IEEE802154_OP(IEEE802154_LLSEC_DEL_SECLEVEL,
+		      ieee802154_llsec_del_seclevel),
 };
 
 static const struct genl_multicast_group ieee802154_mcgrps[] = {
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index 5d285498c0f6..a3281b8bfd5b 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -715,3 +715,812 @@ out:
 	dev_put(dev);
 	return rc;
 }
+
+
+
+static int
+ieee802154_llsec_parse_key_id(struct genl_info *info,
+			      struct ieee802154_llsec_key_id *desc)
+{
+	memset(desc, 0, sizeof(*desc));
+
+	if (!info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE])
+		return -EINVAL;
+
+	desc->mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]);
+
+	if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) {
+		if (!info->attrs[IEEE802154_ATTR_PAN_ID] &&
+		    !(info->attrs[IEEE802154_ATTR_SHORT_ADDR] ||
+		      info->attrs[IEEE802154_ATTR_HW_ADDR]))
+			return -EINVAL;
+
+		desc->device_addr.pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]);
+
+		if (info->attrs[IEEE802154_ATTR_SHORT_ADDR]) {
+			desc->device_addr.mode = IEEE802154_ADDR_SHORT;
+			desc->device_addr.short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]);
+		} else {
+			desc->device_addr.mode = IEEE802154_ADDR_LONG;
+			desc->device_addr.extended_addr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
+		}
+	}
+
+	if (desc->mode != IEEE802154_SCF_KEY_IMPLICIT &&
+	    !info->attrs[IEEE802154_ATTR_LLSEC_KEY_ID])
+		return -EINVAL;
+
+	if (desc->mode == IEEE802154_SCF_KEY_SHORT_INDEX &&
+	    !info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT])
+		return -EINVAL;
+
+	if (desc->mode == IEEE802154_SCF_KEY_HW_INDEX &&
+	    !info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED])
+		return -EINVAL;
+
+	if (desc->mode != IEEE802154_SCF_KEY_IMPLICIT)
+		desc->id = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_ID]);
+
+	switch (desc->mode) {
+	case IEEE802154_SCF_KEY_SHORT_INDEX:
+	{
+		u32 source = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT]);
+		desc->short_source = cpu_to_le32(source);
+		break;
+	}
+	case IEEE802154_SCF_KEY_HW_INDEX:
+		desc->extended_source = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED]);
+		break;
+	}
+
+	return 0;
+}
+
+static int
+ieee802154_llsec_fill_key_id(struct sk_buff *msg,
+			     const struct ieee802154_llsec_key_id *desc)
+{
+	if (nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_MODE, desc->mode))
+		return -EMSGSIZE;
+
+	if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) {
+		if (nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID,
+				      desc->device_addr.pan_id))
+			return -EMSGSIZE;
+
+		if (desc->device_addr.mode == IEEE802154_ADDR_SHORT &&
+		    nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR,
+				      desc->device_addr.short_addr))
+			return -EMSGSIZE;
+
+		if (desc->device_addr.mode == IEEE802154_ADDR_LONG &&
+		    nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR,
+				   desc->device_addr.extended_addr))
+			return -EMSGSIZE;
+	}
+
+	if (desc->mode != IEEE802154_SCF_KEY_IMPLICIT &&
+	    nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_ID, desc->id))
+		return -EMSGSIZE;
+
+	if (desc->mode == IEEE802154_SCF_KEY_SHORT_INDEX &&
+	    nla_put_u32(msg, IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT,
+			le32_to_cpu(desc->short_source)))
+		return -EMSGSIZE;
+
+	if (desc->mode == IEEE802154_SCF_KEY_HW_INDEX &&
+	    nla_put_hwaddr(msg, IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED,
+			   desc->extended_source))
+		return -EMSGSIZE;
+
+	return 0;
+}
+
+int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info)
+{
+	struct sk_buff *msg;
+	struct net_device *dev = NULL;
+	int rc = -ENOBUFS;
+	struct ieee802154_mlme_ops *ops;
+	void *hdr;
+	struct ieee802154_llsec_params params;
+
+	pr_debug("%s\n", __func__);
+
+	dev = ieee802154_nl_get_dev(info);
+	if (!dev)
+		return -ENODEV;
+
+	ops = ieee802154_mlme_ops(dev);
+	if (!ops->llsec) {
+		rc = -EOPNOTSUPP;
+		goto out_dev;
+	}
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg)
+		goto out_dev;
+
+	hdr = genlmsg_put(msg, 0, info->snd_seq, &nl802154_family, 0,
+			  IEEE802154_LLSEC_GETPARAMS);
+	if (!hdr)
+		goto out_free;
+
+	rc = ops->llsec->get_params(dev, &params);
+	if (rc < 0)
+		goto out_free;
+
+	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_LLSEC_ENABLED, params.enabled) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVEL, params.out_level) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
+			be32_to_cpu(params.frame_counter)) ||
+	    ieee802154_llsec_fill_key_id(msg, &params.out_key))
+		goto out_free;
+
+	dev_put(dev);
+
+	return ieee802154_nl_reply(msg, info);
+out_free:
+	nlmsg_free(msg);
+out_dev:
+	dev_put(dev);
+	return rc;
+}
+
+int ieee802154_llsec_setparams(struct sk_buff *skb, struct genl_info *info)
+{
+	struct net_device *dev = NULL;
+	int rc = -EINVAL;
+	struct ieee802154_mlme_ops *ops;
+	struct ieee802154_llsec_params params;
+	int changed = 0;
+
+	pr_debug("%s\n", __func__);
+
+	dev = ieee802154_nl_get_dev(info);
+	if (!dev)
+		return -ENODEV;
+
+	if (!info->attrs[IEEE802154_ATTR_LLSEC_ENABLED] &&
+	    !info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE] &&
+	    !info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL])
+		goto out;
+
+	ops = ieee802154_mlme_ops(dev);
+	if (!ops->llsec) {
+		rc = -EOPNOTSUPP;
+		goto out;
+	}
+
+	if (info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL] &&
+	    nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]) > 7)
+		goto out;
+
+	if (info->attrs[IEEE802154_ATTR_LLSEC_ENABLED]) {
+		params.enabled = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_ENABLED]);
+		changed |= IEEE802154_LLSEC_PARAM_ENABLED;
+	}
+
+	if (info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]) {
+		if (ieee802154_llsec_parse_key_id(info, &params.out_key))
+			goto out;
+
+		changed |= IEEE802154_LLSEC_PARAM_OUT_KEY;
+	}
+
+	if (info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]) {
+		params.out_level = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]);
+		changed |= IEEE802154_LLSEC_PARAM_OUT_LEVEL;
+	}
+
+	if (info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]) {
+		u32 fc = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]);
+
+		params.frame_counter = cpu_to_be32(fc);
+		changed |= IEEE802154_LLSEC_PARAM_FRAME_COUNTER;
+	}
+
+	rc = ops->llsec->set_params(dev, &params, changed);
+
+	dev_put(dev);
+
+	return rc;
+out:
+	dev_put(dev);
+	return rc;
+}
+
+
+
+struct llsec_dump_data {
+	struct sk_buff *skb;
+	int s_idx, s_idx2;
+	int portid;
+	int nlmsg_seq;
+	struct net_device *dev;
+	struct ieee802154_mlme_ops *ops;
+	struct ieee802154_llsec_table *table;
+};
+
+static int
+ieee802154_llsec_dump_table(struct sk_buff *skb, struct netlink_callback *cb,
+			    int (*step)(struct llsec_dump_data*))
+{
+	struct net *net = sock_net(skb->sk);
+	struct net_device *dev;
+	struct llsec_dump_data data;
+	int idx = 0;
+	int first_dev = cb->args[0];
+	int rc;
+
+	for_each_netdev(net, dev) {
+		if (idx < first_dev || dev->type != ARPHRD_IEEE802154)
+			goto skip;
+
+		data.ops = ieee802154_mlme_ops(dev);
+		if (!data.ops->llsec)
+			goto skip;
+
+		data.skb = skb;
+		data.s_idx = cb->args[1];
+		data.s_idx2 = cb->args[2];
+		data.dev = dev;
+		data.portid = NETLINK_CB(cb->skb).portid;
+		data.nlmsg_seq = cb->nlh->nlmsg_seq;
+
+		data.ops->llsec->lock_table(dev);
+		data.ops->llsec->get_table(data.dev, &data.table);
+		rc = step(&data);
+		data.ops->llsec->unlock_table(dev);
+
+		if (rc < 0)
+			break;
+
+skip:
+		idx++;
+	}
+	cb->args[0] = idx;
+
+	return skb->len;
+}
+
+static int
+ieee802154_nl_llsec_change(struct sk_buff *skb, struct genl_info *info,
+			   int (*fn)(struct net_device*, struct genl_info*))
+{
+	struct net_device *dev = NULL;
+	int rc = -EINVAL;
+
+	dev = ieee802154_nl_get_dev(info);
+	if (!dev)
+		return -ENODEV;
+
+	if (!ieee802154_mlme_ops(dev)->llsec)
+		rc = -EOPNOTSUPP;
+	else
+		rc = fn(dev, info);
+
+	dev_put(dev);
+	return rc;
+}
+
+
+
+static int
+ieee802154_llsec_parse_key(struct genl_info *info,
+			   struct ieee802154_llsec_key *key)
+{
+	u8 frames;
+	u32 commands[256 / 32];
+
+	memset(key, 0, sizeof(*key));
+
+	if (!info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES] ||
+	    !info->attrs[IEEE802154_ATTR_LLSEC_KEY_BYTES])
+		return -EINVAL;
+
+	frames = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES]);
+	if ((frames & BIT(IEEE802154_FC_TYPE_MAC_CMD)) &&
+	    !info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS])
+		return -EINVAL;
+
+	if (info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS]) {
+		nla_memcpy(commands,
+			   info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS],
+			   256 / 8);
+
+		if (commands[0] || commands[1] || commands[2] || commands[3] ||
+		    commands[4] || commands[5] || commands[6] ||
+		    commands[7] >= BIT(IEEE802154_CMD_GTS_REQ + 1))
+			return -EINVAL;
+
+		key->cmd_frame_ids = commands[7];
+	}
+
+	key->frame_types = frames;
+
+	nla_memcpy(key->key, info->attrs[IEEE802154_ATTR_LLSEC_KEY_BYTES],
+		   IEEE802154_LLSEC_KEY_SIZE);
+
+	return 0;
+}
+
+static int llsec_add_key(struct net_device *dev, struct genl_info *info)
+{
+	struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+	struct ieee802154_llsec_key key;
+	struct ieee802154_llsec_key_id id;
+
+	if (ieee802154_llsec_parse_key(info, &key) ||
+	    ieee802154_llsec_parse_key_id(info, &id))
+		return -EINVAL;
+
+	return ops->llsec->add_key(dev, &id, &key);
+}
+
+int ieee802154_llsec_add_key(struct sk_buff *skb, struct genl_info *info)
+{
+	if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) !=
+	    (NLM_F_CREATE | NLM_F_EXCL))
+		return -EINVAL;
+
+	return ieee802154_nl_llsec_change(skb, info, llsec_add_key);
+}
+
+static int llsec_remove_key(struct net_device *dev, struct genl_info *info)
+{
+	struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+	struct ieee802154_llsec_key_id id;
+
+	if (ieee802154_llsec_parse_key_id(info, &id))
+		return -EINVAL;
+
+	return ops->llsec->del_key(dev, &id);
+}
+
+int ieee802154_llsec_del_key(struct sk_buff *skb, struct genl_info *info)
+{
+	return ieee802154_nl_llsec_change(skb, info, llsec_remove_key);
+}
+
+static int
+ieee802154_nl_fill_key(struct sk_buff *msg, u32 portid, u32 seq,
+		       const struct ieee802154_llsec_key_entry *key,
+		       const struct net_device *dev)
+{
+	void *hdr;
+	u32 commands[256 / 32];
+
+	hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI,
+			  IEEE802154_LLSEC_LIST_KEY);
+	if (!hdr)
+		goto out;
+
+	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+	    ieee802154_llsec_fill_key_id(msg, &key->id) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES,
+		       key->key->frame_types))
+		goto nla_put_failure;
+
+	if (key->key->frame_types & BIT(IEEE802154_FC_TYPE_MAC_CMD)) {
+		memset(commands, 0, sizeof(commands));
+		commands[7] = key->key->cmd_frame_ids;
+		if (nla_put(msg, IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS,
+			    sizeof(commands), commands))
+			goto nla_put_failure;
+	}
+
+	if (nla_put(msg, IEEE802154_ATTR_LLSEC_KEY_BYTES,
+		    IEEE802154_LLSEC_KEY_SIZE, key->key->key))
+		goto nla_put_failure;
+
+	genlmsg_end(msg, hdr);
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+out:
+	return -EMSGSIZE;
+}
+
+static int llsec_iter_keys(struct llsec_dump_data *data)
+{
+	struct ieee802154_llsec_key_entry *pos;
+	int rc = 0, idx = 0;
+
+	list_for_each_entry(pos, &data->table->keys, list) {
+		if (idx++ < data->s_idx)
+			continue;
+
+		if (ieee802154_nl_fill_key(data->skb, data->portid,
+					   data->nlmsg_seq, pos, data->dev)) {
+			rc = -EMSGSIZE;
+			break;
+		}
+
+		data->s_idx++;
+	}
+
+	return rc;
+}
+
+int ieee802154_llsec_dump_keys(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	return ieee802154_llsec_dump_table(skb, cb, llsec_iter_keys);
+}
+
+
+
+static int
+llsec_parse_dev(struct genl_info *info,
+		struct ieee802154_llsec_device *dev)
+{
+	memset(dev, 0, sizeof(*dev));
+
+	if (!info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER] ||
+	    !info->attrs[IEEE802154_ATTR_HW_ADDR] ||
+	    !info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE] ||
+	    !info->attrs[IEEE802154_ATTR_LLSEC_DEV_KEY_MODE] ||
+	    (!!info->attrs[IEEE802154_ATTR_PAN_ID] !=
+	     !!info->attrs[IEEE802154_ATTR_SHORT_ADDR]))
+		return -EINVAL;
+
+	if (info->attrs[IEEE802154_ATTR_PAN_ID]) {
+		dev->pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]);
+		dev->short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]);
+	} else {
+		dev->short_addr = cpu_to_le16(IEEE802154_ADDR_UNDEF);
+	}
+
+	dev->hwaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
+	dev->frame_counter = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]);
+	dev->seclevel_exempt = !!nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE]);
+	dev->key_mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_DEV_KEY_MODE]);
+
+	if (dev->key_mode >= __IEEE802154_LLSEC_DEVKEY_MAX)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int llsec_add_dev(struct net_device *dev, struct genl_info *info)
+{
+	struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+	struct ieee802154_llsec_device desc;
+
+	if (llsec_parse_dev(info, &desc))
+		return -EINVAL;
+
+	return ops->llsec->add_dev(dev, &desc);
+}
+
+int ieee802154_llsec_add_dev(struct sk_buff *skb, struct genl_info *info)
+{
+	if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) !=
+	    (NLM_F_CREATE | NLM_F_EXCL))
+		return -EINVAL;
+
+	return ieee802154_nl_llsec_change(skb, info, llsec_add_dev);
+}
+
+static int llsec_del_dev(struct net_device *dev, struct genl_info *info)
+{
+	struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+	__le64 devaddr;
+
+	if (!info->attrs[IEEE802154_ATTR_HW_ADDR])
+		return -EINVAL;
+
+	devaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
+
+	return ops->llsec->del_dev(dev, devaddr);
+}
+
+int ieee802154_llsec_del_dev(struct sk_buff *skb, struct genl_info *info)
+{
+	return ieee802154_nl_llsec_change(skb, info, llsec_del_dev);
+}
+
+static int
+ieee802154_nl_fill_dev(struct sk_buff *msg, u32 portid, u32 seq,
+		       const struct ieee802154_llsec_device *desc,
+		       const struct net_device *dev)
+{
+	void *hdr;
+
+	hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI,
+			  IEEE802154_LLSEC_LIST_DEV);
+	if (!hdr)
+		goto out;
+
+	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+	    nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID, desc->pan_id) ||
+	    nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR,
+			      desc->short_addr) ||
+	    nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, desc->hwaddr) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
+			desc->frame_counter) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
+		       desc->seclevel_exempt) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_KEY_MODE, desc->key_mode))
+		goto nla_put_failure;
+
+	genlmsg_end(msg, hdr);
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+out:
+	return -EMSGSIZE;
+}
+
+static int llsec_iter_devs(struct llsec_dump_data *data)
+{
+	struct ieee802154_llsec_device *pos;
+	int rc = 0, idx = 0;
+
+	list_for_each_entry(pos, &data->table->devices, list) {
+		if (idx++ < data->s_idx)
+			continue;
+
+		if (ieee802154_nl_fill_dev(data->skb, data->portid,
+					   data->nlmsg_seq, pos, data->dev)) {
+			rc = -EMSGSIZE;
+			break;
+		}
+
+		data->s_idx++;
+	}
+
+	return rc;
+}
+
+int ieee802154_llsec_dump_devs(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	return ieee802154_llsec_dump_table(skb, cb, llsec_iter_devs);
+}
+
+
+
+static int llsec_add_devkey(struct net_device *dev, struct genl_info *info)
+{
+	struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+	struct ieee802154_llsec_device_key key;
+	__le64 devaddr;
+
+	if (!info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER] ||
+	    !info->attrs[IEEE802154_ATTR_HW_ADDR] ||
+	    ieee802154_llsec_parse_key_id(info, &key.key_id))
+		return -EINVAL;
+
+	devaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
+	key.frame_counter = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]);
+
+	return ops->llsec->add_devkey(dev, devaddr, &key);
+}
+
+int ieee802154_llsec_add_devkey(struct sk_buff *skb, struct genl_info *info)
+{
+	if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) !=
+	    (NLM_F_CREATE | NLM_F_EXCL))
+		return -EINVAL;
+
+	return ieee802154_nl_llsec_change(skb, info, llsec_add_devkey);
+}
+
+static int llsec_del_devkey(struct net_device *dev, struct genl_info *info)
+{
+	struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+	struct ieee802154_llsec_device_key key;
+	__le64 devaddr;
+
+	if (!info->attrs[IEEE802154_ATTR_HW_ADDR] ||
+	    ieee802154_llsec_parse_key_id(info, &key.key_id))
+		return -EINVAL;
+
+	devaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
+
+	return ops->llsec->del_devkey(dev, devaddr, &key);
+}
+
+int ieee802154_llsec_del_devkey(struct sk_buff *skb, struct genl_info *info)
+{
+	return ieee802154_nl_llsec_change(skb, info, llsec_del_devkey);
+}
+
+static int
+ieee802154_nl_fill_devkey(struct sk_buff *msg, u32 portid, u32 seq,
+			  __le64 devaddr,
+			  const struct ieee802154_llsec_device_key *devkey,
+			  const struct net_device *dev)
+{
+	void *hdr;
+
+	hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI,
+			  IEEE802154_LLSEC_LIST_DEVKEY);
+	if (!hdr)
+		goto out;
+
+	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+	    nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, devaddr) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
+			devkey->frame_counter) ||
+	    ieee802154_llsec_fill_key_id(msg, &devkey->key_id))
+		goto nla_put_failure;
+
+	genlmsg_end(msg, hdr);
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+out:
+	return -EMSGSIZE;
+}
+
+static int llsec_iter_devkeys(struct llsec_dump_data *data)
+{
+	struct ieee802154_llsec_device *dpos;
+	struct ieee802154_llsec_device_key *kpos;
+	int rc = 0, idx = 0, idx2;
+
+	list_for_each_entry(dpos, &data->table->devices, list) {
+		if (idx++ < data->s_idx)
+			continue;
+
+		idx2 = 0;
+
+		list_for_each_entry(kpos, &dpos->keys, list) {
+			if (idx2++ < data->s_idx2)
+				continue;
+
+			if (ieee802154_nl_fill_devkey(data->skb, data->portid,
+						      data->nlmsg_seq,
+						      dpos->hwaddr, kpos,
+						      data->dev)) {
+				return rc = -EMSGSIZE;
+			}
+
+			data->s_idx2++;
+		}
+
+		data->s_idx++;
+	}
+
+	return rc;
+}
+
+int ieee802154_llsec_dump_devkeys(struct sk_buff *skb,
+				  struct netlink_callback *cb)
+{
+	return ieee802154_llsec_dump_table(skb, cb, llsec_iter_devkeys);
+}
+
+
+
+static int
+llsec_parse_seclevel(struct genl_info *info,
+		     struct ieee802154_llsec_seclevel *sl)
+{
+	memset(sl, 0, sizeof(*sl));
+
+	if (!info->attrs[IEEE802154_ATTR_LLSEC_FRAME_TYPE] ||
+	    !info->attrs[IEEE802154_ATTR_LLSEC_SECLEVELS] ||
+	    !info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE])
+		return -EINVAL;
+
+	sl->frame_type = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_TYPE]);
+	if (sl->frame_type == IEEE802154_FC_TYPE_MAC_CMD) {
+		if (!info->attrs[IEEE802154_ATTR_LLSEC_CMD_FRAME_ID])
+			return -EINVAL;
+
+		sl->cmd_frame_id = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_CMD_FRAME_ID]);
+	}
+
+	sl->sec_levels = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_SECLEVELS]);
+	sl->device_override = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE]);
+
+	return 0;
+}
+
+static int llsec_add_seclevel(struct net_device *dev, struct genl_info *info)
+{
+	struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+	struct ieee802154_llsec_seclevel sl;
+
+	if (llsec_parse_seclevel(info, &sl))
+		return -EINVAL;
+
+	return ops->llsec->add_seclevel(dev, &sl);
+}
+
+int ieee802154_llsec_add_seclevel(struct sk_buff *skb, struct genl_info *info)
+{
+	if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) !=
+	    (NLM_F_CREATE | NLM_F_EXCL))
+		return -EINVAL;
+
+	return ieee802154_nl_llsec_change(skb, info, llsec_add_seclevel);
+}
+
+static int llsec_del_seclevel(struct net_device *dev, struct genl_info *info)
+{
+	struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+	struct ieee802154_llsec_seclevel sl;
+
+	if (llsec_parse_seclevel(info, &sl))
+		return -EINVAL;
+
+	return ops->llsec->del_seclevel(dev, &sl);
+}
+
+int ieee802154_llsec_del_seclevel(struct sk_buff *skb, struct genl_info *info)
+{
+	return ieee802154_nl_llsec_change(skb, info, llsec_del_seclevel);
+}
+
+static int
+ieee802154_nl_fill_seclevel(struct sk_buff *msg, u32 portid, u32 seq,
+			    const struct ieee802154_llsec_seclevel *sl,
+			    const struct net_device *dev)
+{
+	void *hdr;
+
+	hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI,
+			  IEEE802154_LLSEC_LIST_SECLEVEL);
+	if (!hdr)
+		goto out;
+
+	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_LLSEC_FRAME_TYPE, sl->frame_type) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVELS, sl->sec_levels) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
+		       sl->device_override))
+		goto nla_put_failure;
+
+	if (sl->frame_type == IEEE802154_FC_TYPE_MAC_CMD &&
+	    nla_put_u8(msg, IEEE802154_ATTR_LLSEC_CMD_FRAME_ID,
+		       sl->cmd_frame_id))
+		goto nla_put_failure;
+
+	genlmsg_end(msg, hdr);
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+out:
+	return -EMSGSIZE;
+}
+
+static int llsec_iter_seclevels(struct llsec_dump_data *data)
+{
+	struct ieee802154_llsec_seclevel *pos;
+	int rc = 0, idx = 0;
+
+	list_for_each_entry(pos, &data->table->security_levels, list) {
+		if (idx++ < data->s_idx)
+			continue;
+
+		if (ieee802154_nl_fill_seclevel(data->skb, data->portid,
+						data->nlmsg_seq, pos,
+						data->dev)) {
+			rc = -EMSGSIZE;
+			break;
+		}
+
+		data->s_idx++;
+	}
+
+	return rc;
+}
+
+int ieee802154_llsec_dump_seclevels(struct sk_buff *skb,
+				    struct netlink_callback *cb)
+{
+	return ieee802154_llsec_dump_table(skb, cb, llsec_iter_seclevels);
+}
diff --git a/net/ieee802154/nl_policy.c b/net/ieee802154/nl_policy.c
index fd7be5e45cef..3a703ab88348 100644
--- a/net/ieee802154/nl_policy.c
+++ b/net/ieee802154/nl_policy.c
@@ -62,5 +62,21 @@ const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
 	[IEEE802154_ATTR_CSMA_MAX_BE] = { .type = NLA_U8, },
 
 	[IEEE802154_ATTR_FRAME_RETRIES] = { .type = NLA_S8, },
+
+	[IEEE802154_ATTR_LLSEC_ENABLED] = { .type = NLA_U8, },
+	[IEEE802154_ATTR_LLSEC_SECLEVEL] = { .type = NLA_U8, },
+	[IEEE802154_ATTR_LLSEC_KEY_MODE] = { .type = NLA_U8, },
+	[IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT] = { .type = NLA_U32, },
+	[IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED] = { .type = NLA_HW_ADDR, },
+	[IEEE802154_ATTR_LLSEC_KEY_ID] = { .type = NLA_U8, },
+	[IEEE802154_ATTR_LLSEC_FRAME_COUNTER] = { .type = NLA_U32 },
+	[IEEE802154_ATTR_LLSEC_KEY_BYTES] = { .len = 16, },
+	[IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES] = { .type = NLA_U8, },
+	[IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS] = { .len = 258 / 8 },
+	[IEEE802154_ATTR_LLSEC_FRAME_TYPE] = { .type = NLA_U8, },
+	[IEEE802154_ATTR_LLSEC_CMD_FRAME_ID] = { .type = NLA_U8, },
+	[IEEE802154_ATTR_LLSEC_SECLEVELS] = { .type = NLA_U8, },
+	[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE] = { .type = NLA_U8, },
+	[IEEE802154_ATTR_LLSEC_DEV_KEY_MODE] = { .type = NLA_U8, },
 };
 
diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
index ef2d54372b13..6f1428c4870b 100644
--- a/net/ieee802154/reassembly.c
+++ b/net/ieee802154/reassembly.c
@@ -36,7 +36,7 @@ struct lowpan_frag_info {
 	u8 d_offset;
 };
 
-struct lowpan_frag_info *lowpan_cb(struct sk_buff *skb)
+static struct lowpan_frag_info *lowpan_cb(struct sk_buff *skb)
 {
 	return (struct lowpan_frag_info *)skb->cb;
 }
@@ -120,6 +120,8 @@ fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
 	struct inet_frag_queue *q;
 	struct lowpan_create_arg arg;
 	unsigned int hash;
+	struct netns_ieee802154_lowpan *ieee802154_lowpan =
+		net_ieee802154_lowpan(net);
 
 	arg.tag = frag_info->d_tag;
 	arg.d_size = frag_info->d_size;
@@ -129,7 +131,7 @@ fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
 	read_lock(&lowpan_frags.lock);
 	hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst);
 
-	q = inet_frag_find(&net->ieee802154_lowpan.frags,
+	q = inet_frag_find(&ieee802154_lowpan->frags,
 			   &lowpan_frags, &arg, hash);
 	if (IS_ERR_OR_NULL(q)) {
 		inet_frag_maybe_warn_overflow(q, pr_fmt());
@@ -357,6 +359,8 @@ int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
 	struct net *net = dev_net(skb->dev);
 	struct lowpan_frag_info *frag_info = lowpan_cb(skb);
 	struct ieee802154_addr source, dest;
+	struct netns_ieee802154_lowpan *ieee802154_lowpan =
+		net_ieee802154_lowpan(net);
 	int err;
 
 	source = mac_cb(skb)->source;
@@ -366,10 +370,10 @@ int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
 	if (err < 0)
 		goto err;
 
-	if (frag_info->d_size > net->ieee802154_lowpan.max_dsize)
+	if (frag_info->d_size > ieee802154_lowpan->max_dsize)
 		goto err;
 
-	inet_frag_evictor(&net->ieee802154_lowpan.frags, &lowpan_frags, false);
+	inet_frag_evictor(&ieee802154_lowpan->frags, &lowpan_frags, false);
 
 	fq = fq_find(net, frag_info, &source, &dest);
 	if (fq != NULL) {
@@ -436,6 +440,8 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
 {
 	struct ctl_table *table;
 	struct ctl_table_header *hdr;
+	struct netns_ieee802154_lowpan *ieee802154_lowpan =
+		net_ieee802154_lowpan(net);
 
 	table = lowpan_frags_ns_ctl_table;
 	if (!net_eq(net, &init_net)) {
@@ -444,10 +450,10 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
 		if (table == NULL)
 			goto err_alloc;
 
-		table[0].data = &net->ieee802154_lowpan.frags.high_thresh;
-		table[1].data = &net->ieee802154_lowpan.frags.low_thresh;
-		table[2].data = &net->ieee802154_lowpan.frags.timeout;
-		table[3].data = &net->ieee802154_lowpan.max_dsize;
+		table[0].data = &ieee802154_lowpan->frags.high_thresh;
+		table[1].data = &ieee802154_lowpan->frags.low_thresh;
+		table[2].data = &ieee802154_lowpan->frags.timeout;
+		table[3].data = &ieee802154_lowpan->max_dsize;
 
 		/* Don't export sysctls to unprivileged users */
 		if (net->user_ns != &init_user_ns)
@@ -458,7 +464,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
 	if (hdr == NULL)
 		goto err_reg;
 
-	net->ieee802154_lowpan.sysctl.frags_hdr = hdr;
+	ieee802154_lowpan->sysctl.frags_hdr = hdr;
 	return 0;
 
 err_reg:
@@ -471,9 +477,11 @@ err_alloc:
 static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
 {
 	struct ctl_table *table;
+	struct netns_ieee802154_lowpan *ieee802154_lowpan =
+		net_ieee802154_lowpan(net);
 
-	table = net->ieee802154_lowpan.sysctl.frags_hdr->ctl_table_arg;
-	unregister_net_sysctl_table(net->ieee802154_lowpan.sysctl.frags_hdr);
+	table = ieee802154_lowpan->sysctl.frags_hdr->ctl_table_arg;
+	unregister_net_sysctl_table(ieee802154_lowpan->sysctl.frags_hdr);
 	if (!net_eq(net, &init_net))
 		kfree(table);
 }
@@ -514,20 +522,26 @@ static inline void lowpan_frags_sysctl_unregister(void)
 
 static int __net_init lowpan_frags_init_net(struct net *net)
 {
-	net->ieee802154_lowpan.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
-	net->ieee802154_lowpan.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
-	net->ieee802154_lowpan.frags.timeout = IPV6_FRAG_TIMEOUT;
-	net->ieee802154_lowpan.max_dsize = 0xFFFF;
+	struct netns_ieee802154_lowpan *ieee802154_lowpan =
+		net_ieee802154_lowpan(net);
 
-	inet_frags_init_net(&net->ieee802154_lowpan.frags);
+	ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
+	ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
+	ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
+	ieee802154_lowpan->max_dsize = 0xFFFF;
+
+	inet_frags_init_net(&ieee802154_lowpan->frags);
 
 	return lowpan_frags_ns_sysctl_register(net);
 }
 
 static void __net_exit lowpan_frags_exit_net(struct net *net)
 {
+	struct netns_ieee802154_lowpan *ieee802154_lowpan =
+		net_ieee802154_lowpan(net);
+
 	lowpan_frags_ns_sysctl_unregister(net);
-	inet_frags_exit_net(&net->ieee802154_lowpan.frags, &lowpan_frags);
+	inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
 }
 
 static struct pernet_operations lowpan_frags_ops = {
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 6d6dd345bc4d..d5e6836cf772 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -254,7 +254,6 @@ static int inet_create(struct net *net, struct socket *sock, int protocol,
 	struct inet_sock *inet;
 	struct proto *answer_prot;
 	unsigned char answer_flags;
-	char answer_no_check;
 	int try_loading_module = 0;
 	int err;
 
@@ -312,7 +311,6 @@ lookup_protocol:
 
 	sock->ops = answer->ops;
 	answer_prot = answer->prot;
-	answer_no_check = answer->no_check;
 	answer_flags = answer->flags;
 	rcu_read_unlock();
 
@@ -324,7 +322,6 @@ lookup_protocol:
 		goto out;
 
 	err = 0;
-	sk->sk_no_check = answer_no_check;
 	if (INET_PROTOSW_REUSE & answer_flags)
 		sk->sk_reuse = SK_CAN_REUSE;
 
@@ -1002,7 +999,6 @@ static struct inet_protosw inetsw_array[] =
 		.protocol = IPPROTO_TCP,
 		.prot = &tcp_prot,
 		.ops = &inet_stream_ops,
-		.no_check = 0,
 		.flags = INET_PROTOSW_PERMANENT |
 			 INET_PROTOSW_ICSK,
 	},
@@ -1012,7 +1008,6 @@ static struct inet_protosw inetsw_array[] =
 		.protocol = IPPROTO_UDP,
 		.prot = &udp_prot,
 		.ops = &inet_dgram_ops,
-		.no_check = UDP_CSUM_DEFAULT,
 		.flags = INET_PROTOSW_PERMANENT,
 	},
 
@@ -1021,7 +1016,6 @@ static struct inet_protosw inetsw_array[] =
 		.protocol = IPPROTO_ICMP,
 		.prot = &ping_prot,
 		.ops = &inet_dgram_ops,
-		.no_check = UDP_CSUM_DEFAULT,
 		.flags = INET_PROTOSW_REUSE,
 	},
 
@@ -1030,7 +1024,6 @@ static struct inet_protosw inetsw_array[] =
 		.protocol = IPPROTO_IP,	/* wild card */
 		.prot = &raw_prot,
 		.ops = &inet_sockraw_ops,
-		.no_check = UDP_CSUM_DEFAULT,
 		.flags = INET_PROTOSW_REUSE,
 	}
 };
@@ -1261,10 +1254,12 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 		       SKB_GSO_DODGY |
 		       SKB_GSO_TCP_ECN |
 		       SKB_GSO_GRE |
+		       SKB_GSO_GRE_CSUM |
 		       SKB_GSO_IPIP |
 		       SKB_GSO_SIT |
 		       SKB_GSO_TCPV6 |
 		       SKB_GSO_UDP_TUNNEL |
+		       SKB_GSO_UDP_TUNNEL_CSUM |
 		       SKB_GSO_MPLS |
 		       0)))
 		goto out;
@@ -1476,22 +1471,20 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
 }
 EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
 
-unsigned long snmp_fold_field(void __percpu *mib[], int offt)
+unsigned long snmp_fold_field(void __percpu *mib, int offt)
 {
 	unsigned long res = 0;
-	int i, j;
+	int i;
 
-	for_each_possible_cpu(i) {
-		for (j = 0; j < SNMP_ARRAY_SZ; j++)
-			res += *(((unsigned long *) per_cpu_ptr(mib[j], i)) + offt);
-	}
+	for_each_possible_cpu(i)
+		res += *(((unsigned long *) per_cpu_ptr(mib, i)) + offt);
 	return res;
 }
 EXPORT_SYMBOL_GPL(snmp_fold_field);
 
 #if BITS_PER_LONG==32
 
-u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
+u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
 {
 	u64 res = 0;
 	int cpu;
@@ -1502,7 +1495,7 @@ u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
 		u64 v;
 		unsigned int start;
 
-		bhptr = per_cpu_ptr(mib[0], cpu);
+		bhptr = per_cpu_ptr(mib, cpu);
 		syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
 		do {
 			start = u64_stats_fetch_begin_irq(syncp);
@@ -1516,25 +1509,6 @@ u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
 EXPORT_SYMBOL_GPL(snmp_fold_field64);
 #endif
 
-int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align)
-{
-	BUG_ON(ptr == NULL);
-	ptr[0] = __alloc_percpu(mibsize, align);
-	if (!ptr[0])
-		return -ENOMEM;
-
-#if SNMP_ARRAY_SZ == 2
-	ptr[1] = __alloc_percpu(mibsize, align);
-	if (!ptr[1]) {
-		free_percpu(ptr[0]);
-		ptr[0] = NULL;
-		return -ENOMEM;
-	}
-#endif
-	return 0;
-}
-EXPORT_SYMBOL_GPL(snmp_mib_init);
-
 #ifdef CONFIG_IP_MULTICAST
 static const struct net_protocol igmp_protocol = {
 	.handler =	igmp_rcv,
@@ -1570,40 +1544,30 @@ static __net_init int ipv4_mib_init_net(struct net *net)
 {
 	int i;
 
-	if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics,
-			  sizeof(struct tcp_mib),
-			  __alignof__(struct tcp_mib)) < 0)
+	net->mib.tcp_statistics = alloc_percpu(struct tcp_mib);
+	if (!net->mib.tcp_statistics)
 		goto err_tcp_mib;
-	if (snmp_mib_init((void __percpu **)net->mib.ip_statistics,
-			  sizeof(struct ipstats_mib),
-			  __alignof__(struct ipstats_mib)) < 0)
+	net->mib.ip_statistics = alloc_percpu(struct ipstats_mib);
+	if (!net->mib.ip_statistics)
 		goto err_ip_mib;
 
 	for_each_possible_cpu(i) {
 		struct ipstats_mib *af_inet_stats;
-		af_inet_stats = per_cpu_ptr(net->mib.ip_statistics[0], i);
+		af_inet_stats = per_cpu_ptr(net->mib.ip_statistics, i);
 		u64_stats_init(&af_inet_stats->syncp);
-#if SNMP_ARRAY_SZ == 2
-		af_inet_stats = per_cpu_ptr(net->mib.ip_statistics[1], i);
-		u64_stats_init(&af_inet_stats->syncp);
-#endif
 	}
 
-	if (snmp_mib_init((void __percpu **)net->mib.net_statistics,
-			  sizeof(struct linux_mib),
-			  __alignof__(struct linux_mib)) < 0)
+	net->mib.net_statistics = alloc_percpu(struct linux_mib);
+	if (!net->mib.net_statistics)
 		goto err_net_mib;
-	if (snmp_mib_init((void __percpu **)net->mib.udp_statistics,
-			  sizeof(struct udp_mib),
-			  __alignof__(struct udp_mib)) < 0)
+	net->mib.udp_statistics = alloc_percpu(struct udp_mib);
+	if (!net->mib.udp_statistics)
 		goto err_udp_mib;
-	if (snmp_mib_init((void __percpu **)net->mib.udplite_statistics,
-			  sizeof(struct udp_mib),
-			  __alignof__(struct udp_mib)) < 0)
+	net->mib.udplite_statistics = alloc_percpu(struct udp_mib);
+	if (!net->mib.udplite_statistics)
 		goto err_udplite_mib;
-	if (snmp_mib_init((void __percpu **)net->mib.icmp_statistics,
-			  sizeof(struct icmp_mib),
-			  __alignof__(struct icmp_mib)) < 0)
+	net->mib.icmp_statistics = alloc_percpu(struct icmp_mib);
+	if (!net->mib.icmp_statistics)
 		goto err_icmp_mib;
 	net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib),
 					      GFP_KERNEL);
@@ -1614,17 +1578,17 @@ static __net_init int ipv4_mib_init_net(struct net *net)
 	return 0;
 
 err_icmpmsg_mib:
-	snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
+	free_percpu(net->mib.icmp_statistics);
 err_icmp_mib:
-	snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
+	free_percpu(net->mib.udplite_statistics);
 err_udplite_mib:
-	snmp_mib_free((void __percpu **)net->mib.udp_statistics);
+	free_percpu(net->mib.udp_statistics);
 err_udp_mib:
-	snmp_mib_free((void __percpu **)net->mib.net_statistics);
+	free_percpu(net->mib.net_statistics);
 err_net_mib:
-	snmp_mib_free((void __percpu **)net->mib.ip_statistics);
+	free_percpu(net->mib.ip_statistics);
err_ip_mib:
-	snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
+	free_percpu(net->mib.tcp_statistics);
 err_tcp_mib:
 	return -ENOMEM;
 }
@@ -1632,12 +1596,12 @@ err_tcp_mib:
 static __net_exit void ipv4_mib_exit_net(struct net *net)
 {
 	kfree(net->mib.icmpmsg_statistics);
-	snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
-	snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
-	snmp_mib_free((void __percpu **)net->mib.udp_statistics);
-	snmp_mib_free((void __percpu **)net->mib.net_statistics);
-	snmp_mib_free((void __percpu **)net->mib.ip_statistics);
-	snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
+	free_percpu(net->mib.icmp_statistics);
+	free_percpu(net->mib.udplite_statistics);
+	free_percpu(net->mib.udp_statistics);
+	free_percpu(net->mib.net_statistics);
+	free_percpu(net->mib.ip_statistics);
+	free_percpu(net->mib.tcp_statistics);
 }
 
 static __net_initdata struct pernet_operations ipv4_mib_ops = {
@@ -1736,13 +1700,9 @@ static int __init inet_init(void)
 
 	BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
 
-	sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
-	if (!sysctl_local_reserved_ports)
-		goto out;
-
 	rc = proto_register(&tcp_prot, 1);
 	if (rc)
-		goto out_free_reserved_ports;
+		goto out;
 
 	rc = proto_register(&udp_prot, 1);
 	if (rc)
@@ -1852,8 +1812,6 @@ out_unregister_udp_proto:
 	proto_unregister(&udp_prot);
 out_unregister_tcp_proto:
 	proto_unregister(&tcp_prot);
-out_free_reserved_ports:
-	kfree(sysctl_local_reserved_ports);
 	goto out;
 }
 
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 8b5134c582f1..a3095fdefbed 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -86,18 +86,26 @@ out:
 }
 EXPORT_SYMBOL(ip4_datagram_connect);
 
+/* Because UDP xmit path can manipulate sk_dst_cache without holding
+ * socket lock, we need to use sk_dst_set() here,
+ * even if we own the socket lock.
+ */
 void ip4_datagram_release_cb(struct sock *sk)
 {
 	const struct inet_sock *inet = inet_sk(sk);
 	const struct ip_options_rcu *inet_opt;
 	__be32 daddr = inet->inet_daddr;
+	struct dst_entry *dst;
 	struct flowi4 fl4;
 	struct rtable *rt;
 
-	if (! __sk_dst_get(sk) || __sk_dst_check(sk, 0))
-		return;
-
 	rcu_read_lock();
+
+	dst = __sk_dst_get(sk);
+	if (!dst || !dst->obsolete || dst->ops->check(dst, 0)) {
+		rcu_read_unlock();
+		return;
+	}
 	inet_opt = rcu_dereference(inet->inet_opt);
 	if (inet_opt && inet_opt->opt.srr)
 		daddr = inet_opt->opt.faddr;
@@ -105,8 +113,10 @@ void ip4_datagram_release_cb(struct sock *sk)
 			      inet->inet_saddr, inet->inet_dport,
 			      inet->inet_sport, sk->sk_protocol,
 			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
-	if (!IS_ERR(rt))
-		__sk_dst_set(sk, &rt->dst);
+
+	dst = !IS_ERR(rt) ? &rt->dst : NULL;
+	sk_dst_set(sk, dst);
+
 	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(ip4_datagram_release_cb);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index bdbf68bb2e2d..e9449376b58e 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -106,7 +106,6 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
 #define IN4_ADDR_HSIZE		(1U << IN4_ADDR_HSIZE_SHIFT)
 
 static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
-static DEFINE_SPINLOCK(inet_addr_hash_lock);
 
 static u32 inet_addr_hash(struct net *net, __be32 addr)
 {
@@ -119,16 +118,14 @@ static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
 {
 	u32 hash = inet_addr_hash(net, ifa->ifa_local);
 
-	spin_lock(&inet_addr_hash_lock);
+	ASSERT_RTNL();
 	hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
-	spin_unlock(&inet_addr_hash_lock);
 }
 
 static void inet_hash_remove(struct in_ifaddr *ifa)
 {
-	spin_lock(&inet_addr_hash_lock);
+	ASSERT_RTNL();
 	hlist_del_init_rcu(&ifa->hash);
-	spin_unlock(&inet_addr_hash_lock);
 }
 
 /**
@@ -830,7 +827,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
 	ifa_existing = find_matching_ifa(ifa);
 	if (!ifa_existing) {
 		/* It would be best to check for !NLM_F_CREATE here but
-		 * userspace alreay relies on not having to provide this.
+		 * userspace already relies on not having to provide this.
 		 */
 		set_ifa_lifetime(ifa, valid_lft, prefered_lft);
 		return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid);
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 250be7421ab3..4e9619bca732 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -84,7 +84,8 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 			ptr--;
 		}
 		if (tpi->flags&TUNNEL_CSUM &&
-		    !(skb_shinfo(skb)->gso_type & SKB_GSO_GRE)) {
+		    !(skb_shinfo(skb)->gso_type &
+		      (SKB_GSO_GRE|SKB_GSO_GRE_CSUM))) {
 			*ptr = 0;
 			*(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0,
 								 skb->len, 0));
@@ -93,28 +94,6 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 }
 EXPORT_SYMBOL_GPL(gre_build_header);
 
-static __sum16 check_checksum(struct sk_buff *skb)
-{
-	__sum16 csum = 0;
-
-	switch (skb->ip_summed) {
-	case CHECKSUM_COMPLETE:
-		csum = csum_fold(skb->csum);
-
-		if (!csum)
-			break;
-		/* Fall through. */
-
-	case CHECKSUM_NONE:
-		skb->csum = 0;
-		csum = __skb_checksum_complete(skb);
-		skb->ip_summed = CHECKSUM_COMPLETE;
-		break;
-	}
-
-	return csum;
-}
-
 static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 			    bool *csum_err)
 {
@@ -141,7 +120,7 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 
 	options = (__be32 *)(greh + 1);
 	if (greh->flags & GRE_CSUM) {
-		if (check_checksum(skb)) {
+		if (skb_checksum_simple_validate(skb)) {
 			*csum_err = true;
 			return -EINVAL;
 		}
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index f1d32280cb54..eb92deb12666 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -42,6 +42,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 				  SKB_GSO_DODGY |
 				  SKB_GSO_TCP_ECN |
 				  SKB_GSO_GRE |
+				  SKB_GSO_GRE_CSUM |
 				  SKB_GSO_IPIP)))
 		goto out;
 
@@ -55,6 +56,8 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 		goto out;
 
 	csum = !!(greh->flags & GRE_CSUM);
+	if (csum)
+		skb->encap_hdr_csum = 1;
 
 	if (unlikely(!pskb_may_pull(skb, ghl)))
 		goto out;
@@ -94,10 +97,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 		}
 	}
 
-	greh = (struct gre_base_hdr *)(skb->data);
+	skb_reset_transport_header(skb);
+
+	greh = (struct gre_base_hdr *)
+	    skb_transport_header(skb);
 	pcsum = (__be32 *)(greh + 1);
 	*pcsum = 0;
-	*(__sum16 *)pcsum = csum_fold(skb_checksum(skb, 0, skb->len, 0));
+	*(__sum16 *)pcsum = gso_make_checksum(skb, 0);
 	}
 	__skb_push(skb, tnl_hlen - ghl);
 
@@ -125,10 +131,12 @@ static __sum16 gro_skb_checksum(struct sk_buff *skb)
 					csum_partial(skb->data, skb_gro_offset(skb), 0));
 	sum = csum_fold(NAPI_GRO_CB(skb)->csum);
 	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) {
-		if (unlikely(!sum))
+		if (unlikely(!sum) && !skb->csum_complete_sw)
 			netdev_rx_csum_fault(skb->dev);
-	} else
+	} else {
 		skb->ip_summed = CHECKSUM_COMPLETE;
+		skb->csum_complete_sw = 1;
+	}
 
 	return sum;
 }
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 0134663fdbce..79c3d947a481 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -337,6 +337,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 	struct sock *sk;
 	struct inet_sock *inet;
 	__be32 daddr, saddr;
+	u32 mark = IP4_REPLY_MARK(net, skb->mark);
 
 	if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb))
 		return;
@@ -349,6 +350,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 	icmp_param->data.icmph.checksum = 0;
 
 	inet->tos = ip_hdr(skb)->tos;
+	sk->sk_mark = mark;
 	daddr = ipc.addr = ip_hdr(skb)->saddr;
 	saddr = fib_compute_spec_dst(skb);
 	ipc.opt = NULL;
@@ -364,6 +366,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 	memset(&fl4, 0, sizeof(fl4));
 	fl4.daddr = daddr;
 	fl4.saddr = saddr;
+	fl4.flowi4_mark = mark;
 	fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
 	fl4.flowi4_proto = IPPROTO_ICMP;
 	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
@@ -382,7 +385,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
 					struct flowi4 *fl4,
 					struct sk_buff *skb_in,
 					const struct iphdr *iph,
-					__be32 saddr, u8 tos,
+					__be32 saddr, u8 tos, u32 mark,
 					int type, int code,
 					struct icmp_bxm *param)
 {
@@ -394,6 +397,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
 	fl4->daddr = (param->replyopts.opt.opt.srr ?
 		      param->replyopts.opt.opt.faddr : iph->saddr);
 	fl4->saddr = saddr;
+	fl4->flowi4_mark = mark;
 	fl4->flowi4_tos = RT_TOS(tos);
 	fl4->flowi4_proto = IPPROTO_ICMP;
 	fl4->fl4_icmp_type = type;
@@ -491,6 +495,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	struct flowi4 fl4;
 	__be32 saddr;
 	u8 tos;
+	u32 mark;
 	struct net *net;
 	struct sock *sk;
 
@@ -592,6 +597,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) |
 					   IPTOS_PREC_INTERNETCONTROL) :
 					  iph->tos;
+	mark = IP4_REPLY_MARK(net, skb_in->mark);
 
 	if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb_in))
 		goto out_unlock;
@@ -608,13 +614,14 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	icmp_param->skb = skb_in;
 	icmp_param->offset = skb_network_offset(skb_in);
 	inet_sk(sk)->tos = tos;
+	sk->sk_mark = mark;
 	ipc.addr = iph->saddr;
 	ipc.opt = &icmp_param->replyopts.opt;
 	ipc.tx_flags = 0;
 	ipc.ttl = 0;
 	ipc.tos = -1;
 
-	rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos,
+	rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
 			       type, code, icmp_param);
 	if (IS_ERR(rt))
 		goto out_unlock;
@@ -908,16 +915,8 @@ int icmp_rcv(struct sk_buff *skb)
908 915
909 ICMP_INC_STATS_BH(net, ICMP_MIB_INMSGS); 916 ICMP_INC_STATS_BH(net, ICMP_MIB_INMSGS);
910 917
911 switch (skb->ip_summed) { 918 if (skb_checksum_simple_validate(skb))
912 case CHECKSUM_COMPLETE: 919 goto csum_error;
913 if (!csum_fold(skb->csum))
914 break;
915 /* fall through */
916 case CHECKSUM_NONE:
917 skb->csum = 0;
918 if (__skb_checksum_complete(skb))
919 goto csum_error;
920 }
921 920
922 if (!pskb_pull(skb, sizeof(*icmph))) 921 if (!pskb_pull(skb, sizeof(*icmph)))
923 goto error; 922 goto error;
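Note on the mark plumbing above: IP4_REPLY_MARK itself is not part of this hunk. As a sketch of its expected semantics (assuming the helper this series adds to include/net/ip.h; the exact definition may differ in detail), it reflects the incoming skb->mark into replies only when the per-netns fwmark_reflect sysctl, added later in this diff, is enabled:

/* Sketch, not the verbatim header: reply packets inherit the mark of
 * the packet they answer iff net.ipv4.fwmark_reflect is set.
 */
#define IP4_REPLY_MARK(net, mark) \
	((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)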
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 97e4d1655d26..6748d420f714 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -369,7 +369,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
 	pip->saddr = fl4.saddr;
 	pip->protocol = IPPROTO_IGMP;
 	pip->tot_len = 0;	/* filled in later */
-	ip_select_ident(skb, &rt->dst, NULL);
+	ip_select_ident(skb, NULL);
 	((u8 *)&pip[1])[0] = IPOPT_RA;
 	((u8 *)&pip[1])[1] = 4;
 	((u8 *)&pip[1])[2] = 0;
@@ -714,7 +714,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
 	iph->daddr = dst;
 	iph->saddr = fl4.saddr;
 	iph->protocol = IPPROTO_IGMP;
-	ip_select_ident(skb, &rt->dst, NULL);
+	ip_select_ident(skb, NULL);
 	((u8 *)&iph[1])[0] = IPOPT_RA;
 	((u8 *)&iph[1])[1] = 4;
 	((u8 *)&iph[1])[2] = 0;
@@ -988,16 +988,8 @@ int igmp_rcv(struct sk_buff *skb)
 	if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
 		goto drop;
 
-	switch (skb->ip_summed) {
-	case CHECKSUM_COMPLETE:
-		if (!csum_fold(skb->csum))
-			break;
-		/* fall through */
-	case CHECKSUM_NONE:
-		skb->csum = 0;
-		if (__skb_checksum_complete(skb))
-			goto drop;
-	}
+	if (skb_checksum_simple_validate(skb))
+		goto drop;
 
 	ih = igmp_hdr(skb);
 	switch (ih->type) {
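The open-coded ip_summed switch removed here (and in the icmp.c hunk above) collapses into skb_checksum_simple_validate(), which folds the accumulated 32-bit sum and treats a zero fold as valid. A standalone userspace illustration of that fold, compilable on its own (names are illustrative, not kernel code):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Fold a 32-bit one's-complement accumulator into the 16-bit
 * Internet checksum, as csum_fold() does in the kernel.
 */
static uint16_t csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold carries once */
	sum = (sum & 0xffff) + (sum >> 16);	/* and any new carry */
	return (uint16_t)~sum;
}

/* Sum a buffer 16 bits at a time; a packet whose stored checksum is
 * correct sums to 0xffff, so csum_fold() returns 0 for a valid one.
 */
static uint32_t csum_partial(const uint8_t *p, size_t len)
{
	uint32_t sum = 0;

	while (len > 1) {
		sum += (uint32_t)(p[0] << 8 | p[1]);
		p += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)(p[0] << 8);
	return sum;
}

int main(void)
{
	/* ICMP echo request header with a precomputed valid checksum. */
	uint8_t icmp[8] = { 8, 0, 0xf7, 0xff, 0, 0, 0, 0 };

	printf("fold = 0x%04x (0 means valid)\n",
	       csum_fold(csum_partial(icmp, sizeof(icmp))));
	return 0;
}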
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index a56b8e6e866a..14d02ea905b6 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -29,9 +29,6 @@ const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
 EXPORT_SYMBOL(inet_csk_timer_bug_msg);
 #endif
 
-unsigned long *sysctl_local_reserved_ports;
-EXPORT_SYMBOL(sysctl_local_reserved_ports);
-
 void inet_get_local_port_range(struct net *net, int *low, int *high)
 {
 	unsigned int seq;
@@ -113,7 +110,7 @@ again:
 
 		smallest_size = -1;
 		do {
-			if (inet_is_reserved_local_port(rover))
+			if (inet_is_local_reserved_port(net, rover))
 				goto next_nolock;
 			head = &hashinfo->bhash[inet_bhashfn(net, rover,
 					hashinfo->bhash_size)];
@@ -408,7 +405,7 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
 	struct net *net = sock_net(sk);
 	int flags = inet_sk_flowi_flags(sk);
 
-	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
+	flowi4_init_output(fl4, sk->sk_bound_dev_if, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol,
			   flags,
@@ -445,7 +442,7 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
 
 	rcu_read_lock();
 	opt = rcu_dereference(newinet->inet_opt);
-	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
+	flowi4_init_output(fl4, sk->sk_bound_dev_if, inet_rsk(req)->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
@@ -680,6 +677,8 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
 		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
 		newsk->sk_write_space = sk_stream_write_space;
 
+		newsk->sk_mark = inet_rsk(req)->ir_mark;
+
 		newicsk->icsk_retransmits = 0;
 		newicsk->icsk_backoff = 0;
 		newicsk->icsk_probes_out = 0;
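ireq->ir_mark is populated by the request-socket setup paths (see the syncookies.c hunk later in this diff). A sketch of the selection helper, assuming the inet_request_mark() form this series adds to include/net/inet_sock.h (details may differ from the exact header):

/* Sketch: a mark explicitly set on the listener wins; otherwise, with
 * net.ipv4.tcp_fwmark_accept enabled, the mark of the incoming SYN is
 * inherited by the request and, via the hunk above, by the child sock.
 */
static inline u32 inet_request_mark(struct sock *sk, struct sk_buff *skb)
{
	if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)
		return skb->mark;

	return sk->sk_mark;
}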
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 8b9cf279450d..43116e8c8e13 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -274,7 +274,7 @@ struct sock *__inet_lookup_established(struct net *net,
 				  const __be32 daddr, const u16 hnum,
 				  const int dif)
 {
-	INET_ADDR_COOKIE(acookie, saddr, daddr)
+	INET_ADDR_COOKIE(acookie, saddr, daddr);
 	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
 	struct sock *sk;
 	const struct hlist_nulls_node *node;
@@ -327,7 +327,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
 	__be32 daddr = inet->inet_rcv_saddr;
 	__be32 saddr = inet->inet_daddr;
 	int dif = sk->sk_bound_dev_if;
-	INET_ADDR_COOKIE(acookie, saddr, daddr)
+	INET_ADDR_COOKIE(acookie, saddr, daddr);
 	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
 	struct net *net = sock_net(sk);
 	unsigned int hash = inet_ehashfn(net, daddr, lport,
@@ -500,7 +500,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 		local_bh_disable();
 		for (i = 1; i <= remaining; i++) {
 			port = low + (i + offset) % remaining;
-			if (inet_is_reserved_local_port(port))
+			if (inet_is_local_reserved_port(net, port))
 				continue;
 			head = &hinfo->bhash[inet_bhashfn(net, port,
 					hinfo->bhash_size)];
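The renamed predicate takes the netns so each namespace consults its own reserved-ports bitmap (allocated per net in the sysctl_net_ipv4.c hunk below). A sketch of its likely shape, assuming the include/net/ip.h form from this series; the NULL check covers configurations where the bitmap was never allocated:

static inline int inet_is_local_reserved_port(struct net *net, int port)
{
	if (!net->ipv4.sysctl_local_reserved_ports)
		return 0;

	return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}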
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 48f424465112..4ced1b9a97f0 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -26,20 +26,7 @@
  *  Theory of operations.
  *  We keep one entry for each peer IP address.  The nodes contains long-living
  *  information about the peer which doesn't depend on routes.
- *  At this moment this information consists only of ID field for the next
- *  outgoing IP packet.  This field is incremented with each packet as encoded
- *  in inet_getid() function (include/net/inetpeer.h).
- *  At the moment of writing this notes identifier of IP packets is generated
- *  to be unpredictable using this code only for packets subjected
- *  (actually or potentially) to defragmentation.  I.e. DF packets less than
- *  PMTU in size when local fragmentation is disabled use a constant ID and do
- *  not use this code (see ip_select_ident() in include/net/ip.h).
  *
- *  Route cache entries hold references to our nodes.
- *  New cache entries get references via lookup by destination IP address in
- *  the avl tree.  The reference is grabbed only when it's needed i.e. only
- *  when we try to output IP packet which needs an unpredictable ID (see
- *  __ip_select_ident() in net/ipv4/route.c).
  *  Nodes are removed only when reference counter goes to 0.
  *  When it's happened the node may be removed when a sufficient amount of
  *  time has been passed since its last use.  The less-recently-used entry can
@@ -62,7 +49,6 @@
  *		refcnt: atomically against modifications on other CPU;
  *		   usually under some other lock to prevent node disappearing
  *		daddr: unchangeable
- *		ip_id_count: atomic value (no lock needed)
  */
 
 static struct kmem_cache *peer_cachep __read_mostly;
@@ -120,7 +106,7 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min
 static void inetpeer_gc_worker(struct work_struct *work)
 {
 	struct inet_peer *p, *n, *c;
-	LIST_HEAD(list);
+	struct list_head list;
 
 	spin_lock_bh(&gc_lock);
 	list_replace_init(&gc_list, &list);
@@ -497,10 +483,6 @@ relookup:
 		p->daddr = *daddr;
 		atomic_set(&p->refcnt, 1);
 		atomic_set(&p->rid, 0);
-		atomic_set(&p->ip_id_count,
-				(daddr->family == AF_INET) ?
-					secure_ip_id(daddr->addr.a4) :
-					secure_ipv6_id(daddr->addr.a6));
 		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
 		p->rate_tokens = 0;
 		/* 60*HZ is arbitrary, but chosen enough high so that the first
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 6f111e48e11c..3a83ce5efa80 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -42,7 +42,7 @@
 static bool ip_may_fragment(const struct sk_buff *skb)
 {
 	return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
-		skb->local_df;
+		skb->ignore_df;
 }
 
 static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 94213c891565..9b842544aea3 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -410,7 +410,7 @@ static int ipgre_open(struct net_device *dev)
 		struct flowi4 fl4;
 		struct rtable *rt;
 
-		rt = ip_route_output_gre(dev_net(dev), &fl4,
+		rt = ip_route_output_gre(t->net, &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
@@ -434,7 +434,7 @@ static int ipgre_close(struct net_device *dev)
 
 	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
 		struct in_device *in_dev;
-		in_dev = inetdev_by_index(dev_net(dev), t->mlink);
+		in_dev = inetdev_by_index(t->net, t->mlink);
 		if (in_dev)
 			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
 	}
@@ -478,7 +478,7 @@ static void __gre_tunnel_init(struct net_device *dev)
 	dev->needed_headroom = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
 	dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 4;
 
-	dev->features |= NETIF_F_NETNS_LOCAL | GRE_FEATURES;
+	dev->features |= GRE_FEATURES;
 	dev->hw_features |= GRE_FEATURES;
 
 	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
@@ -649,6 +649,7 @@ static void ipgre_tap_setup(struct net_device *dev)
 {
 	ether_setup(dev);
 	dev->netdev_ops = &gre_tap_netdev_ops;
+	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 	ip_tunnel_setup(dev, gre_tap_net_id);
 }
 
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index f4ab72e19af9..5e7aecea05cd 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -364,7 +364,7 @@ int ip_options_compile(struct net *net,
 			}
 			if (optptr[2] <= optlen) {
 				unsigned char *timeptr = NULL;
-				if (optptr[2]+3 > optptr[1]) {
+				if (optptr[2]+3 > optlen) {
 					pp_ptr = optptr + 2;
 					goto error;
 				}
@@ -376,7 +376,7 @@ int ip_options_compile(struct net *net,
 					optptr[2] += 4;
 					break;
 				case IPOPT_TS_TSANDADDR:
-					if (optptr[2]+7 > optptr[1]) {
+					if (optptr[2]+7 > optlen) {
 						pp_ptr = optptr + 2;
 						goto error;
 					}
@@ -390,7 +390,7 @@ int ip_options_compile(struct net *net,
 					optptr[2] += 8;
 					break;
 				case IPOPT_TS_PRESPEC:
-					if (optptr[2]+7 > optptr[1]) {
+					if (optptr[2]+7 > optlen) {
 						pp_ptr = optptr + 2;
 						goto error;
 					}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index a52f50187b54..8d3b6b0e9857 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -148,7 +148,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
 	iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
 	iph->saddr = saddr;
 	iph->protocol = sk->sk_protocol;
-	ip_select_ident(skb, &rt->dst, sk);
+	ip_select_ident(skb, sk);
 
 	if (opt && opt->opt.optlen) {
 		iph->ihl += opt->opt.optlen>>2;
@@ -415,7 +415,7 @@ packet_routed:
 	skb_reset_network_header(skb);
 	iph = ip_hdr(skb);
 	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
-	if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
+	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
 		iph->frag_off = htons(IP_DF);
 	else
 		iph->frag_off = 0;
@@ -430,8 +430,7 @@ packet_routed:
 		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
 	}
 
-	ip_select_ident_more(skb, &rt->dst, sk,
-			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);
+	ip_select_ident_segs(skb, sk, skb_shinfo(skb)->gso_segs ?: 1);
 
 	/* TODO : should we use skb->sk here instead of sk ? */
 	skb->priority = sk->sk_priority;
@@ -501,7 +500,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 	iph = ip_hdr(skb);
 
 	mtu = ip_skb_dst_mtu(skb);
-	if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->local_df) ||
+	if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
 		     (IPCB(skb)->frag_max_size &&
 		      IPCB(skb)->frag_max_size > mtu))) {
 		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
@@ -866,7 +865,7 @@ static int __ip_append_data(struct sock *sk,
 
 	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
-	maxnonfragsize = ip_sk_local_df(sk) ? 0xFFFF : mtu;
+	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;
 
 	if (cork->length + length > maxnonfragsize - fragheaderlen) {
 		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
@@ -1189,7 +1188,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
 
 	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
-	maxnonfragsize = ip_sk_local_df(sk) ? 0xFFFF : mtu;
+	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;
 
 	if (cork->length + size > maxnonfragsize - fragheaderlen) {
 		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
@@ -1350,10 +1349,10 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
 	 * to fragment the frame generated here. No matter, what transforms
 	 * how transforms change size of the packet, it will come out.
 	 */
-	skb->local_df = ip_sk_local_df(sk);
+	skb->ignore_df = ip_sk_ignore_df(sk);
 
 	/* DF bit is set when we want to see DF on outgoing frames.
-	 * If local_df is set too, we still allow to fragment this frame
+	 * If ignore_df is set too, we still allow to fragment this frame
 	 * locally. */
 	if (inet->pmtudisc == IP_PMTUDISC_DO ||
 	    inet->pmtudisc == IP_PMTUDISC_PROBE ||
@@ -1379,7 +1378,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
 	iph->ttl = ttl;
 	iph->protocol = sk->sk_protocol;
 	ip_copy_addrs(iph, fl4);
-	ip_select_ident(skb, &rt->dst, sk);
+	ip_select_ident(skb, sk);
 
 	if (opt) {
 		iph->ihl += opt->optlen>>2;
@@ -1546,7 +1545,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
 		daddr = replyopts.opt.opt.faddr;
 	}
 
-	flowi4_init_output(&fl4, arg->bound_dev_if, 0,
+	flowi4_init_output(&fl4, arg->bound_dev_if,
+			   IP4_REPLY_MARK(net, skb->mark),
 			   RT_TOS(arg->tos),
 			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
 			   ip_reply_arg_flowi_flags(arg),
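ip_select_ident_more() becomes ip_select_ident_segs(), which takes the segment count directly (note callers now pass gso_segs rather than gso_segs - 1, so the generator can reserve the whole ID range up front). A sketch of the new helper's logic, assuming the include/net/ip.h form from this series rather than quoting it:

/* Sketch of ip_select_ident_segs(). Atomic-DF packets keep the cheap
 * per-socket counter (bumped by the number of GSO segments so the
 * sub-frames get consecutive IDs); everything else goes through
 * __ip_select_ident(), whose new hash-based generator appears in the
 * route.c hunk below.
 */
static inline void ip_select_ident_segs(struct sk_buff *skb,
					struct sock *sk, int segs)
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
		/* Non-fragmentable frames still need changing IDs to
		 * keep buggy header-compression peers happy.
		 */
		iph->id = (sk && inet_sk(sk)->inet_daddr) ?
				htons(inet_sk(sk)->inet_id) : 0;
		if (sk && inet_sk(sk)->inet_daddr)
			inet_sk(sk)->inet_id += segs;
	} else {
		__ip_select_ident(iph, segs);
	}
}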
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 9b553157e556..097b3e7c1e8f 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -396,11 +396,10 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
 					  struct ip_tunnel_net *itn,
 					  struct ip_tunnel_parm *parms)
 {
-	struct ip_tunnel *nt, *fbt;
+	struct ip_tunnel *nt;
 	struct net_device *dev;
 
 	BUG_ON(!itn->fb_tunnel_dev);
-	fbt = netdev_priv(itn->fb_tunnel_dev);
 	dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
 	if (IS_ERR(dev))
 		return ERR_CAST(dev);
@@ -760,10 +759,8 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
 
 		if (!t && (cmd == SIOCADDTUNNEL)) {
 			t = ip_tunnel_create(net, itn, p);
-			if (IS_ERR(t)) {
-				err = PTR_ERR(t);
-				break;
-			}
+			err = PTR_ERR_OR_ZERO(t);
+			break;
 		}
 		if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
 			if (t != NULL) {
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index bcf206c79005..f4c987bb7e94 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -74,7 +74,7 @@ int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
 	iph->daddr = dst;
 	iph->saddr = src;
 	iph->ttl = ttl;
-	__ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
+	__ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1);
 
 	err = ip_local_out_sk(sk, skb);
 	if (unlikely(net_xmit_eval(err)))
@@ -135,6 +135,14 @@ struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
 		return skb;
 	}
 
+	/* If packet is not gso and we are resolving any partial checksum,
+	 * clear encapsulation flag. This allows setting CHECKSUM_PARTIAL
+	 * on the outer header without confusing devices that implement
+	 * NETIF_F_IP_CSUM with encapsulation.
+	 */
+	if (csum_help)
+		skb->encapsulation = 0;
+
 	if (skb->ip_summed == CHECKSUM_PARTIAL && csum_help) {
 		err = skb_checksum_help(skb);
 		if (unlikely(err))
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 09680ddbc677..62eaa005e146 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -486,4 +486,5 @@ static void __exit ipip_fini(void)
 module_init(ipip_init);
 module_exit(ipip_fini);
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("ipip");
 MODULE_ALIAS_NETDEV("tunl0");
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index d84dc8d4c916..65bcaa789043 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -484,7 +484,7 @@ static void reg_vif_setup(struct net_device *dev)
 	dev->type = ARPHRD_PIMREG;
 	dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
 	dev->flags = IFF_NOARP;
-	dev->netdev_ops = &reg_vif_netdev_ops,
+	dev->netdev_ops = &reg_vif_netdev_ops;
 	dev->destructor = free_netdev;
 	dev->features |= NETIF_F_NETNS_LOCAL;
 }
@@ -1663,7 +1663,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
 	iph->protocol = IPPROTO_IPIP;
 	iph->ihl = 5;
 	iph->tot_len = htons(skb->len);
-	ip_select_ident(skb, skb_dst(skb), NULL);
+	ip_select_ident(skb, NULL);
 	ip_send_check(iph);
 
 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index ee2886126e3d..f1787c04a4dd 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -91,17 +91,9 @@ nf_nat_ipv4_fn(const struct nf_hook_ops *ops,
 	if (nf_ct_is_untracked(ct))
 		return NF_ACCEPT;
 
-	nat = nfct_nat(ct);
-	if (!nat) {
-		/* NAT module was loaded late. */
-		if (nf_ct_is_confirmed(ct))
-			return NF_ACCEPT;
-		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
-		if (nat == NULL) {
-			pr_debug("failed to add NAT extension\n");
-			return NF_ACCEPT;
-		}
-	}
+	nat = nf_ct_nat_ext_add(ct);
+	if (nat == NULL)
+		return NF_ACCEPT;
 
 	switch (ctinfo) {
 	case IP_CT_RELATED:
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index f40f321b41fc..b8f6381c7d0b 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -34,7 +34,7 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
 
 	if (!err) {
 		ip_send_check(ip_hdr(skb));
-		skb->local_df = 1;
+		skb->ignore_df = 1;
 	}
 
 	return err;
diff --git a/net/ipv4/netfilter/nft_chain_nat_ipv4.c b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
index b5b256d45e67..3964157d826c 100644
--- a/net/ipv4/netfilter/nft_chain_nat_ipv4.c
+++ b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
@@ -48,15 +48,9 @@ static unsigned int nf_nat_fn(const struct nf_hook_ops *ops,
 
 	NF_CT_ASSERT(!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)));
 
-	nat = nfct_nat(ct);
-	if (nat == NULL) {
-		/* Conntrack module was loaded late, can't add extension. */
-		if (nf_ct_is_confirmed(ct))
-			return NF_ACCEPT;
-		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
-		if (nat == NULL)
-			return NF_ACCEPT;
-	}
+	nat = nf_ct_nat_ext_add(ct);
+	if (nat == NULL)
+		return NF_ACCEPT;
 
 	switch (ctinfo) {
 	case IP_CT_RELATED:
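Both NAT hunks above deduplicate the same late-extension dance into nf_ct_nat_ext_add(). A sketch of the consolidated helper, assuming the include/net/netfilter/nf_nat.h form this series introduces (details may differ):

static inline struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nfct_nat(ct);

	if (nat)
		return nat;

	/* NAT module loaded late: an extension can only be added while
	 * the conntrack entry is still unconfirmed.
	 */
	if (!nf_ct_is_confirmed(ct))
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);

	return nat;
}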
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index ad737fad6d8b..ae0af9386f7c 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -345,15 +345,15 @@ static void icmp_put(struct seq_file *seq)
 	for (i = 0; icmpmibmap[i].name != NULL; i++)
 		seq_printf(seq, " Out%s", icmpmibmap[i].name);
 	seq_printf(seq, "\nIcmp: %lu %lu %lu",
-		snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INMSGS),
-		snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INERRORS),
-		snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_CSUMERRORS));
+		snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_INMSGS),
+		snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_INERRORS),
+		snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_CSUMERRORS));
 	for (i = 0; icmpmibmap[i].name != NULL; i++)
 		seq_printf(seq, " %lu",
 			   atomic_long_read(ptr + icmpmibmap[i].index));
 	seq_printf(seq, " %lu %lu",
-		snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
-		snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
+		snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
+		snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
 	for (i = 0; icmpmibmap[i].name != NULL; i++)
 		seq_printf(seq, " %lu",
 			atomic_long_read(ptr + (icmpmibmap[i].index | 0x100)));
@@ -379,7 +379,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
 	BUILD_BUG_ON(offsetof(struct ipstats_mib, mibs) != 0);
 	for (i = 0; snmp4_ipstats_list[i].name != NULL; i++)
 		seq_printf(seq, " %llu",
-			   snmp_fold_field64((void __percpu **)net->mib.ip_statistics,
+			   snmp_fold_field64(net->mib.ip_statistics,
					     snmp4_ipstats_list[i].entry,
					     offsetof(struct ipstats_mib, syncp)));
 
@@ -395,11 +395,11 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
 		/* MaxConn field is signed, RFC 2012 */
 		if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN)
 			seq_printf(seq, " %ld",
-				   snmp_fold_field((void __percpu **)net->mib.tcp_statistics,
+				   snmp_fold_field(net->mib.tcp_statistics,
						   snmp4_tcp_list[i].entry));
 		else
 			seq_printf(seq, " %lu",
-				   snmp_fold_field((void __percpu **)net->mib.tcp_statistics,
+				   snmp_fold_field(net->mib.tcp_statistics,
						   snmp4_tcp_list[i].entry));
 	}
 
@@ -410,7 +410,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
 	seq_puts(seq, "\nUdp:");
 	for (i = 0; snmp4_udp_list[i].name != NULL; i++)
 		seq_printf(seq, " %lu",
-			   snmp_fold_field((void __percpu **)net->mib.udp_statistics,
+			   snmp_fold_field(net->mib.udp_statistics,
					   snmp4_udp_list[i].entry));
 
 	/* the UDP and UDP-Lite MIBs are the same */
@@ -421,7 +421,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
 	seq_puts(seq, "\nUdpLite:");
 	for (i = 0; snmp4_udp_list[i].name != NULL; i++)
 		seq_printf(seq, " %lu",
-			   snmp_fold_field((void __percpu **)net->mib.udplite_statistics,
+			   snmp_fold_field(net->mib.udplite_statistics,
					   snmp4_udp_list[i].entry));
 
 	seq_putc(seq, '\n');
@@ -458,7 +458,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
 	seq_puts(seq, "\nTcpExt:");
 	for (i = 0; snmp4_net_list[i].name != NULL; i++)
 		seq_printf(seq, " %lu",
-			   snmp_fold_field((void __percpu **)net->mib.net_statistics,
+			   snmp_fold_field(net->mib.net_statistics,
					   snmp4_net_list[i].entry));
 
 	seq_puts(seq, "\nIpExt:");
@@ -468,7 +468,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
 	seq_puts(seq, "\nIpExt:");
 	for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++)
 		seq_printf(seq, " %llu",
-			   snmp_fold_field64((void __percpu **)net->mib.ip_statistics,
+			   snmp_fold_field64(net->mib.ip_statistics,
					     snmp4_ipextstats_list[i].entry,
					     offsetof(struct ipstats_mib, syncp)));
 
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index a9dbe58bdfe7..2c65160565e1 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -389,7 +389,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
 		iph->check   = 0;
 		iph->tot_len = htons(length);
 		if (!iph->id)
-			ip_select_ident(skb, &rt->dst, NULL);
+			ip_select_ident(skb, NULL);
 
 		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
 	}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 5e676be3daeb..082239ffe34a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -89,6 +89,7 @@
 #include <linux/rcupdate.h>
 #include <linux/times.h>
 #include <linux/slab.h>
+#include <linux/jhash.h>
 #include <net/dst.h>
 #include <net/net_namespace.h>
 #include <net/protocol.h>
@@ -456,39 +457,19 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
 	return neigh_create(&arp_tbl, pkey, dev);
 }
 
-/*
- * Peer allocation may fail only in serious out-of-memory conditions.  However
- * we still can generate some output.
- * Random ID selection looks a bit dangerous because we have no chances to
- * select ID being unique in a reasonable period of time.
- * But broken packet identifier may be better than no packet at all.
- */
-static void ip_select_fb_ident(struct iphdr *iph)
-{
-	static DEFINE_SPINLOCK(ip_fb_id_lock);
-	static u32 ip_fallback_id;
-	u32 salt;
-
-	spin_lock_bh(&ip_fb_id_lock);
-	salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
-	iph->id = htons(salt & 0xFFFF);
-	ip_fallback_id = salt;
-	spin_unlock_bh(&ip_fb_id_lock);
-}
+atomic_t *ip_idents __read_mostly;
+EXPORT_SYMBOL(ip_idents);
 
-void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
+void __ip_select_ident(struct iphdr *iph, int segs)
 {
-	struct net *net = dev_net(dst->dev);
-	struct inet_peer *peer;
+	static u32 ip_idents_hashrnd __read_mostly;
+	u32 hash, id;
 
-	peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
-	if (peer) {
-		iph->id = htons(inet_getid(peer, more));
-		inet_putpeer(peer);
-		return;
-	}
+	net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
 
-	ip_select_fb_ident(iph);
+	hash = jhash_1word((__force u32)iph->daddr, ip_idents_hashrnd);
+	id = ip_idents_reserve(hash, segs);
+	iph->id = htons(id);
 }
 EXPORT_SYMBOL(__ip_select_ident);
 
@@ -993,6 +974,9 @@ void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
 	struct flowi4 fl4;
 	struct rtable *rt;
 
+	if (!mark)
+		mark = IP4_REPLY_MARK(net, skb->mark);
+
 	__build_flow_key(&fl4, NULL, iph, oif,
 			 RT_TOS(iph->tos), protocol, mark, flow_flags);
 	rt = __ip_route_output_key(net, &fl4);
@@ -1010,6 +994,10 @@ static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 	struct rtable *rt;
 
 	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+
+	if (!fl4.flowi4_mark)
+		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
+
 	rt = __ip_route_output_key(sock_net(sk), &fl4);
 	if (!IS_ERR(rt)) {
 		__ip_rt_update_pmtu(rt, &fl4, mtu);
@@ -2704,6 +2692,12 @@ int __init ip_rt_init(void)
 {
 	int rc = 0;
 
+	ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
+	if (!ip_idents)
+		panic("IP: failed to allocate ip_idents\n");
+
+	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
+
 #ifdef CONFIG_IP_ROUTE_CLASSID
 	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
 	if (!ip_rt_acct)
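The per-destination inetpeer ID counter is gone: IP IDs now come from a fixed array of randomly seeded atomic counters, indexed by a keyed jhash of the destination. ip_idents_reserve() itself is not in this hunk; it presumably reduces to an atomic add-and-return on the hashed bucket. A standalone C sketch of the reservation idea (illustrative names, not the kernel's exact code; the kernel additionally seeds each bucket with random bytes, as the ip_rt_init() hunk shows):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define IP_IDENTS_SZ 2048u	/* bucket count used by the kernel */

static atomic_uint ip_idents[IP_IDENTS_SZ];

/* Reserve 'segs' consecutive IP IDs from the bucket chosen by the
 * flow hash and return the first one; concurrent callers on the same
 * bucket get disjoint ranges.
 */
static uint32_t ip_idents_reserve(uint32_t hash, int segs)
{
	atomic_uint *id = &ip_idents[hash % IP_IDENTS_SZ];

	return atomic_fetch_add(id, (unsigned int)segs);
}

int main(void)
{
	uint32_t hash = 0x12345678;	/* stand-in for jhash(daddr, rnd) */

	/* A 3-segment GSO frame, then a single packet on the same flow:
	 * the burst gets IDs n, n+1, n+2 and the next packet gets n+3.
	 */
	printf("first id of gso burst: %u\n", ip_idents_reserve(hash, 3) & 0xffff);
	printf("next single id:        %u\n", ip_idents_reserve(hash, 1) & 0xffff);
	return 0;
}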
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index f2ed13c2125f..c86624b36a62 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -303,6 +303,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	ireq->ir_rmt_port	= th->source;
 	ireq->ir_loc_addr	= ip_hdr(skb)->daddr;
 	ireq->ir_rmt_addr	= ip_hdr(skb)->saddr;
+	ireq->ir_mark		= inet_request_mark(sk, skb);
 	ireq->ecn_ok		= ecn_ok;
 	ireq->snd_wscale	= tcp_opt.snd_wscale;
 	ireq->sack_ok		= tcp_opt.sack_ok;
@@ -339,7 +340,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	 * hasn't changed since we received the original syn, but I see
 	 * no easy way to do this.
 	 */
-	flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
+	flowi4_init_output(&fl4, sk->sk_bound_dev_if, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
			   inet_sk_flowi_flags(sk),
			   (opt && opt->srr) ? opt->faddr : ireq->ir_rmt_addr,
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 5cde8f263d40..79a007c52558 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -437,13 +437,6 @@ static struct ctl_table ipv4_table[] = {
 		.proc_handler	= proc_dointvec
 	},
 	{
-		.procname	= "ip_local_reserved_ports",
-		.data		= NULL, /* initialized in sysctl_ipv4_init */
-		.maxlen		= 65536,
-		.mode		= 0644,
-		.proc_handler	= proc_do_large_bitmap,
-	},
-	{
 		.procname	= "igmp_max_memberships",
 		.data		= &sysctl_igmp_max_memberships,
 		.maxlen		= sizeof(int),
@@ -825,6 +818,13 @@ static struct ctl_table ipv4_net_table[] = {
 		.proc_handler	= ipv4_local_port_range,
 	},
 	{
+		.procname	= "ip_local_reserved_ports",
+		.data		= &init_net.ipv4.sysctl_local_reserved_ports,
+		.maxlen		= 65536,
+		.mode		= 0644,
+		.proc_handler	= proc_do_large_bitmap,
+	},
+	{
 		.procname	= "ip_no_pmtu_disc",
 		.data		= &init_net.ipv4.sysctl_ip_no_pmtu_disc,
 		.maxlen		= sizeof(int),
@@ -838,6 +838,20 @@ static struct ctl_table ipv4_net_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+	{
+		.procname	= "fwmark_reflect",
+		.data		= &init_net.ipv4.sysctl_fwmark_reflect,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "tcp_fwmark_accept",
+		.data		= &init_net.ipv4.sysctl_tcp_fwmark_accept,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
 	{ }
 };
 
@@ -862,8 +876,14 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
 	if (net->ipv4.ipv4_hdr == NULL)
 		goto err_reg;
 
+	net->ipv4.sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
+	if (!net->ipv4.sysctl_local_reserved_ports)
+		goto err_ports;
+
 	return 0;
 
+err_ports:
+	unregister_net_sysctl_table(net->ipv4.ipv4_hdr);
 err_reg:
 	if (!net_eq(net, &init_net))
 		kfree(table);
@@ -875,6 +895,7 @@ static __net_exit void ipv4_sysctl_exit_net(struct net *net)
 {
 	struct ctl_table *table;
 
+	kfree(net->ipv4.sysctl_local_reserved_ports);
 	table = net->ipv4.ipv4_hdr->ctl_table_arg;
 	unregister_net_sysctl_table(net->ipv4.ipv4_hdr);
 	kfree(table);
@@ -888,16 +909,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
 static __init int sysctl_ipv4_init(void)
 {
 	struct ctl_table_header *hdr;
-	struct ctl_table *i;
-
-	for (i = ipv4_table; i->procname; i++) {
-		if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
-			i->data = sysctl_local_reserved_ports;
-			break;
-		}
-	}
-	if (!i->procname)
-		return -EINVAL;
 
 	hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
 	if (hdr == NULL)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 4bd6d52eeffb..eb1dde37e678 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2916,6 +2916,14 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
 	case TCP_USER_TIMEOUT:
 		val = jiffies_to_msecs(icsk->icsk_user_timeout);
 		break;
+
+	case TCP_FASTOPEN:
+		if (icsk->icsk_accept_queue.fastopenq != NULL)
+			val = icsk->icsk_accept_queue.fastopenq->max_qlen;
+		else
+			val = 0;
+		break;
+
 	case TCP_TIMESTAMP:
 		val = tcp_time_stamp + tp->tsoffset;
 		break;
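With this hunk TCP_FASTOPEN becomes readable via getsockopt(): it reports the listener's configured Fast Open queue length, or 0 when TFO is not enabled on the socket. A minimal userspace usage sketch:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int qlen = 16, val = 0;
	socklen_t len = sizeof(val);

	/* Enable Fast Open on a would-be listener with a 16-entry queue... */
	if (setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen)) < 0)
		perror("setsockopt TCP_FASTOPEN");

	/* ...and read it back, which this hunk makes possible. */
	if (getsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &val, &len) == 0)
		printf("TCP_FASTOPEN qlen = %d\n", val);

	close(fd);
	return 0;
}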
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index 821846fb0a7e..d5de69bc04f5 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -140,13 +140,12 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 		ca->cnt = 1;
 }
 
-static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-			      u32 in_flight)
+static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
 
-	if (!tcp_is_cwnd_limited(sk, in_flight))
+	if (!tcp_is_cwnd_limited(sk))
 		return;
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 2b9464c93b88..7b09d8b49fa5 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -276,26 +276,6 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
 	return err;
 }
 
-/* RFC2861 Check whether we are limited by application or congestion window
- * This is the inverse of cwnd check in tcp_tso_should_defer
- */
-bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
-{
-	const struct tcp_sock *tp = tcp_sk(sk);
-	u32 left;
-
-	if (in_flight >= tp->snd_cwnd)
-		return true;
-
-	left = tp->snd_cwnd - in_flight;
-	if (sk_can_gso(sk) &&
-	    left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
-	    left < tp->xmit_size_goal_segs)
-		return true;
-	return left <= tcp_max_tso_deferred_mss(tp);
-}
-EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
-
 /* Slow start is used when congestion window is no greater than the slow start
  * threshold. We base on RFC2581 and also handle stretch ACKs properly.
  * We do not implement RFC3465 Appropriate Byte Counting (ABC) per se but
@@ -337,11 +317,11 @@ EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
-void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	if (!tcp_is_cwnd_limited(sk, in_flight))
+	if (!tcp_is_cwnd_limited(sk))
 		return;
 
 	/* In "safe" area, increase. */
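The in_flight-based check is removed from tcp_cong.c, and every congestion module in this diff switches to a parameterless tcp_is_cwnd_limited(). A sketch of where the logic went, assuming the include/net/tcp.h form from this series (not quoted verbatim): instead of recomputing in_flight on every ACK, the transmit path records whether cwnd was the limiting factor and the ACK path just consults that snapshot.

static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* In slow start, grow cwnd only while it is actually filled;
	 * tp->max_packets_out and tp->is_cwnd_limited are recorded by
	 * the output path at transmit time.
	 */
	if (tp->snd_cwnd <= tp->snd_ssthresh)
		return tp->snd_cwnd < 2 * tp->max_packets_out;

	return tp->is_cwnd_limited;
}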
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index b4f1b29b08bd..a9bd8a4828a9 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -304,13 +304,12 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 		ca->cnt = 1;
 }
 
-static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-			      u32 in_flight)
+static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
 
-	if (!tcp_is_cwnd_limited(sk, in_flight))
+	if (!tcp_is_cwnd_limited(sk))
 		return;
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh) {
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index f195d9316e55..62e48cf84e60 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -72,25 +72,224 @@ error:		kfree(ctx);
 	return err;
 }
 
-/* Computes the fastopen cookie for the IP path.
- * The path is a 128 bits long (pad with zeros for IPv4).
- *
- * The caller must check foc->len to determine if a valid cookie
- * has been generated successfully.
-*/
-void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
-			     struct tcp_fastopen_cookie *foc)
+static bool __tcp_fastopen_cookie_gen(const void *path,
+				      struct tcp_fastopen_cookie *foc)
 {
-	__be32 path[4] = { src, dst, 0, 0 };
 	struct tcp_fastopen_context *ctx;
+	bool ok = false;
 
 	tcp_fastopen_init_key_once(true);
 
 	rcu_read_lock();
 	ctx = rcu_dereference(tcp_fastopen_ctx);
 	if (ctx) {
-		crypto_cipher_encrypt_one(ctx->tfm, foc->val, (__u8 *)path);
+		crypto_cipher_encrypt_one(ctx->tfm, foc->val, path);
 		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
+		ok = true;
 	}
 	rcu_read_unlock();
+	return ok;
+}
+
+/* Generate the fastopen cookie by doing aes128 encryption on both
+ * the source and destination addresses. Pad 0s for IPv4 or IPv4-mapped-IPv6
+ * addresses. For the longer IPv6 addresses use CBC-MAC.
+ *
+ * XXX (TFO) - refactor when TCP_FASTOPEN_COOKIE_SIZE != AES_BLOCK_SIZE.
+ */
+static bool tcp_fastopen_cookie_gen(struct request_sock *req,
+				    struct sk_buff *syn,
+				    struct tcp_fastopen_cookie *foc)
+{
+	if (req->rsk_ops->family == AF_INET) {
+		const struct iphdr *iph = ip_hdr(syn);
+
+		__be32 path[4] = { iph->saddr, iph->daddr, 0, 0 };
+		return __tcp_fastopen_cookie_gen(path, foc);
+	}
+
+#if IS_ENABLED(CONFIG_IPV6)
+	if (req->rsk_ops->family == AF_INET6) {
+		const struct ipv6hdr *ip6h = ipv6_hdr(syn);
+		struct tcp_fastopen_cookie tmp;
+
+		if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) {
+			struct in6_addr *buf = (struct in6_addr *) tmp.val;
+			int i = 4;
+
+			for (i = 0; i < 4; i++)
+				buf->s6_addr32[i] ^= ip6h->daddr.s6_addr32[i];
+			return __tcp_fastopen_cookie_gen(buf, foc);
+		}
+	}
+#endif
+	return false;
+}
+
+static bool tcp_fastopen_create_child(struct sock *sk,
+				      struct sk_buff *skb,
+				      struct dst_entry *dst,
+				      struct request_sock *req)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+	struct sock *child;
+
+	req->num_retrans = 0;
+	req->num_timeout = 0;
+	req->sk = NULL;
+
+	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
+	if (child == NULL)
+		return false;
+
+	spin_lock(&queue->fastopenq->lock);
+	queue->fastopenq->qlen++;
+	spin_unlock(&queue->fastopenq->lock);
+
+	/* Initialize the child socket. Have to fix some values to take
+	 * into account the child is a Fast Open socket and is created
+	 * only out of the bits carried in the SYN packet.
+	 */
+	tp = tcp_sk(child);
+
+	tp->fastopen_rsk = req;
+	/* Do a hold on the listner sk so that if the listener is being
+	 * closed, the child that has been accepted can live on and still
+	 * access listen_lock.
+	 */
+	sock_hold(sk);
+	tcp_rsk(req)->listener = sk;
+
+	/* RFC1323: The window in SYN & SYN/ACK segments is never
+	 * scaled. So correct it appropriately.
+	 */
+	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
+
+	/* Activate the retrans timer so that SYNACK can be retransmitted.
+	 * The request socket is not added to the SYN table of the parent
+	 * because it's been added to the accept queue directly.
+	 */
+	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
+				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);
+
+	/* Add the child socket directly into the accept queue */
+	inet_csk_reqsk_queue_add(sk, req, child);
+
+	/* Now finish processing the fastopen child socket. */
+	inet_csk(child)->icsk_af_ops->rebuild_header(child);
+	tcp_init_congestion_control(child);
+	tcp_mtup_init(child);
+	tcp_init_metrics(child);
+	tcp_init_buffer_space(child);
+
+	/* Queue the data carried in the SYN packet. We need to first
+	 * bump skb's refcnt because the caller will attempt to free it.
+	 *
+	 * XXX (TFO) - we honor a zero-payload TFO request for now,
+	 * (any reason not to?) but no need to queue the skb since
+	 * there is no data. How about SYN+FIN?
+	 */
+	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1) {
+		skb = skb_get(skb);
+		skb_dst_drop(skb);
+		__skb_pull(skb, tcp_hdr(skb)->doff * 4);
+		skb_set_owner_r(skb, child);
+		__skb_queue_tail(&child->sk_receive_queue, skb);
+		tp->syn_data_acked = 1;
+	}
+	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+	sk->sk_data_ready(sk);
+	bh_unlock_sock(child);
+	sock_put(child);
+	WARN_ON(req->sk == NULL);
+	return true;
+}
+EXPORT_SYMBOL(tcp_fastopen_create_child);
+
+static bool tcp_fastopen_queue_check(struct sock *sk)
+{
+	struct fastopen_queue *fastopenq;
+
+	/* Make sure the listener has enabled fastopen, and we don't
+	 * exceed the max # of pending TFO requests allowed before trying
+	 * to validating the cookie in order to avoid burning CPU cycles
+	 * unnecessarily.
+	 *
+	 * XXX (TFO) - The implication of checking the max_qlen before
+	 * processing a cookie request is that clients can't differentiate
+	 * between qlen overflow causing Fast Open to be disabled
+	 * temporarily vs a server not supporting Fast Open at all.
+	 */
+	fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
+	if (fastopenq == NULL || fastopenq->max_qlen == 0)
+		return false;
+
+	if (fastopenq->qlen >= fastopenq->max_qlen) {
+		struct request_sock *req1;
+		spin_lock(&fastopenq->lock);
+		req1 = fastopenq->rskq_rst_head;
+		if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
+			spin_unlock(&fastopenq->lock);
+			NET_INC_STATS_BH(sock_net(sk),
+					 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
+			return false;
+		}
+		fastopenq->rskq_rst_head = req1->dl_next;
+		fastopenq->qlen--;
+		spin_unlock(&fastopenq->lock);
+		reqsk_free(req1);
+	}
+	return true;
+}
+
+/* Returns true if we should perform Fast Open on the SYN. The cookie (foc)
+ * may be updated and return the client in the SYN-ACK later. E.g., Fast Open
+ * cookie request (foc->len == 0).
+ */
+bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
+		      struct request_sock *req,
+		      struct tcp_fastopen_cookie *foc,
+		      struct dst_entry *dst)
+{
+	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
+	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
+
+	if (!((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) &&
+	      (syn_data || foc->len >= 0) &&
+	      tcp_fastopen_queue_check(sk))) {
+		foc->len = -1;
+		return false;
+	}
+
+	if (syn_data && (sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD))
+		goto fastopen;
+
+	if (tcp_fastopen_cookie_gen(req, skb, &valid_foc) &&
+	    foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
+	    foc->len == valid_foc.len &&
+	    !memcmp(foc->val, valid_foc.val, foc->len)) {
+		/* Cookie is valid. Create a (full) child socket to accept
+		 * the data in SYN before returning a SYN-ACK to ack the
+		 * data. If we fail to create the socket, fall back and
+		 * ack the ISN only but includes the same cookie.
+		 *
+		 * Note: Data-less SYN with valid cookie is allowed to send
+		 * data in SYN_RECV state.
+		 */
+fastopen:
+		if (tcp_fastopen_create_child(sk, skb, dst, req)) {
+			foc->len = -1;
+			NET_INC_STATS_BH(sock_net(sk),
+					 LINUX_MIB_TCPFASTOPENPASSIVE);
+			return true;
+		}
+	}
+
+	NET_INC_STATS_BH(sock_net(sk), foc->len ?
+			 LINUX_MIB_TCPFASTOPENPASSIVEFAIL :
+			 LINUX_MIB_TCPFASTOPENCOOKIEREQD);
+	*foc = valid_foc;
+	return false;
 }
+EXPORT_SYMBOL(tcp_try_fastopen);
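For the IPv6 branch above, the cookie is effectively a two-block CBC-MAC: AES(saddr) is XORed with daddr and encrypted again. A standalone C sketch of just the chaining structure, with a stand-in for the AES block so it compiles anywhere (the kernel of course uses crypto_cipher_encrypt_one(), and the toy PRP below is not AES):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for one 16-byte block encryption; any pseudorandom
 * permutation would do for demonstrating the chaining.
 */
static void blk_encrypt(uint8_t out[16], const uint8_t in[16])
{
	for (int i = 0; i < 16; i++)
		out[i] = (uint8_t)((in[i] ^ 0x5c) + i);	/* toy PRP, not AES */
}

/* CBC-MAC over the two 16-byte halves of an IPv6 (saddr, daddr) pair,
 * mirroring tcp_fastopen_cookie_gen()'s IPv6 branch:
 * cookie = E(E(saddr) ^ daddr).
 */
static void cookie_gen_v6(uint8_t cookie[16],
			  const uint8_t saddr[16], const uint8_t daddr[16])
{
	uint8_t buf[16];

	blk_encrypt(buf, saddr);		/* first block */
	for (int i = 0; i < 16; i++)
		buf[i] ^= daddr[i];		/* chain in the second block */
	blk_encrypt(cookie, buf);
}

int main(void)
{
	uint8_t s[16] = { 0x20, 0x01, 0x0d, 0xb8 };		/* 2001:db8:: */
	uint8_t d[16] = { 0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 1 };
	uint8_t c[16];

	cookie_gen_v6(c, s, d);
	printf("cookie[0..3] = %02x %02x %02x %02x\n", c[0], c[1], c[2], c[3]);
	return 0;
}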
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 8b9e7bad77c0..1c4908280d92 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -109,12 +109,12 @@ static void hstcp_init(struct sock *sk)
 	tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
 }
 
-static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
+static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct hstcp *ca = inet_csk_ca(sk);
 
-	if (!tcp_is_cwnd_limited(sk, in_flight))
+	if (!tcp_is_cwnd_limited(sk))
 		return;
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 4a194acfd923..031361311a8b 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -227,12 +227,12 @@ static u32 htcp_recalc_ssthresh(struct sock *sk)
 	return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
 }
 
-static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
+static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct htcp *ca = inet_csk_ca(sk);
 
-	if (!tcp_is_cwnd_limited(sk, in_flight))
+	if (!tcp_is_cwnd_limited(sk))
 		return;
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index a15a799bf768..d8f8f05a4951 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -87,8 +87,7 @@ static inline u32 hybla_fraction(u32 odds)
  *     o Give cwnd a new value based on the model proposed
  *     o remember increments <1
  */
-static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-			     u32 in_flight)
+static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct hybla *ca = inet_csk_ca(sk);
@@ -101,11 +100,11 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked,
 		ca->minrtt_us = tp->srtt_us;
 	}
 
-	if (!tcp_is_cwnd_limited(sk, in_flight))
+	if (!tcp_is_cwnd_limited(sk))
 		return;
 
 	if (!ca->hybla_en) {
-		tcp_reno_cong_avoid(sk, ack, acked, in_flight);
+		tcp_reno_cong_avoid(sk, ack, acked);
 		return;
 	}
 
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index 863d105e3015..5999b3972e64 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -255,8 +255,7 @@ static void tcp_illinois_state(struct sock *sk, u8 new_state)
255/* 255/*
256 * Increase window in response to successful acknowledgment. 256 * Increase window in response to successful acknowledgment.
257 */ 257 */
258static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked, 258static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
259 u32 in_flight)
260{ 259{
261 struct tcp_sock *tp = tcp_sk(sk); 260 struct tcp_sock *tp = tcp_sk(sk);
262 struct illinois *ca = inet_csk_ca(sk); 261 struct illinois *ca = inet_csk_ca(sk);
@@ -265,7 +264,7 @@ static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked,
265 update_params(sk); 264 update_params(sk);
266 265
267 /* RFC2861 only increase cwnd if fully utilized */ 266 /* RFC2861 only increase cwnd if fully utilized */
268 if (!tcp_is_cwnd_limited(sk, in_flight)) 267 if (!tcp_is_cwnd_limited(sk))
269 return; 268 return;
270 269
271 /* In slow start */ 270 /* In slow start */
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3a26b3b23f16..40661fc1e233 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1167,7 +1167,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
1167 } 1167 }
1168 pkt_len = new_len; 1168 pkt_len = new_len;
1169 } 1169 }
1170 err = tcp_fragment(sk, skb, pkt_len, mss); 1170 err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC);
1171 if (err < 0) 1171 if (err < 0)
1172 return err; 1172 return err;
1173 } 1173 }
@@ -2241,7 +2241,8 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
2241 break; 2241 break;
2242 2242
2243 mss = skb_shinfo(skb)->gso_size; 2243 mss = skb_shinfo(skb)->gso_size;
2244 err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, mss); 2244 err = tcp_fragment(sk, skb, (packets - oldcnt) * mss,
2245 mss, GFP_ATOMIC);
2245 if (err < 0) 2246 if (err < 0)
2246 break; 2247 break;
2247 cnt = packets; 2248 cnt = packets;
@@ -2937,10 +2938,11 @@ static void tcp_synack_rtt_meas(struct sock *sk, const u32 synack_stamp)
2937 tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt_us, -1L); 2938 tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt_us, -1L);
2938} 2939}
2939 2940
2940static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight) 2941static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
2941{ 2942{
2942 const struct inet_connection_sock *icsk = inet_csk(sk); 2943 const struct inet_connection_sock *icsk = inet_csk(sk);
2943 icsk->icsk_ca_ops->cong_avoid(sk, ack, acked, in_flight); 2944
2945 icsk->icsk_ca_ops->cong_avoid(sk, ack, acked);
2944 tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp; 2946 tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
2945} 2947}
2946 2948
@@ -3363,7 +3365,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3363 u32 ack_seq = TCP_SKB_CB(skb)->seq; 3365 u32 ack_seq = TCP_SKB_CB(skb)->seq;
3364 u32 ack = TCP_SKB_CB(skb)->ack_seq; 3366 u32 ack = TCP_SKB_CB(skb)->ack_seq;
3365 bool is_dupack = false; 3367 bool is_dupack = false;
3366 u32 prior_in_flight;
3367 u32 prior_fackets; 3368 u32 prior_fackets;
3368 int prior_packets = tp->packets_out; 3369 int prior_packets = tp->packets_out;
3369 const int prior_unsacked = tp->packets_out - tp->sacked_out; 3370 const int prior_unsacked = tp->packets_out - tp->sacked_out;
@@ -3396,7 +3397,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3396 flag |= FLAG_SND_UNA_ADVANCED; 3397 flag |= FLAG_SND_UNA_ADVANCED;
3397 3398
3398 prior_fackets = tp->fackets_out; 3399 prior_fackets = tp->fackets_out;
3399 prior_in_flight = tcp_packets_in_flight(tp);
3400 3400
3401 /* ts_recent update must be made after we are sure that the packet 3401 /* ts_recent update must be made after we are sure that the packet
3402 * is in window. 3402 * is in window.
@@ -3451,7 +3451,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3451 3451
3452 /* Advance cwnd if state allows */ 3452 /* Advance cwnd if state allows */
3453 if (tcp_may_raise_cwnd(sk, flag)) 3453 if (tcp_may_raise_cwnd(sk, flag))
3454 tcp_cong_avoid(sk, ack, acked, prior_in_flight); 3454 tcp_cong_avoid(sk, ack, acked);
3455 3455
3456 if (tcp_ack_is_dubious(sk, flag)) { 3456 if (tcp_ack_is_dubious(sk, flag)) {
3457 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); 3457 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
@@ -4702,28 +4702,6 @@ static int tcp_prune_queue(struct sock *sk)
4702 return -1; 4702 return -1;
4703} 4703}
4704 4704
4705/* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
4706 * As additional protections, we do not touch cwnd in retransmission phases,
4707 * and if application hit its sndbuf limit recently.
4708 */
4709void tcp_cwnd_application_limited(struct sock *sk)
4710{
4711 struct tcp_sock *tp = tcp_sk(sk);
4712
4713 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
4714 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
4715 /* Limited by application or receiver window. */
4716 u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
4717 u32 win_used = max(tp->snd_cwnd_used, init_win);
4718 if (win_used < tp->snd_cwnd) {
4719 tp->snd_ssthresh = tcp_current_ssthresh(sk);
4720 tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
4721 }
4722 tp->snd_cwnd_used = 0;
4723 }
4724 tp->snd_cwnd_stamp = tcp_time_stamp;
4725}
4726
4727static bool tcp_should_expand_sndbuf(const struct sock *sk) 4705static bool tcp_should_expand_sndbuf(const struct sock *sk)
4728{ 4706{
4729 const struct tcp_sock *tp = tcp_sk(sk); 4707 const struct tcp_sock *tp = tcp_sk(sk);
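
Two threads run through this hunk: tcp_fragment() grows an explicit gfp_t so each caller states its allocation context instead of the helper hard-coding GFP_ATOMIC, and tcp_cwnd_application_limited() moves to tcp_output.c next to the transmit-side cwnd tracking that now feeds tcp_is_cwnd_limited(). A toy illustration of why the flag is threaded through; the types and names are user-space stand-ins, not kernel API.

#include <stdlib.h>

typedef unsigned int gfp_t;
#define GFP_ATOMIC 0x1u   /* caller cannot sleep (ACK/softirq path) */
#define GFP_KERNEL 0x2u   /* caller may sleep and reclaim memory */

/* A kernel allocator may only block or reclaim when the caller passed
 * a sleeping context, so a fragmenting helper must not pick the
 * context itself.
 */
static int fragment_sketch(void **buff, size_t nsize, gfp_t gfp)
{
        (void)gfp;              /* user-space stand-in ignores the flag */
        *buff = malloc(nsize);
        return *buff ? 0 : -12; /* -ENOMEM */
}
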
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 438f3b95143d..77cccda1ad0c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -336,8 +336,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
336 const int code = icmp_hdr(icmp_skb)->code; 336 const int code = icmp_hdr(icmp_skb)->code;
337 struct sock *sk; 337 struct sock *sk;
338 struct sk_buff *skb; 338 struct sk_buff *skb;
339 struct request_sock *req; 339 struct request_sock *fastopen;
340 __u32 seq; 340 __u32 seq, snd_una;
341 __u32 remaining; 341 __u32 remaining;
342 int err; 342 int err;
343 struct net *net = dev_net(icmp_skb->dev); 343 struct net *net = dev_net(icmp_skb->dev);
@@ -378,12 +378,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
378 378
379 icsk = inet_csk(sk); 379 icsk = inet_csk(sk);
380 tp = tcp_sk(sk); 380 tp = tcp_sk(sk);
381 req = tp->fastopen_rsk;
382 seq = ntohl(th->seq); 381 seq = ntohl(th->seq);
382 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
383 fastopen = tp->fastopen_rsk;
384 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
383 if (sk->sk_state != TCP_LISTEN && 385 if (sk->sk_state != TCP_LISTEN &&
384 !between(seq, tp->snd_una, tp->snd_nxt) && 386 !between(seq, snd_una, tp->snd_nxt)) {
385 (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
386 /* For a Fast Open socket, allow seq to be snt_isn. */
387 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); 387 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
388 goto out; 388 goto out;
389 } 389 }
@@ -426,11 +426,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
426 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH) 426 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
427 break; 427 break;
428 if (seq != tp->snd_una || !icsk->icsk_retransmits || 428 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
429 !icsk->icsk_backoff) 429 !icsk->icsk_backoff || fastopen)
430 break; 430 break;
431 431
432 /* XXX (TFO) - revisit the following logic for TFO */
433
434 if (sock_owned_by_user(sk)) 432 if (sock_owned_by_user(sk))
435 break; 433 break;
436 434
@@ -462,14 +460,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
462 goto out; 460 goto out;
463 } 461 }
464 462
465 /* XXX (TFO) - if it's a TFO socket and has been accepted, rather
466 * than following the TCP_SYN_RECV case and closing the socket,
467 * we ignore the ICMP error and keep trying like a fully established
468 * socket. Is this the right thing to do?
469 */
470 if (req && req->sk == NULL)
471 goto out;
472
473 switch (sk->sk_state) { 463 switch (sk->sk_state) {
474 struct request_sock *req, **prev; 464 struct request_sock *req, **prev;
475 case TCP_LISTEN: 465 case TCP_LISTEN:
@@ -502,10 +492,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
502 goto out; 492 goto out;
503 493
504 case TCP_SYN_SENT: 494 case TCP_SYN_SENT:
505 case TCP_SYN_RECV: /* Cannot happen. 495 case TCP_SYN_RECV:
506 It can f.e. if SYNs crossed, 496 /* Only in fast or simultaneous open. If a fast open socket
507 or Fast Open. 497 * is already accepted it is treated as a connected one below.
508 */ 498 */
499 if (fastopen && fastopen->sk == NULL)
500 break;
501
509 if (!sock_owned_by_user(sk)) { 502 if (!sock_owned_by_user(sk)) {
510 sk->sk_err = err; 503 sk->sk_err = err;
511 504
@@ -822,7 +815,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
822 */ 815 */
823static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, 816static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
824 struct request_sock *req, 817 struct request_sock *req,
825 u16 queue_mapping) 818 u16 queue_mapping,
819 struct tcp_fastopen_cookie *foc)
826{ 820{
827 const struct inet_request_sock *ireq = inet_rsk(req); 821 const struct inet_request_sock *ireq = inet_rsk(req);
828 struct flowi4 fl4; 822 struct flowi4 fl4;
@@ -833,7 +827,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
833 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL) 827 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
834 return -1; 828 return -1;
835 829
836 skb = tcp_make_synack(sk, dst, req, NULL); 830 skb = tcp_make_synack(sk, dst, req, foc);
837 831
838 if (skb) { 832 if (skb) {
839 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr); 833 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
@@ -852,7 +846,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
852 846
853static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req) 847static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
854{ 848{
855 int res = tcp_v4_send_synack(sk, NULL, req, 0); 849 int res = tcp_v4_send_synack(sk, NULL, req, 0, NULL);
856 850
857 if (!res) { 851 if (!res) {
858 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); 852 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
@@ -1260,187 +1254,6 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1260}; 1254};
1261#endif 1255#endif
1262 1256
1263static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
1264 struct request_sock *req,
1265 struct tcp_fastopen_cookie *foc,
1266 struct tcp_fastopen_cookie *valid_foc)
1267{
1268 bool skip_cookie = false;
1269 struct fastopen_queue *fastopenq;
1270
1271 if (likely(!fastopen_cookie_present(foc))) {
1272 /* See include/net/tcp.h for the meaning of these knobs */
1273 if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
1274 ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
1275 (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
1276 skip_cookie = true; /* no cookie to validate */
1277 else
1278 return false;
1279 }
1280 fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
1281 /* A FO option is present; bump the counter. */
1282 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);
1283
1284 /* Make sure the listener has enabled fastopen, and we don't
1285 * exceed the max # of pending TFO requests allowed before trying
1286 * to validate the cookie in order to avoid burning CPU cycles
1287 * unnecessarily.
1288 *
1289 * XXX (TFO) - The implication of checking the max_qlen before
1290 * processing a cookie request is that clients can't differentiate
1291 * between qlen overflow causing Fast Open to be disabled
1292 * temporarily vs a server not supporting Fast Open at all.
1293 */
1294 if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
1295 fastopenq == NULL || fastopenq->max_qlen == 0)
1296 return false;
1297
1298 if (fastopenq->qlen >= fastopenq->max_qlen) {
1299 struct request_sock *req1;
1300 spin_lock(&fastopenq->lock);
1301 req1 = fastopenq->rskq_rst_head;
1302 if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
1303 spin_unlock(&fastopenq->lock);
1304 NET_INC_STATS_BH(sock_net(sk),
1305 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
1306 /* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
1307 foc->len = -1;
1308 return false;
1309 }
1310 fastopenq->rskq_rst_head = req1->dl_next;
1311 fastopenq->qlen--;
1312 spin_unlock(&fastopenq->lock);
1313 reqsk_free(req1);
1314 }
1315 if (skip_cookie) {
1316 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1317 return true;
1318 }
1319
1320 if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
1321 if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
1322 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
1323 ip_hdr(skb)->daddr, valid_foc);
1324 if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
1325 memcmp(&foc->val[0], &valid_foc->val[0],
1326 TCP_FASTOPEN_COOKIE_SIZE) != 0)
1327 return false;
1328 valid_foc->len = -1;
1329 }
1330 /* Acknowledge the data received from the peer. */
1331 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1332 return true;
1333 } else if (foc->len == 0) { /* Client requesting a cookie */
1334 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
1335 ip_hdr(skb)->daddr, valid_foc);
1336 NET_INC_STATS_BH(sock_net(sk),
1337 LINUX_MIB_TCPFASTOPENCOOKIEREQD);
1338 } else {
1339 /* Client sent a cookie with wrong size. Treat it
1340 * the same as invalid and return a valid one.
1341 */
1342 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
1343 ip_hdr(skb)->daddr, valid_foc);
1344 }
1345 return false;
1346}
1347
1348static int tcp_v4_conn_req_fastopen(struct sock *sk,
1349 struct sk_buff *skb,
1350 struct sk_buff *skb_synack,
1351 struct request_sock *req)
1352{
1353 struct tcp_sock *tp = tcp_sk(sk);
1354 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
1355 const struct inet_request_sock *ireq = inet_rsk(req);
1356 struct sock *child;
1357 int err;
1358
1359 req->num_retrans = 0;
1360 req->num_timeout = 0;
1361 req->sk = NULL;
1362
1363 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
1364 if (child == NULL) {
1365 NET_INC_STATS_BH(sock_net(sk),
1366 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1367 kfree_skb(skb_synack);
1368 return -1;
1369 }
1370 err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
1371 ireq->ir_rmt_addr, ireq->opt);
1372 err = net_xmit_eval(err);
1373 if (!err)
1374 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1375 /* XXX (TFO) - is it ok to ignore error and continue? */
1376
1377 spin_lock(&queue->fastopenq->lock);
1378 queue->fastopenq->qlen++;
1379 spin_unlock(&queue->fastopenq->lock);
1380
1381 /* Initialize the child socket. Have to fix some values to take
1382 * into account the child is a Fast Open socket and is created
1383 * only out of the bits carried in the SYN packet.
1384 */
1385 tp = tcp_sk(child);
1386
1387 tp->fastopen_rsk = req;
1388 /* Do a hold on the listener sk so that if the listener is being
1389 * closed, the child that has been accepted can live on and still
1390 * access listen_lock.
1391 */
1392 sock_hold(sk);
1393 tcp_rsk(req)->listener = sk;
1394
1395 /* RFC1323: The window in SYN & SYN/ACK segments is never
1396 * scaled. So correct it appropriately.
1397 */
1398 tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
1399
1400 /* Activate the retrans timer so that SYNACK can be retransmitted.
1401 * The request socket is not added to the SYN table of the parent
1402 * because it's been added to the accept queue directly.
1403 */
1404 inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
1405 TCP_TIMEOUT_INIT, TCP_RTO_MAX);
1406
1407 /* Add the child socket directly into the accept queue */
1408 inet_csk_reqsk_queue_add(sk, req, child);
1409
1410 /* Now finish processing the fastopen child socket. */
1411 inet_csk(child)->icsk_af_ops->rebuild_header(child);
1412 tcp_init_congestion_control(child);
1413 tcp_mtup_init(child);
1414 tcp_init_metrics(child);
1415 tcp_init_buffer_space(child);
1416
1417 /* Queue the data carried in the SYN packet. We need to first
1418 * bump skb's refcnt because the caller will attempt to free it.
1419 *
1420 * XXX (TFO) - we honor a zero-payload TFO request for now.
1421 * (Any reason not to?)
1422 */
1423 if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
1424 /* Don't queue the skb if there is no payload in SYN.
1425 * XXX (TFO) - How about SYN+FIN?
1426 */
1427 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1428 } else {
1429 skb = skb_get(skb);
1430 skb_dst_drop(skb);
1431 __skb_pull(skb, tcp_hdr(skb)->doff * 4);
1432 skb_set_owner_r(skb, child);
1433 __skb_queue_tail(&child->sk_receive_queue, skb);
1434 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1435 tp->syn_data_acked = 1;
1436 }
1437 sk->sk_data_ready(sk);
1438 bh_unlock_sock(child);
1439 sock_put(child);
1440 WARN_ON(req->sk == NULL);
1441 return 0;
1442}
1443
1444int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) 1257int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1445{ 1258{
1446 struct tcp_options_received tmp_opt; 1259 struct tcp_options_received tmp_opt;
@@ -1451,12 +1264,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1451 __be32 saddr = ip_hdr(skb)->saddr; 1264 __be32 saddr = ip_hdr(skb)->saddr;
1452 __be32 daddr = ip_hdr(skb)->daddr; 1265 __be32 daddr = ip_hdr(skb)->daddr;
1453 __u32 isn = TCP_SKB_CB(skb)->when; 1266 __u32 isn = TCP_SKB_CB(skb)->when;
1454 bool want_cookie = false; 1267 bool want_cookie = false, fastopen;
1455 struct flowi4 fl4; 1268 struct flowi4 fl4;
1456 struct tcp_fastopen_cookie foc = { .len = -1 }; 1269 struct tcp_fastopen_cookie foc = { .len = -1 };
1457 struct tcp_fastopen_cookie valid_foc = { .len = -1 }; 1270 int err;
1458 struct sk_buff *skb_synack;
1459 int do_fastopen;
1460 1271
1461 /* Never answer SYNs sent to broadcast or multicast */ 1272
1462 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) 1273 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1507,6 +1318,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1507 ireq->ir_rmt_addr = saddr; 1318 ireq->ir_rmt_addr = saddr;
1508 ireq->no_srccheck = inet_sk(sk)->transparent; 1319 ireq->no_srccheck = inet_sk(sk)->transparent;
1509 ireq->opt = tcp_v4_save_options(skb); 1320 ireq->opt = tcp_v4_save_options(skb);
1321 ireq->ir_mark = inet_request_mark(sk, skb);
1510 1322
1511 if (security_inet_conn_request(sk, skb, req)) 1323 if (security_inet_conn_request(sk, skb, req))
1512 goto drop_and_free; 1324 goto drop_and_free;
@@ -1555,52 +1367,24 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1555 1367
1556 isn = tcp_v4_init_sequence(skb); 1368 isn = tcp_v4_init_sequence(skb);
1557 } 1369 }
1558 tcp_rsk(req)->snt_isn = isn; 1370 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
1559
1560 if (dst == NULL) {
1561 dst = inet_csk_route_req(sk, &fl4, req);
1562 if (dst == NULL)
1563 goto drop_and_free;
1564 }
1565 do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);
1566
1567 /* We don't call tcp_v4_send_synack() directly because we need
1568 * to make sure a child socket can be created successfully before
1569 * sending back synack!
1570 *
1571 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
1572 * (or better yet, call tcp_send_synack() in the child context
1573 * directly, but will have to fix bunch of other code first)
1574 * after syn_recv_sock() except one will need to first fix the
1575 * latter to remove its dependency on the current implementation
1576 * of tcp_v4_send_synack()->tcp_select_initial_window().
1577 */
1578 skb_synack = tcp_make_synack(sk, dst, req,
1579 fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
1580
1581 if (skb_synack) {
1582 __tcp_v4_send_check(skb_synack, ireq->ir_loc_addr, ireq->ir_rmt_addr);
1583 skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
1584 } else
1585 goto drop_and_free; 1371 goto drop_and_free;
1586 1372
1587 if (likely(!do_fastopen)) { 1373 tcp_rsk(req)->snt_isn = isn;
1588 int err; 1374 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1589 err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr, 1375 tcp_openreq_init_rwin(req, sk, dst);
1590 ireq->ir_rmt_addr, ireq->opt); 1376 fastopen = !want_cookie &&
1591 err = net_xmit_eval(err); 1377 tcp_try_fastopen(sk, skb, req, &foc, dst);
1378 err = tcp_v4_send_synack(sk, dst, req,
1379 skb_get_queue_mapping(skb), &foc);
1380 if (!fastopen) {
1592 if (err || want_cookie) 1381 if (err || want_cookie)
1593 goto drop_and_free; 1382 goto drop_and_free;
1594 1383
1595 tcp_rsk(req)->snt_synack = tcp_time_stamp; 1384 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1596 tcp_rsk(req)->listener = NULL; 1385 tcp_rsk(req)->listener = NULL;
1597 /* Add the request_sock to the SYN table */
1598 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); 1386 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1599 if (fastopen_cookie_present(&foc) && foc.len != 0) 1387 }
1600 NET_INC_STATS_BH(sock_net(sk),
1601 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1602 } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req))
1603 goto drop_and_free;
1604 1388
1605 return 0; 1389 return 0;
1606 1390
@@ -1744,28 +1528,6 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1744 return sk; 1528 return sk;
1745} 1529}
1746 1530
1747static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1748{
1749 const struct iphdr *iph = ip_hdr(skb);
1750
1751 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1752 if (!tcp_v4_check(skb->len, iph->saddr,
1753 iph->daddr, skb->csum)) {
1754 skb->ip_summed = CHECKSUM_UNNECESSARY;
1755 return 0;
1756 }
1757 }
1758
1759 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1760 skb->len, IPPROTO_TCP, 0);
1761
1762 if (skb->len <= 76) {
1763 return __skb_checksum_complete(skb);
1764 }
1765 return 0;
1766}
1767
1768
1769/* The socket must have its spinlock held when we get 1531
1770 * here. 1532 * here.
1771 * 1533 *
@@ -1960,7 +1722,8 @@ int tcp_v4_rcv(struct sk_buff *skb)
1960 * Packet length and doff are validated by header prediction, 1722 * Packet length and doff are validated by header prediction,
1961 * provided case of th->doff==0 is eliminated. 1723 * provided case of th->doff==0 is eliminated.
1962 * So, we defer the checks. */ 1724 * So, we defer the checks. */
1963 if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb)) 1725
1726 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1964 goto csum_error; 1727 goto csum_error;
1965 1728
1966 th = tcp_hdr(skb); 1729 th = tcp_hdr(skb);
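
The removed tcp_v4_checksum_init() was one of several per-protocol copies of the same logic; skb_checksum_init() fed with inet_compute_pseudo now provides it generically for TCP, and the udp.c hunk below switches udp4_csum_init() to the zero-check variant of the same helper. The arithmetic being shared is the plain RFC 1071 one's-complement sum over the IPv4 pseudo header, sketched standalone here with byte-order details glossed over.

#include <stdint.h>

/* Fold a 32-bit one's-complement accumulator down to 16 bits. */
static uint16_t csum_fold16(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* Pseudo-header sum: source and destination address, protocol and
 * transport length, as prescribed for TCP and UDP checksums.
 */
static uint32_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
                               uint8_t proto, uint16_t len)
{
        uint32_t sum = 0;

        sum += saddr >> 16;
        sum += saddr & 0xffff;
        sum += daddr >> 16;
        sum += daddr & 0xffff;
        sum += proto;
        sum += len;
        return sum;
}
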
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index c9aecae31327..1e70fa8fa793 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -115,13 +115,12 @@ static void tcp_lp_init(struct sock *sk)
115 * Will only call newReno CA when away from inference. 115 * Will only call newReno CA when away from inference.
116 * From TCP-LP's paper, this will be handled in additive increase. 116 * From TCP-LP's paper, this will be handled in additive increase.
117 */ 117 */
118static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked, 118static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
119 u32 in_flight)
120{ 119{
121 struct lp *lp = inet_csk_ca(sk); 120 struct lp *lp = inet_csk_ca(sk);
122 121
123 if (!(lp->flag & LP_WITHIN_INF)) 122 if (!(lp->flag & LP_WITHIN_INF))
124 tcp_reno_cong_avoid(sk, ack, acked, in_flight); 123 tcp_reno_cong_avoid(sk, ack, acked);
125} 124}
126 125
127/** 126/**
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index dcaf72f10216..4fe041805989 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -1159,10 +1159,7 @@ static void __net_exit tcp_net_metrics_exit(struct net *net)
1159 tm = next; 1159 tm = next;
1160 } 1160 }
1161 } 1161 }
1162 if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash)) 1162 kvfree(net->ipv4.tcp_metrics_hash);
1163 vfree(net->ipv4.tcp_metrics_hash);
1164 else
1165 kfree(net->ipv4.tcp_metrics_hash);
1166} 1163}
1167 1164
1168static __net_initdata struct pernet_operations tcp_net_metrics_ops = { 1165static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
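
kvfree() absorbs the is_vmalloc_addr() branch that callers like this used to open-code. Conceptually it is no more than the following sketch; the three helpers are real kernel functions, the body is simplified.

static void kvfree_sketch(const void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);    /* page-table backed allocation */
        else
                kfree(addr);    /* slab allocation */
}
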
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 05c1b155251d..e68e0d4af6c9 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -362,6 +362,37 @@ void tcp_twsk_destructor(struct sock *sk)
362} 362}
363EXPORT_SYMBOL_GPL(tcp_twsk_destructor); 363EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
364 364
365void tcp_openreq_init_rwin(struct request_sock *req,
366 struct sock *sk, struct dst_entry *dst)
367{
368 struct inet_request_sock *ireq = inet_rsk(req);
369 struct tcp_sock *tp = tcp_sk(sk);
370 __u8 rcv_wscale;
371 int mss = dst_metric_advmss(dst);
372
373 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
374 mss = tp->rx_opt.user_mss;
375
376 /* Set this up on the first call only */
377 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
378
379 /* limit the window selection if the user enforces a smaller rx buffer */
380 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
381 (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
382 req->window_clamp = tcp_full_space(sk);
383
384 /* tcp_full_space because it is guaranteed to be the first packet */
385 tcp_select_initial_window(tcp_full_space(sk),
386 mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
387 &req->rcv_wnd,
388 &req->window_clamp,
389 ireq->wscale_ok,
390 &rcv_wscale,
391 dst_metric(dst, RTAX_INITRWND));
392 ireq->rcv_wscale = rcv_wscale;
393}
394EXPORT_SYMBOL(tcp_openreq_init_rwin);
395
365static inline void TCP_ECN_openreq_child(struct tcp_sock *tp, 396static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
366 struct request_sock *req) 397 struct request_sock *req)
367{ 398{
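
tcp_openreq_init_rwin() lifts the receive-window setup out of tcp_make_synack() (the matching removal is in the tcp_output.c hunk below) so the Fast Open path can size the window before the child socket starts transmitting. The clamp order matters: the advertised MSS is capped by any user-set MSS first, then the window clamp is capped by the receive buffer when the user has locked it. A standalone sketch of that clamp, with illustrative names:

static unsigned int clamp_req_window(unsigned int window_clamp,
                                     unsigned int full_space,
                                     int rcvbuf_locked)
{
        /* SOCK_RCVBUF_LOCK analogue: a pinned receive buffer bounds
         * the window we may advertise.
         */
        if (rcvbuf_locked &&
            (window_clamp > full_space || window_clamp == 0))
                return full_space;
        return window_clamp;
}
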
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index b92b81718ca4..4e86c59ec7f7 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -57,10 +57,12 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
57 SKB_GSO_TCP_ECN | 57 SKB_GSO_TCP_ECN |
58 SKB_GSO_TCPV6 | 58 SKB_GSO_TCPV6 |
59 SKB_GSO_GRE | 59 SKB_GSO_GRE |
60 SKB_GSO_GRE_CSUM |
60 SKB_GSO_IPIP | 61 SKB_GSO_IPIP |
61 SKB_GSO_SIT | 62 SKB_GSO_SIT |
62 SKB_GSO_MPLS | 63 SKB_GSO_MPLS |
63 SKB_GSO_UDP_TUNNEL | 64 SKB_GSO_UDP_TUNNEL |
65 SKB_GSO_UDP_TUNNEL_CSUM |
64 0) || 66 0) ||
65 !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))) 67 !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
66 goto out; 68 goto out;
@@ -97,9 +99,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
97 th->check = newcheck; 99 th->check = newcheck;
98 100
99 if (skb->ip_summed != CHECKSUM_PARTIAL) 101 if (skb->ip_summed != CHECKSUM_PARTIAL)
100 th->check = 102 th->check = gso_make_checksum(skb, ~th->check);
101 csum_fold(csum_partial(skb_transport_header(skb),
102 thlen, skb->csum));
103 103
104 seq += mss; 104 seq += mss;
105 if (copy_destructor) { 105 if (copy_destructor) {
@@ -133,8 +133,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
133 th->check = ~csum_fold((__force __wsum)((__force u32)th->check + 133 th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
134 (__force u32)delta)); 134 (__force u32)delta));
135 if (skb->ip_summed != CHECKSUM_PARTIAL) 135 if (skb->ip_summed != CHECKSUM_PARTIAL)
136 th->check = csum_fold(csum_partial(skb_transport_header(skb), 136 th->check = gso_make_checksum(skb, ~th->check);
137 thlen, skb->csum));
138out: 137out:
139 return segs; 138 return segs;
140} 139}
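
gso_make_checksum() replaces the open-coded csum_partial() calls so segmentation can also produce checksums for encapsulated (tunnel) headers. The per-segment adjustment of th->check just above it is ordinary incremental checksum arithmetic in the spirit of RFC 1624; a standalone sketch, modulo one's-complement edge cases:

#include <stdint.h>

static uint16_t fold(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* Mirrors "th->check = ~csum_fold(check + delta)": csum_fold() both
 * folds and complements, so the outer ~ cancels the complement and the
 * stored value is simply re-folded with the length delta.
 */
static uint16_t update_check(uint16_t check, uint32_t delta)
{
        return fold((uint32_t)check + delta);
}
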
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 12d6016bdd9a..ad7549f1d0ad 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -627,7 +627,7 @@ static unsigned int tcp_synack_options(struct sock *sk,
627 if (unlikely(!ireq->tstamp_ok)) 627 if (unlikely(!ireq->tstamp_ok))
628 remaining -= TCPOLEN_SACKPERM_ALIGNED; 628 remaining -= TCPOLEN_SACKPERM_ALIGNED;
629 } 629 }
630 if (foc != NULL) { 630 if (foc != NULL && foc->len >= 0) {
631 u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len; 631 u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
632 need = (need + 3) & ~3U; /* Align to 32 bits */ 632 need = (need + 3) & ~3U; /* Align to 32 bits */
633 if (remaining >= need) { 633 if (remaining >= need) {
@@ -878,15 +878,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
878 BUG_ON(!skb || !tcp_skb_pcount(skb)); 878 BUG_ON(!skb || !tcp_skb_pcount(skb));
879 879
880 if (clone_it) { 880 if (clone_it) {
881 const struct sk_buff *fclone = skb + 1;
882
883 skb_mstamp_get(&skb->skb_mstamp); 881 skb_mstamp_get(&skb->skb_mstamp);
884 882
885 if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
886 fclone->fclone == SKB_FCLONE_CLONE))
887 NET_INC_STATS(sock_net(sk),
888 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
889
890 if (unlikely(skb_cloned(skb))) 883 if (unlikely(skb_cloned(skb)))
891 skb = pskb_copy(skb, gfp_mask); 884 skb = pskb_copy(skb, gfp_mask);
892 else 885 else
@@ -1081,7 +1074,7 @@ static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int de
1081 * Remember, these are still headerless SKBs at this point. 1074 * Remember, these are still headerless SKBs at this point.
1082 */ 1075 */
1083int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, 1076int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
1084 unsigned int mss_now) 1077 unsigned int mss_now, gfp_t gfp)
1085{ 1078{
1086 struct tcp_sock *tp = tcp_sk(sk); 1079 struct tcp_sock *tp = tcp_sk(sk);
1087 struct sk_buff *buff; 1080 struct sk_buff *buff;
@@ -1096,11 +1089,11 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
1096 if (nsize < 0) 1089 if (nsize < 0)
1097 nsize = 0; 1090 nsize = 0;
1098 1091
1099 if (skb_unclone(skb, GFP_ATOMIC)) 1092 if (skb_unclone(skb, gfp))
1100 return -ENOMEM; 1093 return -ENOMEM;
1101 1094
1102 /* Get a new skb... force flag on. */ 1095 /* Get a new skb... force flag on. */
1103 buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC); 1096 buff = sk_stream_alloc_skb(sk, nsize, gfp);
1104 if (buff == NULL) 1097 if (buff == NULL)
1105 return -ENOMEM; /* We'll just try again later. */ 1098 return -ENOMEM; /* We'll just try again later. */
1106 1099
@@ -1387,12 +1380,43 @@ unsigned int tcp_current_mss(struct sock *sk)
1387 return mss_now; 1380 return mss_now;
1388} 1381}
1389 1382
1390/* Congestion window validation. (RFC2861) */ 1383/* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
1391static void tcp_cwnd_validate(struct sock *sk) 1384 * As additional protections, we do not touch cwnd in retransmission phases,
1385 * and if application hit its sndbuf limit recently.
1386 */
1387static void tcp_cwnd_application_limited(struct sock *sk)
1388{
1389 struct tcp_sock *tp = tcp_sk(sk);
1390
1391 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
1392 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1393 /* Limited by application or receiver window. */
1394 u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
1395 u32 win_used = max(tp->snd_cwnd_used, init_win);
1396 if (win_used < tp->snd_cwnd) {
1397 tp->snd_ssthresh = tcp_current_ssthresh(sk);
1398 tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
1399 }
1400 tp->snd_cwnd_used = 0;
1401 }
1402 tp->snd_cwnd_stamp = tcp_time_stamp;
1403}
1404
1405static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
1392{ 1406{
1393 struct tcp_sock *tp = tcp_sk(sk); 1407 struct tcp_sock *tp = tcp_sk(sk);
1394 1408
1395 if (tp->packets_out >= tp->snd_cwnd) { 1409 /* Track the maximum number of outstanding packets in each
1410 * window, and remember whether we were cwnd-limited then.
1411 */
1412 if (!before(tp->snd_una, tp->max_packets_seq) ||
1413 tp->packets_out > tp->max_packets_out) {
1414 tp->max_packets_out = tp->packets_out;
1415 tp->max_packets_seq = tp->snd_nxt;
1416 tp->is_cwnd_limited = is_cwnd_limited;
1417 }
1418
1419 if (tcp_is_cwnd_limited(sk)) {
1396 /* Network is fed fully. */ 1420 /* Network is fed fully. */
1397 tp->snd_cwnd_used = 0; 1421 tp->snd_cwnd_used = 0;
1398 tp->snd_cwnd_stamp = tcp_time_stamp; 1422 tp->snd_cwnd_stamp = tcp_time_stamp;
@@ -1601,7 +1625,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1601 1625
1602 /* All of a TSO frame must be composed of paged data. */ 1626 /* All of a TSO frame must be composed of paged data. */
1603 if (skb->len != skb->data_len) 1627 if (skb->len != skb->data_len)
1604 return tcp_fragment(sk, skb, len, mss_now); 1628 return tcp_fragment(sk, skb, len, mss_now, gfp);
1605 1629
1606 buff = sk_stream_alloc_skb(sk, 0, gfp); 1630 buff = sk_stream_alloc_skb(sk, 0, gfp);
1607 if (unlikely(buff == NULL)) 1631 if (unlikely(buff == NULL))
@@ -1644,7 +1668,8 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1644 * 1668 *
1645 * This algorithm is from John Heffner. 1669 * This algorithm is from John Heffner.
1646 */ 1670 */
1647static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) 1671static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
1672 bool *is_cwnd_limited)
1648{ 1673{
1649 struct tcp_sock *tp = tcp_sk(sk); 1674 struct tcp_sock *tp = tcp_sk(sk);
1650 const struct inet_connection_sock *icsk = inet_csk(sk); 1675 const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1708,6 +1733,9 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1708 if (!tp->tso_deferred) 1733 if (!tp->tso_deferred)
1709 tp->tso_deferred = 1 | (jiffies << 1); 1734 tp->tso_deferred = 1 | (jiffies << 1);
1710 1735
1736 if (cong_win < send_win && cong_win < skb->len)
1737 *is_cwnd_limited = true;
1738
1711 return true; 1739 return true;
1712 1740
1713send_now: 1741send_now:
@@ -1868,6 +1896,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1868 unsigned int tso_segs, sent_pkts; 1896 unsigned int tso_segs, sent_pkts;
1869 int cwnd_quota; 1897 int cwnd_quota;
1870 int result; 1898 int result;
1899 bool is_cwnd_limited = false;
1871 1900
1872 sent_pkts = 0; 1901 sent_pkts = 0;
1873 1902
@@ -1892,6 +1921,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1892 1921
1893 cwnd_quota = tcp_cwnd_test(tp, skb); 1922 cwnd_quota = tcp_cwnd_test(tp, skb);
1894 if (!cwnd_quota) { 1923 if (!cwnd_quota) {
1924 is_cwnd_limited = true;
1895 if (push_one == 2) 1925 if (push_one == 2)
1896 /* Force out a loss probe pkt. */ 1926 /* Force out a loss probe pkt. */
1897 cwnd_quota = 1; 1927 cwnd_quota = 1;
@@ -1908,7 +1938,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1908 nonagle : TCP_NAGLE_PUSH)))) 1938 nonagle : TCP_NAGLE_PUSH))))
1909 break; 1939 break;
1910 } else { 1940 } else {
1911 if (!push_one && tcp_tso_should_defer(sk, skb)) 1941 if (!push_one &&
1942 tcp_tso_should_defer(sk, skb, &is_cwnd_limited))
1912 break; 1943 break;
1913 } 1944 }
1914 1945
@@ -1975,7 +2006,7 @@ repair:
1975 /* Send one loss probe per tail loss episode. */ 2006 /* Send one loss probe per tail loss episode. */
1976 if (push_one != 2) 2007 if (push_one != 2)
1977 tcp_schedule_loss_probe(sk); 2008 tcp_schedule_loss_probe(sk);
1978 tcp_cwnd_validate(sk); 2009 tcp_cwnd_validate(sk, is_cwnd_limited);
1979 return false; 2010 return false;
1980 } 2011 }
1981 return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk)); 2012 return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
@@ -2039,6 +2070,25 @@ bool tcp_schedule_loss_probe(struct sock *sk)
2039 return true; 2070 return true;
2040} 2071}
2041 2072
2073/* Thanks to skb fast clones, we can detect if a prior transmit of
2074 * a packet is still in a qdisc or driver queue.
2075 * In this case, there is very little point in doing a retransmit!
2076 * Note: This is called from BH context only.
2077 */
2078static bool skb_still_in_host_queue(const struct sock *sk,
2079 const struct sk_buff *skb)
2080{
2081 const struct sk_buff *fclone = skb + 1;
2082
2083 if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
2084 fclone->fclone == SKB_FCLONE_CLONE)) {
2085 NET_INC_STATS_BH(sock_net(sk),
2086 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
2087 return true;
2088 }
2089 return false;
2090}
2091
2042/* When probe timeout (PTO) fires, send a new segment if one exists, else 2092/* When probe timeout (PTO) fires, send a new segment if one exists, else
2043 * retransmit the last segment. 2093 * retransmit the last segment.
2044 */ 2094 */
@@ -2064,12 +2114,16 @@ void tcp_send_loss_probe(struct sock *sk)
2064 if (WARN_ON(!skb)) 2114 if (WARN_ON(!skb))
2065 goto rearm_timer; 2115 goto rearm_timer;
2066 2116
2117 if (skb_still_in_host_queue(sk, skb))
2118 goto rearm_timer;
2119
2067 pcount = tcp_skb_pcount(skb); 2120 pcount = tcp_skb_pcount(skb);
2068 if (WARN_ON(!pcount)) 2121 if (WARN_ON(!pcount))
2069 goto rearm_timer; 2122 goto rearm_timer;
2070 2123
2071 if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) { 2124 if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
2072 if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss))) 2125 if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss,
2126 GFP_ATOMIC)))
2073 goto rearm_timer; 2127 goto rearm_timer;
2074 skb = tcp_write_queue_tail(sk); 2128 skb = tcp_write_queue_tail(sk);
2075 } 2129 }
@@ -2385,6 +2439,9 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2385 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf)) 2439 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
2386 return -EAGAIN; 2440 return -EAGAIN;
2387 2441
2442 if (skb_still_in_host_queue(sk, skb))
2443 return -EBUSY;
2444
2388 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { 2445 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
2389 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 2446 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
2390 BUG(); 2447 BUG();
@@ -2407,7 +2464,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2407 return -EAGAIN; 2464 return -EAGAIN;
2408 2465
2409 if (skb->len > cur_mss) { 2466 if (skb->len > cur_mss) {
2410 if (tcp_fragment(sk, skb, cur_mss, cur_mss)) 2467 if (tcp_fragment(sk, skb, cur_mss, cur_mss, GFP_ATOMIC))
2411 return -ENOMEM; /* We'll try again later. */ 2468 return -ENOMEM; /* We'll try again later. */
2412 } else { 2469 } else {
2413 int oldpcount = tcp_skb_pcount(skb); 2470 int oldpcount = tcp_skb_pcount(skb);
@@ -2478,7 +2535,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2478 * see tcp_input.c tcp_sacktag_write_queue(). 2535 * see tcp_input.c tcp_sacktag_write_queue().
2479 */ 2536 */
2480 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt; 2537 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
2481 } else { 2538 } else if (err != -EBUSY) {
2482 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); 2539 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
2483 } 2540 }
2484 return err; 2541 return err;
@@ -2756,27 +2813,6 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2756 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) 2813 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
2757 mss = tp->rx_opt.user_mss; 2814 mss = tp->rx_opt.user_mss;
2758 2815
2759 if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
2760 __u8 rcv_wscale;
2761 /* Set this up on the first call only */
2762 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
2763
2764 /* limit the window selection if the user enforces a smaller rx buffer */
2765 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2766 (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
2767 req->window_clamp = tcp_full_space(sk);
2768
2769 /* tcp_full_space because it is guaranteed to be the first packet */
2770 tcp_select_initial_window(tcp_full_space(sk),
2771 mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
2772 &req->rcv_wnd,
2773 &req->window_clamp,
2774 ireq->wscale_ok,
2775 &rcv_wscale,
2776 dst_metric(dst, RTAX_INITRWND));
2777 ireq->rcv_wscale = rcv_wscale;
2778 }
2779
2780 memset(&opts, 0, sizeof(opts)); 2816 memset(&opts, 0, sizeof(opts));
2781#ifdef CONFIG_SYN_COOKIES 2817#ifdef CONFIG_SYN_COOKIES
2782 if (unlikely(req->cookie_ts)) 2818 if (unlikely(req->cookie_ts))
@@ -3209,7 +3245,7 @@ int tcp_write_wakeup(struct sock *sk)
3209 skb->len > mss) { 3245 skb->len > mss) {
3210 seg_size = min(seg_size, mss); 3246 seg_size = min(seg_size, mss);
3211 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; 3247 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3212 if (tcp_fragment(sk, skb, seg_size, mss)) 3248 if (tcp_fragment(sk, skb, seg_size, mss, GFP_ATOMIC))
3213 return -1; 3249 return -1;
3214 } else if (!tcp_skb_pcount(skb)) 3250 } else if (!tcp_skb_pcount(skb))
3215 tcp_set_skb_tso_segs(sk, skb, mss); 3251 tcp_set_skb_tso_segs(sk, skb, mss);
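
Besides threading gfp through tcp_fragment(), the notable addition here is skb_still_in_host_queue(): the fast-clone test that tcp_transmit_skb() used to run purely for statistics now suppresses pointless retransmits, both for loss probes and in __tcp_retransmit_skb(), where the new -EBUSY return is deliberately not counted as LINUX_MIB_TCPRETRANSFAIL. The trick relies on an fclone pair being carved from one allocation, original first, clone second. A sketch with stand-in types:

#include <stdbool.h>

enum fclone_state { FCLONE_UNAVAILABLE, FCLONE_ORIG, FCLONE_CLONE };

/* Stand-in for struct sk_buff; only the fclone marker matters here. */
struct buf {
        enum fclone_state fclone;
};

static bool still_in_host_queue(const struct buf *skb)
{
        const struct buf *fclone = skb + 1;     /* companion clone slot */

        /* If the companion clone is marked in use, a prior transmit of
         * this data is still sitting in a qdisc or driver queue.
         */
        return skb->fclone == FCLONE_ORIG &&
               fclone->fclone == FCLONE_CLONE;
}
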
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 0ac50836da4d..8250949b8853 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -15,12 +15,11 @@
15#define TCP_SCALABLE_AI_CNT 50U 15#define TCP_SCALABLE_AI_CNT 50U
16#define TCP_SCALABLE_MD_SCALE 3 16#define TCP_SCALABLE_MD_SCALE 3
17 17
18static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked, 18static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
19 u32 in_flight)
20{ 19{
21 struct tcp_sock *tp = tcp_sk(sk); 20 struct tcp_sock *tp = tcp_sk(sk);
22 21
23 if (!tcp_is_cwnd_limited(sk, in_flight)) 22 if (!tcp_is_cwnd_limited(sk))
24 return; 23 return;
25 24
26 if (tp->snd_cwnd <= tp->snd_ssthresh) 25 if (tp->snd_cwnd <= tp->snd_ssthresh)
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 48539fff6357..9a5e05f27f4f 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -163,14 +163,13 @@ static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
163 return min(tp->snd_ssthresh, tp->snd_cwnd-1); 163 return min(tp->snd_ssthresh, tp->snd_cwnd-1);
164} 164}
165 165
166static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked, 166static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
167 u32 in_flight)
168{ 167{
169 struct tcp_sock *tp = tcp_sk(sk); 168 struct tcp_sock *tp = tcp_sk(sk);
170 struct vegas *vegas = inet_csk_ca(sk); 169 struct vegas *vegas = inet_csk_ca(sk);
171 170
172 if (!vegas->doing_vegas_now) { 171 if (!vegas->doing_vegas_now) {
173 tcp_reno_cong_avoid(sk, ack, acked, in_flight); 172 tcp_reno_cong_avoid(sk, ack, acked);
174 return; 173 return;
175 } 174 }
176 175
@@ -195,7 +194,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked,
195 /* We don't have enough RTT samples to do the Vegas 194 /* We don't have enough RTT samples to do the Vegas
196 * calculation, so we'll behave like Reno. 195 * calculation, so we'll behave like Reno.
197 */ 196 */
198 tcp_reno_cong_avoid(sk, ack, acked, in_flight); 197 tcp_reno_cong_avoid(sk, ack, acked);
199 } else { 198 } else {
200 u32 rtt, diff; 199 u32 rtt, diff;
201 u64 target_cwnd; 200 u64 target_cwnd;
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index 1b8e28fcd7e1..27b9825753d1 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -114,19 +114,18 @@ static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event)
114 tcp_veno_init(sk); 114 tcp_veno_init(sk);
115} 115}
116 116
117static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked, 117static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
118 u32 in_flight)
119{ 118{
120 struct tcp_sock *tp = tcp_sk(sk); 119 struct tcp_sock *tp = tcp_sk(sk);
121 struct veno *veno = inet_csk_ca(sk); 120 struct veno *veno = inet_csk_ca(sk);
122 121
123 if (!veno->doing_veno_now) { 122 if (!veno->doing_veno_now) {
124 tcp_reno_cong_avoid(sk, ack, acked, in_flight); 123 tcp_reno_cong_avoid(sk, ack, acked);
125 return; 124 return;
126 } 125 }
127 126
128 /* limited by applications */ 127 /* limited by applications */
129 if (!tcp_is_cwnd_limited(sk, in_flight)) 128 if (!tcp_is_cwnd_limited(sk))
130 return; 129 return;
131 130
132 /* We do the Veno calculations only if we got enough rtt samples */ 131 /* We do the Veno calculations only if we got enough rtt samples */
@@ -134,7 +133,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked,
134 /* We don't have enough rtt samples to do the Veno 133 /* We don't have enough rtt samples to do the Veno
135 * calculation, so we'll behave like Reno. 134 * calculation, so we'll behave like Reno.
136 */ 135 */
137 tcp_reno_cong_avoid(sk, ack, acked, in_flight); 136 tcp_reno_cong_avoid(sk, ack, acked);
138 } else { 137 } else {
139 u64 target_cwnd; 138 u64 target_cwnd;
140 u32 rtt; 139 u32 rtt;
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 5ede0e727945..599b79b8eac0 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -69,13 +69,12 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us)
69 tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us); 69 tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us);
70} 70}
71 71
72static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked, 72static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
73 u32 in_flight)
74{ 73{
75 struct tcp_sock *tp = tcp_sk(sk); 74 struct tcp_sock *tp = tcp_sk(sk);
76 struct yeah *yeah = inet_csk_ca(sk); 75 struct yeah *yeah = inet_csk_ca(sk);
77 76
78 if (!tcp_is_cwnd_limited(sk, in_flight)) 77 if (!tcp_is_cwnd_limited(sk))
79 return; 78 return;
80 79
81 if (tp->snd_cwnd <= tp->snd_ssthresh) 80 if (tp->snd_cwnd <= tp->snd_ssthresh)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 4468e1adc094..185ed3e59802 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -246,7 +246,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
246 do { 246 do {
247 if (low <= snum && snum <= high && 247 if (low <= snum && snum <= high &&
248 !test_bit(snum >> udptable->log, bitmap) && 248 !test_bit(snum >> udptable->log, bitmap) &&
249 !inet_is_reserved_local_port(snum)) 249 !inet_is_local_reserved_port(net, snum))
250 goto found; 250 goto found;
251 snum += rand; 251 snum += rand;
252 } while (snum != first); 252 } while (snum != first);
@@ -727,13 +727,12 @@ EXPORT_SYMBOL(udp_flush_pending_frames);
727void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst) 727void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
728{ 728{
729 struct udphdr *uh = udp_hdr(skb); 729 struct udphdr *uh = udp_hdr(skb);
730 struct sk_buff *frags = skb_shinfo(skb)->frag_list;
731 int offset = skb_transport_offset(skb); 730 int offset = skb_transport_offset(skb);
732 int len = skb->len - offset; 731 int len = skb->len - offset;
733 int hlen = len; 732 int hlen = len;
734 __wsum csum = 0; 733 __wsum csum = 0;
735 734
736 if (!frags) { 735 if (!skb_has_frag_list(skb)) {
737 /* 736 /*
738 * Only one fragment on the socket. 737 * Only one fragment on the socket.
739 */ 738 */
@@ -742,15 +741,17 @@ void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
742 uh->check = ~csum_tcpudp_magic(src, dst, len, 741 uh->check = ~csum_tcpudp_magic(src, dst, len,
743 IPPROTO_UDP, 0); 742 IPPROTO_UDP, 0);
744 } else { 743 } else {
744 struct sk_buff *frags;
745
745 /* 746 /*
746 * HW-checksum won't work as there are two or more 747 * HW-checksum won't work as there are two or more
747 * fragments on the socket so that all csums of sk_buffs 748 * fragments on the socket so that all csums of sk_buffs
748 * should be together 749 * should be together
749 */ 750 */
750 do { 751 skb_walk_frags(skb, frags) {
751 csum = csum_add(csum, frags->csum); 752 csum = csum_add(csum, frags->csum);
752 hlen -= frags->len; 753 hlen -= frags->len;
753 } while ((frags = frags->next)); 754 }
754 755
755 csum = skb_checksum(skb, offset, hlen, csum); 756 csum = skb_checksum(skb, offset, hlen, csum);
756 skb->ip_summed = CHECKSUM_NONE; 757 skb->ip_summed = CHECKSUM_NONE;
@@ -762,6 +763,43 @@ void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
762} 763}
763EXPORT_SYMBOL_GPL(udp4_hwcsum); 764EXPORT_SYMBOL_GPL(udp4_hwcsum);
764 765
766/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
767 * for the simple case like when setting the checksum for a UDP tunnel.
768 */
769void udp_set_csum(bool nocheck, struct sk_buff *skb,
770 __be32 saddr, __be32 daddr, int len)
771{
772 struct udphdr *uh = udp_hdr(skb);
773
774 if (nocheck)
775 uh->check = 0;
776 else if (skb_is_gso(skb))
777 uh->check = ~udp_v4_check(len, saddr, daddr, 0);
778 else if (skb_dst(skb) && skb_dst(skb)->dev &&
779 (skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
780
781 BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);
782
783 skb->ip_summed = CHECKSUM_PARTIAL;
784 skb->csum_start = skb_transport_header(skb) - skb->head;
785 skb->csum_offset = offsetof(struct udphdr, check);
786 uh->check = ~udp_v4_check(len, saddr, daddr, 0);
787 } else {
788 __wsum csum;
789
790 BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);
791
792 uh->check = 0;
793 csum = skb_checksum(skb, 0, len, 0);
794 uh->check = udp_v4_check(len, saddr, daddr, csum);
795 if (uh->check == 0)
796 uh->check = CSUM_MANGLED_0;
797
798 skb->ip_summed = CHECKSUM_UNNECESSARY;
799 }
800}
801EXPORT_SYMBOL(udp_set_csum);
802
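
udp_set_csum() picks one of four paths: leave the checksum zero, let GSO fill it in later, set up CHECKSUM_PARTIAL for hardware offload, or compute it in software. In the software path a result of 0 must go on the wire as 0xffff (CSUM_MANGLED_0), because a zero UDP checksum means "none was computed". A standalone sketch of that final fold:

#include <stdint.h>

static uint16_t udp_finish_csum(uint32_t sum)   /* pseudo hdr + payload */
{
        uint16_t check;

        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        check = (uint16_t)~sum;
        return check ? check : 0xffff;          /* CSUM_MANGLED_0 */
}
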
765static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4) 803static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
766{ 804{
767 struct sock *sk = skb->sk; 805 struct sock *sk = skb->sk;
@@ -785,7 +823,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
785 if (is_udplite) /* UDP-Lite */ 823 if (is_udplite) /* UDP-Lite */
786 csum = udplite_csum(skb); 824 csum = udplite_csum(skb);
787 825
788 else if (sk->sk_no_check == UDP_CSUM_NOXMIT) { /* UDP csum disabled */ 826 else if (sk->sk_no_check_tx) { /* UDP csum disabled */
789 827
790 skb->ip_summed = CHECKSUM_NONE; 828 skb->ip_summed = CHECKSUM_NONE;
791 goto send; 829 goto send;
@@ -1495,6 +1533,10 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1495 if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) { 1533 if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
1496 int ret; 1534 int ret;
1497 1535
1536 /* Verify checksum before giving to encap */
1537 if (udp_lib_checksum_complete(skb))
1538 goto csum_error;
1539
1498 ret = encap_rcv(sk, skb); 1540 ret = encap_rcv(sk, skb);
1499 if (ret <= 0) { 1541 if (ret <= 0) {
1500 UDP_INC_STATS_BH(sock_net(sk), 1542 UDP_INC_STATS_BH(sock_net(sk),
@@ -1672,7 +1714,6 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
1672static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, 1714static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
1673 int proto) 1715 int proto)
1674{ 1716{
1675 const struct iphdr *iph;
1676 int err; 1717 int err;
1677 1718
1678 UDP_SKB_CB(skb)->partial_cov = 0; 1719 UDP_SKB_CB(skb)->partial_cov = 0;
@@ -1684,22 +1725,8 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
1684 return err; 1725 return err;
1685 } 1726 }
1686 1727
1687 iph = ip_hdr(skb); 1728 return skb_checksum_init_zero_check(skb, proto, uh->check,
1688 if (uh->check == 0) { 1729 inet_compute_pseudo);
1689 skb->ip_summed = CHECKSUM_UNNECESSARY;
1690 } else if (skb->ip_summed == CHECKSUM_COMPLETE) {
1691 if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
1692 proto, skb->csum))
1693 skb->ip_summed = CHECKSUM_UNNECESSARY;
1694 }
1695 if (!skb_csum_unnecessary(skb))
1696 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1697 skb->len, proto, 0);
1698 /* Probably, we should checksum udp header (it should be in cache
1699 * in any case) and data in tiny packets (< rx copybreak).
1700 */
1701
1702 return 0;
1703} 1730}
1704 1731
1705/* 1732/*
@@ -1886,7 +1913,7 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net,
1886 unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum); 1913 unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
1887 unsigned int slot2 = hash2 & udp_table.mask; 1914 unsigned int slot2 = hash2 & udp_table.mask;
1888 struct udp_hslot *hslot2 = &udp_table.hash2[slot2]; 1915 struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
1889 INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr) 1916 INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
1890 const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum); 1917 const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
1891 1918
1892 rcu_read_lock(); 1919 rcu_read_lock();
@@ -1979,7 +2006,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
1979 int (*push_pending_frames)(struct sock *)) 2006 int (*push_pending_frames)(struct sock *))
1980{ 2007{
1981 struct udp_sock *up = udp_sk(sk); 2008 struct udp_sock *up = udp_sk(sk);
1982 int val; 2009 int val, valbool;
1983 int err = 0; 2010 int err = 0;
1984 int is_udplite = IS_UDPLITE(sk); 2011 int is_udplite = IS_UDPLITE(sk);
1985 2012
@@ -1989,6 +2016,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
1989 if (get_user(val, (int __user *)optval)) 2016 if (get_user(val, (int __user *)optval))
1990 return -EFAULT; 2017 return -EFAULT;
1991 2018
2019 valbool = val ? 1 : 0;
2020
1992 switch (optname) { 2021 switch (optname) {
1993 case UDP_CORK: 2022 case UDP_CORK:
1994 if (val != 0) { 2023 if (val != 0) {
@@ -2018,6 +2047,14 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
2018 } 2047 }
2019 break; 2048 break;
2020 2049
2050 case UDP_NO_CHECK6_TX:
2051 up->no_check6_tx = valbool;
2052 break;
2053
2054 case UDP_NO_CHECK6_RX:
2055 up->no_check6_rx = valbool;
2056 break;
2057
2021 /* 2058 /*
2022 * UDP-Lite's partial checksum coverage (RFC 3828). 2059 * UDP-Lite's partial checksum coverage (RFC 3828).
2023 */ 2060 */
@@ -2100,6 +2137,14 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
2100 val = up->encap_type; 2137 val = up->encap_type;
2101 break; 2138 break;
2102 2139
2140 case UDP_NO_CHECK6_TX:
2141 val = up->no_check6_tx;
2142 break;
2143
2144 case UDP_NO_CHECK6_RX:
2145 val = up->no_check6_rx;
2146 break;
2147
2103 /* The following two cannot be changed on UDP sockets, the return is 2148 /* The following two cannot be changed on UDP sockets, the return is
2104 * always 0 (which corresponds to the full checksum coverage of UDP). */ 2149 * always 0 (which corresponds to the full checksum coverage of UDP). */
2105 case UDPLITE_SEND_CSCOV: 2150 case UDPLITE_SEND_CSCOV:
@@ -2484,7 +2529,11 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
 	__be16 protocol = skb->protocol;
 	netdev_features_t enc_features;
-	int outer_hlen;
+	int udp_offset, outer_hlen;
+	unsigned int oldlen;
+	bool need_csum;
+
+	oldlen = (u16)~skb->len;
 
 	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
 		goto out;
@@ -2496,6 +2545,10 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 	skb->mac_len = skb_inner_network_offset(skb);
 	skb->protocol = htons(ETH_P_TEB);
 
+	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
+	if (need_csum)
+		skb->encap_hdr_csum = 1;
+
 	/* segment inner packet. */
 	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
 	segs = skb_mac_gso_segment(skb, enc_features);
@@ -2506,10 +2559,11 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 	}
 
 	outer_hlen = skb_tnl_header_len(skb);
+	udp_offset = outer_hlen - tnl_hlen;
 	skb = segs;
 	do {
 		struct udphdr *uh;
-		int udp_offset = outer_hlen - tnl_hlen;
+		int len;
 
 		skb_reset_inner_headers(skb);
 		skb->encapsulation = 1;
@@ -2520,31 +2574,20 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 		skb_reset_mac_header(skb);
 		skb_set_network_header(skb, mac_len);
 		skb_set_transport_header(skb, udp_offset);
+		len = skb->len - udp_offset;
 		uh = udp_hdr(skb);
-		uh->len = htons(skb->len - udp_offset);
-
-		/* csum segment if tunnel sets skb with csum. */
-		if (protocol == htons(ETH_P_IP) && unlikely(uh->check)) {
-			struct iphdr *iph = ip_hdr(skb);
-
-			uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-						       skb->len - udp_offset,
-						       IPPROTO_UDP, 0);
-			uh->check = csum_fold(skb_checksum(skb, udp_offset,
-							   skb->len - udp_offset, 0));
-			if (uh->check == 0)
-				uh->check = CSUM_MANGLED_0;
-
-		} else if (protocol == htons(ETH_P_IPV6)) {
-			struct ipv6hdr *ipv6h = ipv6_hdr(skb);
-			u32 len = skb->len - udp_offset;
-
-			uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
-						     len, IPPROTO_UDP, 0);
-			uh->check = csum_fold(skb_checksum(skb, udp_offset, len, 0));
+		uh->len = htons(len);
+
+		if (need_csum) {
+			__be32 delta = htonl(oldlen + len);
+
+			uh->check = ~csum_fold((__force __wsum)
+					       ((__force u32)uh->check +
						(__force u32)delta));
+			uh->check = gso_make_checksum(skb, ~uh->check);
+
 			if (uh->check == 0)
 				uh->check = CSUM_MANGLED_0;
-			skb->ip_summed = CHECKSUM_NONE;
 		}
 
 		skb->protocol = protocol;
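The need_csum branch above performs an incremental checksum update rather than a full recompute: oldlen caches (u16)~skb->len before segmentation, and each segment folds only the length change into the stored sum before gso_make_checksum() finishes the rest. The underlying identity is eqn. 3 of RFC 1624, HC' = ~(~HC + ~m + m'). A standalone toy of that identity (a transcription of the RFC, not of the kernel variant above, which already stores a complemented partial sum):

#include <stdint.h>

/* Fold a 32-bit one's-complement accumulator down to 16 bits. */
static uint16_t csum_fold32(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* RFC 1624 incremental update: patch a checksum when one 16-bit
 * field it covers changes from old_len to new_len. */
static uint16_t csum_update_len(uint16_t check, uint16_t old_len,
				uint16_t new_len)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old_len;	/* remove the old value */
	sum += new_len;			/* account for the new one */
	return (uint16_t)~csum_fold32(sum);
}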
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 88b4023ecfcf..546d2d439dda 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -56,7 +56,8 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 	__wsum csum;
 
 	if (skb->encapsulation &&
-	    skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) {
+	    (skb_shinfo(skb)->gso_type &
+	     (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
 		segs = skb_udp_tunnel_segment(skb, features);
 		goto out;
 	}
@@ -71,8 +72,10 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 
 	if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
 			      SKB_GSO_UDP_TUNNEL |
+			      SKB_GSO_UDP_TUNNEL_CSUM |
 			      SKB_GSO_IPIP |
-			      SKB_GSO_GRE | SKB_GSO_MPLS) ||
+			      SKB_GSO_GRE | SKB_GSO_GRE_CSUM |
+			      SKB_GSO_MPLS) ||
 			      !(type & (SKB_GSO_UDP))))
 		goto out;
 
@@ -197,6 +200,7 @@ unflush:
 	}
 
 	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
+	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
 	pp = uo_priv->offload->callbacks.gro_receive(head, skb);
 
 out_unlock:
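skb_gro_postpull_rcsum() keeps a CHECKSUM_COMPLETE value coherent after the UDP header has been pulled: those bytes are no longer part of the packet the inner gro_receive callback sees, so their partial sum must be subtracted back out. The helper presumably reduces to something close to this (illustrative sketch, not the verbatim definition):

static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
					  const void *start, unsigned int len)
{
	/* only CHECKSUM_COMPLETE carries a running sum to maintain */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum,
				     csum_partial(start, len, 0));
}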
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 2c46acd4cc36..3b3efbda48e1 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -70,7 +70,6 @@ static struct inet_protosw udplite4_protosw = {
 	.protocol	= IPPROTO_UDPLITE,
 	.prot		= &udplite_prot,
 	.ops		= &inet_dgram_ops,
-	.no_check	= 0,		/* must checksum (RFC 3828) */
 	.flags		= INET_PROTOSW_PERMANENT,
 };
 
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index 05f2b484954f..91771a7c802f 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -58,12 +58,12 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
 
 	top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
 		0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
-	ip_select_ident(skb, dst->child, NULL);
 
 	top_iph->ttl = ip4_dst_hoplimit(dst->child);
 
 	top_iph->saddr = x->props.saddr.a4;
 	top_iph->daddr = x->id.daddr.a4;
+	ip_select_ident(skb, NULL);
 
 	return 0;
 }
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 186a8ecf92fa..d5f6bd9a210a 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -25,7 +25,7 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
 	if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
 		goto out;
 
-	if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df)
+	if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->ignore_df)
 		goto out;
 
 	mtu = dst_mtu(skb_dst(skb));
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 6c7fa0853fc7..5667b3003af9 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -275,19 +275,14 @@ static int snmp6_alloc_dev(struct inet6_dev *idev)
 {
 	int i;
 
-	if (snmp_mib_init((void __percpu **)idev->stats.ipv6,
-			  sizeof(struct ipstats_mib),
-			  __alignof__(struct ipstats_mib)) < 0)
+	idev->stats.ipv6 = alloc_percpu(struct ipstats_mib);
+	if (!idev->stats.ipv6)
 		goto err_ip;
 
 	for_each_possible_cpu(i) {
 		struct ipstats_mib *addrconf_stats;
-		addrconf_stats = per_cpu_ptr(idev->stats.ipv6[0], i);
+		addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i);
 		u64_stats_init(&addrconf_stats->syncp);
-#if SNMP_ARRAY_SZ == 2
-		addrconf_stats = per_cpu_ptr(idev->stats.ipv6[1], i);
-		u64_stats_init(&addrconf_stats->syncp);
-#endif
 	}
 
 
@@ -305,7 +300,7 @@ static int snmp6_alloc_dev(struct inet6_dev *idev)
 err_icmpmsg:
 	kfree(idev->stats.icmpv6dev);
 err_icmp:
-	snmp_mib_free((void __percpu **)idev->stats.ipv6);
+	free_percpu(idev->stats.ipv6);
 err_ip:
 	return -ENOMEM;
 }
@@ -2504,8 +2499,8 @@ static int inet6_addr_add(struct net *net, int ifindex,
 	return PTR_ERR(ifp);
 }
 
-static int inet6_addr_del(struct net *net, int ifindex, const struct in6_addr *pfx,
-			  unsigned int plen)
+static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
+			  const struct in6_addr *pfx, unsigned int plen)
 {
 	struct inet6_ifaddr *ifp;
 	struct inet6_dev *idev;
@@ -2528,7 +2523,12 @@ static int inet6_addr_del(struct net *net, int ifindex, const struct in6_addr *p
 			in6_ifa_hold(ifp);
 			read_unlock_bh(&idev->lock);
 
+			if (!(ifp->flags & IFA_F_TEMPORARY) &&
+			    (ifa_flags & IFA_F_MANAGETEMPADDR))
+				manage_tempaddrs(idev, ifp, 0, 0, false,
+						 jiffies);
 			ipv6_del_addr(ifp);
+			addrconf_verify_rtnl();
 			return 0;
 		}
 	}
@@ -2568,7 +2568,7 @@ int addrconf_del_ifaddr(struct net *net, void __user *arg)
 		return -EFAULT;
 
 	rtnl_lock();
-	err = inet6_addr_del(net, ireq.ifr6_ifindex, &ireq.ifr6_addr,
+	err = inet6_addr_del(net, ireq.ifr6_ifindex, 0, &ireq.ifr6_addr,
 			     ireq.ifr6_prefixlen);
 	rtnl_unlock();
 	return err;
@@ -2813,18 +2813,6 @@ static void addrconf_gre_config(struct net_device *dev)
 }
 #endif
 
-static inline int
-ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev)
-{
-	struct in6_addr lladdr;
-
-	if (!ipv6_get_lladdr(link_dev, &lladdr, IFA_F_TENTATIVE)) {
-		addrconf_add_linklocal(idev, &lladdr);
-		return 0;
-	}
-	return -1;
-}
-
 static int addrconf_notify(struct notifier_block *this, unsigned long event,
 			   void *ptr)
 {
@@ -3743,6 +3731,7 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
 	struct ifaddrmsg *ifm;
 	struct nlattr *tb[IFA_MAX+1];
 	struct in6_addr *pfx, *peer_pfx;
+	u32 ifa_flags;
 	int err;
 
 	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
@@ -3754,7 +3743,13 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
 	if (pfx == NULL)
 		return -EINVAL;
 
-	return inet6_addr_del(net, ifm->ifa_index, pfx, ifm->ifa_prefixlen);
+	ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
+
+	/* We ignore other flags so far. */
+	ifa_flags &= IFA_F_MANAGETEMPADDR;
+
+	return inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx,
+			      ifm->ifa_prefixlen);
 }
 
 static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags,
@@ -4363,7 +4358,7 @@ static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
 	memset(&stats[items], 0, pad);
 }
 
-static inline void __snmp6_fill_stats64(u64 *stats, void __percpu **mib,
+static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
 					int items, int bytes, size_t syncpoff)
 {
 	int i;
@@ -4383,7 +4378,7 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
 {
 	switch (attrtype) {
 	case IFLA_INET6_STATS:
-		__snmp6_fill_stats64(stats, (void __percpu **)idev->stats.ipv6,
+		__snmp6_fill_stats64(stats, idev->stats.ipv6,
 			IPSTATS_MIB_MAX, bytes, offsetof(struct ipstats_mib, syncp));
 		break;
 	case IFLA_INET6_ICMP6STATS:
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index 4c11cbcf8308..e6960457f625 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -123,7 +123,7 @@ static void snmp6_free_dev(struct inet6_dev *idev)
 {
 	kfree(idev->stats.icmpv6msgdev);
 	kfree(idev->stats.icmpv6dev);
-	snmp_mib_free((void __percpu **)idev->stats.ipv6);
+	free_percpu(idev->stats.ipv6);
 }
 
 /* Nobody refers to this device, we may destroy it. */
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index d935889f1008..7cb4392690dd 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -106,7 +106,6 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
 	struct inet_protosw *answer;
 	struct proto *answer_prot;
 	unsigned char answer_flags;
-	char answer_no_check;
 	int try_loading_module = 0;
 	int err;
 
@@ -162,7 +161,6 @@ lookup_protocol:
 
 	sock->ops = answer->ops;
 	answer_prot = answer->prot;
-	answer_no_check = answer->no_check;
 	answer_flags = answer->flags;
 	rcu_read_unlock();
 
@@ -176,7 +174,6 @@ lookup_protocol:
 	sock_init_data(sock, sk);
 
 	err = 0;
-	sk->sk_no_check = answer_no_check;
 	if (INET_PROTOSW_REUSE & answer_flags)
 		sk->sk_reuse = SK_CAN_REUSE;
 
@@ -715,33 +712,25 @@ static int __net_init ipv6_init_mibs(struct net *net)
 {
 	int i;
 
-	if (snmp_mib_init((void __percpu **)net->mib.udp_stats_in6,
-			  sizeof(struct udp_mib),
-			  __alignof__(struct udp_mib)) < 0)
+	net->mib.udp_stats_in6 = alloc_percpu(struct udp_mib);
+	if (!net->mib.udp_stats_in6)
 		return -ENOMEM;
-	if (snmp_mib_init((void __percpu **)net->mib.udplite_stats_in6,
-			  sizeof(struct udp_mib),
-			  __alignof__(struct udp_mib)) < 0)
+	net->mib.udplite_stats_in6 = alloc_percpu(struct udp_mib);
+	if (!net->mib.udplite_stats_in6)
 		goto err_udplite_mib;
-	if (snmp_mib_init((void __percpu **)net->mib.ipv6_statistics,
-			  sizeof(struct ipstats_mib),
-			  __alignof__(struct ipstats_mib)) < 0)
+	net->mib.ipv6_statistics = alloc_percpu(struct ipstats_mib);
+	if (!net->mib.ipv6_statistics)
 		goto err_ip_mib;
 
 	for_each_possible_cpu(i) {
 		struct ipstats_mib *af_inet6_stats;
-		af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics[0], i);
+		af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics, i);
 		u64_stats_init(&af_inet6_stats->syncp);
-#if SNMP_ARRAY_SZ == 2
-		af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics[1], i);
-		u64_stats_init(&af_inet6_stats->syncp);
-#endif
 	}
 
 
-	if (snmp_mib_init((void __percpu **)net->mib.icmpv6_statistics,
-			  sizeof(struct icmpv6_mib),
-			  __alignof__(struct icmpv6_mib)) < 0)
+	net->mib.icmpv6_statistics = alloc_percpu(struct icmpv6_mib);
+	if (!net->mib.icmpv6_statistics)
 		goto err_icmp_mib;
 	net->mib.icmpv6msg_statistics = kzalloc(sizeof(struct icmpv6msg_mib),
 						GFP_KERNEL);
@@ -750,22 +739,22 @@ static int __net_init ipv6_init_mibs(struct net *net)
 	return 0;
 
 err_icmpmsg_mib:
-	snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics);
+	free_percpu(net->mib.icmpv6_statistics);
 err_icmp_mib:
-	snmp_mib_free((void __percpu **)net->mib.ipv6_statistics);
+	free_percpu(net->mib.ipv6_statistics);
 err_ip_mib:
-	snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6);
+	free_percpu(net->mib.udplite_stats_in6);
 err_udplite_mib:
-	snmp_mib_free((void __percpu **)net->mib.udp_stats_in6);
+	free_percpu(net->mib.udp_stats_in6);
 	return -ENOMEM;
 }
 
 static void ipv6_cleanup_mibs(struct net *net)
 {
-	snmp_mib_free((void __percpu **)net->mib.udp_stats_in6);
-	snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6);
-	snmp_mib_free((void __percpu **)net->mib.ipv6_statistics);
-	snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics);
+	free_percpu(net->mib.udp_stats_in6);
+	free_percpu(net->mib.udplite_stats_in6);
+	free_percpu(net->mib.ipv6_statistics);
+	free_percpu(net->mib.icmpv6_statistics);
 	kfree(net->mib.icmpv6msg_statistics);
 }
 
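The old snmp_mib_init()/snmp_mib_free() pair managed an array of per-cpu pointers (two of them when SNMP_ARRAY_SZ == 2); alloc_percpu()/free_percpu() reduce each MIB to a single __percpu pointer. The resulting idiom, sketched with a made-up struct foo_mib purely for illustration:

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical MIB used only to show the new single-pointer idiom. */
struct foo_mib {
	u64 mibs[16];
	struct u64_stats_sync syncp;
};

static struct foo_mib __percpu *foo_alloc_mib(void)
{
	struct foo_mib __percpu *mib = alloc_percpu(struct foo_mib);
	int cpu;

	if (!mib)
		return NULL;
	for_each_possible_cpu(cpu)
		u64_stats_init(&per_cpu_ptr(mib, cpu)->syncp);
	return mib;
}

static void foo_free_mib(struct foo_mib __percpu *mib)
{
	free_percpu(mib);	/* NULL-safe, like kfree() */
}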
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 7b326529e6a2..f6c84a6eb238 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -400,6 +400,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
 	int len;
 	int hlimit;
 	int err = 0;
+	u32 mark = IP6_REPLY_MARK(net, skb->mark);
 
 	if ((u8 *)hdr < skb->head ||
 	    (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
@@ -466,6 +467,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
 	fl6.daddr = hdr->saddr;
 	if (saddr)
 		fl6.saddr = *saddr;
+	fl6.flowi6_mark = mark;
 	fl6.flowi6_oif = iif;
 	fl6.fl6_icmp_type = type;
 	fl6.fl6_icmp_code = code;
@@ -474,6 +476,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
 	sk = icmpv6_xmit_lock(net);
 	if (sk == NULL)
 		return;
+	sk->sk_mark = mark;
 	np = inet6_sk(sk);
 
 	if (!icmpv6_xrlim_allow(sk, type, &fl6))
@@ -493,12 +496,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
 	if (IS_ERR(dst))
 		goto out;
 
-	if (ipv6_addr_is_multicast(&fl6.daddr))
-		hlimit = np->mcast_hops;
-	else
-		hlimit = np->hop_limit;
-	if (hlimit < 0)
-		hlimit = ip6_dst_hoplimit(dst);
+	hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
 
 	msg.skb = skb;
 	msg.offset = skb_network_offset(skb);
@@ -556,6 +554,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 	int err = 0;
 	int hlimit;
 	u8 tclass;
+	u32 mark = IP6_REPLY_MARK(net, skb->mark);
 
 	saddr = &ipv6_hdr(skb)->daddr;
 
@@ -574,11 +573,13 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 	fl6.saddr = *saddr;
 	fl6.flowi6_oif = skb->dev->ifindex;
 	fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
+	fl6.flowi6_mark = mark;
 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 
 	sk = icmpv6_xmit_lock(net);
 	if (sk == NULL)
 		return;
+	sk->sk_mark = mark;
 	np = inet6_sk(sk);
 
 	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
@@ -593,12 +594,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 	if (IS_ERR(dst))
 		goto out;
 
-	if (ipv6_addr_is_multicast(&fl6.daddr))
-		hlimit = np->mcast_hops;
-	else
-		hlimit = np->hop_limit;
-	if (hlimit < 0)
-		hlimit = ip6_dst_hoplimit(dst);
+	hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
 
 	idev = __in6_dev_get(skb->dev);
 
@@ -702,22 +698,11 @@ static int icmpv6_rcv(struct sk_buff *skb)
 	saddr = &ipv6_hdr(skb)->saddr;
 	daddr = &ipv6_hdr(skb)->daddr;
 
-	/* Perform checksum. */
-	switch (skb->ip_summed) {
-	case CHECKSUM_COMPLETE:
-		if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
-				     skb->csum))
-			break;
-		/* fall through */
-	case CHECKSUM_NONE:
-		skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
-							 IPPROTO_ICMPV6, 0));
-		if (__skb_checksum_complete(skb)) {
-			LIMIT_NETDEBUG(KERN_DEBUG
-				       "ICMPv6 checksum failed [%pI6c > %pI6c]\n",
-				       saddr, daddr);
-			goto csum_error;
-		}
+	if (skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo)) {
+		LIMIT_NETDEBUG(KERN_DEBUG
+			       "ICMPv6 checksum failed [%pI6c > %pI6c]\n",
+			       saddr, daddr);
+		goto csum_error;
 	}
 
 	if (!pskb_pull(skb, sizeof(*hdr)))
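IP6_REPLY_MARK() feeds the new fwmark_reflect sysctl (registered in net/ipv6/sysctl_net_ipv6.c later in this series): when enabled, kernel-generated replies such as ICMPv6 errors, echo replies and TCP resets inherit the mark of the packet that triggered them, so mark-based policy routing sees both directions of a flow. Judging from the sysctl field shown below, the macro presumably reduces to:

/* Presumed shape of the helper, shown for illustration only. */
#define IP6_REPLY_MARK(net, mark) \
	((net)->ipv6.sysctl.fwmark_reflect ? (mark) : 0)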
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index d4ade34ab375..a245e5ddffbd 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -81,7 +81,7 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
 	final_p = fl6_update_dst(fl6, np->opt, &final);
 	fl6->saddr = ireq->ir_v6_loc_addr;
 	fl6->flowi6_oif = ireq->ir_iif;
-	fl6->flowi6_mark = sk->sk_mark;
+	fl6->flowi6_mark = ireq->ir_mark;
 	fl6->fl6_dport = ireq->ir_rmt_port;
 	fl6->fl6_sport = htons(ireq->ir_num);
 	security_req_classify_flow(req, flowi6_to_flowi(fl6));
diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
index ee7a97f510cb..9a4d7322fb22 100644
--- a/net/ipv6/ip6_checksum.c
+++ b/net/ipv6/ip6_checksum.c
@@ -75,25 +75,50 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
 		return err;
 	}
 
-	if (uh->check == 0) {
-		/* RFC 2460 section 8.1 says that we SHOULD log
-		   this error. Well, it is reasonable.
-		 */
-		LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
-			       &ipv6_hdr(skb)->saddr, ntohs(uh->source),
-			       &ipv6_hdr(skb)->daddr, ntohs(uh->dest));
-		return 1;
-	}
-	if (skb->ip_summed == CHECKSUM_COMPLETE &&
-	    !csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
-			     skb->len, proto, skb->csum))
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-	if (!skb_csum_unnecessary(skb))
-		skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-							 &ipv6_hdr(skb)->daddr,
-							 skb->len, proto, 0));
-
-	return 0;
+	/* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels)
+	 * we accept a checksum of zero here. When we find the socket
+	 * for the UDP packet we'll check if that socket allows zero checksum
+	 * for IPv6 (set by socket option).
+	 */
+	return skb_checksum_init_zero_check(skb, proto, uh->check,
+					    ip6_compute_pseudo);
+}
+EXPORT_SYMBOL(udp6_csum_init);
+
+/* Function to set UDP checksum for an IPv6 UDP packet. This is intended
+ * for the simple case like when setting the checksum for a UDP tunnel.
+ */
+void udp6_set_csum(bool nocheck, struct sk_buff *skb,
+		   const struct in6_addr *saddr,
+		   const struct in6_addr *daddr, int len)
+{
+	struct udphdr *uh = udp_hdr(skb);
+
+	if (nocheck)
+		uh->check = 0;
+	else if (skb_is_gso(skb))
+		uh->check = ~udp_v6_check(len, saddr, daddr, 0);
+	else if (skb_dst(skb) && skb_dst(skb)->dev &&
+		 (skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) {
+
+		BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);
+
+		skb->ip_summed = CHECKSUM_PARTIAL;
+		skb->csum_start = skb_transport_header(skb) - skb->head;
+		skb->csum_offset = offsetof(struct udphdr, check);
+		uh->check = ~udp_v6_check(len, saddr, daddr, 0);
+	} else {
+		__wsum csum;
+
+		BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);
+
+		uh->check = 0;
+		csum = skb_checksum(skb, 0, len, 0);
+		uh->check = udp_v6_check(len, saddr, daddr, csum);
+		if (uh->check == 0)
+			uh->check = CSUM_MANGLED_0;
+
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	}
 }
-EXPORT_SYMBOL(udp6_csum_init);
+EXPORT_SYMBOL(udp6_set_csum);
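udp6_set_csum() gives encapsulation code a single call that honours, in order of preference, the RFC 6936 zero-checksum case, GSO (leave a pseudo-header sum for segmentation to finish), NETIF_F_IPV6_CSUM hardware offload, and a software fallback. A hypothetical transmit path in a UDP tunnel might combine it with the per-socket flag from the udp.c hunks above; udp_get_no_check6_tx() is the accessor this series is understood to provide, and the surrounding names are stand-ins:

static void tunnel6_push_udp_csum(struct sock *sk, struct sk_buff *skb,
				  const struct in6_addr *saddr,
				  const struct in6_addr *daddr, int len)
{
	/* uh->check becomes 0 (RFC 6936) when UDP_NO_CHECK6_TX was set
	 * on sk; otherwise GSO, hardware offload or a software sum is
	 * selected, in that order. */
	udp6_set_csum(udp_get_no_check6_tx(sk), skb, saddr, daddr, len);
}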
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 87891f5f57b5..cb4459bd1d29 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -71,8 +71,7 @@ static DEFINE_RWLOCK(fib6_walker_lock);
 #define FWS_INIT FWS_L
 #endif
 
-static void fib6_prune_clones(struct net *net, struct fib6_node *fn,
-			      struct rt6_info *rt);
+static void fib6_prune_clones(struct net *net, struct fib6_node *fn);
 static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn);
 static struct fib6_node *fib6_repair_tree(struct net *net, struct fib6_node *fn);
 static int fib6_walk(struct fib6_walker_t *w);
@@ -941,7 +940,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info,
 	if (!err) {
 		fib6_start_gc(info->nl_net, rt);
 		if (!(rt->rt6i_flags & RTF_CACHE))
-			fib6_prune_clones(info->nl_net, pn, rt);
+			fib6_prune_clones(info->nl_net, pn);
 	}
 
 out:
@@ -1375,7 +1374,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
 			pn = pn->parent;
 		}
 #endif
-		fib6_prune_clones(info->nl_net, pn, rt);
+		fib6_prune_clones(info->nl_net, pn);
 	}
 
 	/*
@@ -1601,10 +1600,9 @@ static int fib6_prune_clone(struct rt6_info *rt, void *arg)
 	return 0;
 }
 
-static void fib6_prune_clones(struct net *net, struct fib6_node *fn,
-			      struct rt6_info *rt)
+static void fib6_prune_clones(struct net *net, struct fib6_node *fn)
 {
-	fib6_clean_tree(net, fn, fib6_prune_clone, 1, rt);
+	fib6_clean_tree(net, fn, fib6_prune_clone, 1, NULL);
 }
 
 /*
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 0961b5ef866d..4052694c6f2c 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -26,7 +26,6 @@
 #include <net/sock.h>
 
 #include <net/ipv6.h>
-#include <net/addrconf.h>
 #include <net/rawv6.h>
 #include <net/transp_v6.h>
 
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 9d921462b57f..3873181ed856 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -72,6 +72,7 @@ struct ip6gre_net {
 };
 
 static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
+static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
 static int ip6gre_tunnel_init(struct net_device *dev);
 static void ip6gre_tunnel_setup(struct net_device *dev);
 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
@@ -353,10 +354,10 @@ failed_free:
 
 static void ip6gre_tunnel_uninit(struct net_device *dev)
 {
-	struct net *net = dev_net(dev);
-	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+	struct ip6_tnl *t = netdev_priv(dev);
+	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
 
-	ip6gre_tunnel_unlink(ign, netdev_priv(dev));
+	ip6gre_tunnel_unlink(ign, t);
 	dev_put(dev);
 }
 
@@ -467,17 +468,7 @@ static int ip6gre_rcv(struct sk_buff *skb)
 		goto drop;
 
 	if (flags&GRE_CSUM) {
-		switch (skb->ip_summed) {
-		case CHECKSUM_COMPLETE:
-			csum = csum_fold(skb->csum);
-			if (!csum)
-				break;
-			/* fall through */
-		case CHECKSUM_NONE:
-			skb->csum = 0;
-			csum = __skb_checksum_complete(skb);
-			skb->ip_summed = CHECKSUM_COMPLETE;
-		}
+		csum = skb_checksum_simple_validate(skb);
 		offset += 4;
 	}
 	if (flags&GRE_KEY) {
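skb_checksum_simple_validate() collapses the removed CHECKSUM_COMPLETE/CHECKSUM_NONE switch into one helper for checksums computed over the whole packet with no pseudo-header, which is exactly the GRE case. Its presumed shape, simplified (the real helper also tracks skb->csum_valid):

static inline __sum16 my_checksum_simple_validate(struct sk_buff *skb)
{
	/* a folded CHECKSUM_COMPLETE sum of zero means "already valid" */
	if (skb->ip_summed == CHECKSUM_COMPLETE && !csum_fold(skb->csum))
		return 0;
	/* otherwise walk the packet and verify the sum the slow way */
	return __skb_checksum_complete(skb);
}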
@@ -611,8 +602,8 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 			 int encap_limit,
 			 __u32 *pmtu)
 {
-	struct net *net = dev_net(dev);
 	struct ip6_tnl *tunnel = netdev_priv(dev);
+	struct net *net = tunnel->net;
 	struct net_device *tdev;	/* Device to other host */
 	struct ipv6hdr *ipv6h;		/* Our new IP header */
 	unsigned int max_headroom = 0;	/* The extra header space needed */
@@ -979,7 +970,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
 		int strict = (ipv6_addr_type(&p->raddr) &
 			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
 
-		struct rt6_info *rt = rt6_lookup(dev_net(dev),
+		struct rt6_info *rt = rt6_lookup(t->net,
 						 &p->raddr, &p->laddr,
 						 p->link, strict);
 
@@ -1063,13 +1054,12 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
 	int err = 0;
 	struct ip6_tnl_parm2 p;
 	struct __ip6_tnl_parm p1;
-	struct ip6_tnl *t;
-	struct net *net = dev_net(dev);
+	struct ip6_tnl *t = netdev_priv(dev);
+	struct net *net = t->net;
 	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
 
 	switch (cmd) {
 	case SIOCGETTUNNEL:
-		t = NULL;
 		if (dev == ign->fb_tunnel_dev) {
 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
 				err = -EFAULT;
@@ -1077,9 +1067,9 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
 			}
 			ip6gre_tnl_parm_from_user(&p1, &p);
 			t = ip6gre_tunnel_locate(net, &p1, 0);
+			if (t == NULL)
+				t = netdev_priv(dev);
 		}
-		if (t == NULL)
-			t = netdev_priv(dev);
 		memset(&p, 0, sizeof(p));
 		ip6gre_tnl_parm_to_user(&p, &t->parms);
 		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
@@ -1242,7 +1232,6 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
 	dev->flags |= IFF_NOARP;
 	dev->iflink = 0;
 	dev->addr_len = sizeof(struct in6_addr);
-	dev->features |= NETIF_F_NETNS_LOCAL;
 	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
 }
 
@@ -1297,11 +1286,17 @@ static struct inet6_protocol ip6gre_protocol __read_mostly = {
 	.flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
 
-static void ip6gre_destroy_tunnels(struct ip6gre_net *ign,
-				   struct list_head *head)
+static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
 {
+	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+	struct net_device *dev, *aux;
 	int prio;
 
+	for_each_netdev_safe(net, dev, aux)
+		if (dev->rtnl_link_ops == &ip6gre_link_ops ||
+		    dev->rtnl_link_ops == &ip6gre_tap_ops)
+			unregister_netdevice_queue(dev, head);
+
 	for (prio = 0; prio < 4; prio++) {
 		int h;
 		for (h = 0; h < HASH_SIZE; h++) {
@@ -1310,7 +1305,12 @@ static void ip6gre_destroy_tunnels(struct ip6gre_net *ign,
 			t = rtnl_dereference(ign->tunnels[prio][h]);
 
 			while (t != NULL) {
-				unregister_netdevice_queue(t->dev, head);
+				/* If dev is in the same netns, it has already
+				 * been added to the list by the previous loop.
+				 */
+				if (!net_eq(dev_net(t->dev), net))
+					unregister_netdevice_queue(t->dev,
+								   head);
 				t = rtnl_dereference(t->next);
 			}
 		}
@@ -1329,6 +1329,11 @@ static int __net_init ip6gre_init_net(struct net *net)
 		goto err_alloc_dev;
 	}
 	dev_net_set(ign->fb_tunnel_dev, net);
+	/* FB netdevice is special: we have one, and only one per netns.
+	 * Allowing to move it to another netns is clearly unsafe.
+	 */
+	ign->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
+
 
 	ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
 	ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;
@@ -1349,12 +1354,10 @@ err_alloc_dev:
 
 static void __net_exit ip6gre_exit_net(struct net *net)
 {
-	struct ip6gre_net *ign;
 	LIST_HEAD(list);
 
-	ign = net_generic(net, ip6gre_net_id);
 	rtnl_lock();
-	ip6gre_destroy_tunnels(ign, &list);
+	ip6gre_destroy_tunnels(net, &list);
 	unregister_netdevice_many(&list);
 	rtnl_unlock();
 }
@@ -1531,15 +1534,14 @@ out:
 static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
 			     struct nlattr *data[])
 {
-	struct ip6_tnl *t, *nt;
-	struct net *net = dev_net(dev);
+	struct ip6_tnl *t, *nt = netdev_priv(dev);
+	struct net *net = nt->net;
 	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
 	struct __ip6_tnl_parm p;
 
 	if (dev == ign->fb_tunnel_dev)
 		return -EINVAL;
 
-	nt = netdev_priv(dev);
 	ip6gre_netlink_parms(data, &p);
 
 	t = ip6gre_tunnel_locate(net, &p, 0);
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index b2f091566f88..65eda2a8af48 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -97,9 +97,11 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
 		     SKB_GSO_DODGY |
 		     SKB_GSO_TCP_ECN |
 		     SKB_GSO_GRE |
+		     SKB_GSO_GRE_CSUM |
 		     SKB_GSO_IPIP |
 		     SKB_GSO_SIT |
 		     SKB_GSO_UDP_TUNNEL |
+		     SKB_GSO_UDP_TUNNEL_CSUM |
 		     SKB_GSO_MPLS |
 		     SKB_GSO_TCPV6 |
 		     0)))
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index fbf11562b54c..cb9df0eb4023 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -219,7 +219,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 	skb->mark = sk->sk_mark;
 
 	mtu = dst_mtu(dst);
-	if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
+	if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
 		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
 			      IPSTATS_MIB_OUT, skb->len);
 		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
@@ -347,11 +347,11 @@ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 	if (skb->len <= mtu)
 		return false;
 
-	/* ipv6 conntrack defrag sets max_frag_size + local_df */
+	/* ipv6 conntrack defrag sets max_frag_size + ignore_df */
 	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
 		return true;
 
-	if (skb->local_df)
+	if (skb->ignore_df)
 		return false;
 
 	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
@@ -537,6 +537,18 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 	skb_copy_secmark(to, from);
 }
 
+static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
+{
+	static u32 ip6_idents_hashrnd __read_mostly;
+	u32 hash, id;
+
+	net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
+
+	hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
+	id = ip_idents_reserve(hash, 1);
+	fhdr->identification = htonl(id);
+}
+
 int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 {
 	struct sk_buff *frag;
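The new ipv6_select_ident() draws fragment IDs from the shared ip_idents_reserve() pool, keyed by a keyed jhash of the destination, instead of the inet_peer counter removed from output_core.c below; IDs stay cheap to generate while remaining hard to predict across destinations. A toy model of such a bucketed reservation, with a made-up bucket count (the real pool lives in net/ipv4/route.c and also mixes in the time since a bucket was last used):

#include <linux/atomic.h>

#define TOY_IDENT_BUCKETS	2048
static atomic_t toy_idents[TOY_IDENT_BUCKETS];

static u32 toy_idents_reserve(u32 hash, int segs)
{
	atomic_t *id = &toy_idents[hash % TOY_IDENT_BUCKETS];

	/* hand out "segs" consecutive IDs from this flow's bucket */
	return (u32)atomic_add_return(segs, id) - segs;
}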
@@ -559,7 +571,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 	/* We must not fragment if the socket is set to force MTU discovery
 	 * or if the skb it not generated by a local socket.
 	 */
-	if (unlikely(!skb->local_df && skb->len > mtu) ||
+	if (unlikely(!skb->ignore_df && skb->len > mtu) ||
 		     (IP6CB(skb)->frag_max_size &&
 		      IP6CB(skb)->frag_max_size > mtu)) {
 		if (skb->sk && dst_allfrag(skb_dst(skb)))
@@ -1234,7 +1246,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
 			 sizeof(struct frag_hdr) : 0) +
 			rt->rt6i_nfheader_len;
 
-	if (ip6_sk_local_df(sk))
+	if (ip6_sk_ignore_df(sk))
 		maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
 	else
 		maxnonfragsize = mtu;
@@ -1544,7 +1556,7 @@ int ip6_push_pending_frames(struct sock *sk)
 	}
 
 	/* Allow local fragmentation. */
-	skb->local_df = ip6_sk_local_df(sk);
+	skb->ignore_df = ip6_sk_ignore_df(sk);
 
 	*final_dst = fl6->daddr;
 	__skb_pull(skb, skb_network_header_len(skb));
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index f6a66bb4114d..afa082458360 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -61,6 +61,7 @@
 MODULE_AUTHOR("Ville Nuorvala");
 MODULE_DESCRIPTION("IPv6 tunneling device");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("ip6tnl");
 MODULE_ALIAS_NETDEV("ip6tnl0");
 
 #ifdef IP6_TNL_DEBUG
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 6cc9f9371cc5..9aaa6bb229e4 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -795,15 +795,12 @@ static const struct net_device_ops vti6_netdev_ops = {
  **/
 static void vti6_dev_setup(struct net_device *dev)
 {
-	struct ip6_tnl *t;
-
 	dev->netdev_ops = &vti6_netdev_ops;
 	dev->destructor = vti6_dev_free;
 
 	dev->type = ARPHRD_TUNNEL6;
 	dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
 	dev->mtu = ETH_DATA_LEN;
-	t = netdev_priv(dev);
 	dev->flags |= IFF_NOARP;
 	dev->addr_len = sizeof(struct in6_addr);
 	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index 84c7f33d0cf8..387d8b8fc18d 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -90,17 +90,9 @@ nf_nat_ipv6_fn(const struct nf_hook_ops *ops,
 	if (nf_ct_is_untracked(ct))
 		return NF_ACCEPT;
 
-	nat = nfct_nat(ct);
-	if (!nat) {
-		/* NAT module was loaded late. */
-		if (nf_ct_is_confirmed(ct))
-			return NF_ACCEPT;
-		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
-		if (nat == NULL) {
-			pr_debug("failed to add NAT extension\n");
-			return NF_ACCEPT;
-		}
-	}
+	nat = nf_ct_nat_ext_add(ct);
+	if (nat == NULL)
+		return NF_ACCEPT;
 
 	switch (ctinfo) {
 	case IP_CT_RELATED:
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 767ab8da8218..0d5279fd852a 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -451,7 +451,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
 	}
 	sub_frag_mem_limit(&fq->q, head->truesize);
 
-	head->local_df = 1;
+	head->ignore_df = 1;
 	head->next = NULL;
 	head->dev = dev;
 	head->tstamp = fq->q.stamp;
diff --git a/net/ipv6/netfilter/nft_chain_nat_ipv6.c b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
index 9c3297a768fd..d189fcb437fe 100644
--- a/net/ipv6/netfilter/nft_chain_nat_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
@@ -47,15 +47,9 @@ static unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops,
 	if (ct == NULL || nf_ct_is_untracked(ct))
 		return NF_ACCEPT;
 
-	nat = nfct_nat(ct);
-	if (nat == NULL) {
-		/* Conntrack module was loaded late, can't add extension. */
-		if (nf_ct_is_confirmed(ct))
-			return NF_ACCEPT;
-		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
-		if (nat == NULL)
-			return NF_ACCEPT;
-	}
+	nat = nf_ct_nat_ext_add(ct);
+	if (nat == NULL)
+		return NF_ACCEPT;
 
 	switch (ctinfo) {
 	case IP_CT_RELATED:
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index a2bbc0d08d92..5ec867e4a8b7 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -8,32 +8,6 @@
 #include <net/addrconf.h>
 #include <net/secure_seq.h>
 
-void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
-{
-	static atomic_t ipv6_fragmentation_id;
-	struct in6_addr addr;
-	int ident;
-
-#if IS_ENABLED(CONFIG_IPV6)
-	struct inet_peer *peer;
-	struct net *net;
-
-	net = dev_net(rt->dst.dev);
-	peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
-	if (peer) {
-		fhdr->identification = htonl(inet_getid(peer, 0));
-		inet_putpeer(peer);
-		return;
-	}
-#endif
-	ident = atomic_inc_return(&ipv6_fragmentation_id);
-
-	addr = rt->rt6i_dst.addr;
-	addr.s6_addr32[0] ^= (__force __be32)ident;
-	fhdr->identification = htonl(secure_ipv6_id(addr.s6_addr32));
-}
-EXPORT_SYMBOL(ipv6_select_ident);
-
 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 {
 	u16 offset = sizeof(struct ipv6hdr);
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index bda74291c3e0..5b7a1ed2aba9 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -51,7 +51,6 @@ static struct inet_protosw pingv6_protosw = {
 	.protocol =  IPPROTO_ICMPV6,
 	.prot =      &pingv6_prot,
 	.ops =       &inet6_dgram_ops,
-	.no_check =  UDP_CSUM_DEFAULT,
 	.flags =     INET_PROTOSW_REUSE,
 };
 
@@ -168,12 +167,7 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	pfh.wcheck = 0;
 	pfh.family = AF_INET6;
 
-	if (ipv6_addr_is_multicast(&fl6.daddr))
-		hlimit = np->mcast_hops;
-	else
-		hlimit = np->hop_limit;
-	if (hlimit < 0)
-		hlimit = ip6_dst_hoplimit(dst);
+	hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
 
 	lock_sock(sk);
 	err = ip6_append_data(sk, ping_getfrag, &pfh, len,
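The same six-line hop-limit selection was repeated in icmp.c, ping.c and raw.c; ip6_sk_dst_hoplimit() consolidates it. Its presumed shape, reconstructed directly from the removed lines:

static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np,
				      struct flowi6 *fl6,
				      struct dst_entry *dst)
{
	int hlimit;

	/* per-socket hop limit, multicast vs unicast */
	if (ipv6_addr_is_multicast(&fl6->daddr))
		hlimit = np->mcast_hops;
	else
		hlimit = np->hop_limit;

	/* fall back to the route's default when unset */
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);
	return hlimit;
}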
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 091d066a57b3..3317440ea341 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -186,7 +186,7 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, atomic_long_t *smib)
 /* can be called either with percpu mib (pcpumib != NULL),
  * or shared one (smib != NULL)
  */
-static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **pcpumib,
+static void snmp6_seq_show_item(struct seq_file *seq, void __percpu *pcpumib,
 				atomic_long_t *smib,
 				const struct snmp_mib *itemlist)
 {
@@ -201,7 +201,7 @@ static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **pcpumib,
 	}
 }
 
-static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu **mib,
+static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu *mib,
 				  const struct snmp_mib *itemlist, size_t syncpoff)
 {
 	int i;
@@ -215,14 +215,14 @@ static int snmp6_seq_show(struct seq_file *seq, void *v)
 {
 	struct net *net = (struct net *)seq->private;
 
-	snmp6_seq_show_item64(seq, (void __percpu **)net->mib.ipv6_statistics,
+	snmp6_seq_show_item64(seq, net->mib.ipv6_statistics,
 			    snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
-	snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics,
+	snmp6_seq_show_item(seq, net->mib.icmpv6_statistics,
 			    NULL, snmp6_icmp6_list);
 	snmp6_seq_show_icmpv6msg(seq, net->mib.icmpv6msg_statistics->mibs);
-	snmp6_seq_show_item(seq, (void __percpu **)net->mib.udp_stats_in6,
+	snmp6_seq_show_item(seq, net->mib.udp_stats_in6,
 			    NULL, snmp6_udp6_list);
-	snmp6_seq_show_item(seq, (void __percpu **)net->mib.udplite_stats_in6,
+	snmp6_seq_show_item(seq, net->mib.udplite_stats_in6,
 			    NULL, snmp6_udplite6_list);
 	return 0;
 }
@@ -245,7 +245,7 @@ static int snmp6_dev_seq_show(struct seq_file *seq, void *v)
 	struct inet6_dev *idev = (struct inet6_dev *)seq->private;
 
 	seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex);
-	snmp6_seq_show_item64(seq, (void __percpu **)idev->stats.ipv6,
+	snmp6_seq_show_item64(seq, idev->stats.ipv6,
 			    snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
 	snmp6_seq_show_item(seq, NULL, idev->stats.icmpv6dev->mibs,
 			    snmp6_icmp6_list);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 1f29996e368a..b2dc60b0c764 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -873,14 +873,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
 		err = PTR_ERR(dst);
 		goto out;
 	}
-	if (hlimit < 0) {
-		if (ipv6_addr_is_multicast(&fl6.daddr))
-			hlimit = np->mcast_hops;
-		else
-			hlimit = np->hop_limit;
-		if (hlimit < 0)
-			hlimit = ip6_dst_hoplimit(dst);
-	}
+	if (hlimit < 0)
+		hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
 
 	if (tclass < 0)
 		tclass = np->tclass;
@@ -1328,7 +1322,6 @@ static struct inet_protosw rawv6_protosw = {
 	.protocol	= IPPROTO_IP,	/* wild card */
 	.prot		= &rawv6_prot,
 	.ops		= &inet6_sockraw_ops,
-	.no_check	= UDP_CSUM_DEFAULT,
 	.flags		= INET_PROTOSW_REUSE,
 };
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 6ebdb7b6744c..f23fbd28a501 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1176,7 +1176,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
 
 	memset(&fl6, 0, sizeof(fl6));
 	fl6.flowi6_oif = oif;
-	fl6.flowi6_mark = mark;
+	fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
 	fl6.daddr = iph->daddr;
 	fl6.saddr = iph->saddr;
 	fl6.flowlabel = ip6_flowinfo(iph);
@@ -1455,7 +1455,7 @@ static int ip6_dst_gc(struct dst_ops *ops)
 		goto out;
 
 	net->ipv6.ip6_rt_gc_expire++;
-	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, entries > rt_max_size);
+	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
 	entries = dst_entries_get_slow(ops);
 	if (entries < ops->gc_thresh)
 		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 45397b2a4a0b..4f408176dc64 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1828,4 +1828,5 @@ xfrm_tunnel_failed:
 module_init(sit_init);
 module_exit(sit_cleanup);
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("sit");
 MODULE_ALIAS_NETDEV("sit0");
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index bb53a5e73c1a..a822b880689b 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -216,6 +216,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
 		ireq->ir_iif = inet6_iif(skb);
 
+	ireq->ir_mark = inet_request_mark(sk, skb);
+
 	req->expires = 0UL;
 	req->num_retrans = 0;
 	ireq->ecn_ok = ecn_ok;
@@ -242,7 +244,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 		final_p = fl6_update_dst(&fl6, np->opt, &final);
 		fl6.saddr = ireq->ir_v6_loc_addr;
 		fl6.flowi6_oif = sk->sk_bound_dev_if;
-		fl6.flowi6_mark = sk->sk_mark;
+		fl6.flowi6_mark = ireq->ir_mark;
 		fl6.fl6_dport = ireq->ir_rmt_port;
 		fl6.fl6_sport = inet_sk(sk)->inet_sport;
 		security_req_classify_flow(req, flowi6_to_flowi(&fl6));
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 7f405a168822..058f3eca2e53 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -38,6 +38,13 @@ static struct ctl_table ipv6_table_template[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec
 	},
+	{
+		.procname	= "fwmark_reflect",
+		.data		= &init_net.ipv6.sysctl.fwmark_reflect,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec
+	},
 	{ }
 };
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index e289830ed6e3..229239ad96b1 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -340,7 +340,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
340 struct sock *sk; 340 struct sock *sk;
341 int err; 341 int err;
342 struct tcp_sock *tp; 342 struct tcp_sock *tp;
343 __u32 seq; 343 struct request_sock *fastopen;
344 __u32 seq, snd_una;
344 struct net *net = dev_net(skb->dev); 345 struct net *net = dev_net(skb->dev);
345 346
346 sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr, 347 sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
@@ -371,8 +372,11 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
371 372
372 tp = tcp_sk(sk); 373 tp = tcp_sk(sk);
373 seq = ntohl(th->seq); 374 seq = ntohl(th->seq);
375 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
376 fastopen = tp->fastopen_rsk;
377 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
374 if (sk->sk_state != TCP_LISTEN && 378 if (sk->sk_state != TCP_LISTEN &&
375 !between(seq, tp->snd_una, tp->snd_nxt)) { 379 !between(seq, snd_una, tp->snd_nxt)) {
376 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); 380 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
377 goto out; 381 goto out;
378 } 382 }
@@ -436,8 +440,13 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
436 goto out; 440 goto out;
437 441
438 case TCP_SYN_SENT: 442 case TCP_SYN_SENT:
439 case TCP_SYN_RECV: /* Cannot happen. 443 case TCP_SYN_RECV:
440 It can, it SYNs are crossed. --ANK */ 444 /* Only in fast or simultaneous open. If a fast open socket is
445 * is already accepted it is treated as a connected one below.
446 */
447 if (fastopen && fastopen->sk == NULL)
448 break;
449
441 if (!sock_owned_by_user(sk)) { 450 if (!sock_owned_by_user(sk)) {
442 sk->sk_err = err; 451 sk->sk_err = err;
443 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */ 452 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
@@ -463,7 +472,8 @@ out:
463static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst, 472static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
464 struct flowi6 *fl6, 473 struct flowi6 *fl6,
465 struct request_sock *req, 474 struct request_sock *req,
466 u16 queue_mapping) 475 u16 queue_mapping,
476 struct tcp_fastopen_cookie *foc)
467{ 477{
468 struct inet_request_sock *ireq = inet_rsk(req); 478 struct inet_request_sock *ireq = inet_rsk(req);
469 struct ipv6_pinfo *np = inet6_sk(sk); 479 struct ipv6_pinfo *np = inet6_sk(sk);
@@ -474,7 +484,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
474 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL) 484 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
475 goto done; 485 goto done;
476 486
477 skb = tcp_make_synack(sk, dst, req, NULL); 487 skb = tcp_make_synack(sk, dst, req, foc);
478 488
479 if (skb) { 489 if (skb) {
480 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr, 490 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
@@ -498,7 +508,7 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
498 struct flowi6 fl6; 508 struct flowi6 fl6;
499 int res; 509 int res;
500 510
501 res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0); 511 res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0, NULL);
502 if (!res) { 512 if (!res) {
503 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); 513 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
504 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); 514 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
@@ -802,6 +812,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
802 fl6.flowi6_oif = inet6_iif(skb); 812 fl6.flowi6_oif = inet6_iif(skb);
803 else 813 else
804 fl6.flowi6_oif = oif; 814 fl6.flowi6_oif = oif;
815 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
805 fl6.fl6_dport = t1->dest; 816 fl6.fl6_dport = t1->dest;
806 fl6.fl6_sport = t1->source; 817 fl6.fl6_sport = t1->source;
807 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); 818 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
@@ -917,7 +928,12 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
917static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, 928static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
918 struct request_sock *req) 929 struct request_sock *req)
919{ 930{
920 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, 931 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
932 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
933 */
934 tcp_v6_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
935 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
936 tcp_rsk(req)->rcv_nxt,
921 req->rcv_wnd, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if, 937 req->rcv_wnd, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
922 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 938 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
923 0, 0); 939 0, 0);
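The two cases pick different ACK sequence numbers on purpose: a plain listener has sent nothing beyond its SYN-ACK, so snt_isn + 1 is right, while a Fast Open child (a full socket sitting in TCP_SYN_RECV) may already have transmitted data and must advertise its snd_nxt. Condensed:

/* ACK selection as used above (sketch, names taken from the diff) */
u32 ack = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1
				       : tcp_sk(sk)->snd_nxt;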
@@ -969,8 +985,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
969 struct tcp_sock *tp = tcp_sk(sk); 985 struct tcp_sock *tp = tcp_sk(sk);
970 __u32 isn = TCP_SKB_CB(skb)->when; 986 __u32 isn = TCP_SKB_CB(skb)->when;
971 struct dst_entry *dst = NULL; 987 struct dst_entry *dst = NULL;
988 struct tcp_fastopen_cookie foc = { .len = -1 };
989 bool want_cookie = false, fastopen;
972 struct flowi6 fl6; 990 struct flowi6 fl6;
973 bool want_cookie = false; 991 int err;
974 992
975 if (skb->protocol == htons(ETH_P_IP)) 993 if (skb->protocol == htons(ETH_P_IP))
976 return tcp_v4_conn_request(sk, skb); 994 return tcp_v4_conn_request(sk, skb);
@@ -1001,7 +1019,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1001 tcp_clear_options(&tmp_opt); 1019 tcp_clear_options(&tmp_opt);
1002 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); 1020 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1003 tmp_opt.user_mss = tp->rx_opt.user_mss; 1021 tmp_opt.user_mss = tp->rx_opt.user_mss;
1004 tcp_parse_options(skb, &tmp_opt, 0, NULL); 1022 tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
1005 1023
1006 if (want_cookie && !tmp_opt.saw_tstamp) 1024 if (want_cookie && !tmp_opt.saw_tstamp)
1007 tcp_clear_options(&tmp_opt); 1025 tcp_clear_options(&tmp_opt);
@@ -1016,6 +1034,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1016 TCP_ECN_create_request(req, skb, sock_net(sk)); 1034 TCP_ECN_create_request(req, skb, sock_net(sk));
1017 1035
1018 ireq->ir_iif = sk->sk_bound_dev_if; 1036 ireq->ir_iif = sk->sk_bound_dev_if;
1037 ireq->ir_mark = inet_request_mark(sk, skb);
1019 1038
1020 /* So that link locals have meaning */ 1039 /* So that link locals have meaning */
1021 if (!sk->sk_bound_dev_if && 1040 if (!sk->sk_bound_dev_if &&
@@ -1074,19 +1093,27 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1074 isn = tcp_v6_init_sequence(skb); 1093 isn = tcp_v6_init_sequence(skb);
1075 } 1094 }
1076have_isn: 1095have_isn:
1077 tcp_rsk(req)->snt_isn = isn;
1078 1096
1079 if (security_inet_conn_request(sk, skb, req)) 1097 if (security_inet_conn_request(sk, skb, req))
1080 goto drop_and_release; 1098 goto drop_and_release;
1081 1099
1082 if (tcp_v6_send_synack(sk, dst, &fl6, req, 1100 if (!dst && (dst = inet6_csk_route_req(sk, &fl6, req)) == NULL)
1083 skb_get_queue_mapping(skb)) ||
1084 want_cookie)
1085 goto drop_and_free; 1101 goto drop_and_free;
1086 1102
1103 tcp_rsk(req)->snt_isn = isn;
1087 tcp_rsk(req)->snt_synack = tcp_time_stamp; 1104 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1088 tcp_rsk(req)->listener = NULL; 1105 tcp_openreq_init_rwin(req, sk, dst);
1089 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); 1106 fastopen = !want_cookie &&
1107 tcp_try_fastopen(sk, skb, req, &foc, dst);
1108 err = tcp_v6_send_synack(sk, dst, &fl6, req,
1109 skb_get_queue_mapping(skb), &foc);
1110 if (!fastopen) {
1111 if (err || want_cookie)
1112 goto drop_and_free;
1113
1114 tcp_rsk(req)->listener = NULL;
1115 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1116 }
1090 return 0; 1117 return 0;
1091 1118
1092drop_and_release: 1119drop_and_release:
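The reworked tail of tcp_v6_conn_request() now matches the IPv4 flow: resolve the route first (tcp_try_fastopen() needs the dst to judge the cookie), always transmit the SYN-ACK, and only park the request on the listener's SYN queue when Fast Open did not already create a child socket. In outline, with error paths trimmed:

/* Outline of the new ordering (assumption: helpers as named in the diff) */
dst = inet6_csk_route_req(sk, &fl6, req);	/* route before TFO check */
fastopen = !want_cookie && tcp_try_fastopen(sk, skb, req, &foc, dst);
err = tcp_v6_send_synack(sk, dst, &fl6, req, queue_mapping, &foc);
if (!fastopen)		/* normal 3WHS: queue the request for the listener */
	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);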
@@ -1294,25 +1321,6 @@ out:
1294 return NULL; 1321 return NULL;
1295} 1322}
1296 1323
1297static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1298{
1299 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1300 if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
1301 &ipv6_hdr(skb)->daddr, skb->csum)) {
1302 skb->ip_summed = CHECKSUM_UNNECESSARY;
1303 return 0;
1304 }
1305 }
1306
1307 skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
1308 &ipv6_hdr(skb)->saddr,
1309 &ipv6_hdr(skb)->daddr, 0));
1310
1311 if (skb->len <= 76)
1312 return __skb_checksum_complete(skb);
1313 return 0;
1314}
1315
1316/* The socket must have it's spinlock held when we get 1324/* The socket must have it's spinlock held when we get
1317 * here. 1325 * here.
1318 * 1326 *
@@ -1486,7 +1494,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
1486 if (!pskb_may_pull(skb, th->doff*4)) 1494 if (!pskb_may_pull(skb, th->doff*4))
1487 goto discard_it; 1495 goto discard_it;
1488 1496
1489 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb)) 1497 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1490 goto csum_error; 1498 goto csum_error;
1491 1499
1492 th = tcp_hdr(skb); 1500 th = tcp_hdr(skb);
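The hand-rolled tcp_v6_checksum_init() is replaced by the generic skb_checksum_init(), which performs the same CHECKSUM_COMPLETE validation and software fallback but takes the protocol pseudo-header as a callback. The IPv6 callback is essentially:

/* Sketch (assumption: matches ip6_compute_pseudo in net/ipv6) */
static __wsum ip6_pseudo(struct sk_buff *skb, int proto)
{
	return ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					    &ipv6_hdr(skb)->daddr,
					    skb->len, proto, 0));
}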
@@ -1779,6 +1787,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1779 const struct inet_sock *inet = inet_sk(sp); 1787 const struct inet_sock *inet = inet_sk(sp);
1780 const struct tcp_sock *tp = tcp_sk(sp); 1788 const struct tcp_sock *tp = tcp_sk(sp);
1781 const struct inet_connection_sock *icsk = inet_csk(sp); 1789 const struct inet_connection_sock *icsk = inet_csk(sp);
1790 struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
1782 1791
1783 dest = &sp->sk_v6_daddr; 1792 dest = &sp->sk_v6_daddr;
1784 src = &sp->sk_v6_rcv_saddr; 1793 src = &sp->sk_v6_rcv_saddr;
@@ -1821,7 +1830,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1821 jiffies_to_clock_t(icsk->icsk_ack.ato), 1830 jiffies_to_clock_t(icsk->icsk_ack.ato),
1822 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, 1831 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1823 tp->snd_cwnd, 1832 tp->snd_cwnd,
1824 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh 1833 sp->sk_state == TCP_LISTEN ?
1834 (fastopenq ? fastopenq->max_qlen : 0) :
1835 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1825 ); 1836 );
1826} 1837}
1827 1838
@@ -1981,7 +1992,6 @@ static struct inet_protosw tcpv6_protosw = {
1981 .protocol = IPPROTO_TCP, 1992 .protocol = IPPROTO_TCP,
1982 .prot = &tcpv6_prot, 1993 .prot = &tcpv6_prot,
1983 .ops = &inet6_stream_ops, 1994 .ops = &inet6_stream_ops,
1984 .no_check = 0,
1985 .flags = INET_PROTOSW_PERMANENT | 1995 .flags = INET_PROTOSW_PERMANENT |
1986 INET_PROTOSW_ICSK, 1996 INET_PROTOSW_ICSK,
1987}; 1997};
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 20b63d2ab70f..95c834799288 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -634,6 +634,10 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
634 if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) { 634 if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
635 int ret; 635 int ret;
636 636
637 /* Verify checksum before giving to encap */
638 if (udp_lib_checksum_complete(skb))
639 goto csum_error;
640
637 ret = encap_rcv(sk, skb); 641 ret = encap_rcv(sk, skb);
638 if (ret <= 0) { 642 if (ret <= 0) {
639 UDP_INC_STATS_BH(sock_net(sk), 643 UDP_INC_STATS_BH(sock_net(sk),
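Encapsulation handlers previously had to verify UDP checksums themselves; this series deletes L2TP's private verifier further down, so the UDP layer must fully validate the datagram before handing it to encap_rcv(). udp_lib_checksum_complete() is the usual two-step check, conceptually:

/* Conceptual sketch: a no-op when hardware already validated the packet,
 * a software checksum pass otherwise. */
static bool udp_csum_ok_sketch(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) || !__skb_checksum_complete(skb);
}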
@@ -701,35 +705,34 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
701 int dif) 705 int dif)
702{ 706{
703 struct hlist_nulls_node *node; 707 struct hlist_nulls_node *node;
704 struct sock *s = sk;
705 unsigned short num = ntohs(loc_port); 708 unsigned short num = ntohs(loc_port);
706 709
707 sk_nulls_for_each_from(s, node) { 710 sk_nulls_for_each_from(sk, node) {
708 struct inet_sock *inet = inet_sk(s); 711 struct inet_sock *inet = inet_sk(sk);
709 712
710 if (!net_eq(sock_net(s), net)) 713 if (!net_eq(sock_net(sk), net))
711 continue; 714 continue;
712 715
713 if (udp_sk(s)->udp_port_hash == num && 716 if (udp_sk(sk)->udp_port_hash == num &&
714 s->sk_family == PF_INET6) { 717 sk->sk_family == PF_INET6) {
715 if (inet->inet_dport) { 718 if (inet->inet_dport) {
716 if (inet->inet_dport != rmt_port) 719 if (inet->inet_dport != rmt_port)
717 continue; 720 continue;
718 } 721 }
719 if (!ipv6_addr_any(&s->sk_v6_daddr) && 722 if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
720 !ipv6_addr_equal(&s->sk_v6_daddr, rmt_addr)) 723 !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
721 continue; 724 continue;
722 725
723 if (s->sk_bound_dev_if && s->sk_bound_dev_if != dif) 726 if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
724 continue; 727 continue;
725 728
726 if (!ipv6_addr_any(&s->sk_v6_rcv_saddr)) { 729 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
727 if (!ipv6_addr_equal(&s->sk_v6_rcv_saddr, loc_addr)) 730 if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
728 continue; 731 continue;
729 } 732 }
730 if (!inet6_mc_check(s, loc_addr, rmt_addr)) 733 if (!inet6_mc_check(sk, loc_addr, rmt_addr))
731 continue; 734 continue;
732 return s; 735 return sk;
733 } 736 }
734 } 737 }
735 return NULL; 738 return NULL;
@@ -760,6 +763,17 @@ static void flush_stack(struct sock **stack, unsigned int count,
760 if (unlikely(skb1)) 763 if (unlikely(skb1))
761 kfree_skb(skb1); 764 kfree_skb(skb1);
762} 765}
766
767static void udp6_csum_zero_error(struct sk_buff *skb)
768{
769 /* RFC 2460 section 8.1 says that we SHOULD log
770 * this error. Well, it is reasonable.
771 */
772 LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
773 &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
774 &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
775}
776
763/* 777/*
764 * Note: called only from the BH handler context, 778 * Note: called only from the BH handler context,
765 * so we don't need to lock the hashes. 779 * so we don't need to lock the hashes.
@@ -779,7 +793,12 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
779 dif = inet6_iif(skb); 793 dif = inet6_iif(skb);
780 sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); 794 sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
781 while (sk) { 795 while (sk) {
 782 stack[count++] = sk; 796 /* If the checksum is zero and no_check is not on for
 797 * the socket, skip it.
798 */
799 if (uh->check || udp_sk(sk)->no_check6_rx)
800 stack[count++] = sk;
801
783 sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr, 802 sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr,
784 uh->source, saddr, dif); 803 uh->source, saddr, dif);
785 if (unlikely(count == ARRAY_SIZE(stack))) { 804 if (unlikely(count == ARRAY_SIZE(stack))) {
@@ -867,6 +886,12 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
867 if (sk != NULL) { 886 if (sk != NULL) {
868 int ret; 887 int ret;
869 888
889 if (!uh->check && !udp_sk(sk)->no_check6_rx) {
890 sock_put(sk);
891 udp6_csum_zero_error(skb);
892 goto csum_error;
893 }
894
870 ret = udpv6_queue_rcv_skb(sk, skb); 895 ret = udpv6_queue_rcv_skb(sk, skb);
871 sock_put(sk); 896 sock_put(sk);
872 897
@@ -879,6 +904,11 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
879 return 0; 904 return 0;
880 } 905 }
881 906
907 if (!uh->check) {
908 udp6_csum_zero_error(skb);
909 goto csum_error;
910 }
911
882 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) 912 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
883 goto discard; 913 goto discard;
884 914
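Together these hunks implement the RFC 6935/6936 rule for IPv6: a zero UDP checksum is an error unless the receiving socket has explicitly opted in via no_check6_rx, and the opt-in is honored per socket on both the unicast and multicast delivery paths. The acceptance test, distilled:

/* Sketch of the zero-checksum acceptance rule added above */
static bool udp6_zero_csum_ok(const struct udphdr *uh, struct sock *sk)
{
	if (uh->check)
		return true;			/* checksum present */
	return sk && udp_sk(sk)->no_check6_rx;	/* zero: opt-in only */
}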
@@ -1006,7 +1036,10 @@ static int udp_v6_push_pending_frames(struct sock *sk)
1006 1036
1007 if (is_udplite) 1037 if (is_udplite)
1008 csum = udplite_csum_outgoing(sk, skb); 1038 csum = udplite_csum_outgoing(sk, skb);
1009 else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ 1039 else if (up->no_check6_tx) { /* UDP csum disabled */
1040 skb->ip_summed = CHECKSUM_NONE;
1041 goto send;
1042 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
1010 udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, 1043 udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr,
1011 up->len); 1044 up->len);
1012 goto send; 1045 goto send;
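On transmit, the new no_check6_tx bit short-circuits checksum generation entirely, which RFC 6935 permits for tunneling use cases. Applications opt in per socket; an illustrative userspace call, assuming the UDP_NO_CHECK6_TX/UDP_NO_CHECK6_RX socket options this series adds to the UAPI:

#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/udp.h>	/* UDP_NO_CHECK6_TX / UDP_NO_CHECK6_RX */

/* Opt a v6 UDP socket into sending and accepting zero checksums. */
static int udp6_allow_zero_csum(int fd)
{
	int one = 1;

	if (setsockopt(fd, IPPROTO_UDP, UDP_NO_CHECK6_TX, &one, sizeof(one)) ||
	    setsockopt(fd, IPPROTO_UDP, UDP_NO_CHECK6_RX, &one, sizeof(one)))
		return -1;
	return 0;
}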
@@ -1232,14 +1265,8 @@ do_udp_sendmsg:
1232 goto out; 1265 goto out;
1233 } 1266 }
1234 1267
1235 if (hlimit < 0) { 1268 if (hlimit < 0)
1236 if (ipv6_addr_is_multicast(&fl6.daddr)) 1269 hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
1237 hlimit = np->mcast_hops;
1238 else
1239 hlimit = np->hop_limit;
1240 if (hlimit < 0)
1241 hlimit = ip6_dst_hoplimit(dst);
1242 }
1243 1270
1244 if (tclass < 0) 1271 if (tclass < 0)
1245 tclass = np->tclass; 1272 tclass = np->tclass;
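The repeated hop-limit selection collapses into ip6_sk_dst_hoplimit(); the same cleanup is applied to l2tp_ip6 below. Its behavior, assuming it matches the open-coded logic it replaces:

/* Sketch: socket preference first (multicast vs unicast), then route */
static int sk_dst_hoplimit_sketch(struct ipv6_pinfo *np, struct flowi6 *fl6,
				  struct dst_entry *dst)
{
	int hlimit = ipv6_addr_is_multicast(&fl6->daddr) ? np->mcast_hops
							 : np->hop_limit;
	return hlimit < 0 ? ip6_dst_hoplimit(dst) : hlimit;
}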
@@ -1479,7 +1506,6 @@ static struct inet_protosw udpv6_protosw = {
1479 .protocol = IPPROTO_UDP, 1506 .protocol = IPPROTO_UDP,
1480 .prot = &udpv6_prot, 1507 .prot = &udpv6_prot,
1481 .ops = &inet6_dgram_ops, 1508 .ops = &inet6_dgram_ops,
1482 .no_check = UDP_CSUM_DEFAULT,
1483 .flags = INET_PROTOSW_PERMANENT, 1509 .flags = INET_PROTOSW_PERMANENT,
1484}; 1510};
1485 1511
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index b261ee8b83fc..0ae3d98f83e0 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -63,7 +63,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
63 if (unlikely(type & ~(SKB_GSO_UDP | 63 if (unlikely(type & ~(SKB_GSO_UDP |
64 SKB_GSO_DODGY | 64 SKB_GSO_DODGY |
65 SKB_GSO_UDP_TUNNEL | 65 SKB_GSO_UDP_TUNNEL |
66 SKB_GSO_UDP_TUNNEL_CSUM |
66 SKB_GSO_GRE | 67 SKB_GSO_GRE |
68 SKB_GSO_GRE_CSUM |
67 SKB_GSO_IPIP | 69 SKB_GSO_IPIP |
68 SKB_GSO_SIT | 70 SKB_GSO_SIT |
69 SKB_GSO_MPLS) || 71 SKB_GSO_MPLS) ||
@@ -76,7 +78,8 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
76 goto out; 78 goto out;
77 } 79 }
78 80
79 if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) 81 if (skb->encapsulation && skb_shinfo(skb)->gso_type &
82 (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))
80 segs = skb_udp_tunnel_segment(skb, features); 83 segs = skb_udp_tunnel_segment(skb, features);
81 else { 84 else {
82 /* Do software UFO. Complete and fill in the UDP checksum as HW cannot 85 /* Do software UFO. Complete and fill in the UDP checksum as HW cannot
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index dfcc4be46898..9cf097e206e9 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -64,7 +64,6 @@ static struct inet_protosw udplite6_protosw = {
64 .protocol = IPPROTO_UDPLITE, 64 .protocol = IPPROTO_UDPLITE,
65 .prot = &udplitev6_prot, 65 .prot = &udplitev6_prot,
66 .ops = &inet6_dgram_ops, 66 .ops = &inet6_dgram_ops,
67 .no_check = 0,
68 .flags = INET_PROTOSW_PERMANENT, 67 .flags = INET_PROTOSW_PERMANENT,
69}; 68};
70 69
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index b930d080c66f..433672d07d0b 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -78,7 +78,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
78 if (mtu < IPV6_MIN_MTU) 78 if (mtu < IPV6_MIN_MTU)
79 mtu = IPV6_MIN_MTU; 79 mtu = IPV6_MIN_MTU;
80 80
81 if (!skb->local_df && skb->len > mtu) { 81 if (!skb->ignore_df && skb->len > mtu) {
82 skb->dev = dst->dev; 82 skb->dev = dst->dev;
83 83
84 if (xfrm6_local_dontfrag(skb)) 84 if (xfrm6_local_dontfrag(skb))
@@ -114,7 +114,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
114 if (err) 114 if (err)
115 return err; 115 return err;
116 116
117 skb->local_df = 1; 117 skb->ignore_df = 1;
118 118
119 return x->outer_mode->output2(x, skb); 119 return x->outer_mode->output2(x, skb);
120} 120}
@@ -153,7 +153,7 @@ static int __xfrm6_output(struct sk_buff *skb)
153 if (skb->len > mtu && xfrm6_local_dontfrag(skb)) { 153 if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
154 xfrm6_local_rxpmtu(skb, mtu); 154 xfrm6_local_rxpmtu(skb, mtu);
155 return -EMSGSIZE; 155 return -EMSGSIZE;
156 } else if (!skb->local_df && skb->len > mtu && skb->sk) { 156 } else if (!skb->ignore_df && skb->len > mtu && skb->sk) {
157 xfrm_local_error(skb, mtu); 157 xfrm_local_error(skb, mtu);
158 return -EMSGSIZE; 158 return -EMSGSIZE;
159 } 159 }
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 41e4e93cb3aa..91729b807c7d 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1353,7 +1353,7 @@ static int ipx_create(struct net *net, struct socket *sock, int protocol,
1353 1353
1354 sk_refcnt_debug_inc(sk); 1354 sk_refcnt_debug_inc(sk);
1355 sock_init_data(sock, sk); 1355 sock_init_data(sock, sk);
1356 sk->sk_no_check = 1; /* Checksum off by default */ 1356 sk->sk_no_check_tx = 1; /* Checksum off by default */
1357 sock->ops = &ipx_dgram_ops; 1357 sock->ops = &ipx_dgram_ops;
1358 rc = 0; 1358 rc = 0;
1359out: 1359out:
diff --git a/net/ipx/ipx_route.c b/net/ipx/ipx_route.c
index c1f03185c5e1..67e7ad3d46b1 100644
--- a/net/ipx/ipx_route.c
+++ b/net/ipx/ipx_route.c
@@ -236,7 +236,8 @@ int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
236 } 236 }
237 237
238 /* Apply checksum. Not allowed on 802.3 links. */ 238 /* Apply checksum. Not allowed on 802.3 links. */
239 if (sk->sk_no_check || intrfc->if_dlink_type == htons(IPX_FRAME_8023)) 239 if (sk->sk_no_check_tx ||
240 intrfc->if_dlink_type == htons(IPX_FRAME_8023))
240 ipx->ipx_checksum = htons(0xFFFF); 241 ipx->ipx_checksum = htons(0xFFFF);
241 else 242 else
242 ipx->ipx_checksum = ipx_cksum(ipx, len + sizeof(struct ipxhdr)); 243 ipx->ipx_checksum = ipx_cksum(ipx, len + sizeof(struct ipxhdr));
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 8c9d7302c846..7a95fa4a3de1 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -682,6 +682,18 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
682 return NULL; 682 return NULL;
683} 683}
684 684
685static void __iucv_auto_name(struct iucv_sock *iucv)
686{
687 char name[12];
688
689 sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
690 while (__iucv_get_sock_by_name(name)) {
691 sprintf(name, "%08x",
692 atomic_inc_return(&iucv_sk_list.autobind_name));
693 }
694 memcpy(iucv->src_name, name, 8);
695}
696
685/* Bind an unbound socket */ 697/* Bind an unbound socket */
686static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr, 698static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
687 int addr_len) 699 int addr_len)
@@ -724,8 +736,12 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
724 rcu_read_lock(); 736 rcu_read_lock();
725 for_each_netdev_rcu(&init_net, dev) { 737 for_each_netdev_rcu(&init_net, dev) {
726 if (!memcmp(dev->perm_addr, uid, 8)) { 738 if (!memcmp(dev->perm_addr, uid, 8)) {
727 memcpy(iucv->src_name, sa->siucv_name, 8);
728 memcpy(iucv->src_user_id, sa->siucv_user_id, 8); 739 memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
 740 /* Check for uninitialized siucv_name */
741 if (strncmp(sa->siucv_name, " ", 8) == 0)
742 __iucv_auto_name(iucv);
743 else
744 memcpy(iucv->src_name, sa->siucv_name, 8);
729 sk->sk_bound_dev_if = dev->ifindex; 745 sk->sk_bound_dev_if = dev->ifindex;
730 iucv->hs_dev = dev; 746 iucv->hs_dev = dev;
731 dev_hold(dev); 747 dev_hold(dev);
@@ -763,7 +779,6 @@ done:
763static int iucv_sock_autobind(struct sock *sk) 779static int iucv_sock_autobind(struct sock *sk)
764{ 780{
765 struct iucv_sock *iucv = iucv_sk(sk); 781 struct iucv_sock *iucv = iucv_sk(sk);
766 char name[12];
767 int err = 0; 782 int err = 0;
768 783
769 if (unlikely(!pr_iucv)) 784 if (unlikely(!pr_iucv))
@@ -772,17 +787,9 @@ static int iucv_sock_autobind(struct sock *sk)
772 memcpy(iucv->src_user_id, iucv_userid, 8); 787 memcpy(iucv->src_user_id, iucv_userid, 8);
773 788
774 write_lock_bh(&iucv_sk_list.lock); 789 write_lock_bh(&iucv_sk_list.lock);
775 790 __iucv_auto_name(iucv);
776 sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
777 while (__iucv_get_sock_by_name(name)) {
778 sprintf(name, "%08x",
779 atomic_inc_return(&iucv_sk_list.autobind_name));
780 }
781
782 write_unlock_bh(&iucv_sk_list.lock); 791 write_unlock_bh(&iucv_sk_list.lock);
783 792
784 memcpy(&iucv->src_name, name, 8);
785
786 if (!iucv->msglimit) 793 if (!iucv->msglimit)
787 iucv->msglimit = IUCV_QUEUELEN_DEFAULT; 794 iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
788 795
@@ -1936,11 +1943,10 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
1936 sk_acceptq_is_full(sk) || 1943 sk_acceptq_is_full(sk) ||
1937 !nsk) { 1944 !nsk) {
1938 /* error on server socket - connection refused */ 1945 /* error on server socket - connection refused */
1939 if (nsk)
1940 sk_free(nsk);
1941 afiucv_swap_src_dest(skb); 1946 afiucv_swap_src_dest(skb);
1942 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN; 1947 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1943 err = dev_queue_xmit(skb); 1948 err = dev_queue_xmit(skb);
1949 iucv_sock_kill(nsk);
1944 bh_unlock_sock(sk); 1950 bh_unlock_sock(sk);
1945 goto out; 1951 goto out;
1946 } 1952 }
diff --git a/net/key/af_key.c b/net/key/af_key.c
index f3c83073afc4..ba2a2f95911c 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1476,9 +1476,7 @@ static int pfkey_add(struct sock *sk, struct sk_buff *skb, const struct sadb_msg
1476 else 1476 else
1477 err = xfrm_state_update(x); 1477 err = xfrm_state_update(x);
1478 1478
1479 xfrm_audit_state_add(x, err ? 0 : 1, 1479 xfrm_audit_state_add(x, err ? 0 : 1, true);
1480 audit_get_loginuid(current),
1481 audit_get_sessionid(current), 0);
1482 1480
1483 if (err < 0) { 1481 if (err < 0) {
1484 x->km.state = XFRM_STATE_DEAD; 1482 x->km.state = XFRM_STATE_DEAD;
@@ -1532,9 +1530,7 @@ static int pfkey_delete(struct sock *sk, struct sk_buff *skb, const struct sadb_
1532 c.event = XFRM_MSG_DELSA; 1530 c.event = XFRM_MSG_DELSA;
1533 km_state_notify(x, &c); 1531 km_state_notify(x, &c);
1534out: 1532out:
1535 xfrm_audit_state_delete(x, err ? 0 : 1, 1533 xfrm_audit_state_delete(x, err ? 0 : 1, true);
1536 audit_get_loginuid(current),
1537 audit_get_sessionid(current), 0);
1538 xfrm_state_put(x); 1534 xfrm_state_put(x);
1539 1535
1540 return err; 1536 return err;
@@ -1726,17 +1722,13 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_m
1726 struct net *net = sock_net(sk); 1722 struct net *net = sock_net(sk);
1727 unsigned int proto; 1723 unsigned int proto;
1728 struct km_event c; 1724 struct km_event c;
1729 struct xfrm_audit audit_info;
1730 int err, err2; 1725 int err, err2;
1731 1726
1732 proto = pfkey_satype2proto(hdr->sadb_msg_satype); 1727 proto = pfkey_satype2proto(hdr->sadb_msg_satype);
1733 if (proto == 0) 1728 if (proto == 0)
1734 return -EINVAL; 1729 return -EINVAL;
1735 1730
1736 audit_info.loginuid = audit_get_loginuid(current); 1731 err = xfrm_state_flush(net, proto, true);
1737 audit_info.sessionid = audit_get_sessionid(current);
1738 audit_info.secid = 0;
1739 err = xfrm_state_flush(net, proto, &audit_info);
1740 err2 = unicast_flush_resp(sk, hdr); 1732 err2 = unicast_flush_resp(sk, hdr);
1741 if (err || err2) { 1733 if (err || err2) {
1742 if (err == -ESRCH) /* empty table - go quietly */ 1734 if (err == -ESRCH) /* empty table - go quietly */
@@ -2288,9 +2280,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_
2288 err = xfrm_policy_insert(pol->sadb_x_policy_dir-1, xp, 2280 err = xfrm_policy_insert(pol->sadb_x_policy_dir-1, xp,
2289 hdr->sadb_msg_type != SADB_X_SPDUPDATE); 2281 hdr->sadb_msg_type != SADB_X_SPDUPDATE);
2290 2282
2291 xfrm_audit_policy_add(xp, err ? 0 : 1, 2283 xfrm_audit_policy_add(xp, err ? 0 : 1, true);
2292 audit_get_loginuid(current),
2293 audit_get_sessionid(current), 0);
2294 2284
2295 if (err) 2285 if (err)
2296 goto out; 2286 goto out;
@@ -2372,9 +2362,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
2372 if (xp == NULL) 2362 if (xp == NULL)
2373 return -ENOENT; 2363 return -ENOENT;
2374 2364
2375 xfrm_audit_policy_delete(xp, err ? 0 : 1, 2365 xfrm_audit_policy_delete(xp, err ? 0 : 1, true);
2376 audit_get_loginuid(current),
2377 audit_get_sessionid(current), 0);
2378 2366
2379 if (err) 2367 if (err)
2380 goto out; 2368 goto out;
@@ -2553,7 +2541,7 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
2553 sel.sport_mask = htons(0xffff); 2541 sel.sport_mask = htons(0xffff);
2554 2542
2555 /* set destination address info of selector */ 2543 /* set destination address info of selector */
2556 sa = ext_hdrs[SADB_EXT_ADDRESS_DST - 1], 2544 sa = ext_hdrs[SADB_EXT_ADDRESS_DST - 1];
2557 pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr); 2545 pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr);
2558 sel.prefixlen_d = sa->sadb_address_prefixlen; 2546 sel.prefixlen_d = sa->sadb_address_prefixlen;
2559 sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); 2547 sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
@@ -2622,9 +2610,7 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_
2622 return -ENOENT; 2610 return -ENOENT;
2623 2611
2624 if (delete) { 2612 if (delete) {
2625 xfrm_audit_policy_delete(xp, err ? 0 : 1, 2613 xfrm_audit_policy_delete(xp, err ? 0 : 1, true);
2626 audit_get_loginuid(current),
2627 audit_get_sessionid(current), 0);
2628 2614
2629 if (err) 2615 if (err)
2630 goto out; 2616 goto out;
@@ -2733,13 +2719,9 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sad
2733{ 2719{
2734 struct net *net = sock_net(sk); 2720 struct net *net = sock_net(sk);
2735 struct km_event c; 2721 struct km_event c;
2736 struct xfrm_audit audit_info;
2737 int err, err2; 2722 int err, err2;
2738 2723
2739 audit_info.loginuid = audit_get_loginuid(current); 2724 err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, true);
2740 audit_info.sessionid = audit_get_sessionid(current);
2741 audit_info.secid = 0;
2742 err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
2743 err2 = unicast_flush_resp(sk, hdr); 2725 err2 = unicast_flush_resp(sk, hdr);
2744 if (err || err2) { 2726 if (err || err2) {
2745 if (err == -ESRCH) /* empty table - old silent behavior */ 2727 if (err == -ESRCH) /* empty table - old silent behavior */
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index a4e37d7158dc..bea259043205 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -495,52 +495,6 @@ out:
495 spin_unlock_bh(&session->reorder_q.lock); 495 spin_unlock_bh(&session->reorder_q.lock);
496} 496}
497 497
498static inline int l2tp_verify_udp_checksum(struct sock *sk,
499 struct sk_buff *skb)
500{
501 struct udphdr *uh = udp_hdr(skb);
502 u16 ulen = ntohs(uh->len);
503 __wsum psum;
504
505 if (sk->sk_no_check || skb_csum_unnecessary(skb))
506 return 0;
507
508#if IS_ENABLED(CONFIG_IPV6)
509 if (sk->sk_family == PF_INET6 && !l2tp_tunnel(sk)->v4mapped) {
510 if (!uh->check) {
511 LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n");
512 return 1;
513 }
514 if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
515 !csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
516 &ipv6_hdr(skb)->daddr, ulen,
517 IPPROTO_UDP, skb->csum)) {
518 skb->ip_summed = CHECKSUM_UNNECESSARY;
519 return 0;
520 }
521 skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
522 &ipv6_hdr(skb)->daddr,
523 skb->len, IPPROTO_UDP,
524 0));
525 } else
526#endif
527 {
528 struct inet_sock *inet;
529 if (!uh->check)
530 return 0;
531 inet = inet_sk(sk);
532 psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr,
533 ulen, IPPROTO_UDP, 0);
534
535 if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
536 !csum_fold(csum_add(psum, skb->csum)))
537 return 0;
538 skb->csum = psum;
539 }
540
541 return __skb_checksum_complete(skb);
542}
543
544static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr) 498static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
545{ 499{
546 u32 nws; 500 u32 nws;
@@ -895,8 +849,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
895 u16 version; 849 u16 version;
896 int length; 850 int length;
897 851
 898 if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb)) 852 /* UDP has verified checksum */
899 goto discard_bad_csum;
900 853
901 /* UDP always verifies the packet length. */ 854 /* UDP always verifies the packet length. */
902 __skb_pull(skb, sizeof(struct udphdr)); 855 __skb_pull(skb, sizeof(struct udphdr));
@@ -979,14 +932,6 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
979 932
980 return 0; 933 return 0;
981 934
982discard_bad_csum:
983 LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
984 UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
985 atomic_long_inc(&tunnel->stats.rx_errors);
986 kfree_skb(skb);
987
988 return 0;
989
990error: 935error:
991 /* Put UDP header back */ 936 /* Put UDP header back */
992 __skb_push(skb, sizeof(struct udphdr)); 937 __skb_push(skb, sizeof(struct udphdr));
@@ -1128,7 +1073,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
1128 } 1073 }
1129 1074
1130 /* Queue the packet to IP for output */ 1075 /* Queue the packet to IP for output */
1131 skb->local_df = 1; 1076 skb->ignore_df = 1;
1132#if IS_ENABLED(CONFIG_IPV6) 1077#if IS_ENABLED(CONFIG_IPV6)
1133 if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped) 1078 if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped)
1134 error = inet6_csk_xmit(tunnel->sock, skb, NULL); 1079 error = inet6_csk_xmit(tunnel->sock, skb, NULL);
@@ -1150,31 +1095,6 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
1150 return 0; 1095 return 0;
1151} 1096}
1152 1097
1153#if IS_ENABLED(CONFIG_IPV6)
1154static void l2tp_xmit_ipv6_csum(struct sock *sk, struct sk_buff *skb,
1155 int udp_len)
1156{
1157 struct ipv6_pinfo *np = inet6_sk(sk);
1158 struct udphdr *uh = udp_hdr(skb);
1159
1160 if (!skb_dst(skb) || !skb_dst(skb)->dev ||
1161 !(skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) {
1162 __wsum csum = skb_checksum(skb, 0, udp_len, 0);
1163 skb->ip_summed = CHECKSUM_UNNECESSARY;
1164 uh->check = csum_ipv6_magic(&np->saddr, &sk->sk_v6_daddr, udp_len,
1165 IPPROTO_UDP, csum);
1166 if (uh->check == 0)
1167 uh->check = CSUM_MANGLED_0;
1168 } else {
1169 skb->ip_summed = CHECKSUM_PARTIAL;
1170 skb->csum_start = skb_transport_header(skb) - skb->head;
1171 skb->csum_offset = offsetof(struct udphdr, check);
1172 uh->check = ~csum_ipv6_magic(&np->saddr, &sk->sk_v6_daddr,
1173 udp_len, IPPROTO_UDP, 0);
1174 }
1175}
1176#endif
1177
1178/* If caller requires the skb to have a ppp header, the header must be 1098/* If caller requires the skb to have a ppp header, the header must be
1179 * inserted in the skb data before calling this function. 1099 * inserted in the skb data before calling this function.
1180 */ 1100 */
@@ -1186,7 +1106,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1186 struct flowi *fl; 1106 struct flowi *fl;
1187 struct udphdr *uh; 1107 struct udphdr *uh;
1188 struct inet_sock *inet; 1108 struct inet_sock *inet;
1189 __wsum csum;
1190 int headroom; 1109 int headroom;
1191 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; 1110 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
1192 int udp_len; 1111 int udp_len;
@@ -1235,33 +1154,17 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1235 uh->dest = inet->inet_dport; 1154 uh->dest = inet->inet_dport;
1236 udp_len = uhlen + hdr_len + data_len; 1155 udp_len = uhlen + hdr_len + data_len;
1237 uh->len = htons(udp_len); 1156 uh->len = htons(udp_len);
1238 uh->check = 0;
1239 1157
1240 /* Calculate UDP checksum if configured to do so */ 1158 /* Calculate UDP checksum if configured to do so */
1241#if IS_ENABLED(CONFIG_IPV6) 1159#if IS_ENABLED(CONFIG_IPV6)
1242 if (sk->sk_family == PF_INET6 && !tunnel->v4mapped) 1160 if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
1243 l2tp_xmit_ipv6_csum(sk, skb, udp_len); 1161 udp6_set_csum(udp_get_no_check6_tx(sk),
1162 skb, &inet6_sk(sk)->saddr,
1163 &sk->sk_v6_daddr, udp_len);
1244 else 1164 else
1245#endif 1165#endif
1246 if (sk->sk_no_check == UDP_CSUM_NOXMIT) 1166 udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
1247 skb->ip_summed = CHECKSUM_NONE; 1167 inet->inet_daddr, udp_len);
1248 else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
1249 (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
1250 skb->ip_summed = CHECKSUM_COMPLETE;
1251 csum = skb_checksum(skb, 0, udp_len, 0);
1252 uh->check = csum_tcpudp_magic(inet->inet_saddr,
1253 inet->inet_daddr,
1254 udp_len, IPPROTO_UDP, csum);
1255 if (uh->check == 0)
1256 uh->check = CSUM_MANGLED_0;
1257 } else {
1258 skb->ip_summed = CHECKSUM_PARTIAL;
1259 skb->csum_start = skb_transport_header(skb) - skb->head;
1260 skb->csum_offset = offsetof(struct udphdr, check);
1261 uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
1262 inet->inet_daddr,
1263 udp_len, IPPROTO_UDP, 0);
1264 }
1265 break; 1168 break;
1266 1169
1267 case L2TP_ENCAPTYPE_IP: 1170 case L2TP_ENCAPTYPE_IP:
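All of L2TP's open-coded transmit checksum handling collapses into the shared udp_set_csum()/udp6_set_csum() helpers. For the no-offload case the helpers do what the deleted block did; roughly, for IPv4:

/* Sketch of the software branch the helper covers (matches the removed
 * l2tp code): full checksum, with 0 mangled to 0xffff on the wire. */
static void udp_sw_csum_sketch(struct sk_buff *skb, __be32 saddr,
			       __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);
	__wsum csum;

	uh->check = 0;
	csum = skb_checksum(skb, 0, len, 0);
	uh->check = csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;
}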
@@ -1490,6 +1393,11 @@ static int l2tp_tunnel_sock_create(struct net *net,
1490 sizeof(udp6_addr), 0); 1393 sizeof(udp6_addr), 0);
1491 if (err < 0) 1394 if (err < 0)
1492 goto out; 1395 goto out;
1396
1397 if (cfg->udp6_zero_tx_checksums)
1398 udp_set_no_check6_tx(sock->sk, true);
1399 if (cfg->udp6_zero_rx_checksums)
1400 udp_set_no_check6_rx(sock->sk, true);
1493 } else 1401 } else
1494#endif 1402#endif
1495 { 1403 {
@@ -1518,7 +1426,7 @@ static int l2tp_tunnel_sock_create(struct net *net,
1518 } 1426 }
1519 1427
1520 if (!cfg->use_udp_checksums) 1428 if (!cfg->use_udp_checksums)
1521 sock->sk->sk_no_check = UDP_CSUM_NOXMIT; 1429 sock->sk->sk_no_check_tx = 1;
1522 1430
1523 break; 1431 break;
1524 1432
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 3f93ccd6ba97..68aa9ffd4ae4 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -162,7 +162,9 @@ struct l2tp_tunnel_cfg {
162#endif 162#endif
163 u16 local_udp_port; 163 u16 local_udp_port;
164 u16 peer_udp_port; 164 u16 peer_udp_port;
165 unsigned int use_udp_checksums:1; 165 unsigned int use_udp_checksums:1,
166 udp6_zero_tx_checksums:1,
167 udp6_zero_rx_checksums:1;
166}; 168};
167 169
168struct l2tp_tunnel { 170struct l2tp_tunnel {
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 3397fe6897c0..369a9822488c 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -606,7 +606,6 @@ static struct inet_protosw l2tp_ip_protosw = {
606 .protocol = IPPROTO_L2TP, 606 .protocol = IPPROTO_L2TP,
607 .prot = &l2tp_ip_prot, 607 .prot = &l2tp_ip_prot,
608 .ops = &l2tp_ip_ops, 608 .ops = &l2tp_ip_ops,
609 .no_check = 0,
610}; 609};
611 610
612static struct net_protocol l2tp_ip_protocol __read_mostly = { 611static struct net_protocol l2tp_ip_protocol __read_mostly = {
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 7704ea9502fd..f3f98a156cee 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -605,14 +605,8 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
605 goto out; 605 goto out;
606 } 606 }
607 607
608 if (hlimit < 0) { 608 if (hlimit < 0)
609 if (ipv6_addr_is_multicast(&fl6.daddr)) 609 hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
610 hlimit = np->mcast_hops;
611 else
612 hlimit = np->hop_limit;
613 if (hlimit < 0)
614 hlimit = ip6_dst_hoplimit(dst);
615 }
616 610
617 if (tclass < 0) 611 if (tclass < 0)
618 tclass = np->tclass; 612 tclass = np->tclass;
@@ -761,7 +755,6 @@ static struct inet_protosw l2tp_ip6_protosw = {
761 .protocol = IPPROTO_L2TP, 755 .protocol = IPPROTO_L2TP,
762 .prot = &l2tp_ip6_prot, 756 .prot = &l2tp_ip6_prot,
763 .ops = &l2tp_ip6_ops, 757 .ops = &l2tp_ip6_ops,
764 .no_check = 0,
765}; 758};
766 759
767static struct inet6_protocol l2tp_ip6_protocol __read_mostly = { 760static struct inet6_protocol l2tp_ip6_protocol __read_mostly = {
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index bd7387adea9e..0ac907adb2f4 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -161,6 +161,13 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info
161 cfg.peer_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_DPORT]); 161 cfg.peer_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_DPORT]);
162 if (info->attrs[L2TP_ATTR_UDP_CSUM]) 162 if (info->attrs[L2TP_ATTR_UDP_CSUM])
163 cfg.use_udp_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_CSUM]); 163 cfg.use_udp_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_CSUM]);
164
165#if IS_ENABLED(CONFIG_IPV6)
166 if (info->attrs[L2TP_ATTR_UDP_ZERO_CSUM6_TX])
167 cfg.udp6_zero_tx_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_ZERO_CSUM6_TX]);
168 if (info->attrs[L2TP_ATTR_UDP_ZERO_CSUM6_RX])
169 cfg.udp6_zero_rx_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_ZERO_CSUM6_RX]);
170#endif
164 } 171 }
165 172
166 if (info->attrs[L2TP_ATTR_DEBUG]) 173 if (info->attrs[L2TP_ATTR_DEBUG])
@@ -297,8 +304,7 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
297 case L2TP_ENCAPTYPE_UDP: 304 case L2TP_ENCAPTYPE_UDP:
298 if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) || 305 if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) ||
299 nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)) || 306 nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)) ||
300 nla_put_u8(skb, L2TP_ATTR_UDP_CSUM, 307 nla_put_u8(skb, L2TP_ATTR_UDP_CSUM, !sk->sk_no_check_tx))
301 (sk->sk_no_check != UDP_CSUM_NOXMIT)))
302 goto nla_put_failure; 308 goto nla_put_failure;
303 /* NOBREAK */ 309 /* NOBREAK */
304 case L2TP_ENCAPTYPE_IP: 310 case L2TP_ENCAPTYPE_IP:
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 9d7d840aac6d..1e46ffa69167 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -25,7 +25,8 @@ mac80211-y := \
25 wme.o \ 25 wme.o \
26 event.o \ 26 event.o \
27 chan.o \ 27 chan.o \
28 trace.o mlme.o 28 trace.o mlme.o \
29 tdls.o
29 30
30mac80211-$(CONFIG_MAC80211_LEDS) += led.o 31mac80211-$(CONFIG_MAC80211_LEDS) += led.o
31mac80211-$(CONFIG_MAC80211_DEBUGFS) += \ 32mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index 7c7df475a401..ec24378caaaf 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -23,12 +23,13 @@ void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
23 u8 *data, size_t data_len, u8 *mic) 23 u8 *data, size_t data_len, u8 *mic)
24{ 24{
25 struct scatterlist assoc, pt, ct[2]; 25 struct scatterlist assoc, pt, ct[2];
26 struct {
27 struct aead_request req;
28 u8 priv[crypto_aead_reqsize(tfm)];
29 } aead_req;
30 26
31 memset(&aead_req, 0, sizeof(aead_req)); 27 char aead_req_data[sizeof(struct aead_request) +
28 crypto_aead_reqsize(tfm)]
29 __aligned(__alignof__(struct aead_request));
30 struct aead_request *aead_req = (void *) aead_req_data;
31
32 memset(aead_req, 0, sizeof(aead_req_data));
32 33
33 sg_init_one(&pt, data, data_len); 34 sg_init_one(&pt, data, data_len);
34 sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad)); 35 sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
@@ -36,23 +37,23 @@ void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
36 sg_set_buf(&ct[0], data, data_len); 37 sg_set_buf(&ct[0], data, data_len);
37 sg_set_buf(&ct[1], mic, IEEE80211_CCMP_MIC_LEN); 38 sg_set_buf(&ct[1], mic, IEEE80211_CCMP_MIC_LEN);
38 39
39 aead_request_set_tfm(&aead_req.req, tfm); 40 aead_request_set_tfm(aead_req, tfm);
40 aead_request_set_assoc(&aead_req.req, &assoc, assoc.length); 41 aead_request_set_assoc(aead_req, &assoc, assoc.length);
41 aead_request_set_crypt(&aead_req.req, &pt, ct, data_len, b_0); 42 aead_request_set_crypt(aead_req, &pt, ct, data_len, b_0);
42 43
43 crypto_aead_encrypt(&aead_req.req); 44 crypto_aead_encrypt(aead_req);
44} 45}
45 46
46int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, 47int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
47 u8 *data, size_t data_len, u8 *mic) 48 u8 *data, size_t data_len, u8 *mic)
48{ 49{
49 struct scatterlist assoc, pt, ct[2]; 50 struct scatterlist assoc, pt, ct[2];
50 struct { 51 char aead_req_data[sizeof(struct aead_request) +
51 struct aead_request req; 52 crypto_aead_reqsize(tfm)]
52 u8 priv[crypto_aead_reqsize(tfm)]; 53 __aligned(__alignof__(struct aead_request));
53 } aead_req; 54 struct aead_request *aead_req = (void *) aead_req_data;
54 55
55 memset(&aead_req, 0, sizeof(aead_req)); 56 memset(aead_req, 0, sizeof(aead_req_data));
56 57
57 sg_init_one(&pt, data, data_len); 58 sg_init_one(&pt, data, data_len);
58 sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad)); 59 sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
@@ -60,12 +61,12 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
60 sg_set_buf(&ct[0], data, data_len); 61 sg_set_buf(&ct[0], data, data_len);
61 sg_set_buf(&ct[1], mic, IEEE80211_CCMP_MIC_LEN); 62 sg_set_buf(&ct[1], mic, IEEE80211_CCMP_MIC_LEN);
62 63
63 aead_request_set_tfm(&aead_req.req, tfm); 64 aead_request_set_tfm(aead_req, tfm);
64 aead_request_set_assoc(&aead_req.req, &assoc, assoc.length); 65 aead_request_set_assoc(aead_req, &assoc, assoc.length);
65 aead_request_set_crypt(&aead_req.req, ct, &pt, 66 aead_request_set_crypt(aead_req, ct, &pt,
66 data_len + IEEE80211_CCMP_MIC_LEN, b_0); 67 data_len + IEEE80211_CCMP_MIC_LEN, b_0);
67 68
68 return crypto_aead_decrypt(&aead_req.req); 69 return crypto_aead_decrypt(aead_req);
69} 70}
70 71
71struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[]) 72struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[])
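A local struct whose trailing member is a variable-length array relies on a GCC extension, whereas a plain block-scope VLA is standard C99; since a char buffer carries no natural alignment, the replacement over-aligns it explicitly to what struct aead_request requires. The pattern, distilled:

/* On-stack crypto request sized for a given tfm (pattern used above) */
char req_buf[sizeof(struct aead_request) + crypto_aead_reqsize(tfm)]
	__aligned(__alignof__(struct aead_request));
struct aead_request *req = (void *)req_buf;

memset(req, 0, sizeof(req_buf));
aead_request_set_tfm(req, tfm);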
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index aaa59d719592..d7513a503be1 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -109,6 +109,15 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
109static int ieee80211_start_p2p_device(struct wiphy *wiphy, 109static int ieee80211_start_p2p_device(struct wiphy *wiphy,
110 struct wireless_dev *wdev) 110 struct wireless_dev *wdev)
111{ 111{
112 struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
113 int ret;
114
115 mutex_lock(&sdata->local->chanctx_mtx);
116 ret = ieee80211_check_combinations(sdata, NULL, 0, 0);
117 mutex_unlock(&sdata->local->chanctx_mtx);
118 if (ret < 0)
119 return ret;
120
112 return ieee80211_do_open(wdev, true); 121 return ieee80211_do_open(wdev, true);
113} 122}
114 123
@@ -463,8 +472,10 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
463{ 472{
464 struct ieee80211_sub_if_data *sdata = sta->sdata; 473 struct ieee80211_sub_if_data *sdata = sta->sdata;
465 struct ieee80211_local *local = sdata->local; 474 struct ieee80211_local *local = sdata->local;
475 struct rate_control_ref *ref = local->rate_ctrl;
466 struct timespec uptime; 476 struct timespec uptime;
467 u64 packets = 0; 477 u64 packets = 0;
478 u32 thr = 0;
468 int i, ac; 479 int i, ac;
469 480
470 sinfo->generation = sdata->local->sta_generation; 481 sinfo->generation = sdata->local->sta_generation;
@@ -578,6 +589,17 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
578 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_ASSOCIATED); 589 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_ASSOCIATED);
579 if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) 590 if (test_sta_flag(sta, WLAN_STA_TDLS_PEER))
580 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER); 591 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER);
592
593 /* check if the driver has a SW RC implementation */
594 if (ref && ref->ops->get_expected_throughput)
595 thr = ref->ops->get_expected_throughput(sta->rate_ctrl_priv);
596 else
597 thr = drv_get_expected_throughput(local, &sta->sta);
598
599 if (thr != 0) {
600 sinfo->filled |= STATION_INFO_EXPECTED_THROUGHPUT;
601 sinfo->expected_throughput = thr;
602 }
581} 603}
582 604
583static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = { 605static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = {
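Expected throughput is filled from whichever rate-control layer actually exists: the mac80211 software rate-control module when the driver uses one, otherwise the new drv_get_expected_throughput() op for devices doing rate control in firmware; a zero answer means "unknown" and leaves the field unset. Precedence, condensed:

/* Sketch of the fallback order used above */
u32 thr = (ref && ref->ops->get_expected_throughput)
		? ref->ops->get_expected_throughput(sta->rate_ctrl_priv)
		: drv_get_expected_throughput(local, &sta->sta);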
@@ -768,7 +790,7 @@ static void ieee80211_get_et_strings(struct wiphy *wiphy,
768} 790}
769 791
770static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev, 792static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
771 int idx, u8 *mac, struct station_info *sinfo) 793 int idx, u8 *mac, struct station_info *sinfo)
772{ 794{
773 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 795 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
774 struct ieee80211_local *local = sdata->local; 796 struct ieee80211_local *local = sdata->local;
@@ -798,7 +820,7 @@ static int ieee80211_dump_survey(struct wiphy *wiphy, struct net_device *dev,
798} 820}
799 821
800static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev, 822static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
801 u8 *mac, struct station_info *sinfo) 823 const u8 *mac, struct station_info *sinfo)
802{ 824{
803 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 825 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
804 struct ieee80211_local *local = sdata->local; 826 struct ieee80211_local *local = sdata->local;
@@ -972,13 +994,13 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
972 sdata->needed_rx_chains = sdata->local->rx_chains; 994 sdata->needed_rx_chains = sdata->local->rx_chains;
973 995
974 mutex_lock(&local->mtx); 996 mutex_lock(&local->mtx);
975 sdata->radar_required = params->radar_required;
976 err = ieee80211_vif_use_channel(sdata, &params->chandef, 997 err = ieee80211_vif_use_channel(sdata, &params->chandef,
977 IEEE80211_CHANCTX_SHARED); 998 IEEE80211_CHANCTX_SHARED);
999 if (!err)
1000 ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
978 mutex_unlock(&local->mtx); 1001 mutex_unlock(&local->mtx);
979 if (err) 1002 if (err)
980 return err; 1003 return err;
981 ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
982 1004
983 /* 1005 /*
984 * Apply control port protocol, this allows us to 1006 * Apply control port protocol, this allows us to
@@ -1075,6 +1097,31 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
1075 return 0; 1097 return 0;
1076} 1098}
1077 1099
1100bool ieee80211_csa_needs_block_tx(struct ieee80211_local *local)
1101{
1102 struct ieee80211_sub_if_data *sdata;
1103
1104 lockdep_assert_held(&local->mtx);
1105
1106 rcu_read_lock();
1107 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
1108 if (!ieee80211_sdata_running(sdata))
1109 continue;
1110
1111 if (!sdata->vif.csa_active)
1112 continue;
1113
1114 if (!sdata->csa_block_tx)
1115 continue;
1116
1117 rcu_read_unlock();
1118 return true;
1119 }
1120 rcu_read_unlock();
1121
1122 return false;
1123}
1124
1078static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) 1125static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
1079{ 1126{
1080 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1127 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
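ieee80211_csa_needs_block_tx() answers a device-global question: hardware queues are shared across interfaces, so the CSA queue-stop may only be lifted once no running interface still has an active channel switch that requested TX blocking. Its callers all follow the same pattern:

/* Wake pattern used at every CSA completion/abort site */
if (!ieee80211_csa_needs_block_tx(local))
	ieee80211_wake_queues_by_reason(&local->hw,
					IEEE80211_MAX_QUEUE_MAP,
					IEEE80211_QUEUE_STOP_REASON_CSA);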
@@ -1092,7 +1139,14 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
1092 old_probe_resp = sdata_dereference(sdata->u.ap.probe_resp, sdata); 1139 old_probe_resp = sdata_dereference(sdata->u.ap.probe_resp, sdata);
1093 1140
1094 /* abort any running channel switch */ 1141 /* abort any running channel switch */
1142 mutex_lock(&local->mtx);
1095 sdata->vif.csa_active = false; 1143 sdata->vif.csa_active = false;
1144 if (!ieee80211_csa_needs_block_tx(local))
1145 ieee80211_wake_queues_by_reason(&local->hw,
1146 IEEE80211_MAX_QUEUE_MAP,
1147 IEEE80211_QUEUE_STOP_REASON_CSA);
1148 mutex_unlock(&local->mtx);
1149
1096 kfree(sdata->u.ap.next_beacon); 1150 kfree(sdata->u.ap.next_beacon);
1097 sdata->u.ap.next_beacon = NULL; 1151 sdata->u.ap.next_beacon = NULL;
1098 1152
@@ -1131,8 +1185,8 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
1131 local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf); 1185 local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf);
1132 skb_queue_purge(&sdata->u.ap.ps.bc_buf); 1186 skb_queue_purge(&sdata->u.ap.ps.bc_buf);
1133 1187
1134 ieee80211_vif_copy_chanctx_to_vlans(sdata, true);
1135 mutex_lock(&local->mtx); 1188 mutex_lock(&local->mtx);
1189 ieee80211_vif_copy_chanctx_to_vlans(sdata, true);
1136 ieee80211_vif_release_channel(sdata); 1190 ieee80211_vif_release_channel(sdata);
1137 mutex_unlock(&local->mtx); 1191 mutex_unlock(&local->mtx);
1138 1192
@@ -1416,7 +1470,8 @@ static int sta_apply_parameters(struct ieee80211_local *local,
1416} 1470}
1417 1471
1418static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, 1472static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
1419 u8 *mac, struct station_parameters *params) 1473 const u8 *mac,
1474 struct station_parameters *params)
1420{ 1475{
1421 struct ieee80211_local *local = wiphy_priv(wiphy); 1476 struct ieee80211_local *local = wiphy_priv(wiphy);
1422 struct sta_info *sta; 1477 struct sta_info *sta;
@@ -1450,6 +1505,8 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
1450 if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))) { 1505 if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))) {
1451 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH); 1506 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
1452 sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC); 1507 sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
1508 } else {
1509 sta->sta.tdls = true;
1453 } 1510 }
1454 1511
1455 err = sta_apply_parameters(local, sta, params); 1512 err = sta_apply_parameters(local, sta, params);
@@ -1483,7 +1540,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
1483} 1540}
1484 1541
1485static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev, 1542static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
1486 u8 *mac) 1543 const u8 *mac)
1487{ 1544{
1488 struct ieee80211_sub_if_data *sdata; 1545 struct ieee80211_sub_if_data *sdata;
1489 1546
@@ -1497,7 +1554,7 @@ static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
1497} 1554}
1498 1555
1499static int ieee80211_change_station(struct wiphy *wiphy, 1556static int ieee80211_change_station(struct wiphy *wiphy,
1500 struct net_device *dev, u8 *mac, 1557 struct net_device *dev, const u8 *mac,
1501 struct station_parameters *params) 1558 struct station_parameters *params)
1502{ 1559{
1503 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1560 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -1566,7 +1623,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
1566 1623
1567 if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 1624 if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1568 sta->sdata->u.vlan.sta) { 1625 sta->sdata->u.vlan.sta) {
1569 rcu_assign_pointer(sta->sdata->u.vlan.sta, NULL); 1626 RCU_INIT_POINTER(sta->sdata->u.vlan.sta, NULL);
1570 prev_4addr = true; 1627 prev_4addr = true;
1571 } 1628 }
1572 1629
@@ -1622,7 +1679,7 @@ out_err:
1622 1679
1623#ifdef CONFIG_MAC80211_MESH 1680#ifdef CONFIG_MAC80211_MESH
1624static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev, 1681static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
1625 u8 *dst, u8 *next_hop) 1682 const u8 *dst, const u8 *next_hop)
1626{ 1683{
1627 struct ieee80211_sub_if_data *sdata; 1684 struct ieee80211_sub_if_data *sdata;
1628 struct mesh_path *mpath; 1685 struct mesh_path *mpath;
@@ -1650,7 +1707,7 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
1650} 1707}
1651 1708
1652static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev, 1709static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev,
1653 u8 *dst) 1710 const u8 *dst)
1654{ 1711{
1655 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1712 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1656 1713
@@ -1661,9 +1718,8 @@ static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev,
1661 return 0; 1718 return 0;
1662} 1719}
1663 1720
1664static int ieee80211_change_mpath(struct wiphy *wiphy, 1721static int ieee80211_change_mpath(struct wiphy *wiphy, struct net_device *dev,
1665 struct net_device *dev, 1722 const u8 *dst, const u8 *next_hop)
1666 u8 *dst, u8 *next_hop)
1667{ 1723{
1668 struct ieee80211_sub_if_data *sdata; 1724 struct ieee80211_sub_if_data *sdata;
1669 struct mesh_path *mpath; 1725 struct mesh_path *mpath;
@@ -1755,8 +1811,8 @@ static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev,
1755} 1811}
1756 1812
1757static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev, 1813static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev,
1758 int idx, u8 *dst, u8 *next_hop, 1814 int idx, u8 *dst, u8 *next_hop,
1759 struct mpath_info *pinfo) 1815 struct mpath_info *pinfo)
1760{ 1816{
1761 struct ieee80211_sub_if_data *sdata; 1817 struct ieee80211_sub_if_data *sdata;
1762 struct mesh_path *mpath; 1818 struct mesh_path *mpath;
@@ -2930,7 +2986,6 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy,
2930 /* whatever, but channel contexts should not complain about that one */ 2986 /* whatever, but channel contexts should not complain about that one */
2931 sdata->smps_mode = IEEE80211_SMPS_OFF; 2987 sdata->smps_mode = IEEE80211_SMPS_OFF;
2932 sdata->needed_rx_chains = local->rx_chains; 2988 sdata->needed_rx_chains = local->rx_chains;
2933 sdata->radar_required = true;
2934 2989
2935 err = ieee80211_vif_use_channel(sdata, chandef, 2990 err = ieee80211_vif_use_channel(sdata, chandef,
2936 IEEE80211_CHANCTX_SHARED); 2991 IEEE80211_CHANCTX_SHARED);
@@ -3011,26 +3066,11 @@ void ieee80211_csa_finish(struct ieee80211_vif *vif)
3011} 3066}
3012EXPORT_SYMBOL(ieee80211_csa_finish); 3067EXPORT_SYMBOL(ieee80211_csa_finish);
3013 3068
3014static void ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata) 3069static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata,
3070 u32 *changed)
3015{ 3071{
3016 struct ieee80211_local *local = sdata->local; 3072 int err;
3017 int err, changed = 0;
3018
3019 sdata_assert_lock(sdata);
3020
3021 mutex_lock(&local->mtx);
3022 sdata->radar_required = sdata->csa_radar_required;
3023 err = ieee80211_vif_change_channel(sdata, &changed);
3024 mutex_unlock(&local->mtx);
3025 if (WARN_ON(err < 0))
3026 return;
3027
3028 if (!local->use_chanctx) {
3029 local->_oper_chandef = sdata->csa_chandef;
3030 ieee80211_hw_config(local, 0);
3031 }
3032 3073
3033 sdata->vif.csa_active = false;
3034 switch (sdata->vif.type) { 3074 switch (sdata->vif.type) {
3035 case NL80211_IFTYPE_AP: 3075 case NL80211_IFTYPE_AP:
3036 err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon); 3076 err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon);
@@ -3038,35 +3078,74 @@ static void ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
3038 sdata->u.ap.next_beacon = NULL; 3078 sdata->u.ap.next_beacon = NULL;
3039 3079
3040 if (err < 0) 3080 if (err < 0)
3041 return; 3081 return err;
3042 changed |= err; 3082 *changed |= err;
3043 break; 3083 break;
3044 case NL80211_IFTYPE_ADHOC: 3084 case NL80211_IFTYPE_ADHOC:
3045 err = ieee80211_ibss_finish_csa(sdata); 3085 err = ieee80211_ibss_finish_csa(sdata);
3046 if (err < 0) 3086 if (err < 0)
3047 return; 3087 return err;
3048 changed |= err; 3088 *changed |= err;
3049 break; 3089 break;
3050#ifdef CONFIG_MAC80211_MESH 3090#ifdef CONFIG_MAC80211_MESH
3051 case NL80211_IFTYPE_MESH_POINT: 3091 case NL80211_IFTYPE_MESH_POINT:
3052 err = ieee80211_mesh_finish_csa(sdata); 3092 err = ieee80211_mesh_finish_csa(sdata);
3053 if (err < 0) 3093 if (err < 0)
3054 return; 3094 return err;
3055 changed |= err; 3095 *changed |= err;
3056 break; 3096 break;
3057#endif 3097#endif
3058 default: 3098 default:
3059 WARN_ON(1); 3099 WARN_ON(1);
3060 return; 3100 return -EINVAL;
3061 } 3101 }
3062 3102
3103 return 0;
3104}
3105
3106static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
3107{
3108 struct ieee80211_local *local = sdata->local;
3109 u32 changed = 0;
3110 int err;
3111
3112 sdata_assert_lock(sdata);
3113 lockdep_assert_held(&local->mtx);
3114
3115 sdata->radar_required = sdata->csa_radar_required;
3116 err = ieee80211_vif_change_channel(sdata, &changed);
3117 if (err < 0)
3118 return err;
3119
3120 if (!local->use_chanctx) {
3121 local->_oper_chandef = sdata->csa_chandef;
3122 ieee80211_hw_config(local, 0);
3123 }
3124
3125 sdata->vif.csa_active = false;
3126
3127 err = ieee80211_set_after_csa_beacon(sdata, &changed);
3128 if (err)
3129 return err;
3130
3063 ieee80211_bss_info_change_notify(sdata, changed); 3131 ieee80211_bss_info_change_notify(sdata, changed);
3132 cfg80211_ch_switch_notify(sdata->dev, &sdata->csa_chandef);
3064 3133
3065 ieee80211_wake_queues_by_reason(&sdata->local->hw, 3134 if (!ieee80211_csa_needs_block_tx(local))
3135 ieee80211_wake_queues_by_reason(&local->hw,
3066 IEEE80211_MAX_QUEUE_MAP, 3136 IEEE80211_MAX_QUEUE_MAP,
3067 IEEE80211_QUEUE_STOP_REASON_CSA); 3137 IEEE80211_QUEUE_STOP_REASON_CSA);
3068 3138
3069 cfg80211_ch_switch_notify(sdata->dev, &sdata->csa_chandef); 3139 return 0;
3140}
3141
3142static void ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
3143{
3144 if (__ieee80211_csa_finalize(sdata)) {
3145 sdata_info(sdata, "failed to finalize CSA, disconnecting\n");
3146 cfg80211_stop_iface(sdata->local->hw.wiphy, &sdata->wdev,
3147 GFP_KERNEL);
3148 }
3070} 3149}
3071 3150
3072void ieee80211_csa_finalize_work(struct work_struct *work) 3151void ieee80211_csa_finalize_work(struct work_struct *work)
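The hunk above turns the old void ieee80211_csa_finalize() into an int-returning __ieee80211_csa_finalize() plus a thin wrapper that tears the interface down when finalization fails, so an error in the middle of a channel switch is acted on instead of being swallowed by a bare return. A minimal standalone sketch of that shape, with hypothetical step names:

#include <stdio.h>

/* hypothetical stand-ins for the two fallible steps */
static int change_channel(void) { return 0; }
static int update_beacon(void)  { return -1; /* simulate failure */ }

static int __finalize(void)
{
	int err;

	err = change_channel();
	if (err)
		return err;	/* propagate instead of returning void */

	return update_beacon();
}

static void finalize(void)
{
	if (__finalize())
		printf("failed to finalize, tearing down\n");
}

int main(void)
{
	finalize();
	return 0;
}
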
@@ -3074,8 +3153,11 @@ void ieee80211_csa_finalize_work(struct work_struct *work)
3074 struct ieee80211_sub_if_data *sdata = 3153 struct ieee80211_sub_if_data *sdata =
3075 container_of(work, struct ieee80211_sub_if_data, 3154 container_of(work, struct ieee80211_sub_if_data,
3076 csa_finalize_work); 3155 csa_finalize_work);
3156 struct ieee80211_local *local = sdata->local;
3077 3157
3078 sdata_lock(sdata); 3158 sdata_lock(sdata);
3159 mutex_lock(&local->mtx);
3160
3079 /* AP might have been stopped while waiting for the lock. */ 3161 /* AP might have been stopped while waiting for the lock. */
3080 if (!sdata->vif.csa_active) 3162 if (!sdata->vif.csa_active)
3081 goto unlock; 3163 goto unlock;
@@ -3086,6 +3168,7 @@ void ieee80211_csa_finalize_work(struct work_struct *work)
3086 ieee80211_csa_finalize(sdata); 3168 ieee80211_csa_finalize(sdata);
3087 3169
3088unlock: 3170unlock:
3171 mutex_unlock(&local->mtx);
3089 sdata_unlock(sdata); 3172 sdata_unlock(sdata);
3090} 3173}
3091 3174
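The work handler now takes local->mtx up front alongside the sdata lock, so the whole finalize sequence runs under both locks rather than the callee re-acquiring the mutex partway through. A small pthread sketch of the same hoisting, with a fixed acquisition order (names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER; /* ~sdata lock */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER; /* ~local->mtx */

static void finalize_work(void)
{
	/* take both locks, always in the same order, for the whole
	 * critical section instead of locking inside the callee */
	pthread_mutex_lock(&outer);
	pthread_mutex_lock(&inner);

	printf("finalizing under both locks\n");

	pthread_mutex_unlock(&inner);
	pthread_mutex_unlock(&outer);
}

int main(void)
{
	finalize_work();
	return 0;
}
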
@@ -3121,9 +3204,25 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
3121 if (params->count <= 1) 3204 if (params->count <= 1)
3122 break; 3205 break;
3123 3206
3124 sdata->csa_counter_offset_beacon = 3207 if ((params->n_counter_offsets_beacon >
3125 params->counter_offset_beacon; 3208 IEEE80211_MAX_CSA_COUNTERS_NUM) ||
3126 sdata->csa_counter_offset_presp = params->counter_offset_presp; 3209 (params->n_counter_offsets_presp >
3210 IEEE80211_MAX_CSA_COUNTERS_NUM))
3211 return -EINVAL;
3212
3213 /* make sure we don't have garbage in other counters */
3214 memset(sdata->csa_counter_offset_beacon, 0,
3215 sizeof(sdata->csa_counter_offset_beacon));
3216 memset(sdata->csa_counter_offset_presp, 0,
3217 sizeof(sdata->csa_counter_offset_presp));
3218
3219 memcpy(sdata->csa_counter_offset_beacon,
3220 params->counter_offsets_beacon,
3221 params->n_counter_offsets_beacon * sizeof(u16));
3222 memcpy(sdata->csa_counter_offset_presp,
3223 params->counter_offsets_presp,
3224 params->n_counter_offsets_presp * sizeof(u16));
3225
3127 err = ieee80211_assign_beacon(sdata, &params->beacon_csa); 3226 err = ieee80211_assign_beacon(sdata, &params->beacon_csa);
3128 if (err < 0) { 3227 if (err < 0) {
3129 kfree(sdata->u.ap.next_beacon); 3228 kfree(sdata->u.ap.next_beacon);
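The new counter-offset handling validates both array lengths against IEEE80211_MAX_CSA_COUNTERS_NUM, zeroes the destination arrays, and only then copies the caller-supplied offsets, so a short input cannot leave stale values in the unused slots. A self-contained sketch of that validate/zero/copy sequence (MAX_COUNTERS is a stand-in for the kernel constant):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_COUNTERS 2	/* stand-in for IEEE80211_MAX_CSA_COUNTERS_NUM */

static uint16_t dst_offsets[MAX_COUNTERS];

static int copy_offsets(const uint16_t *src, size_t n)
{
	if (n > MAX_COUNTERS)
		return -1;	/* reject oversized input up front */

	/* zero the whole array so unused slots hold no stale data */
	memset(dst_offsets, 0, sizeof(dst_offsets));
	memcpy(dst_offsets, src, n * sizeof(*src));
	return 0;
}

int main(void)
{
	uint16_t offs[] = { 42 };

	assert(copy_offsets(offs, 1) == 0);
	assert(dst_offsets[0] == 42 && dst_offsets[1] == 0);
	printf("ok\n");
	return 0;
}
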
@@ -3212,16 +3311,18 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
3212 return 0; 3311 return 0;
3213} 3312}
3214 3313
3215int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, 3314static int
3216 struct cfg80211_csa_settings *params) 3315__ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
3316 struct cfg80211_csa_settings *params)
3217{ 3317{
3218 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 3318 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3219 struct ieee80211_local *local = sdata->local; 3319 struct ieee80211_local *local = sdata->local;
3220 struct ieee80211_chanctx_conf *chanctx_conf; 3320 struct ieee80211_chanctx_conf *conf;
3221 struct ieee80211_chanctx *chanctx; 3321 struct ieee80211_chanctx *chanctx;
3222 int err, num_chanctx, changed = 0; 3322 int err, num_chanctx, changed = 0;
3223 3323
3224 sdata_assert_lock(sdata); 3324 sdata_assert_lock(sdata);
3325 lockdep_assert_held(&local->mtx);
3225 3326
3226 if (!list_empty(&local->roc_list) || local->scanning) 3327 if (!list_empty(&local->roc_list) || local->scanning)
3227 return -EBUSY; 3328 return -EBUSY;
@@ -3233,23 +3334,24 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
3233 &sdata->vif.bss_conf.chandef)) 3334 &sdata->vif.bss_conf.chandef))
3234 return -EINVAL; 3335 return -EINVAL;
3235 3336
3236 rcu_read_lock(); 3337 mutex_lock(&local->chanctx_mtx);
3237 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); 3338 conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
3238 if (!chanctx_conf) { 3339 lockdep_is_held(&local->chanctx_mtx));
3239 rcu_read_unlock(); 3340 if (!conf) {
3341 mutex_unlock(&local->chanctx_mtx);
3240 return -EBUSY; 3342 return -EBUSY;
3241 } 3343 }
3242 3344
3243 /* don't handle for multi-VIF cases */ 3345 /* don't handle for multi-VIF cases */
3244 chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf); 3346 chanctx = container_of(conf, struct ieee80211_chanctx, conf);
3245 if (chanctx->refcount > 1) { 3347 if (ieee80211_chanctx_refcount(local, chanctx) > 1) {
3246 rcu_read_unlock(); 3348 mutex_unlock(&local->chanctx_mtx);
3247 return -EBUSY; 3349 return -EBUSY;
3248 } 3350 }
3249 num_chanctx = 0; 3351 num_chanctx = 0;
3250 list_for_each_entry_rcu(chanctx, &local->chanctx_list, list) 3352 list_for_each_entry_rcu(chanctx, &local->chanctx_list, list)
3251 num_chanctx++; 3353 num_chanctx++;
3252 rcu_read_unlock(); 3354 mutex_unlock(&local->chanctx_mtx);
3253 3355
3254 if (num_chanctx > 1) 3356 if (num_chanctx > 1)
3255 return -EBUSY; 3357 return -EBUSY;
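Replacing rcu_read_lock() with chanctx_mtx in the hunk above means the channel-context pointer is read under the same lock writers take, so the refcount test that follows (now the list-derived ieee80211_chanctx_refcount()) cannot race with a concurrent reassignment. A userspace analogue of reading under the writer-side lock (types and names hypothetical):

#include <pthread.h>
#include <stdio.h>

struct ctx { int refs; };

static pthread_mutex_t ctx_mtx = PTHREAD_MUTEX_INITIALIZER;
static struct ctx *cur_ctx;	/* written only under ctx_mtx */

/* Holding the writer-side mutex keeps the pointer and its refcount
 * stable for the whole critical section, so the busy check cannot
 * observe a context that is being swapped out underneath it. */
static int ctx_busy(void)
{
	int busy;

	pthread_mutex_lock(&ctx_mtx);
	busy = cur_ctx && cur_ctx->refs > 1;
	pthread_mutex_unlock(&ctx_mtx);

	return busy;
}

int main(void)
{
	static struct ctx c = { .refs = 1 };

	cur_ctx = &c;
	printf("busy=%d\n", ctx_busy());
	return 0;
}
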
@@ -3263,15 +3365,16 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
3263 return err; 3365 return err;
3264 3366
3265 sdata->csa_radar_required = params->radar_required; 3367 sdata->csa_radar_required = params->radar_required;
3266
3267 if (params->block_tx)
3268 ieee80211_stop_queues_by_reason(&local->hw,
3269 IEEE80211_MAX_QUEUE_MAP,
3270 IEEE80211_QUEUE_STOP_REASON_CSA);
3271
3272 sdata->csa_chandef = params->chandef; 3368 sdata->csa_chandef = params->chandef;
3369 sdata->csa_block_tx = params->block_tx;
3370 sdata->csa_current_counter = params->count;
3273 sdata->vif.csa_active = true; 3371 sdata->vif.csa_active = true;
3274 3372
3373 if (sdata->csa_block_tx)
3374 ieee80211_stop_queues_by_reason(&local->hw,
3375 IEEE80211_MAX_QUEUE_MAP,
3376 IEEE80211_QUEUE_STOP_REASON_CSA);
3377
3275 if (changed) { 3378 if (changed) {
3276 ieee80211_bss_info_change_notify(sdata, changed); 3379 ieee80211_bss_info_change_notify(sdata, changed);
3277 drv_channel_switch_beacon(sdata, &params->chandef); 3380 drv_channel_switch_beacon(sdata, &params->chandef);
@@ -3283,6 +3386,20 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
3283 return 0; 3386 return 0;
3284} 3387}
3285 3388
3389int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
3390 struct cfg80211_csa_settings *params)
3391{
3392 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3393 struct ieee80211_local *local = sdata->local;
3394 int err;
3395
3396 mutex_lock(&local->mtx);
3397 err = __ieee80211_channel_switch(wiphy, dev, params);
3398 mutex_unlock(&local->mtx);
3399
3400 return err;
3401}
3402
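ieee80211_channel_switch() is now a thin wrapper that takes local->mtx and delegates to a double-underscore variant that asserts the lock is held, the usual locked/unlocked function pairing. A standalone sketch of the pattern (names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;

/* double-underscore variant: caller must already hold mtx (the
 * kernel version documents this with lockdep_assert_held) */
static int __channel_switch(int chan)
{
	printf("switching to channel %d under the lock\n", chan);
	return 0;
}

/* public entry point: take the lock, delegate, release */
static int channel_switch(int chan)
{
	int err;

	pthread_mutex_lock(&mtx);
	err = __channel_switch(chan);
	pthread_mutex_unlock(&mtx);

	return err;
}

int main(void)
{
	return channel_switch(11);
}
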
3286static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, 3403static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
3287 struct cfg80211_mgmt_tx_params *params, 3404 struct cfg80211_mgmt_tx_params *params,
3288 u64 *cookie) 3405 u64 *cookie)
@@ -3295,6 +3412,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
3295 bool need_offchan = false; 3412 bool need_offchan = false;
3296 u32 flags; 3413 u32 flags;
3297 int ret; 3414 int ret;
3415 u8 *data;
3298 3416
3299 if (params->dont_wait_for_ack) 3417 if (params->dont_wait_for_ack)
3300 flags = IEEE80211_TX_CTL_NO_ACK; 3418 flags = IEEE80211_TX_CTL_NO_ACK;
@@ -3388,7 +3506,20 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
3388 } 3506 }
3389 skb_reserve(skb, local->hw.extra_tx_headroom); 3507 skb_reserve(skb, local->hw.extra_tx_headroom);
3390 3508
3391 memcpy(skb_put(skb, params->len), params->buf, params->len); 3509 data = skb_put(skb, params->len);
3510 memcpy(data, params->buf, params->len);
3511
3512 /* Update CSA counters */
3513 if (sdata->vif.csa_active &&
3514 (sdata->vif.type == NL80211_IFTYPE_AP ||
3515 sdata->vif.type == NL80211_IFTYPE_ADHOC) &&
3516 params->n_csa_offsets) {
3517 int i;
3518 u8 c = sdata->csa_current_counter;
3519
3520 for (i = 0; i < params->n_csa_offsets; i++)
3521 data[params->csa_offsets[i]] = c;
3522 }
3392 3523
3393 IEEE80211_SKB_CB(skb)->flags = flags; 3524 IEEE80211_SKB_CB(skb)->flags = flags;
3394 3525
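The added loop stamps the current CSA countdown value into the management frame at every offset userspace supplied, instead of relying on a single fixed counter location. A self-contained sketch of stamping a counter byte at recorded offsets (the offsets are assumed to have been validated against the frame length, as the nl80211 path does before this point):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* write the current countdown value into the frame at every
 * offset the caller recorded, mirroring the loop added above */
static void stamp_counters(uint8_t *frame, const uint16_t *offs,
			   size_t n, uint8_t counter)
{
	size_t i;

	for (i = 0; i < n; i++)
		frame[offs[i]] = counter;
}

int main(void)
{
	uint8_t frame[16];
	uint16_t offs[] = { 3, 9 };

	memset(frame, 0, sizeof(frame));
	stamp_counters(frame, offs, 2, 5);
	printf("%u %u\n", frame[3], frame[9]);	/* prints: 5 5 */
	return 0;
}
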
@@ -3497,320 +3628,6 @@ static int ieee80211_set_rekey_data(struct wiphy *wiphy,
3497 return 0; 3628 return 0;
3498} 3629}
3499 3630
3500static void ieee80211_tdls_add_ext_capab(struct sk_buff *skb)
3501{
3502 u8 *pos = (void *)skb_put(skb, 7);
3503
3504 *pos++ = WLAN_EID_EXT_CAPABILITY;
3505 *pos++ = 5; /* len */
3506 *pos++ = 0x0;
3507 *pos++ = 0x0;
3508 *pos++ = 0x0;
3509 *pos++ = 0x0;
3510 *pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
3511}
3512
3513static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata)
3514{
3515 struct ieee80211_local *local = sdata->local;
3516 u16 capab;
3517
3518 capab = 0;
3519 if (ieee80211_get_sdata_band(sdata) != IEEE80211_BAND_2GHZ)
3520 return capab;
3521
3522 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
3523 capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
3524 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
3525 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
3526
3527 return capab;
3528}
3529
3530static void ieee80211_tdls_add_link_ie(struct sk_buff *skb, u8 *src_addr,
3531 u8 *peer, u8 *bssid)
3532{
3533 struct ieee80211_tdls_lnkie *lnkid;
3534
3535 lnkid = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
3536
3537 lnkid->ie_type = WLAN_EID_LINK_ID;
3538 lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) - 2;
3539
3540 memcpy(lnkid->bssid, bssid, ETH_ALEN);
3541 memcpy(lnkid->init_sta, src_addr, ETH_ALEN);
3542 memcpy(lnkid->resp_sta, peer, ETH_ALEN);
3543}
3544
3545static int
3546ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
3547 u8 *peer, u8 action_code, u8 dialog_token,
3548 u16 status_code, struct sk_buff *skb)
3549{
3550 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3551 enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
3552 struct ieee80211_tdls_data *tf;
3553
3554 tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
3555
3556 memcpy(tf->da, peer, ETH_ALEN);
3557 memcpy(tf->sa, sdata->vif.addr, ETH_ALEN);
3558 tf->ether_type = cpu_to_be16(ETH_P_TDLS);
3559 tf->payload_type = WLAN_TDLS_SNAP_RFTYPE;
3560
3561 switch (action_code) {
3562 case WLAN_TDLS_SETUP_REQUEST:
3563 tf->category = WLAN_CATEGORY_TDLS;
3564 tf->action_code = WLAN_TDLS_SETUP_REQUEST;
3565
3566 skb_put(skb, sizeof(tf->u.setup_req));
3567 tf->u.setup_req.dialog_token = dialog_token;
3568 tf->u.setup_req.capability =
3569 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
3570
3571 ieee80211_add_srates_ie(sdata, skb, false, band);
3572 ieee80211_add_ext_srates_ie(sdata, skb, false, band);
3573 ieee80211_tdls_add_ext_capab(skb);
3574 break;
3575 case WLAN_TDLS_SETUP_RESPONSE:
3576 tf->category = WLAN_CATEGORY_TDLS;
3577 tf->action_code = WLAN_TDLS_SETUP_RESPONSE;
3578
3579 skb_put(skb, sizeof(tf->u.setup_resp));
3580 tf->u.setup_resp.status_code = cpu_to_le16(status_code);
3581 tf->u.setup_resp.dialog_token = dialog_token;
3582 tf->u.setup_resp.capability =
3583 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
3584
3585 ieee80211_add_srates_ie(sdata, skb, false, band);
3586 ieee80211_add_ext_srates_ie(sdata, skb, false, band);
3587 ieee80211_tdls_add_ext_capab(skb);
3588 break;
3589 case WLAN_TDLS_SETUP_CONFIRM:
3590 tf->category = WLAN_CATEGORY_TDLS;
3591 tf->action_code = WLAN_TDLS_SETUP_CONFIRM;
3592
3593 skb_put(skb, sizeof(tf->u.setup_cfm));
3594 tf->u.setup_cfm.status_code = cpu_to_le16(status_code);
3595 tf->u.setup_cfm.dialog_token = dialog_token;
3596 break;
3597 case WLAN_TDLS_TEARDOWN:
3598 tf->category = WLAN_CATEGORY_TDLS;
3599 tf->action_code = WLAN_TDLS_TEARDOWN;
3600
3601 skb_put(skb, sizeof(tf->u.teardown));
3602 tf->u.teardown.reason_code = cpu_to_le16(status_code);
3603 break;
3604 case WLAN_TDLS_DISCOVERY_REQUEST:
3605 tf->category = WLAN_CATEGORY_TDLS;
3606 tf->action_code = WLAN_TDLS_DISCOVERY_REQUEST;
3607
3608 skb_put(skb, sizeof(tf->u.discover_req));
3609 tf->u.discover_req.dialog_token = dialog_token;
3610 break;
3611 default:
3612 return -EINVAL;
3613 }
3614
3615 return 0;
3616}
3617
3618static int
3619ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
3620 u8 *peer, u8 action_code, u8 dialog_token,
3621 u16 status_code, struct sk_buff *skb)
3622{
3623 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3624 enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
3625 struct ieee80211_mgmt *mgmt;
3626
3627 mgmt = (void *)skb_put(skb, 24);
3628 memset(mgmt, 0, 24);
3629 memcpy(mgmt->da, peer, ETH_ALEN);
3630 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
3631 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
3632
3633 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3634 IEEE80211_STYPE_ACTION);
3635
3636 switch (action_code) {
3637 case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
3638 skb_put(skb, 1 + sizeof(mgmt->u.action.u.tdls_discover_resp));
3639 mgmt->u.action.category = WLAN_CATEGORY_PUBLIC;
3640 mgmt->u.action.u.tdls_discover_resp.action_code =
3641 WLAN_PUB_ACTION_TDLS_DISCOVER_RES;
3642 mgmt->u.action.u.tdls_discover_resp.dialog_token =
3643 dialog_token;
3644 mgmt->u.action.u.tdls_discover_resp.capability =
3645 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
3646
3647 ieee80211_add_srates_ie(sdata, skb, false, band);
3648 ieee80211_add_ext_srates_ie(sdata, skb, false, band);
3649 ieee80211_tdls_add_ext_capab(skb);
3650 break;
3651 default:
3652 return -EINVAL;
3653 }
3654
3655 return 0;
3656}
3657
3658static int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
3659 u8 *peer, u8 action_code, u8 dialog_token,
3660 u16 status_code, u32 peer_capability,
3661 const u8 *extra_ies, size_t extra_ies_len)
3662{
3663 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3664 struct ieee80211_local *local = sdata->local;
3665 struct sk_buff *skb = NULL;
3666 bool send_direct;
3667 int ret;
3668
3669 if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
3670 return -ENOTSUPP;
3671
3672 /* make sure we are in managed mode, and associated */
3673 if (sdata->vif.type != NL80211_IFTYPE_STATION ||
3674 !sdata->u.mgd.associated)
3675 return -EINVAL;
3676
3677 tdls_dbg(sdata, "TDLS mgmt action %d peer %pM\n",
3678 action_code, peer);
3679
3680 skb = dev_alloc_skb(local->hw.extra_tx_headroom +
3681 max(sizeof(struct ieee80211_mgmt),
3682 sizeof(struct ieee80211_tdls_data)) +
3683 50 + /* supported rates */
3684 7 + /* ext capab */
3685 extra_ies_len +
3686 sizeof(struct ieee80211_tdls_lnkie));
3687 if (!skb)
3688 return -ENOMEM;
3689
3690 skb_reserve(skb, local->hw.extra_tx_headroom);
3691
3692 switch (action_code) {
3693 case WLAN_TDLS_SETUP_REQUEST:
3694 case WLAN_TDLS_SETUP_RESPONSE:
3695 case WLAN_TDLS_SETUP_CONFIRM:
3696 case WLAN_TDLS_TEARDOWN:
3697 case WLAN_TDLS_DISCOVERY_REQUEST:
3698 ret = ieee80211_prep_tdls_encap_data(wiphy, dev, peer,
3699 action_code, dialog_token,
3700 status_code, skb);
3701 send_direct = false;
3702 break;
3703 case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
3704 ret = ieee80211_prep_tdls_direct(wiphy, dev, peer, action_code,
3705 dialog_token, status_code,
3706 skb);
3707 send_direct = true;
3708 break;
3709 default:
3710 ret = -ENOTSUPP;
3711 break;
3712 }
3713
3714 if (ret < 0)
3715 goto fail;
3716
3717 if (extra_ies_len)
3718 memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len);
3719
3720 /* the TDLS link IE is always added last */
3721 switch (action_code) {
3722 case WLAN_TDLS_SETUP_REQUEST:
3723 case WLAN_TDLS_SETUP_CONFIRM:
3724 case WLAN_TDLS_TEARDOWN:
3725 case WLAN_TDLS_DISCOVERY_REQUEST:
3726 /* we are the initiator */
3727 ieee80211_tdls_add_link_ie(skb, sdata->vif.addr, peer,
3728 sdata->u.mgd.bssid);
3729 break;
3730 case WLAN_TDLS_SETUP_RESPONSE:
3731 case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
3732 /* we are the responder */
3733 ieee80211_tdls_add_link_ie(skb, peer, sdata->vif.addr,
3734 sdata->u.mgd.bssid);
3735 break;
3736 default:
3737 ret = -ENOTSUPP;
3738 goto fail;
3739 }
3740
3741 if (send_direct) {
3742 ieee80211_tx_skb(sdata, skb);
3743 return 0;
3744 }
3745
3746 /*
3747 * According to 802.11z: Setup req/resp are sent in AC_BK, otherwise
3748 * we should default to AC_VI.
3749 */
3750 switch (action_code) {
3751 case WLAN_TDLS_SETUP_REQUEST:
3752 case WLAN_TDLS_SETUP_RESPONSE:
3753 skb_set_queue_mapping(skb, IEEE80211_AC_BK);
3754 skb->priority = 2;
3755 break;
3756 default:
3757 skb_set_queue_mapping(skb, IEEE80211_AC_VI);
3758 skb->priority = 5;
3759 break;
3760 }
3761
3762 /* disable bottom halves when entering the Tx path */
3763 local_bh_disable();
3764 ret = ieee80211_subif_start_xmit(skb, dev);
3765 local_bh_enable();
3766
3767 return ret;
3768
3769fail:
3770 dev_kfree_skb(skb);
3771 return ret;
3772}
3773
3774static int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
3775 u8 *peer, enum nl80211_tdls_operation oper)
3776{
3777 struct sta_info *sta;
3778 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3779
3780 if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
3781 return -ENOTSUPP;
3782
3783 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3784 return -EINVAL;
3785
3786 tdls_dbg(sdata, "TDLS oper %d peer %pM\n", oper, peer);
3787
3788 switch (oper) {
3789 case NL80211_TDLS_ENABLE_LINK:
3790 rcu_read_lock();
3791 sta = sta_info_get(sdata, peer);
3792 if (!sta) {
3793 rcu_read_unlock();
3794 return -ENOLINK;
3795 }
3796
3797 set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
3798 rcu_read_unlock();
3799 break;
3800 case NL80211_TDLS_DISABLE_LINK:
3801 return sta_info_destroy_addr(sdata, peer);
3802 case NL80211_TDLS_TEARDOWN:
3803 case NL80211_TDLS_SETUP:
3804 case NL80211_TDLS_DISCOVERY_REQ:
3805 /* We don't support in-driver setup/teardown/discovery */
3806 return -ENOTSUPP;
3807 default:
3808 return -ENOTSUPP;
3809 }
3810
3811 return 0;
3812}
3813
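The block above removes mac80211's in-file TDLS helpers (extended-capability and link IEs, frame encapsulation, and the enable/disable oper handler) without adding replacements in this hunk, so the code has presumably been relocated to a dedicated TDLS source file rather than dropped. One behavioural detail worth keeping in mind from the removed code: per the 802.11z comment, setup requests and responses are queued on AC_BK while other TDLS frames default to AC_VI. A trivial sketch of that mapping (enum values illustrative):

#include <stdio.h>

enum ac { AC_BK, AC_VI };

/* 802.11z queue mapping from the removed helper: setup req/resp go
 * out on the background queue, everything else defaults to video */
static enum ac tdls_ac(int is_setup_frame)
{
	return is_setup_frame ? AC_BK : AC_VI;
}

int main(void)
{
	printf("setup -> %d, other -> %d\n", tdls_ac(1), tdls_ac(0));
	return 0;
}
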
3814static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev, 3631static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
3815 const u8 *peer, u64 *cookie) 3632 const u8 *peer, u64 *cookie)
3816{ 3633{
@@ -3949,6 +3766,21 @@ static int ieee80211_set_qos_map(struct wiphy *wiphy,
3949 return 0; 3766 return 0;
3950} 3767}
3951 3768
3769static int ieee80211_set_ap_chanwidth(struct wiphy *wiphy,
3770 struct net_device *dev,
3771 struct cfg80211_chan_def *chandef)
3772{
3773 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3774 int ret;
3775 u32 changed = 0;
3776
3777 ret = ieee80211_vif_change_bandwidth(sdata, chandef, &changed);
3778 if (ret == 0)
3779 ieee80211_bss_info_change_notify(sdata, changed);
3780
3781 return ret;
3782}
3783
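The new ieee80211_set_ap_chanwidth() follows the change-then-notify idiom: attempt the bandwidth change, and fan the BSS notification out only if it succeeded. A standalone sketch (the names and the changed bit are placeholders):

#include <stdio.h>

static int change_bandwidth(unsigned int width, unsigned int *changed)
{
	*changed |= 1u << 0;	/* pretend a BSS_CHANGED_* bit */
	printf("width now %u MHz\n", width);
	return 0;
}

static void notify(unsigned int changed)
{
	printf("notify(0x%x)\n", changed);
}

/* only fan out the notification once the change itself succeeded,
 * matching the ret == 0 test in the function above */
static int set_ap_chanwidth(unsigned int width)
{
	unsigned int changed = 0;
	int ret = change_bandwidth(width, &changed);

	if (ret == 0)
		notify(changed);
	return ret;
}

int main(void)
{
	return set_ap_chanwidth(40);
}
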
3952const struct cfg80211_ops mac80211_config_ops = { 3784const struct cfg80211_ops mac80211_config_ops = {
3953 .add_virtual_intf = ieee80211_add_iface, 3785 .add_virtual_intf = ieee80211_add_iface,
3954 .del_virtual_intf = ieee80211_del_iface, 3786 .del_virtual_intf = ieee80211_del_iface,
@@ -4029,4 +3861,5 @@ const struct cfg80211_ops mac80211_config_ops = {
4029 .start_radar_detection = ieee80211_start_radar_detection, 3861 .start_radar_detection = ieee80211_start_radar_detection,
4030 .channel_switch = ieee80211_channel_switch, 3862 .channel_switch = ieee80211_channel_switch,
4031 .set_qos_map = ieee80211_set_qos_map, 3863 .set_qos_map = ieee80211_set_qos_map,
3864 .set_ap_chanwidth = ieee80211_set_ap_chanwidth,
4032}; 3865};
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 75b5dd2c9267..a310e33972de 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -9,6 +9,170 @@
9#include "ieee80211_i.h" 9#include "ieee80211_i.h"
10#include "driver-ops.h" 10#include "driver-ops.h"
11 11
12static int ieee80211_chanctx_num_assigned(struct ieee80211_local *local,
13 struct ieee80211_chanctx *ctx)
14{
15 struct ieee80211_sub_if_data *sdata;
16 int num = 0;
17
18 lockdep_assert_held(&local->chanctx_mtx);
19
20 list_for_each_entry(sdata, &ctx->assigned_vifs, assigned_chanctx_list)
21 num++;
22
23 return num;
24}
25
26static int ieee80211_chanctx_num_reserved(struct ieee80211_local *local,
27 struct ieee80211_chanctx *ctx)
28{
29 struct ieee80211_sub_if_data *sdata;
30 int num = 0;
31
32 lockdep_assert_held(&local->chanctx_mtx);
33
34 list_for_each_entry(sdata, &ctx->reserved_vifs, reserved_chanctx_list)
35 num++;
36
37 return num;
38}
39
40int ieee80211_chanctx_refcount(struct ieee80211_local *local,
41 struct ieee80211_chanctx *ctx)
42{
43 return ieee80211_chanctx_num_assigned(local, ctx) +
44 ieee80211_chanctx_num_reserved(local, ctx);
45}
46
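ieee80211_chanctx_refcount() derives the reference count by counting the vifs on the context's assigned and reserved lists rather than maintaining a stored ctx->refcount, so the count can never drift out of sync with the list membership it summarises. A standalone sketch using a minimal circular list in place of the kernel's list.h:

#include <stdio.h>

struct node { struct node *next; };

/* minimal circular list, enough to mirror list_for_each_entry */
static void list_init(struct node *h) { h->next = h; }

static void list_add(struct node *h, struct node *n)
{
	n->next = h->next;
	h->next = n;
}

static int list_count(const struct node *h)
{
	const struct node *p;
	int n = 0;

	for (p = h->next; p != h; p = p->next)
		n++;
	return n;
}

int main(void)
{
	struct node assigned, reserved, a, b;

	list_init(&assigned);
	list_init(&reserved);
	list_add(&assigned, &a);
	list_add(&reserved, &b);

	/* the "refcount" is just membership in the two lists */
	printf("refcount = %d\n",
	       list_count(&assigned) + list_count(&reserved));
	return 0;
}
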
47static int ieee80211_num_chanctx(struct ieee80211_local *local)
48{
49 struct ieee80211_chanctx *ctx;
50 int num = 0;
51
52 lockdep_assert_held(&local->chanctx_mtx);
53
54 list_for_each_entry(ctx, &local->chanctx_list, list)
55 num++;
56
57 return num;
58}
59
60static bool ieee80211_can_create_new_chanctx(struct ieee80211_local *local)
61{
62 lockdep_assert_held(&local->chanctx_mtx);
63 return ieee80211_num_chanctx(local) < ieee80211_max_num_channels(local);
64}
65
66static const struct cfg80211_chan_def *
67ieee80211_chanctx_reserved_chandef(struct ieee80211_local *local,
68 struct ieee80211_chanctx *ctx,
69 const struct cfg80211_chan_def *compat)
70{
71 struct ieee80211_sub_if_data *sdata;
72
73 lockdep_assert_held(&local->chanctx_mtx);
74
75 list_for_each_entry(sdata, &ctx->reserved_vifs,
76 reserved_chanctx_list) {
77 if (!compat)
78 compat = &sdata->reserved_chandef;
79
80 compat = cfg80211_chandef_compatible(&sdata->reserved_chandef,
81 compat);
82 if (!compat)
83 break;
84 }
85
86 return compat;
87}
88
89static const struct cfg80211_chan_def *
90ieee80211_chanctx_non_reserved_chandef(struct ieee80211_local *local,
91 struct ieee80211_chanctx *ctx,
92 const struct cfg80211_chan_def *compat)
93{
94 struct ieee80211_sub_if_data *sdata;
95
96 lockdep_assert_held(&local->chanctx_mtx);
97
98 list_for_each_entry(sdata, &ctx->assigned_vifs,
99 assigned_chanctx_list) {
100 if (sdata->reserved_chanctx != NULL)
101 continue;
102
103 if (!compat)
104 compat = &sdata->vif.bss_conf.chandef;
105
106 compat = cfg80211_chandef_compatible(
107 &sdata->vif.bss_conf.chandef, compat);
108 if (!compat)
109 break;
110 }
111
112 return compat;
113}
114
115static const struct cfg80211_chan_def *
116ieee80211_chanctx_combined_chandef(struct ieee80211_local *local,
117 struct ieee80211_chanctx *ctx,
118 const struct cfg80211_chan_def *compat)
119{
120 lockdep_assert_held(&local->chanctx_mtx);
121
122 compat = ieee80211_chanctx_reserved_chandef(local, ctx, compat);
123 if (!compat)
124 return NULL;
125
126 compat = ieee80211_chanctx_non_reserved_chandef(local, ctx, compat);
127 if (!compat)
128 return NULL;
129
130 return compat;
131}
132
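These helpers thread a running "compat" chandef through every reserved and assigned vif, letting each one either narrow it or veto it entirely; structurally it is a fold that can abort early, exactly like the "if (!compat) break;" above. A sketch that models chandefs as integer ranges and compatibility as intersection (a simplification; cfg80211_chandef_compatible() has richer semantics):

#include <stdio.h>

struct range { int lo, hi; };

/* overlap of two ranges, zero-width when incompatible: a crude
 * stand-in for cfg80211_chandef_compatible() */
static struct range range_compat(struct range a, struct range b)
{
	struct range r = {
		.lo = a.lo > b.lo ? a.lo : b.lo,
		.hi = a.hi < b.hi ? a.hi : b.hi,
	};
	return r;
}

static int range_valid(struct range r) { return r.lo < r.hi; }

int main(void)
{
	struct range vifs[] = { {0, 80}, {20, 60}, {40, 100} };
	struct range compat = vifs[0];
	size_t i;

	/* fold across every vif; any incompatibility aborts the walk */
	for (i = 1; i < sizeof(vifs) / sizeof(vifs[0]); i++) {
		compat = range_compat(compat, vifs[i]);
		if (!range_valid(compat))
			break;
	}

	if (range_valid(compat))
		printf("compatible: [%d, %d)\n", compat.lo, compat.hi);
	else
		printf("no compatible setting\n");
	return 0;
}
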
133static bool
134ieee80211_chanctx_can_reserve_chandef(struct ieee80211_local *local,
135 struct ieee80211_chanctx *ctx,
136 const struct cfg80211_chan_def *def)
137{
138 lockdep_assert_held(&local->chanctx_mtx);
139
140 if (ieee80211_chanctx_combined_chandef(local, ctx, def))
141 return true;
142
143 if (!list_empty(&ctx->reserved_vifs) &&
144 ieee80211_chanctx_reserved_chandef(local, ctx, def))
145 return true;
146
147 return false;
148}
149
150static struct ieee80211_chanctx *
151ieee80211_find_reservation_chanctx(struct ieee80211_local *local,
152 const struct cfg80211_chan_def *chandef,
153 enum ieee80211_chanctx_mode mode)
154{
155 struct ieee80211_chanctx *ctx;
156
157 lockdep_assert_held(&local->chanctx_mtx);
158
159 if (mode == IEEE80211_CHANCTX_EXCLUSIVE)
160 return NULL;
161
162 list_for_each_entry(ctx, &local->chanctx_list, list) {
163 if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE)
164 continue;
165
166 if (!ieee80211_chanctx_can_reserve_chandef(local, ctx,
167 chandef))
168 continue;
169
170 return ctx;
171 }
172
173 return NULL;
174}
175
12static enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta) 176static enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta)
13{ 177{
14 switch (sta->bandwidth) { 178 switch (sta->bandwidth) {
@@ -190,6 +354,11 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
190 if (!compat) 354 if (!compat)
191 continue; 355 continue;
192 356
357 compat = ieee80211_chanctx_reserved_chandef(local, ctx,
358 compat);
359 if (!compat)
360 continue;
361
193 ieee80211_change_chanctx(local, ctx, compat); 362 ieee80211_change_chanctx(local, ctx, compat);
194 363
195 return ctx; 364 return ctx;
@@ -217,62 +386,91 @@ static bool ieee80211_is_radar_required(struct ieee80211_local *local)
217} 386}
218 387
219static struct ieee80211_chanctx * 388static struct ieee80211_chanctx *
220ieee80211_new_chanctx(struct ieee80211_local *local, 389ieee80211_alloc_chanctx(struct ieee80211_local *local,
221 const struct cfg80211_chan_def *chandef, 390 const struct cfg80211_chan_def *chandef,
222 enum ieee80211_chanctx_mode mode) 391 enum ieee80211_chanctx_mode mode)
223{ 392{
224 struct ieee80211_chanctx *ctx; 393 struct ieee80211_chanctx *ctx;
225 u32 changed;
226 int err;
227 394
228 lockdep_assert_held(&local->chanctx_mtx); 395 lockdep_assert_held(&local->chanctx_mtx);
229 396
230 ctx = kzalloc(sizeof(*ctx) + local->hw.chanctx_data_size, GFP_KERNEL); 397 ctx = kzalloc(sizeof(*ctx) + local->hw.chanctx_data_size, GFP_KERNEL);
231 if (!ctx) 398 if (!ctx)
232 return ERR_PTR(-ENOMEM); 399 return NULL;
233 400
401 INIT_LIST_HEAD(&ctx->assigned_vifs);
402 INIT_LIST_HEAD(&ctx->reserved_vifs);
234 ctx->conf.def = *chandef; 403 ctx->conf.def = *chandef;
235 ctx->conf.rx_chains_static = 1; 404 ctx->conf.rx_chains_static = 1;
236 ctx->conf.rx_chains_dynamic = 1; 405 ctx->conf.rx_chains_dynamic = 1;
237 ctx->mode = mode; 406 ctx->mode = mode;
238 ctx->conf.radar_enabled = ieee80211_is_radar_required(local); 407 ctx->conf.radar_enabled = ieee80211_is_radar_required(local);
239 ieee80211_recalc_chanctx_min_def(local, ctx); 408 ieee80211_recalc_chanctx_min_def(local, ctx);
409
410 return ctx;
411}
412
413static int ieee80211_add_chanctx(struct ieee80211_local *local,
414 struct ieee80211_chanctx *ctx)
415{
416 u32 changed;
417 int err;
418
419 lockdep_assert_held(&local->mtx);
420 lockdep_assert_held(&local->chanctx_mtx);
421
240 if (!local->use_chanctx) 422 if (!local->use_chanctx)
241 local->hw.conf.radar_enabled = ctx->conf.radar_enabled; 423 local->hw.conf.radar_enabled = ctx->conf.radar_enabled;
242 424
243 /* we hold the mutex to prevent idle from changing */
244 lockdep_assert_held(&local->mtx);
245 /* turn idle off *before* setting channel -- some drivers need that */ 425 /* turn idle off *before* setting channel -- some drivers need that */
246 changed = ieee80211_idle_off(local); 426 changed = ieee80211_idle_off(local);
247 if (changed) 427 if (changed)
248 ieee80211_hw_config(local, changed); 428 ieee80211_hw_config(local, changed);
249 429
250 if (!local->use_chanctx) { 430 if (!local->use_chanctx) {
251 local->_oper_chandef = *chandef; 431 local->_oper_chandef = ctx->conf.def;
252 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 432 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
253 } else { 433 } else {
254 err = drv_add_chanctx(local, ctx); 434 err = drv_add_chanctx(local, ctx);
255 if (err) { 435 if (err) {
256 kfree(ctx);
257 ieee80211_recalc_idle(local); 436 ieee80211_recalc_idle(local);
258 return ERR_PTR(err); 437 return err;
259 } 438 }
260 } 439 }
261 440
262 /* and keep the mutex held until the new chanctx is on the list */ 441 return 0;
263 list_add_rcu(&ctx->list, &local->chanctx_list); 442}
264 443
444static struct ieee80211_chanctx *
445ieee80211_new_chanctx(struct ieee80211_local *local,
446 const struct cfg80211_chan_def *chandef,
447 enum ieee80211_chanctx_mode mode)
448{
449 struct ieee80211_chanctx *ctx;
450 int err;
451
452 lockdep_assert_held(&local->mtx);
453 lockdep_assert_held(&local->chanctx_mtx);
454
455 ctx = ieee80211_alloc_chanctx(local, chandef, mode);
456 if (!ctx)
457 return ERR_PTR(-ENOMEM);
458
459 err = ieee80211_add_chanctx(local, ctx);
460 if (err) {
461 kfree(ctx);
462 return ERR_PTR(err);
463 }
464
465 list_add_rcu(&ctx->list, &local->chanctx_list);
265 return ctx; 466 return ctx;
266} 467}
267 468
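ieee80211_new_chanctx() is now split into a pure allocator (ieee80211_alloc_chanctx) and a side-effecting registration step (ieee80211_add_chanctx), which lets the reservation code compose the two differently. A standalone sketch of the constructor/register split (names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct ctx { int id; };

/* pure allocation and initialisation: no side effects, can only
 * fail for lack of memory */
static struct ctx *ctx_alloc(int id)
{
	struct ctx *c = calloc(1, sizeof(*c));

	if (c)
		c->id = id;
	return c;
}

/* side-effecting registration, kept separate so callers can
 * compose alloc + add in different ways */
static int ctx_add(struct ctx *c)
{
	printf("registered ctx %d\n", c->id);
	return 0;
}

int main(void)
{
	struct ctx *c = ctx_alloc(1);

	if (!c)
		return 1;
	if (ctx_add(c)) {
		free(c);
		return 1;
	}
	free(c);
	return 0;
}
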
268static void ieee80211_free_chanctx(struct ieee80211_local *local, 469static void ieee80211_del_chanctx(struct ieee80211_local *local,
269 struct ieee80211_chanctx *ctx) 470 struct ieee80211_chanctx *ctx)
270{ 471{
271 bool check_single_channel = false;
272 lockdep_assert_held(&local->chanctx_mtx); 472 lockdep_assert_held(&local->chanctx_mtx);
273 473
274 WARN_ON_ONCE(ctx->refcount != 0);
275
276 if (!local->use_chanctx) { 474 if (!local->use_chanctx) {
277 struct cfg80211_chan_def *chandef = &local->_oper_chandef; 475 struct cfg80211_chan_def *chandef = &local->_oper_chandef;
278 chandef->width = NL80211_CHAN_WIDTH_20_NOHT; 476 chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
@@ -282,8 +480,9 @@ static void ieee80211_free_chanctx(struct ieee80211_local *local,
282 /* NOTE: Disabling radar is only valid here for 480 /* NOTE: Disabling radar is only valid here for
283 * single channel context. To be sure, check it ... 481 * single channel context. To be sure, check it ...
284 */ 482 */
285 if (local->hw.conf.radar_enabled) 483 WARN_ON(local->hw.conf.radar_enabled &&
286 check_single_channel = true; 484 !list_empty(&local->chanctx_list));
485
287 local->hw.conf.radar_enabled = false; 486 local->hw.conf.radar_enabled = false;
288 487
289 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 488 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
@@ -291,39 +490,19 @@ static void ieee80211_free_chanctx(struct ieee80211_local *local,
291 drv_remove_chanctx(local, ctx); 490 drv_remove_chanctx(local, ctx);
292 } 491 }
293 492
294 list_del_rcu(&ctx->list);
295 kfree_rcu(ctx, rcu_head);
296
297 /* throw a warning if this wasn't the only channel context. */
298 WARN_ON(check_single_channel && !list_empty(&local->chanctx_list));
299
300 ieee80211_recalc_idle(local); 493 ieee80211_recalc_idle(local);
301} 494}
302 495
303static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata, 496static void ieee80211_free_chanctx(struct ieee80211_local *local,
304 struct ieee80211_chanctx *ctx) 497 struct ieee80211_chanctx *ctx)
305{ 498{
306 struct ieee80211_local *local = sdata->local;
307 int ret;
308
309 lockdep_assert_held(&local->chanctx_mtx); 499 lockdep_assert_held(&local->chanctx_mtx);
310 500
311 ret = drv_assign_vif_chanctx(local, sdata, ctx); 501 WARN_ON_ONCE(ieee80211_chanctx_refcount(local, ctx) != 0);
312 if (ret)
313 return ret;
314 502
315 rcu_assign_pointer(sdata->vif.chanctx_conf, &ctx->conf); 503 list_del_rcu(&ctx->list);
316 ctx->refcount++; 504 ieee80211_del_chanctx(local, ctx);
317 505 kfree_rcu(ctx, rcu_head);
318 ieee80211_recalc_txpower(sdata);
319 ieee80211_recalc_chanctx_min_def(local, ctx);
320 sdata->vif.bss_conf.idle = false;
321
322 if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
323 sdata->vif.type != NL80211_IFTYPE_MONITOR)
324 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
325
326 return 0;
327} 506}
328 507
329static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, 508static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
@@ -384,30 +563,58 @@ static void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
384 drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RADAR); 563 drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RADAR);
385} 564}
386 565
387static void ieee80211_unassign_vif_chanctx(struct ieee80211_sub_if_data *sdata, 566static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
388 struct ieee80211_chanctx *ctx) 567 struct ieee80211_chanctx *new_ctx)
389{ 568{
390 struct ieee80211_local *local = sdata->local; 569 struct ieee80211_local *local = sdata->local;
570 struct ieee80211_chanctx_conf *conf;
571 struct ieee80211_chanctx *curr_ctx = NULL;
572 int ret = 0;
391 573
392 lockdep_assert_held(&local->chanctx_mtx); 574 conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
575 lockdep_is_held(&local->chanctx_mtx));
393 576
394 ctx->refcount--; 577 if (conf) {
395 rcu_assign_pointer(sdata->vif.chanctx_conf, NULL); 578 curr_ctx = container_of(conf, struct ieee80211_chanctx, conf);
396 579
397 sdata->vif.bss_conf.idle = true; 580 drv_unassign_vif_chanctx(local, sdata, curr_ctx);
581 conf = NULL;
582 list_del(&sdata->assigned_chanctx_list);
583 }
398 584
399 if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE && 585 if (new_ctx) {
400 sdata->vif.type != NL80211_IFTYPE_MONITOR) 586 ret = drv_assign_vif_chanctx(local, sdata, new_ctx);
401 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE); 587 if (ret)
588 goto out;
402 589
403 drv_unassign_vif_chanctx(local, sdata, ctx); 590 conf = &new_ctx->conf;
591 list_add(&sdata->assigned_chanctx_list,
592 &new_ctx->assigned_vifs);
593 }
594
595out:
596 rcu_assign_pointer(sdata->vif.chanctx_conf, conf);
597
598 sdata->vif.bss_conf.idle = !conf;
599
600 if (curr_ctx && ieee80211_chanctx_num_assigned(local, curr_ctx) > 0) {
601 ieee80211_recalc_chanctx_chantype(local, curr_ctx);
602 ieee80211_recalc_smps_chanctx(local, curr_ctx);
603 ieee80211_recalc_radar_chanctx(local, curr_ctx);
604 ieee80211_recalc_chanctx_min_def(local, curr_ctx);
605 }
404 606
405 if (ctx->refcount > 0) { 607 if (new_ctx && ieee80211_chanctx_num_assigned(local, new_ctx) > 0) {
406 ieee80211_recalc_chanctx_chantype(sdata->local, ctx); 608 ieee80211_recalc_txpower(sdata);
407 ieee80211_recalc_smps_chanctx(local, ctx); 609 ieee80211_recalc_chanctx_min_def(local, new_ctx);
408 ieee80211_recalc_radar_chanctx(local, ctx);
409 ieee80211_recalc_chanctx_min_def(local, ctx);
410 } 610 }
611
612 if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
613 sdata->vif.type != NL80211_IFTYPE_MONITOR)
614 ieee80211_bss_info_change_notify(sdata,
615 BSS_CHANGED_IDLE);
616
617 return ret;
411} 618}
412 619
413static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata) 620static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
@@ -425,8 +632,11 @@ static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
425 632
426 ctx = container_of(conf, struct ieee80211_chanctx, conf); 633 ctx = container_of(conf, struct ieee80211_chanctx, conf);
427 634
428 ieee80211_unassign_vif_chanctx(sdata, ctx); 635 if (sdata->reserved_chanctx)
429 if (ctx->refcount == 0) 636 ieee80211_vif_unreserve_chanctx(sdata);
637
638 ieee80211_assign_vif_chanctx(sdata, NULL);
639 if (ieee80211_chanctx_refcount(local, ctx) == 0)
430 ieee80211_free_chanctx(local, ctx); 640 ieee80211_free_chanctx(local, ctx);
431} 641}
432 642
@@ -526,6 +736,7 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
526{ 736{
527 struct ieee80211_local *local = sdata->local; 737 struct ieee80211_local *local = sdata->local;
528 struct ieee80211_chanctx *ctx; 738 struct ieee80211_chanctx *ctx;
739 u8 radar_detect_width = 0;
529 int ret; 740 int ret;
530 741
531 lockdep_assert_held(&local->mtx); 742 lockdep_assert_held(&local->mtx);
@@ -533,6 +744,22 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
533 WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev)); 744 WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev));
534 745
535 mutex_lock(&local->chanctx_mtx); 746 mutex_lock(&local->chanctx_mtx);
747
748 ret = cfg80211_chandef_dfs_required(local->hw.wiphy,
749 chandef,
750 sdata->wdev.iftype);
751 if (ret < 0)
752 goto out;
753 if (ret > 0)
754 radar_detect_width = BIT(chandef->width);
755
756 sdata->radar_required = ret;
757
758 ret = ieee80211_check_combinations(sdata, chandef, mode,
759 radar_detect_width);
760 if (ret < 0)
761 goto out;
762
536 __ieee80211_vif_release_channel(sdata); 763 __ieee80211_vif_release_channel(sdata);
537 764
538 ctx = ieee80211_find_chanctx(local, chandef, mode); 765 ctx = ieee80211_find_chanctx(local, chandef, mode);
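The DFS check added in this hunk is tri-state: a negative return is a hard error, zero means no radar detection is needed, and a positive value means the radar-detect width must be derived from the chandef before checking interface combinations. A small sketch of handling that return convention (the channel range is illustrative, not a real DFS rule):

#include <stdio.h>

/* stand-in for cfg80211_chandef_dfs_required(): <0 error,
 * 0 no radar detection needed, >0 radar detection required */
static int dfs_required(int chan)
{
	return chan >= 52 && chan <= 140 ? 1 : 0;
}

static int use_channel(int chan)
{
	unsigned int radar_width = 0;
	int ret = dfs_required(chan);

	if (ret < 0)
		return ret;		/* hard failure */
	if (ret > 0)
		radar_width = 1u << 2;	/* e.g. BIT(width) */

	printf("chan %d: radar_width=0x%x\n", chan, radar_width);
	return 0;
}

int main(void)
{
	use_channel(36);
	use_channel(100);
	return 0;
}
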
@@ -548,7 +775,7 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
548 ret = ieee80211_assign_vif_chanctx(sdata, ctx); 775 ret = ieee80211_assign_vif_chanctx(sdata, ctx);
549 if (ret) { 776 if (ret) {
550 /* if assign fails refcount stays the same */ 777 /* if assign fails refcount stays the same */
551 if (ctx->refcount == 0) 778 if (ieee80211_chanctx_refcount(local, ctx) == 0)
552 ieee80211_free_chanctx(local, ctx); 779 ieee80211_free_chanctx(local, ctx);
553 goto out; 780 goto out;
554 } 781 }
@@ -560,15 +787,47 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
560 return ret; 787 return ret;
561} 788}
562 789
790static int __ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
791 struct ieee80211_chanctx *ctx,
792 u32 *changed)
793{
794 struct ieee80211_local *local = sdata->local;
795 const struct cfg80211_chan_def *chandef = &sdata->csa_chandef;
796 u32 chanctx_changed = 0;
797
798 if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
799 IEEE80211_CHAN_DISABLED))
800 return -EINVAL;
801
802 if (ieee80211_chanctx_refcount(local, ctx) != 1)
803 return -EINVAL;
804
805 if (sdata->vif.bss_conf.chandef.width != chandef->width) {
806 chanctx_changed = IEEE80211_CHANCTX_CHANGE_WIDTH;
807 *changed |= BSS_CHANGED_BANDWIDTH;
808 }
809
810 sdata->vif.bss_conf.chandef = *chandef;
811 ctx->conf.def = *chandef;
812
813 chanctx_changed |= IEEE80211_CHANCTX_CHANGE_CHANNEL;
814 drv_change_chanctx(local, ctx, chanctx_changed);
815
816 ieee80211_recalc_chanctx_chantype(local, ctx);
817 ieee80211_recalc_smps_chanctx(local, ctx);
818 ieee80211_recalc_radar_chanctx(local, ctx);
819 ieee80211_recalc_chanctx_min_def(local, ctx);
820
821 return 0;
822}
823
563int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata, 824int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
564 u32 *changed) 825 u32 *changed)
565{ 826{
566 struct ieee80211_local *local = sdata->local; 827 struct ieee80211_local *local = sdata->local;
567 struct ieee80211_chanctx_conf *conf; 828 struct ieee80211_chanctx_conf *conf;
568 struct ieee80211_chanctx *ctx; 829 struct ieee80211_chanctx *ctx;
569 const struct cfg80211_chan_def *chandef = &sdata->csa_chandef;
570 int ret; 830 int ret;
571 u32 chanctx_changed = 0;
572 831
573 lockdep_assert_held(&local->mtx); 832 lockdep_assert_held(&local->mtx);
574 833
@@ -576,11 +835,94 @@ int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
576 if (WARN_ON(!sdata->vif.csa_active)) 835 if (WARN_ON(!sdata->vif.csa_active))
577 return -EINVAL; 836 return -EINVAL;
578 837
579 if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef, 838 mutex_lock(&local->chanctx_mtx);
580 IEEE80211_CHAN_DISABLED)) 839 conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
840 lockdep_is_held(&local->chanctx_mtx));
841 if (!conf) {
842 ret = -EINVAL;
843 goto out;
844 }
845
846 ctx = container_of(conf, struct ieee80211_chanctx, conf);
847
848 ret = __ieee80211_vif_change_channel(sdata, ctx, changed);
849 out:
850 mutex_unlock(&local->chanctx_mtx);
851 return ret;
852}
853
854static void
855__ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
856 bool clear)
857{
858 struct ieee80211_local *local __maybe_unused = sdata->local;
859 struct ieee80211_sub_if_data *vlan;
860 struct ieee80211_chanctx_conf *conf;
861
862 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP))
863 return;
864
865 lockdep_assert_held(&local->mtx);
866
 867 /* Check that conf exists; even when clearing, this function
868 * must be called with the AP's channel context still there
869 * as it would otherwise cause VLANs to have an invalid
870 * channel context pointer for a while, possibly pointing
871 * to a channel context that has already been freed.
872 */
873 conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
874 lockdep_is_held(&local->chanctx_mtx));
875 WARN_ON(!conf);
876
877 if (clear)
878 conf = NULL;
879
880 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
881 rcu_assign_pointer(vlan->vif.chanctx_conf, conf);
882}
883
884void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
885 bool clear)
886{
887 struct ieee80211_local *local = sdata->local;
888
889 mutex_lock(&local->chanctx_mtx);
890
891 __ieee80211_vif_copy_chanctx_to_vlans(sdata, clear);
892
893 mutex_unlock(&local->chanctx_mtx);
894}
895
896int ieee80211_vif_unreserve_chanctx(struct ieee80211_sub_if_data *sdata)
897{
898 struct ieee80211_chanctx *ctx = sdata->reserved_chanctx;
899
900 lockdep_assert_held(&sdata->local->chanctx_mtx);
901
902 if (WARN_ON(!ctx))
581 return -EINVAL; 903 return -EINVAL;
582 904
905 list_del(&sdata->reserved_chanctx_list);
906 sdata->reserved_chanctx = NULL;
907
908 if (ieee80211_chanctx_refcount(sdata->local, ctx) == 0)
909 ieee80211_free_chanctx(sdata->local, ctx);
910
911 return 0;
912}
913
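Unreserving drops the vif from the context's reserved list, clears the back-pointer, and frees the context once its derived refcount reaches zero. A standalone sketch of the drop-then-free-on-zero bookkeeping (with an explicit counter standing in for the list-derived refcount):

#include <stdio.h>
#include <stdlib.h>

struct ctx { int users; };

static void ctx_put(struct ctx **slot)
{
	struct ctx *c = *slot;

	if (!c)
		return;

	*slot = NULL;		/* drop our reservation first */
	if (--c->users == 0) {	/* last user frees the context */
		printf("freeing ctx\n");
		free(c);
	}
}

int main(void)
{
	struct ctx *c = calloc(1, sizeof(*c));
	struct ctx *reservation;

	if (!c)
		return 1;
	c->users = 1;
	reservation = c;
	ctx_put(&reservation);
	return 0;
}
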
914int ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata,
915 const struct cfg80211_chan_def *chandef,
916 enum ieee80211_chanctx_mode mode,
917 bool radar_required)
918{
919 struct ieee80211_local *local = sdata->local;
920 struct ieee80211_chanctx_conf *conf;
921 struct ieee80211_chanctx *new_ctx, *curr_ctx;
922 int ret = 0;
923
583 mutex_lock(&local->chanctx_mtx); 924 mutex_lock(&local->chanctx_mtx);
925
584 conf = rcu_dereference_protected(sdata->vif.chanctx_conf, 926 conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
585 lockdep_is_held(&local->chanctx_mtx)); 927 lockdep_is_held(&local->chanctx_mtx));
586 if (!conf) { 928 if (!conf) {
@@ -588,30 +930,108 @@ int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
588 goto out; 930 goto out;
589 } 931 }
590 932
591 ctx = container_of(conf, struct ieee80211_chanctx, conf); 933 curr_ctx = container_of(conf, struct ieee80211_chanctx, conf);
592 if (ctx->refcount != 1) { 934
935 new_ctx = ieee80211_find_reservation_chanctx(local, chandef, mode);
936 if (!new_ctx) {
937 if (ieee80211_chanctx_refcount(local, curr_ctx) == 1 &&
938 (local->hw.flags & IEEE80211_HW_CHANGE_RUNNING_CHANCTX)) {
939 /* if we're the only users of the chanctx and
940 * the driver supports changing a running
941 * context, reserve our current context
942 */
943 new_ctx = curr_ctx;
944 } else if (ieee80211_can_create_new_chanctx(local)) {
945 /* create a new context and reserve it */
946 new_ctx = ieee80211_new_chanctx(local, chandef, mode);
947 if (IS_ERR(new_ctx)) {
948 ret = PTR_ERR(new_ctx);
949 goto out;
950 }
951 } else {
952 ret = -EBUSY;
953 goto out;
954 }
955 }
956
957 list_add(&sdata->reserved_chanctx_list, &new_ctx->reserved_vifs);
958 sdata->reserved_chanctx = new_ctx;
959 sdata->reserved_chandef = *chandef;
960 sdata->reserved_radar_required = radar_required;
961out:
962 mutex_unlock(&local->chanctx_mtx);
963 return ret;
964}
965
966int ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata,
967 u32 *changed)
968{
969 struct ieee80211_local *local = sdata->local;
970 struct ieee80211_chanctx *ctx;
971 struct ieee80211_chanctx *old_ctx;
972 struct ieee80211_chanctx_conf *conf;
973 int ret;
974 u32 tmp_changed = *changed;
975
976 /* TODO: need to recheck if the chandef is usable etc.? */
977
978 lockdep_assert_held(&local->mtx);
979
980 mutex_lock(&local->chanctx_mtx);
981
982 ctx = sdata->reserved_chanctx;
983 if (WARN_ON(!ctx)) {
593 ret = -EINVAL; 984 ret = -EINVAL;
594 goto out; 985 goto out;
595 } 986 }
596 987
597 if (sdata->vif.bss_conf.chandef.width != chandef->width) { 988 conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
598 chanctx_changed = IEEE80211_CHANCTX_CHANGE_WIDTH; 989 lockdep_is_held(&local->chanctx_mtx));
599 *changed |= BSS_CHANGED_BANDWIDTH; 990 if (!conf) {
991 ret = -EINVAL;
992 goto out;
600 } 993 }
601 994
602 sdata->vif.bss_conf.chandef = *chandef; 995 old_ctx = container_of(conf, struct ieee80211_chanctx, conf);
603 ctx->conf.def = *chandef;
604 996
605 chanctx_changed |= IEEE80211_CHANCTX_CHANGE_CHANNEL; 997 if (sdata->vif.bss_conf.chandef.width != sdata->reserved_chandef.width)
606 drv_change_chanctx(local, ctx, chanctx_changed); 998 tmp_changed |= BSS_CHANGED_BANDWIDTH;
999
1000 sdata->vif.bss_conf.chandef = sdata->reserved_chandef;
1001
1002 /* unref our reservation */
1003 sdata->reserved_chanctx = NULL;
1004 sdata->radar_required = sdata->reserved_radar_required;
1005 list_del(&sdata->reserved_chanctx_list);
1006
1007 if (old_ctx == ctx) {
1008 /* This is our own context, just change it */
1009 ret = __ieee80211_vif_change_channel(sdata, old_ctx,
1010 &tmp_changed);
1011 if (ret)
1012 goto out;
1013 } else {
1014 ret = ieee80211_assign_vif_chanctx(sdata, ctx);
1015 if (ieee80211_chanctx_refcount(local, old_ctx) == 0)
1016 ieee80211_free_chanctx(local, old_ctx);
1017 if (ret) {
1018 /* if assign fails refcount stays the same */
1019 if (ieee80211_chanctx_refcount(local, ctx) == 0)
1020 ieee80211_free_chanctx(local, ctx);
1021 goto out;
1022 }
1023
1024 if (sdata->vif.type == NL80211_IFTYPE_AP)
1025 __ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
1026 }
1027
1028 *changed = tmp_changed;
607 1029
608 ieee80211_recalc_chanctx_chantype(local, ctx); 1030 ieee80211_recalc_chanctx_chantype(local, ctx);
609 ieee80211_recalc_smps_chanctx(local, ctx); 1031 ieee80211_recalc_smps_chanctx(local, ctx);
610 ieee80211_recalc_radar_chanctx(local, ctx); 1032 ieee80211_recalc_radar_chanctx(local, ctx);
611 ieee80211_recalc_chanctx_min_def(local, ctx); 1033 ieee80211_recalc_chanctx_min_def(local, ctx);
612 1034out:
613 ret = 0;
614 out:
615 mutex_unlock(&local->chanctx_mtx); 1035 mutex_unlock(&local->chanctx_mtx);
616 return ret; 1036 return ret;
617} 1037}
@@ -695,40 +1115,6 @@ void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata)
695 mutex_unlock(&local->chanctx_mtx); 1115 mutex_unlock(&local->chanctx_mtx);
696} 1116}
697 1117
698void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
699 bool clear)
700{
701 struct ieee80211_local *local = sdata->local;
702 struct ieee80211_sub_if_data *vlan;
703 struct ieee80211_chanctx_conf *conf;
704
705 ASSERT_RTNL();
706
707 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP))
708 return;
709
710 mutex_lock(&local->chanctx_mtx);
711
712 /*
713 * Check that conf exists, even when clearing this function
714 * must be called with the AP's channel context still there
715 * as it would otherwise cause VLANs to have an invalid
716 * channel context pointer for a while, possibly pointing
717 * to a channel context that has already been freed.
718 */
719 conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
720 lockdep_is_held(&local->chanctx_mtx));
721 WARN_ON(!conf);
722
723 if (clear)
724 conf = NULL;
725
726 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
727 rcu_assign_pointer(vlan->vif.chanctx_conf, conf);
728
729 mutex_unlock(&local->chanctx_mtx);
730}
731
732void ieee80211_iter_chan_contexts_atomic( 1118void ieee80211_iter_chan_contexts_atomic(
733 struct ieee80211_hw *hw, 1119 struct ieee80211_hw *hw,
734 void (*iter)(struct ieee80211_hw *hw, 1120 void (*iter)(struct ieee80211_hw *hw,
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index fa16e54980a1..0e963bc1ceac 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -128,7 +128,7 @@ static ssize_t sta_tx_latency_stat_write(struct file *file,
128 if (!strcmp(buf, TX_LATENCY_DISABLED)) { 128 if (!strcmp(buf, TX_LATENCY_DISABLED)) {
129 if (!tx_latency) 129 if (!tx_latency)
130 goto unlock; 130 goto unlock;
131 rcu_assign_pointer(local->tx_latency, NULL); 131 RCU_INIT_POINTER(local->tx_latency, NULL);
132 synchronize_rcu(); 132 synchronize_rcu();
133 kfree(tx_latency); 133 kfree(tx_latency);
134 goto unlock; 134 goto unlock;
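Swapping rcu_assign_pointer(..., NULL) for RCU_INIT_POINTER() is safe because a NULL store publishes no structure whose initialisation a reader could observe out of order, so no memory barrier is needed. The surrounding lines follow the standard RCU teardown sequence; schematically (kernel-style fragment, not standalone code):

/* unpublish, wait out pre-existing readers, then reclaim */
RCU_INIT_POINTER(local->tx_latency, NULL);  /* NULL needs no barrier */
synchronize_rcu();                          /* all readers are done */
kfree(tx_latency);                          /* nobody can still see it */
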
diff --git a/net/mac80211/debugfs.h b/net/mac80211/debugfs.h
index 214ed4ecd739..60c35afee29d 100644
--- a/net/mac80211/debugfs.h
+++ b/net/mac80211/debugfs.h
@@ -1,6 +1,8 @@
1#ifndef __MAC80211_DEBUGFS_H 1#ifndef __MAC80211_DEBUGFS_H
2#define __MAC80211_DEBUGFS_H 2#define __MAC80211_DEBUGFS_H
3 3
4#include "ieee80211_i.h"
5
4#ifdef CONFIG_MAC80211_DEBUGFS 6#ifdef CONFIG_MAC80211_DEBUGFS
5void debugfs_hw_add(struct ieee80211_local *local); 7void debugfs_hw_add(struct ieee80211_local *local);
6int __printf(4, 5) mac80211_format_buffer(char __user *userbuf, size_t count, 8int __printf(4, 5) mac80211_format_buffer(char __user *userbuf, size_t count,
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 40a648938985..e205ebabfa50 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -34,8 +34,7 @@ static ssize_t ieee80211_if_read(
34 ssize_t ret = -EINVAL; 34 ssize_t ret = -EINVAL;
35 35
36 read_lock(&dev_base_lock); 36 read_lock(&dev_base_lock);
37 if (sdata->dev->reg_state == NETREG_REGISTERED) 37 ret = (*format)(sdata, buf, sizeof(buf));
38 ret = (*format)(sdata, buf, sizeof(buf));
39 read_unlock(&dev_base_lock); 38 read_unlock(&dev_base_lock);
40 39
41 if (ret >= 0) 40 if (ret >= 0)
@@ -62,8 +61,7 @@ static ssize_t ieee80211_if_write(
62 61
63 ret = -ENODEV; 62 ret = -ENODEV;
64 rtnl_lock(); 63 rtnl_lock();
65 if (sdata->dev->reg_state == NETREG_REGISTERED) 64 ret = (*write)(sdata, buf, count);
66 ret = (*write)(sdata, buf, count);
67 rtnl_unlock(); 65 rtnl_unlock();
68 66
69 return ret; 67 return ret;
diff --git a/net/mac80211/debugfs_netdev.h b/net/mac80211/debugfs_netdev.h
index 79025e79f4d6..9f5501a9a795 100644
--- a/net/mac80211/debugfs_netdev.h
+++ b/net/mac80211/debugfs_netdev.h
@@ -3,6 +3,8 @@
3#ifndef __IEEE80211_DEBUGFS_NETDEV_H 3#ifndef __IEEE80211_DEBUGFS_NETDEV_H
4#define __IEEE80211_DEBUGFS_NETDEV_H 4#define __IEEE80211_DEBUGFS_NETDEV_H
5 5
6#include "ieee80211_i.h"
7
6#ifdef CONFIG_MAC80211_DEBUGFS 8#ifdef CONFIG_MAC80211_DEBUGFS
7void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata); 9void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata);
8void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata); 10void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index fc689f5d971e..bd782dcffcc7 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -5,11 +5,11 @@
5#include "ieee80211_i.h" 5#include "ieee80211_i.h"
6#include "trace.h" 6#include "trace.h"
7 7
8static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata) 8static inline bool check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
9{ 9{
10 WARN(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER), 10 return !WARN(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER),
11 "%s: Failed check-sdata-in-driver check, flags: 0x%x\n", 11 "%s: Failed check-sdata-in-driver check, flags: 0x%x\n",
12 sdata->dev ? sdata->dev->name : sdata->name, sdata->flags); 12 sdata->dev ? sdata->dev->name : sdata->name, sdata->flags);
13} 13}
14 14
15static inline struct ieee80211_sub_if_data * 15static inline struct ieee80211_sub_if_data *
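check_sdata_in_driver() now returns the negated WARN() condition instead of void, turning a diagnostic into a guard: every drv_*() caller in the hunks below bails out with -EIO or a bare return when the sdata is not registered with the driver. A userspace sketch of the WARN-as-guard shape (warn_on() is a stand-in for the kernel macro):

#include <stdio.h>

/* userspace stand-in for WARN(cond, ...): logs and reports cond */
static int warn_on(int cond, const char *msg)
{
	if (cond)
		fprintf(stderr, "WARN: %s\n", msg);
	return cond;
}

struct sdata { unsigned int flags; };
#define IN_DRIVER 0x1

/* returns true when the sdata is usable, so callers can write
 * "if (!check_sdata_in_driver(sdata)) return -EIO;" */
static int check_sdata_in_driver(const struct sdata *s)
{
	return !warn_on(!(s->flags & IN_DRIVER), "not in driver");
}

int main(void)
{
	struct sdata good = { .flags = IN_DRIVER }, bad = { 0 };

	printf("%d %d\n", check_sdata_in_driver(&good),
	       check_sdata_in_driver(&bad));
	return 0;
}
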
@@ -168,7 +168,8 @@ static inline int drv_change_interface(struct ieee80211_local *local,
168 168
169 might_sleep(); 169 might_sleep();
170 170
171 check_sdata_in_driver(sdata); 171 if (!check_sdata_in_driver(sdata))
172 return -EIO;
172 173
173 trace_drv_change_interface(local, sdata, type, p2p); 174 trace_drv_change_interface(local, sdata, type, p2p);
174 ret = local->ops->change_interface(&local->hw, &sdata->vif, type, p2p); 175 ret = local->ops->change_interface(&local->hw, &sdata->vif, type, p2p);
@@ -181,7 +182,8 @@ static inline void drv_remove_interface(struct ieee80211_local *local,
181{ 182{
182 might_sleep(); 183 might_sleep();
183 184
184 check_sdata_in_driver(sdata); 185 if (!check_sdata_in_driver(sdata))
186 return;
185 187
186 trace_drv_remove_interface(local, sdata); 188 trace_drv_remove_interface(local, sdata);
187 local->ops->remove_interface(&local->hw, &sdata->vif); 189 local->ops->remove_interface(&local->hw, &sdata->vif);
@@ -219,7 +221,8 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
219 sdata->vif.type == NL80211_IFTYPE_MONITOR)) 221 sdata->vif.type == NL80211_IFTYPE_MONITOR))
220 return; 222 return;
221 223
222 check_sdata_in_driver(sdata); 224 if (!check_sdata_in_driver(sdata))
225 return;
223 226
224 trace_drv_bss_info_changed(local, sdata, info, changed); 227 trace_drv_bss_info_changed(local, sdata, info, changed);
225 if (local->ops->bss_info_changed) 228 if (local->ops->bss_info_changed)
@@ -278,7 +281,8 @@ static inline int drv_set_key(struct ieee80211_local *local,
278 might_sleep(); 281 might_sleep();
279 282
280 sdata = get_bss_sdata(sdata); 283 sdata = get_bss_sdata(sdata);
281 check_sdata_in_driver(sdata); 284 if (!check_sdata_in_driver(sdata))
285 return -EIO;
282 286
283 trace_drv_set_key(local, cmd, sdata, sta, key); 287 trace_drv_set_key(local, cmd, sdata, sta, key);
284 ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key); 288 ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key);
@@ -298,7 +302,8 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local,
298 ista = &sta->sta; 302 ista = &sta->sta;
299 303
300 sdata = get_bss_sdata(sdata); 304 sdata = get_bss_sdata(sdata);
301 check_sdata_in_driver(sdata); 305 if (!check_sdata_in_driver(sdata))
306 return;
302 307
303 trace_drv_update_tkip_key(local, sdata, conf, ista, iv32); 308 trace_drv_update_tkip_key(local, sdata, conf, ista, iv32);
304 if (local->ops->update_tkip_key) 309 if (local->ops->update_tkip_key)
@@ -315,7 +320,8 @@ static inline int drv_hw_scan(struct ieee80211_local *local,
315 320
316 might_sleep(); 321 might_sleep();
317 322
318 check_sdata_in_driver(sdata); 323 if (!check_sdata_in_driver(sdata))
324 return -EIO;
319 325
320 trace_drv_hw_scan(local, sdata); 326 trace_drv_hw_scan(local, sdata);
321 ret = local->ops->hw_scan(&local->hw, &sdata->vif, req); 327 ret = local->ops->hw_scan(&local->hw, &sdata->vif, req);
@@ -328,7 +334,8 @@ static inline void drv_cancel_hw_scan(struct ieee80211_local *local,
328{ 334{
329 might_sleep(); 335 might_sleep();
330 336
331 check_sdata_in_driver(sdata); 337 if (!check_sdata_in_driver(sdata))
338 return;
332 339
333 trace_drv_cancel_hw_scan(local, sdata); 340 trace_drv_cancel_hw_scan(local, sdata);
334 local->ops->cancel_hw_scan(&local->hw, &sdata->vif); 341 local->ops->cancel_hw_scan(&local->hw, &sdata->vif);
@@ -345,7 +352,8 @@ drv_sched_scan_start(struct ieee80211_local *local,
345 352
346 might_sleep(); 353 might_sleep();
347 354
348 check_sdata_in_driver(sdata); 355 if (!check_sdata_in_driver(sdata))
356 return -EIO;
349 357
350 trace_drv_sched_scan_start(local, sdata); 358 trace_drv_sched_scan_start(local, sdata);
351 ret = local->ops->sched_scan_start(&local->hw, &sdata->vif, 359 ret = local->ops->sched_scan_start(&local->hw, &sdata->vif,
@@ -361,7 +369,8 @@ static inline int drv_sched_scan_stop(struct ieee80211_local *local,
361 369
362 might_sleep(); 370 might_sleep();
363 371
364 check_sdata_in_driver(sdata); 372 if (!check_sdata_in_driver(sdata))
373 return -EIO;
365 374
366 trace_drv_sched_scan_stop(local, sdata); 375 trace_drv_sched_scan_stop(local, sdata);
367 ret = local->ops->sched_scan_stop(&local->hw, &sdata->vif); 376 ret = local->ops->sched_scan_stop(&local->hw, &sdata->vif);
@@ -462,7 +471,8 @@ static inline void drv_sta_notify(struct ieee80211_local *local,
462 struct ieee80211_sta *sta) 471 struct ieee80211_sta *sta)
463{ 472{
464 sdata = get_bss_sdata(sdata); 473 sdata = get_bss_sdata(sdata);
465 check_sdata_in_driver(sdata); 474 if (!check_sdata_in_driver(sdata))
475 return;
466 476
467 trace_drv_sta_notify(local, sdata, cmd, sta); 477 trace_drv_sta_notify(local, sdata, cmd, sta);
468 if (local->ops->sta_notify) 478 if (local->ops->sta_notify)
@@ -479,7 +489,8 @@ static inline int drv_sta_add(struct ieee80211_local *local,
479 might_sleep(); 489 might_sleep();
480 490
481 sdata = get_bss_sdata(sdata); 491 sdata = get_bss_sdata(sdata);
482 check_sdata_in_driver(sdata); 492 if (!check_sdata_in_driver(sdata))
493 return -EIO;
483 494
484 trace_drv_sta_add(local, sdata, sta); 495 trace_drv_sta_add(local, sdata, sta);
485 if (local->ops->sta_add) 496 if (local->ops->sta_add)
@@ -497,7 +508,8 @@ static inline void drv_sta_remove(struct ieee80211_local *local,
497 might_sleep(); 508 might_sleep();
498 509
499 sdata = get_bss_sdata(sdata); 510 sdata = get_bss_sdata(sdata);
500 check_sdata_in_driver(sdata); 511 if (!check_sdata_in_driver(sdata))
512 return;
501 513
502 trace_drv_sta_remove(local, sdata, sta); 514 trace_drv_sta_remove(local, sdata, sta);
503 if (local->ops->sta_remove) 515 if (local->ops->sta_remove)
@@ -515,7 +527,8 @@ static inline void drv_sta_add_debugfs(struct ieee80211_local *local,
515 might_sleep(); 527 might_sleep();
516 528
517 sdata = get_bss_sdata(sdata); 529 sdata = get_bss_sdata(sdata);
518 check_sdata_in_driver(sdata); 530 if (!check_sdata_in_driver(sdata))
531 return;
519 532
520 if (local->ops->sta_add_debugfs) 533 if (local->ops->sta_add_debugfs)
521 local->ops->sta_add_debugfs(&local->hw, &sdata->vif, 534 local->ops->sta_add_debugfs(&local->hw, &sdata->vif,
@@ -545,7 +558,8 @@ static inline void drv_sta_pre_rcu_remove(struct ieee80211_local *local,
545 might_sleep(); 558 might_sleep();
546 559
547 sdata = get_bss_sdata(sdata); 560 sdata = get_bss_sdata(sdata);
548 check_sdata_in_driver(sdata); 561 if (!check_sdata_in_driver(sdata))
562 return;
549 563
550 trace_drv_sta_pre_rcu_remove(local, sdata, &sta->sta); 564 trace_drv_sta_pre_rcu_remove(local, sdata, &sta->sta);
551 if (local->ops->sta_pre_rcu_remove) 565 if (local->ops->sta_pre_rcu_remove)
@@ -566,7 +580,8 @@ int drv_sta_state(struct ieee80211_local *local,
566 might_sleep(); 580 might_sleep();
567 581
568 sdata = get_bss_sdata(sdata); 582 sdata = get_bss_sdata(sdata);
569 check_sdata_in_driver(sdata); 583 if (!check_sdata_in_driver(sdata))
584 return -EIO;
570 585
571 trace_drv_sta_state(local, sdata, &sta->sta, old_state, new_state); 586 trace_drv_sta_state(local, sdata, &sta->sta, old_state, new_state);
572 if (local->ops->sta_state) { 587 if (local->ops->sta_state) {
@@ -590,7 +605,8 @@ static inline void drv_sta_rc_update(struct ieee80211_local *local,
590 struct ieee80211_sta *sta, u32 changed) 605 struct ieee80211_sta *sta, u32 changed)
591{ 606{
592 sdata = get_bss_sdata(sdata); 607 sdata = get_bss_sdata(sdata);
593 check_sdata_in_driver(sdata); 608 if (!check_sdata_in_driver(sdata))
609 return;
594 610
595 WARN_ON(changed & IEEE80211_RC_SUPP_RATES_CHANGED && 611 WARN_ON(changed & IEEE80211_RC_SUPP_RATES_CHANGED &&
596 (sdata->vif.type != NL80211_IFTYPE_ADHOC && 612 (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
@@ -612,7 +628,8 @@ static inline int drv_conf_tx(struct ieee80211_local *local,
612 628
613 might_sleep(); 629 might_sleep();
614 630
615 check_sdata_in_driver(sdata); 631 if (!check_sdata_in_driver(sdata))
632 return -EIO;
616 633
617 trace_drv_conf_tx(local, sdata, ac, params); 634 trace_drv_conf_tx(local, sdata, ac, params);
618 if (local->ops->conf_tx) 635 if (local->ops->conf_tx)
@@ -629,7 +646,8 @@ static inline u64 drv_get_tsf(struct ieee80211_local *local,
629 646
630 might_sleep(); 647 might_sleep();
631 648
632 check_sdata_in_driver(sdata); 649 if (!check_sdata_in_driver(sdata))
650 return ret;
633 651
634 trace_drv_get_tsf(local, sdata); 652 trace_drv_get_tsf(local, sdata);
635 if (local->ops->get_tsf) 653 if (local->ops->get_tsf)
@@ -644,7 +662,8 @@ static inline void drv_set_tsf(struct ieee80211_local *local,
644{ 662{
645 might_sleep(); 663 might_sleep();
646 664
647 check_sdata_in_driver(sdata); 665 if (!check_sdata_in_driver(sdata))
666 return;
648 667
649 trace_drv_set_tsf(local, sdata, tsf); 668 trace_drv_set_tsf(local, sdata, tsf);
650 if (local->ops->set_tsf) 669 if (local->ops->set_tsf)
@@ -657,7 +676,8 @@ static inline void drv_reset_tsf(struct ieee80211_local *local,
657{ 676{
658 might_sleep(); 677 might_sleep();
659 678
660 check_sdata_in_driver(sdata); 679 if (!check_sdata_in_driver(sdata))
680 return;
661 681
662 trace_drv_reset_tsf(local, sdata); 682 trace_drv_reset_tsf(local, sdata);
663 if (local->ops->reset_tsf) 683 if (local->ops->reset_tsf)
@@ -689,7 +709,8 @@ static inline int drv_ampdu_action(struct ieee80211_local *local,
689 might_sleep(); 709 might_sleep();
690 710
691 sdata = get_bss_sdata(sdata); 711 sdata = get_bss_sdata(sdata);
692 check_sdata_in_driver(sdata); 712 if (!check_sdata_in_driver(sdata))
713 return -EIO;
693 714
694 trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, buf_size); 715 trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, buf_size);
695 716
@@ -726,13 +747,19 @@ static inline void drv_rfkill_poll(struct ieee80211_local *local)
726} 747}
727 748
728static inline void drv_flush(struct ieee80211_local *local, 749static inline void drv_flush(struct ieee80211_local *local,
750 struct ieee80211_sub_if_data *sdata,
729 u32 queues, bool drop) 751 u32 queues, bool drop)
730{ 752{
753 struct ieee80211_vif *vif = sdata ? &sdata->vif : NULL;
754
731 might_sleep(); 755 might_sleep();
732 756
757 if (sdata && !check_sdata_in_driver(sdata))
758 return;
759
733 trace_drv_flush(local, queues, drop); 760 trace_drv_flush(local, queues, drop);
734 if (local->ops->flush) 761 if (local->ops->flush)
735 local->ops->flush(&local->hw, queues, drop); 762 local->ops->flush(&local->hw, vif, queues, drop);
736 trace_drv_return_void(local); 763 trace_drv_return_void(local);
737} 764}
738 765
@@ -848,7 +875,8 @@ static inline int drv_set_bitrate_mask(struct ieee80211_local *local,
848 875
849 might_sleep(); 876 might_sleep();
850 877
851 check_sdata_in_driver(sdata); 878 if (!check_sdata_in_driver(sdata))
879 return -EIO;
852 880
853 trace_drv_set_bitrate_mask(local, sdata, mask); 881 trace_drv_set_bitrate_mask(local, sdata, mask);
854 if (local->ops->set_bitrate_mask) 882 if (local->ops->set_bitrate_mask)
@@ -863,7 +891,8 @@ static inline void drv_set_rekey_data(struct ieee80211_local *local,
863 struct ieee80211_sub_if_data *sdata, 891 struct ieee80211_sub_if_data *sdata,
864 struct cfg80211_gtk_rekey_data *data) 892 struct cfg80211_gtk_rekey_data *data)
865{ 893{
866 check_sdata_in_driver(sdata); 894 if (!check_sdata_in_driver(sdata))
895 return;
867 896
868 trace_drv_set_rekey_data(local, sdata, data); 897 trace_drv_set_rekey_data(local, sdata, data);
869 if (local->ops->set_rekey_data) 898 if (local->ops->set_rekey_data)
@@ -931,7 +960,8 @@ static inline void drv_mgd_prepare_tx(struct ieee80211_local *local,
931{ 960{
932 might_sleep(); 961 might_sleep();
933 962
934 check_sdata_in_driver(sdata); 963 if (!check_sdata_in_driver(sdata))
964 return;
935 WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION); 965 WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION);
936 966
937 trace_drv_mgd_prepare_tx(local, sdata); 967 trace_drv_mgd_prepare_tx(local, sdata);
@@ -958,6 +988,9 @@ static inline int drv_add_chanctx(struct ieee80211_local *local,
958static inline void drv_remove_chanctx(struct ieee80211_local *local, 988static inline void drv_remove_chanctx(struct ieee80211_local *local,
959 struct ieee80211_chanctx *ctx) 989 struct ieee80211_chanctx *ctx)
960{ 990{
991 if (WARN_ON(!ctx->driver_present))
992 return;
993
961 trace_drv_remove_chanctx(local, ctx); 994 trace_drv_remove_chanctx(local, ctx);
962 if (local->ops->remove_chanctx) 995 if (local->ops->remove_chanctx)
963 local->ops->remove_chanctx(&local->hw, &ctx->conf); 996 local->ops->remove_chanctx(&local->hw, &ctx->conf);
@@ -983,7 +1016,8 @@ static inline int drv_assign_vif_chanctx(struct ieee80211_local *local,
983{ 1016{
984 int ret = 0; 1017 int ret = 0;
985 1018
986 check_sdata_in_driver(sdata); 1019 if (!check_sdata_in_driver(sdata))
1020 return -EIO;
987 1021
988 trace_drv_assign_vif_chanctx(local, sdata, ctx); 1022 trace_drv_assign_vif_chanctx(local, sdata, ctx);
989 if (local->ops->assign_vif_chanctx) { 1023 if (local->ops->assign_vif_chanctx) {
@@ -1001,7 +1035,8 @@ static inline void drv_unassign_vif_chanctx(struct ieee80211_local *local,
1001 struct ieee80211_sub_if_data *sdata, 1035 struct ieee80211_sub_if_data *sdata,
1002 struct ieee80211_chanctx *ctx) 1036 struct ieee80211_chanctx *ctx)
1003{ 1037{
1004 check_sdata_in_driver(sdata); 1038 if (!check_sdata_in_driver(sdata))
1039 return;
1005 1040
1006 trace_drv_unassign_vif_chanctx(local, sdata, ctx); 1041 trace_drv_unassign_vif_chanctx(local, sdata, ctx);
1007 if (local->ops->unassign_vif_chanctx) { 1042 if (local->ops->unassign_vif_chanctx) {
@@ -1013,12 +1048,66 @@ static inline void drv_unassign_vif_chanctx(struct ieee80211_local *local,
1013 trace_drv_return_void(local); 1048 trace_drv_return_void(local);
1014} 1049}
1015 1050
1051static inline int
1052drv_switch_vif_chanctx(struct ieee80211_local *local,
1053 struct ieee80211_vif_chanctx_switch *vifs,
1054 int n_vifs,
1055 enum ieee80211_chanctx_switch_mode mode)
1056{
1057 int ret = 0;
1058 int i;
1059
1060 if (!local->ops->switch_vif_chanctx)
1061 return -EOPNOTSUPP;
1062
1063 for (i = 0; i < n_vifs; i++) {
1064 struct ieee80211_chanctx *new_ctx =
1065 container_of(vifs[i].new_ctx,
1066 struct ieee80211_chanctx,
1067 conf);
1068 struct ieee80211_chanctx *old_ctx =
1069 container_of(vifs[i].old_ctx,
1070 struct ieee80211_chanctx,
1071 conf);
1072
1073 WARN_ON_ONCE(!old_ctx->driver_present);
1074 WARN_ON_ONCE((mode == CHANCTX_SWMODE_SWAP_CONTEXTS &&
1075 new_ctx->driver_present) ||
1076 (mode == CHANCTX_SWMODE_REASSIGN_VIF &&
1077 !new_ctx->driver_present));
1078 }
1079
1080 trace_drv_switch_vif_chanctx(local, vifs, n_vifs, mode);
1081 ret = local->ops->switch_vif_chanctx(&local->hw,
1082 vifs, n_vifs, mode);
1083 trace_drv_return_int(local, ret);
1084
1085 if (!ret && mode == CHANCTX_SWMODE_SWAP_CONTEXTS) {
1086 for (i = 0; i < n_vifs; i++) {
1087 struct ieee80211_chanctx *new_ctx =
1088 container_of(vifs[i].new_ctx,
1089 struct ieee80211_chanctx,
1090 conf);
1091 struct ieee80211_chanctx *old_ctx =
1092 container_of(vifs[i].old_ctx,
1093 struct ieee80211_chanctx,
1094 conf);
1095
1096 new_ctx->driver_present = true;
1097 old_ctx->driver_present = false;
1098 }
1099 }
1100
1101 return ret;
1102}
1103
1016static inline int drv_start_ap(struct ieee80211_local *local, 1104static inline int drv_start_ap(struct ieee80211_local *local,
1017 struct ieee80211_sub_if_data *sdata) 1105 struct ieee80211_sub_if_data *sdata)
1018{ 1106{
1019 int ret = 0; 1107 int ret = 0;
1020 1108
1021 check_sdata_in_driver(sdata); 1109 if (!check_sdata_in_driver(sdata))
1110 return -EIO;
1022 1111
1023 trace_drv_start_ap(local, sdata, &sdata->vif.bss_conf); 1112 trace_drv_start_ap(local, sdata, &sdata->vif.bss_conf);
1024 if (local->ops->start_ap) 1113 if (local->ops->start_ap)
@@ -1030,7 +1119,8 @@ static inline int drv_start_ap(struct ieee80211_local *local,
1030static inline void drv_stop_ap(struct ieee80211_local *local, 1119static inline void drv_stop_ap(struct ieee80211_local *local,
1031 struct ieee80211_sub_if_data *sdata) 1120 struct ieee80211_sub_if_data *sdata)
1032{ 1121{
1033 check_sdata_in_driver(sdata); 1122 if (!check_sdata_in_driver(sdata))
1123 return;
1034 1124
1035 trace_drv_stop_ap(local, sdata); 1125 trace_drv_stop_ap(local, sdata);
1036 if (local->ops->stop_ap) 1126 if (local->ops->stop_ap)
@@ -1053,7 +1143,8 @@ drv_set_default_unicast_key(struct ieee80211_local *local,
1053 struct ieee80211_sub_if_data *sdata, 1143 struct ieee80211_sub_if_data *sdata,
1054 int key_idx) 1144 int key_idx)
1055{ 1145{
1056 check_sdata_in_driver(sdata); 1146 if (!check_sdata_in_driver(sdata))
1147 return;
1057 1148
1058 WARN_ON_ONCE(key_idx < -1 || key_idx > 3); 1149 WARN_ON_ONCE(key_idx < -1 || key_idx > 3);
1059 1150
@@ -1095,7 +1186,8 @@ static inline int drv_join_ibss(struct ieee80211_local *local,
1095 int ret = 0; 1186 int ret = 0;
1096 1187
1097 might_sleep(); 1188 might_sleep();
1098 check_sdata_in_driver(sdata); 1189 if (!check_sdata_in_driver(sdata))
1190 return -EIO;
1099 1191
1100 trace_drv_join_ibss(local, sdata, &sdata->vif.bss_conf); 1192 trace_drv_join_ibss(local, sdata, &sdata->vif.bss_conf);
1101 if (local->ops->join_ibss) 1193 if (local->ops->join_ibss)
@@ -1108,7 +1200,8 @@ static inline void drv_leave_ibss(struct ieee80211_local *local,
1108 struct ieee80211_sub_if_data *sdata) 1200 struct ieee80211_sub_if_data *sdata)
1109{ 1201{
1110 might_sleep(); 1202 might_sleep();
1111 check_sdata_in_driver(sdata); 1203 if (!check_sdata_in_driver(sdata))
1204 return;
1112 1205
1113 trace_drv_leave_ibss(local, sdata); 1206 trace_drv_leave_ibss(local, sdata);
1114 if (local->ops->leave_ibss) 1207 if (local->ops->leave_ibss)
@@ -1116,4 +1209,17 @@ static inline void drv_leave_ibss(struct ieee80211_local *local,
1116 trace_drv_return_void(local); 1209 trace_drv_return_void(local);
1117} 1210}
1118 1211
1212static inline u32 drv_get_expected_throughput(struct ieee80211_local *local,
1213 struct ieee80211_sta *sta)
1214{
1215 u32 ret = 0;
1216
1217 trace_drv_get_expected_throughput(sta);
1218 if (local->ops->get_expected_throughput)
1219 ret = local->ops->get_expected_throughput(sta);
1220 trace_drv_return_u32(local, ret);
1221
1222 return ret;
1223}
1224
1119#endif /* __MAC80211_DRIVER_OPS */ 1225#endif /* __MAC80211_DRIVER_OPS */
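
Every drv_*() wrapper above follows the same guard shape: bail out early when the interface was never handed to the driver, returning -EIO from int-returning ops and a plain return from void ones, instead of only warning and calling into the driver anyway. A minimal userspace sketch of that guard pattern, with heavily simplified stand-in types (sub_if_data and drv_example_op are illustrative names, not the mac80211 ones):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for ieee80211_sub_if_data and a driver op. */
    struct sub_if_data {
            bool in_driver;   /* set once the driver has accepted the vif */
            const char *name;
    };

    static bool check_sdata_in_driver(struct sub_if_data *sdata)
    {
            if (!sdata->in_driver) {
                    fprintf(stderr, "WARN: %s not registered with driver\n",
                            sdata->name);
                    return false;
            }
            return true;
    }

    /* Int-returning ops fail with -EIO instead of calling the driver. */
    static int drv_example_op(struct sub_if_data *sdata)
    {
            if (!check_sdata_in_driver(sdata))
                    return -EIO;
            /* ...would forward to local->ops->example_op() here... */
            return 0;
    }

    int main(void)
    {
            struct sub_if_data sdata = { .in_driver = false, .name = "wlan0" };

            printf("op -> %d\n", drv_example_op(&sdata)); /* -5 (-EIO) */
            sdata.in_driver = true;
            printf("op -> %d\n", drv_example_op(&sdata)); /* 0 */
            return 0;
    }
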
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index c150b68436d7..15702ff64a4c 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -31,6 +31,18 @@ static void __check_htcap_disable(struct ieee80211_ht_cap *ht_capa,
31 } 31 }
32} 32}
33 33
34static void __check_htcap_enable(struct ieee80211_ht_cap *ht_capa,
35 struct ieee80211_ht_cap *ht_capa_mask,
36 struct ieee80211_sta_ht_cap *ht_cap,
37 u16 flag)
38{
39 __le16 le_flag = cpu_to_le16(flag);
40
41 if ((ht_capa_mask->cap_info & le_flag) &&
42 (ht_capa->cap_info & le_flag))
43 ht_cap->cap |= flag;
44}
45
34void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, 46void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
35 struct ieee80211_sta_ht_cap *ht_cap) 47 struct ieee80211_sta_ht_cap *ht_cap)
36{ 48{
@@ -59,7 +71,7 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
59 smask = (u8 *)(&ht_capa_mask->mcs.rx_mask); 71 smask = (u8 *)(&ht_capa_mask->mcs.rx_mask);
60 72
61 /* NOTE: If you add more over-rides here, update register_hw 73 /* NOTE: If you add more over-rides here, update register_hw
62 * ht_capa_mod_msk logic in main.c as well. 74 * ht_capa_mod_mask logic in main.c as well.
63 * And, if this method can ever change ht_cap.ht_supported, fix 75 * And, if this method can ever change ht_cap.ht_supported, fix
64 * the check in ieee80211_add_ht_ie. 76 * the check in ieee80211_add_ht_ie.
65 */ 77 */
@@ -86,6 +98,14 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
86 __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap, 98 __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
87 IEEE80211_HT_CAP_MAX_AMSDU); 99 IEEE80211_HT_CAP_MAX_AMSDU);
88 100
101 /* Allow user to disable LDPC */
102 __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
103 IEEE80211_HT_CAP_LDPC_CODING);
104
105 /* Allow user to enable 40 MHz intolerant bit. */
106 __check_htcap_enable(ht_capa, ht_capa_mask, ht_cap,
107 IEEE80211_HT_CAP_40MHZ_INTOLERANT);
108
89 /* Allow user to decrease AMPDU factor */ 109 /* Allow user to decrease AMPDU factor */
90 if (ht_capa_mask->ampdu_params_info & 110 if (ht_capa_mask->ampdu_params_info &
91 IEEE80211_HT_AMPDU_PARM_FACTOR) { 111 IEEE80211_HT_AMPDU_PARM_FACTOR) {
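
__check_htcap_enable() is the mirror image of the existing __check_htcap_disable(): a capability bit is forced on only when the user's override mask selects the bit and the override value sets it. A self-contained sketch of that mask/value test, using plain uint16_t instead of __le16 (the flag constant's value here is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define HT_CAP_40MHZ_INTOLERANT 0x4000  /* illustrative flag value */

    /* Turn `flag` on only if the override mask selects the bit AND the
     * override value sets it, i.e. the user explicitly asked for it. */
    static void check_htcap_enable(uint16_t capa, uint16_t capa_mask,
                                   uint16_t *cap, uint16_t flag)
    {
            if ((capa_mask & flag) && (capa & flag))
                    *cap |= flag;
    }

    int main(void)
    {
            uint16_t cap = 0;

            /* mask selects the bit and the value enables it: applied */
            check_htcap_enable(HT_CAP_40MHZ_INTOLERANT,
                               HT_CAP_40MHZ_INTOLERANT,
                               &cap, HT_CAP_40MHZ_INTOLERANT);
            printf("cap = 0x%04x\n", cap);  /* 0x4000 */

            /* mask does not select the bit: ignored regardless of value */
            cap = 0;
            check_htcap_enable(HT_CAP_40MHZ_INTOLERANT, 0,
                               &cap, HT_CAP_40MHZ_INTOLERANT);
            printf("cap = 0x%04x\n", cap);  /* 0x0000 */
            return 0;
    }
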
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 06d28787945b..18ee0a256b1e 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -143,7 +143,7 @@ ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
143 *pos++ = csa_settings->block_tx ? 1 : 0; 143 *pos++ = csa_settings->block_tx ? 1 : 0;
144 *pos++ = ieee80211_frequency_to_channel( 144 *pos++ = ieee80211_frequency_to_channel(
145 csa_settings->chandef.chan->center_freq); 145 csa_settings->chandef.chan->center_freq);
146 sdata->csa_counter_offset_beacon = (pos - presp->head); 146 sdata->csa_counter_offset_beacon[0] = (pos - presp->head);
147 *pos++ = csa_settings->count; 147 *pos++ = csa_settings->count;
148 } 148 }
149 149
@@ -228,7 +228,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
228 struct beacon_data *presp; 228 struct beacon_data *presp;
229 enum nl80211_bss_scan_width scan_width; 229 enum nl80211_bss_scan_width scan_width;
230 bool have_higher_than_11mbit; 230 bool have_higher_than_11mbit;
231 bool radar_required = false; 231 bool radar_required;
232 int err; 232 int err;
233 233
234 sdata_assert_lock(sdata); 234 sdata_assert_lock(sdata);
@@ -253,7 +253,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
253 253
254 presp = rcu_dereference_protected(ifibss->presp, 254 presp = rcu_dereference_protected(ifibss->presp,
255 lockdep_is_held(&sdata->wdev.mtx)); 255 lockdep_is_held(&sdata->wdev.mtx));
256 rcu_assign_pointer(ifibss->presp, NULL); 256 RCU_INIT_POINTER(ifibss->presp, NULL);
257 if (presp) 257 if (presp)
258 kfree_rcu(presp, rcu_head); 258 kfree_rcu(presp, rcu_head);
259 259
@@ -262,7 +262,8 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
262 /* make a copy of the chandef, it could be modified below. */ 262 /* make a copy of the chandef, it could be modified below. */
263 chandef = *req_chandef; 263 chandef = *req_chandef;
264 chan = chandef.chan; 264 chan = chandef.chan;
265 if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) { 265 if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef,
266 NL80211_IFTYPE_ADHOC)) {
266 if (chandef.width == NL80211_CHAN_WIDTH_5 || 267 if (chandef.width == NL80211_CHAN_WIDTH_5 ||
267 chandef.width == NL80211_CHAN_WIDTH_10 || 268 chandef.width == NL80211_CHAN_WIDTH_10 ||
268 chandef.width == NL80211_CHAN_WIDTH_20_NOHT || 269 chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
@@ -274,7 +275,8 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
274 chandef.width = NL80211_CHAN_WIDTH_20; 275 chandef.width = NL80211_CHAN_WIDTH_20;
275 chandef.center_freq1 = chan->center_freq; 276 chandef.center_freq1 = chan->center_freq;
276 /* check again for downgraded chandef */ 277 /* check again for downgraded chandef */
277 if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) { 278 if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef,
279 NL80211_IFTYPE_ADHOC)) {
278 sdata_info(sdata, 280 sdata_info(sdata,
279 "Failed to join IBSS, beacons forbidden\n"); 281 "Failed to join IBSS, beacons forbidden\n");
280 return; 282 return;
@@ -282,21 +284,20 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
282 } 284 }
283 285
284 err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy, 286 err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
285 &chandef); 287 &chandef, NL80211_IFTYPE_ADHOC);
286 if (err < 0) { 288 if (err < 0) {
287 sdata_info(sdata, 289 sdata_info(sdata,
288 "Failed to join IBSS, invalid chandef\n"); 290 "Failed to join IBSS, invalid chandef\n");
289 return; 291 return;
290 } 292 }
291 if (err > 0) { 293 if (err > 0 && !ifibss->userspace_handles_dfs) {
292 if (!ifibss->userspace_handles_dfs) { 294 sdata_info(sdata,
293 sdata_info(sdata, 295 "Failed to join IBSS, DFS channel without control program\n");
294 "Failed to join IBSS, DFS channel without control program\n"); 296 return;
295 return;
296 }
297 radar_required = true;
298 } 297 }
299 298
299 radar_required = err;
300
300 mutex_lock(&local->mtx); 301 mutex_lock(&local->mtx);
301 if (ieee80211_vif_use_channel(sdata, &chandef, 302 if (ieee80211_vif_use_channel(sdata, &chandef,
302 ifibss->fixed_channel ? 303 ifibss->fixed_channel ?
@@ -775,7 +776,8 @@ static void ieee80211_ibss_csa_mark_radar(struct ieee80211_sub_if_data *sdata)
775 * unavailable. 776 * unavailable.
776 */ 777 */
777 err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy, 778 err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
778 &ifibss->chandef); 779 &ifibss->chandef,
780 NL80211_IFTYPE_ADHOC);
779 if (err > 0) 781 if (err > 0)
780 cfg80211_radar_event(sdata->local->hw.wiphy, &ifibss->chandef, 782 cfg80211_radar_event(sdata->local->hw.wiphy, &ifibss->chandef,
781 GFP_ATOMIC); 783 GFP_ATOMIC);
@@ -861,7 +863,8 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
861 goto disconnect; 863 goto disconnect;
862 } 864 }
863 865
864 if (!cfg80211_reg_can_beacon(sdata->local->hw.wiphy, &params.chandef)) { 866 if (!cfg80211_reg_can_beacon(sdata->local->hw.wiphy, &params.chandef,
867 NL80211_IFTYPE_ADHOC)) {
865 sdata_info(sdata, 868 sdata_info(sdata,
866 "IBSS %pM switches to unsupported channel (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n", 869 "IBSS %pM switches to unsupported channel (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n",
867 ifibss->bssid, 870 ifibss->bssid,
@@ -873,17 +876,17 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
873 } 876 }
874 877
875 err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy, 878 err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
876 &params.chandef); 879 &params.chandef,
880 NL80211_IFTYPE_ADHOC);
877 if (err < 0) 881 if (err < 0)
878 goto disconnect; 882 goto disconnect;
879 if (err) { 883 if (err > 0 && !ifibss->userspace_handles_dfs) {
880 /* IBSS-DFS only allowed with a control program */ 884 /* IBSS-DFS only allowed with a control program */
881 if (!ifibss->userspace_handles_dfs) 885 goto disconnect;
882 goto disconnect;
883
884 params.radar_required = true;
885 } 886 }
886 887
888 params.radar_required = err;
889
887 if (cfg80211_chandef_identical(&params.chandef, 890 if (cfg80211_chandef_identical(&params.chandef,
888 &sdata->vif.bss_conf.chandef)) { 891 &sdata->vif.bss_conf.chandef)) {
889 ibss_dbg(sdata, 892 ibss_dbg(sdata,
@@ -1636,7 +1639,33 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
1636 u32 changed = 0; 1639 u32 changed = 0;
1637 u32 rate_flags; 1640 u32 rate_flags;
1638 struct ieee80211_supported_band *sband; 1641 struct ieee80211_supported_band *sband;
1642 enum ieee80211_chanctx_mode chanmode;
1643 struct ieee80211_local *local = sdata->local;
1644 int radar_detect_width = 0;
1639 int i; 1645 int i;
1646 int ret;
1647
1648 ret = cfg80211_chandef_dfs_required(local->hw.wiphy,
1649 &params->chandef,
1650 sdata->wdev.iftype);
1651 if (ret < 0)
1652 return ret;
1653
1654 if (ret > 0) {
1655 if (!params->userspace_handles_dfs)
1656 return -EINVAL;
1657 radar_detect_width = BIT(params->chandef.width);
1658 }
1659
1660 chanmode = (params->channel_fixed && !ret) ?
1661 IEEE80211_CHANCTX_SHARED : IEEE80211_CHANCTX_EXCLUSIVE;
1662
1663 mutex_lock(&local->chanctx_mtx);
1664 ret = ieee80211_check_combinations(sdata, &params->chandef, chanmode,
1665 radar_detect_width);
1666 mutex_unlock(&local->chanctx_mtx);
1667 if (ret < 0)
1668 return ret;
1640 1669
1641 if (params->bssid) { 1670 if (params->bssid) {
1642 memcpy(sdata->u.ibss.bssid, params->bssid, ETH_ALEN); 1671 memcpy(sdata->u.ibss.bssid, params->bssid, ETH_ALEN);
@@ -1648,10 +1677,11 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
1648 sdata->u.ibss.control_port = params->control_port; 1677 sdata->u.ibss.control_port = params->control_port;
1649 sdata->u.ibss.userspace_handles_dfs = params->userspace_handles_dfs; 1678 sdata->u.ibss.userspace_handles_dfs = params->userspace_handles_dfs;
1650 sdata->u.ibss.basic_rates = params->basic_rates; 1679 sdata->u.ibss.basic_rates = params->basic_rates;
1680 sdata->u.ibss.last_scan_completed = jiffies;
1651 1681
1652 /* fix basic_rates if channel does not support these rates */ 1682 /* fix basic_rates if channel does not support these rates */
1653 rate_flags = ieee80211_chandef_rate_flags(&params->chandef); 1683 rate_flags = ieee80211_chandef_rate_flags(&params->chandef);
1654 sband = sdata->local->hw.wiphy->bands[params->chandef.chan->band]; 1684 sband = local->hw.wiphy->bands[params->chandef.chan->band];
1655 for (i = 0; i < sband->n_bitrates; i++) { 1685 for (i = 0; i < sband->n_bitrates; i++) {
1656 if ((rate_flags & sband->bitrates[i].flags) != rate_flags) 1686 if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
1657 sdata->u.ibss.basic_rates &= ~BIT(i); 1687 sdata->u.ibss.basic_rates &= ~BIT(i);
@@ -1700,9 +1730,9 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
1700 ieee80211_bss_info_change_notify(sdata, changed); 1730 ieee80211_bss_info_change_notify(sdata, changed);
1701 1731
1702 sdata->smps_mode = IEEE80211_SMPS_OFF; 1732 sdata->smps_mode = IEEE80211_SMPS_OFF;
1703 sdata->needed_rx_chains = sdata->local->rx_chains; 1733 sdata->needed_rx_chains = local->rx_chains;
1704 1734
1705 ieee80211_queue_work(&sdata->local->hw, &sdata->work); 1735 ieee80211_queue_work(&local->hw, &sdata->work);
1706 1736
1707 return 0; 1737 return 0;
1708} 1738}
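
The rewritten DFS checks above lean on cfg80211_chandef_dfs_required()'s tri-state return: a negative errno for an invalid chandef, 0 when no radar detection is needed, and a positive value when it is. That convention is what lets the single assignment `radar_required = err` replace the separate `radar_required = true` branch. A compact sketch of consuming it; chandef_dfs_required() and its channel-range test are hypothetical stand-ins:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in: <0 error, 0 not required, >0 DFS required. */
    static int chandef_dfs_required(int channel)
    {
            if (channel < 0)
                    return -EINVAL;
            return channel >= 52 && channel <= 144; /* rough 5 GHz DFS range */
    }

    static int join_ibss(int channel, bool userspace_handles_dfs)
    {
            bool radar_required;
            int err = chandef_dfs_required(channel);

            if (err < 0)
                    return err;            /* invalid chandef */
            if (err > 0 && !userspace_handles_dfs)
                    return -EPERM;         /* DFS without control program */

            radar_required = err;          /* >0 collapses to true */
            printf("channel %d: radar_required=%d\n", channel, radar_required);
            return 0;
    }

    int main(void)
    {
            join_ibss(36, false);                       /* non-DFS, fine */
            join_ibss(100, true);                       /* DFS, handled  */
            printf("ret=%d\n", join_ibss(100, false));  /* refused (-1)  */
            return 0;
    }
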
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index f169b6ee94ee..ac9836e0aab3 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -260,7 +260,7 @@ struct ieee80211_if_ap {
260 260
261 /* to be used after channel switch. */ 261 /* to be used after channel switch. */
262 struct cfg80211_beacon_data *next_beacon; 262 struct cfg80211_beacon_data *next_beacon;
263 struct list_head vlans; 263 struct list_head vlans; /* write-protected with RTNL and local->mtx */
264 264
265 struct ps_data ps; 265 struct ps_data ps;
266 atomic_t num_mcast_sta; /* number of stations receiving multicast */ 266 atomic_t num_mcast_sta; /* number of stations receiving multicast */
@@ -276,7 +276,7 @@ struct ieee80211_if_wds {
276}; 276};
277 277
278struct ieee80211_if_vlan { 278struct ieee80211_if_vlan {
279 struct list_head list; 279 struct list_head list; /* write-protected with RTNL and local->mtx */
280 280
281 /* used for all tx if the VLAN is configured to 4-addr mode */ 281 /* used for all tx if the VLAN is configured to 4-addr mode */
282 struct sta_info __rcu *sta; 282 struct sta_info __rcu *sta;
@@ -692,8 +692,10 @@ struct ieee80211_chanctx {
692 struct list_head list; 692 struct list_head list;
693 struct rcu_head rcu_head; 693 struct rcu_head rcu_head;
694 694
695 struct list_head assigned_vifs;
696 struct list_head reserved_vifs;
697
695 enum ieee80211_chanctx_mode mode; 698 enum ieee80211_chanctx_mode mode;
696 int refcount;
697 bool driver_present; 699 bool driver_present;
698 700
699 struct ieee80211_chanctx_conf conf; 701 struct ieee80211_chanctx_conf conf;
@@ -752,11 +754,21 @@ struct ieee80211_sub_if_data {
752 struct mac80211_qos_map __rcu *qos_map; 754 struct mac80211_qos_map __rcu *qos_map;
753 755
754 struct work_struct csa_finalize_work; 756 struct work_struct csa_finalize_work;
755 int csa_counter_offset_beacon; 757 u16 csa_counter_offset_beacon[IEEE80211_MAX_CSA_COUNTERS_NUM];
756 int csa_counter_offset_presp; 758 u16 csa_counter_offset_presp[IEEE80211_MAX_CSA_COUNTERS_NUM];
757 bool csa_radar_required; 759 bool csa_radar_required;
760 bool csa_block_tx; /* write-protected by sdata_lock and local->mtx */
758 struct cfg80211_chan_def csa_chandef; 761 struct cfg80211_chan_def csa_chandef;
759 762
763 struct list_head assigned_chanctx_list; /* protected by chanctx_mtx */
764 struct list_head reserved_chanctx_list; /* protected by chanctx_mtx */
765
766 /* context reservation -- protected with chanctx_mtx */
767 struct ieee80211_chanctx *reserved_chanctx;
768 struct cfg80211_chan_def reserved_chandef;
769 bool reserved_radar_required;
770 u8 csa_current_counter;
771
760 /* used to reconfigure hardware SM PS */ 772 /* used to reconfigure hardware SM PS */
761 struct work_struct recalc_smps; 773 struct work_struct recalc_smps;
762 774
@@ -1449,6 +1461,7 @@ __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
1449int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, 1461int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
1450 struct cfg80211_sched_scan_request *req); 1462 struct cfg80211_sched_scan_request *req);
1451int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata); 1463int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata);
1464void ieee80211_sched_scan_end(struct ieee80211_local *local);
1452void ieee80211_sched_scan_stopped_work(struct work_struct *work); 1465void ieee80211_sched_scan_stopped_work(struct work_struct *work);
1453 1466
1454/* off-channel helpers */ 1467/* off-channel helpers */
@@ -1463,6 +1476,7 @@ void ieee80211_sw_roc_work(struct work_struct *work);
1463void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc); 1476void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc);
1464 1477
1465/* channel switch handling */ 1478/* channel switch handling */
1479bool ieee80211_csa_needs_block_tx(struct ieee80211_local *local);
1466void ieee80211_csa_finalize_work(struct work_struct *work); 1480void ieee80211_csa_finalize_work(struct work_struct *work);
1467int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, 1481int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
1468 struct cfg80211_csa_settings *params); 1482 struct cfg80211_csa_settings *params);
@@ -1772,6 +1786,16 @@ ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
1772 const struct cfg80211_chan_def *chandef, 1786 const struct cfg80211_chan_def *chandef,
1773 enum ieee80211_chanctx_mode mode); 1787 enum ieee80211_chanctx_mode mode);
1774int __must_check 1788int __must_check
1789ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata,
1790 const struct cfg80211_chan_def *chandef,
1791 enum ieee80211_chanctx_mode mode,
1792 bool radar_required);
1793int __must_check
1794ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata,
1795 u32 *changed);
1796int ieee80211_vif_unreserve_chanctx(struct ieee80211_sub_if_data *sdata);
1797
1798int __must_check
1775ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata, 1799ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
1776 const struct cfg80211_chan_def *chandef, 1800 const struct cfg80211_chan_def *chandef,
1777 u32 *changed); 1801 u32 *changed);
@@ -1783,6 +1807,8 @@ void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata);
1783void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata); 1807void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata);
1784void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata, 1808void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
1785 bool clear); 1809 bool clear);
1810int ieee80211_chanctx_refcount(struct ieee80211_local *local,
1811 struct ieee80211_chanctx *ctx);
1786 1812
1787void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local, 1813void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
1788 struct ieee80211_chanctx *chanctx); 1814 struct ieee80211_chanctx *chanctx);
@@ -1806,6 +1832,20 @@ int ieee80211_cs_headroom(struct ieee80211_local *local,
1806 enum nl80211_iftype iftype); 1832 enum nl80211_iftype iftype);
1807void ieee80211_recalc_dtim(struct ieee80211_local *local, 1833void ieee80211_recalc_dtim(struct ieee80211_local *local,
1808 struct ieee80211_sub_if_data *sdata); 1834 struct ieee80211_sub_if_data *sdata);
1835int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
1836 const struct cfg80211_chan_def *chandef,
1837 enum ieee80211_chanctx_mode chanmode,
1838 u8 radar_detect);
1839int ieee80211_max_num_channels(struct ieee80211_local *local);
1840
1841/* TDLS */
1842int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
1843 const u8 *peer, u8 action_code, u8 dialog_token,
1844 u16 status_code, u32 peer_capability,
1845 const u8 *extra_ies, size_t extra_ies_len);
1846int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
1847 const u8 *peer, enum nl80211_tdls_operation oper);
1848
1809 1849
1810#ifdef CONFIG_MAC80211_NOINLINE 1850#ifdef CONFIG_MAC80211_NOINLINE
1811#define debug_noinline noinline 1851#define debug_noinline noinline
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 34799e06ee01..388b863e821c 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -250,6 +250,7 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
250{ 250{
251 struct ieee80211_local *local = sdata->local; 251 struct ieee80211_local *local = sdata->local;
252 struct ieee80211_sub_if_data *nsdata; 252 struct ieee80211_sub_if_data *nsdata;
253 int ret;
253 254
254 ASSERT_RTNL(); 255 ASSERT_RTNL();
255 256
@@ -300,7 +301,10 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
300 } 301 }
301 } 302 }
302 303
303 return 0; 304 mutex_lock(&local->chanctx_mtx);
305 ret = ieee80211_check_combinations(sdata, NULL, 0, 0);
306 mutex_unlock(&local->chanctx_mtx);
307 return ret;
304} 308}
305 309
306static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata, 310static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata,
@@ -395,6 +399,7 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
395 sdata->vif.type = NL80211_IFTYPE_MONITOR; 399 sdata->vif.type = NL80211_IFTYPE_MONITOR;
396 snprintf(sdata->name, IFNAMSIZ, "%s-monitor", 400 snprintf(sdata->name, IFNAMSIZ, "%s-monitor",
397 wiphy_name(local->hw.wiphy)); 401 wiphy_name(local->hw.wiphy));
402 sdata->wdev.iftype = NL80211_IFTYPE_MONITOR;
398 403
399 sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM; 404 sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
400 405
@@ -423,7 +428,7 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
423 mutex_unlock(&local->mtx); 428 mutex_unlock(&local->mtx);
424 if (ret) { 429 if (ret) {
425 mutex_lock(&local->iflist_mtx); 430 mutex_lock(&local->iflist_mtx);
426 rcu_assign_pointer(local->monitor_sdata, NULL); 431 RCU_INIT_POINTER(local->monitor_sdata, NULL);
427 mutex_unlock(&local->iflist_mtx); 432 mutex_unlock(&local->iflist_mtx);
428 synchronize_net(); 433 synchronize_net();
429 drv_remove_interface(local, sdata); 434 drv_remove_interface(local, sdata);
@@ -452,7 +457,7 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
452 return; 457 return;
453 } 458 }
454 459
455 rcu_assign_pointer(local->monitor_sdata, NULL); 460 RCU_INIT_POINTER(local->monitor_sdata, NULL);
456 mutex_unlock(&local->iflist_mtx); 461 mutex_unlock(&local->iflist_mtx);
457 462
458 synchronize_net(); 463 synchronize_net();
@@ -492,7 +497,9 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
492 if (!sdata->bss) 497 if (!sdata->bss)
493 return -ENOLINK; 498 return -ENOLINK;
494 499
500 mutex_lock(&local->mtx);
495 list_add(&sdata->u.vlan.list, &sdata->bss->vlans); 501 list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
502 mutex_unlock(&local->mtx);
496 503
497 master = container_of(sdata->bss, 504 master = container_of(sdata->bss,
498 struct ieee80211_sub_if_data, u.ap); 505 struct ieee80211_sub_if_data, u.ap);
@@ -722,8 +729,11 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
722 drv_stop(local); 729 drv_stop(local);
723 err_del_bss: 730 err_del_bss:
724 sdata->bss = NULL; 731 sdata->bss = NULL;
725 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 732 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
733 mutex_lock(&local->mtx);
726 list_del(&sdata->u.vlan.list); 734 list_del(&sdata->u.vlan.list);
735 mutex_unlock(&local->mtx);
736 }
727 /* might already be clear but that doesn't matter */ 737 /* might already be clear but that doesn't matter */
728 clear_bit(SDATA_STATE_RUNNING, &sdata->state); 738 clear_bit(SDATA_STATE_RUNNING, &sdata->state);
729 return res; 739 return res;
@@ -829,8 +839,15 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
829 839
830 cancel_work_sync(&sdata->recalc_smps); 840 cancel_work_sync(&sdata->recalc_smps);
831 sdata_lock(sdata); 841 sdata_lock(sdata);
842 mutex_lock(&local->mtx);
832 sdata->vif.csa_active = false; 843 sdata->vif.csa_active = false;
844 if (!ieee80211_csa_needs_block_tx(local))
845 ieee80211_wake_queues_by_reason(&local->hw,
846 IEEE80211_MAX_QUEUE_MAP,
847 IEEE80211_QUEUE_STOP_REASON_CSA);
848 mutex_unlock(&local->mtx);
833 sdata_unlock(sdata); 849 sdata_unlock(sdata);
850
834 cancel_work_sync(&sdata->csa_finalize_work); 851 cancel_work_sync(&sdata->csa_finalize_work);
835 852
836 cancel_delayed_work_sync(&sdata->dfs_cac_timer_work); 853 cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
@@ -875,8 +892,10 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
875 892
876 switch (sdata->vif.type) { 893 switch (sdata->vif.type) {
877 case NL80211_IFTYPE_AP_VLAN: 894 case NL80211_IFTYPE_AP_VLAN:
895 mutex_lock(&local->mtx);
878 list_del(&sdata->u.vlan.list); 896 list_del(&sdata->u.vlan.list);
879 rcu_assign_pointer(sdata->vif.chanctx_conf, NULL); 897 mutex_unlock(&local->mtx);
898 RCU_INIT_POINTER(sdata->vif.chanctx_conf, NULL);
880 /* no need to tell driver */ 899 /* no need to tell driver */
881 break; 900 break;
882 case NL80211_IFTYPE_MONITOR: 901 case NL80211_IFTYPE_MONITOR:
@@ -895,7 +914,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
895 break; 914 break;
896 case NL80211_IFTYPE_P2P_DEVICE: 915 case NL80211_IFTYPE_P2P_DEVICE:
897 /* relies on synchronize_rcu() below */ 916 /* relies on synchronize_rcu() below */
898 rcu_assign_pointer(local->p2p_sdata, NULL); 917 RCU_INIT_POINTER(local->p2p_sdata, NULL);
899 /* fall through */ 918 /* fall through */
900 default: 919 default:
901 cancel_work_sync(&sdata->work); 920 cancel_work_sync(&sdata->work);
@@ -1267,6 +1286,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
1267 sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE); 1286 sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE);
1268 sdata->control_port_no_encrypt = false; 1287 sdata->control_port_no_encrypt = false;
1269 sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM; 1288 sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
1289 sdata->vif.bss_conf.idle = true;
1270 1290
1271 sdata->noack_map = 0; 1291 sdata->noack_map = 0;
1272 1292
@@ -1280,6 +1300,8 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
1280 INIT_WORK(&sdata->work, ieee80211_iface_work); 1300 INIT_WORK(&sdata->work, ieee80211_iface_work);
1281 INIT_WORK(&sdata->recalc_smps, ieee80211_recalc_smps_work); 1301 INIT_WORK(&sdata->recalc_smps, ieee80211_recalc_smps_work);
1282 INIT_WORK(&sdata->csa_finalize_work, ieee80211_csa_finalize_work); 1302 INIT_WORK(&sdata->csa_finalize_work, ieee80211_csa_finalize_work);
1303 INIT_LIST_HEAD(&sdata->assigned_chanctx_list);
1304 INIT_LIST_HEAD(&sdata->reserved_chanctx_list);
1283 1305
1284 switch (type) { 1306 switch (type) {
1285 case NL80211_IFTYPE_P2P_GO: 1307 case NL80211_IFTYPE_P2P_GO:
@@ -1773,20 +1795,19 @@ static int netdev_notify(struct notifier_block *nb,
1773 struct ieee80211_sub_if_data *sdata; 1795 struct ieee80211_sub_if_data *sdata;
1774 1796
1775 if (state != NETDEV_CHANGENAME) 1797 if (state != NETDEV_CHANGENAME)
1776 return 0; 1798 return NOTIFY_DONE;
1777 1799
1778 if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy) 1800 if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy)
1779 return 0; 1801 return NOTIFY_DONE;
1780 1802
1781 if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid) 1803 if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
1782 return 0; 1804 return NOTIFY_DONE;
1783 1805
1784 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1806 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1785
1786 memcpy(sdata->name, dev->name, IFNAMSIZ); 1807 memcpy(sdata->name, dev->name, IFNAMSIZ);
1787
1788 ieee80211_debugfs_rename_netdev(sdata); 1808 ieee80211_debugfs_rename_netdev(sdata);
1789 return 0; 1809
1810 return NOTIFY_OK;
1790} 1811}
1791 1812
1792static struct notifier_block mac80211_netdev_notifier = { 1813static struct notifier_block mac80211_netdev_notifier = {
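
netdev_notify() above answers with the notifier-chain codes rather than a bare 0: NOTIFY_DONE means "not interested", NOTIFY_OK means "handled, keep going". A toy notifier showing the convention; the NOTIFY_* values match the kernel's, but the NETDEV_* event values here are made up for illustration:

    #include <stdio.h>

    #define NOTIFY_DONE 0x0000  /* don't care */
    #define NOTIFY_OK   0x0001  /* suits me   */

    #define NETDEV_CHANGENAME 1 /* illustrative event values */
    #define NETDEV_UP         2

    /* Only reacts to CHANGENAME; everything else is "not for us". */
    static int netdev_notify(unsigned long state, const char *name)
    {
            if (state != NETDEV_CHANGENAME)
                    return NOTIFY_DONE;

            printf("renamed to %s\n", name);
            return NOTIFY_OK;
    }

    int main(void)
    {
            printf("up:     %d\n", netdev_notify(NETDEV_UP, "wlan0"));
            printf("rename: %d\n", netdev_notify(NETDEV_CHANGENAME, "wlan1"));
            return 0;
    }
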
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 6ff65a1ebaa9..16d97f044a20 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -325,7 +325,8 @@ ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
325 struct ieee80211_key *key; 325 struct ieee80211_key *key;
326 int i, j, err; 326 int i, j, err;
327 327
328 BUG_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS); 328 if (WARN_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS))
329 return ERR_PTR(-EINVAL);
329 330
330 key = kzalloc(sizeof(struct ieee80211_key) + key_len, GFP_KERNEL); 331 key = kzalloc(sizeof(struct ieee80211_key) + key_len, GFP_KERNEL);
331 if (!key) 332 if (!key)
@@ -481,8 +482,8 @@ int ieee80211_key_link(struct ieee80211_key *key,
481 int idx, ret; 482 int idx, ret;
482 bool pairwise; 483 bool pairwise;
483 484
484 BUG_ON(!sdata); 485 if (WARN_ON(!sdata || !key))
485 BUG_ON(!key); 486 return -EINVAL;
486 487
487 pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE; 488 pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
488 idx = key->conf.keyidx; 489 idx = key->conf.keyidx;
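
In key.c, BUG_ON() (which halts the machine) gives way to WARN_ON() plus an error return, so a bad key index or NULL argument is loudly reported but survivable. A userspace analogue of the pattern, with a toy WARN_ON macro standing in for the kernel's:

    #include <errno.h>
    #include <stdio.h>

    #define NUM_DEFAULT_KEYS 4

    #define WARN_ON(cond) \
            ((cond) ? (fprintf(stderr, "WARN: %s:%d: %s\n", \
                               __FILE__, __LINE__, #cond), 1) : 0)

    /* Returns a key slot, or a negative errno instead of crashing. */
    static int key_alloc(int idx)
    {
            if (WARN_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS))
                    return -EINVAL; /* recoverable, unlike BUG_ON() */
            return idx;
    }

    int main(void)
    {
            printf("key_alloc(2) = %d\n", key_alloc(2)); /* 2 */
            printf("key_alloc(9) = %d\n", key_alloc(9)); /* -22 (-EINVAL) */
            return 0;
    }
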
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 4c1bf61bc778..d17c26d6e369 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -340,7 +340,7 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
340 340
341 sdata_unlock(sdata); 341 sdata_unlock(sdata);
342 342
343 return NOTIFY_DONE; 343 return NOTIFY_OK;
344} 344}
345#endif 345#endif
346 346
@@ -371,7 +371,7 @@ static int ieee80211_ifa6_changed(struct notifier_block *nb,
371 371
372 drv_ipv6_addr_change(local, sdata, idev); 372 drv_ipv6_addr_change(local, sdata, idev);
373 373
374 return NOTIFY_DONE; 374 return NOTIFY_OK;
375} 375}
376#endif 376#endif
377 377
@@ -446,7 +446,9 @@ static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
446 .cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40 | 446 .cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
447 IEEE80211_HT_CAP_MAX_AMSDU | 447 IEEE80211_HT_CAP_MAX_AMSDU |
448 IEEE80211_HT_CAP_SGI_20 | 448 IEEE80211_HT_CAP_SGI_20 |
449 IEEE80211_HT_CAP_SGI_40), 449 IEEE80211_HT_CAP_SGI_40 |
450 IEEE80211_HT_CAP_LDPC_CODING |
451 IEEE80211_HT_CAP_40MHZ_INTOLERANT),
450 .mcs = { 452 .mcs = {
451 .rx_mask = { 0xff, 0xff, 0xff, 0xff, 0xff, 453 .rx_mask = { 0xff, 0xff, 0xff, 0xff, 0xff,
452 0xff, 0xff, 0xff, 0xff, 0xff, }, 454 0xff, 0xff, 0xff, 0xff, 0xff, },
@@ -954,6 +956,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
954 if (local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) 956 if (local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS)
955 local->hw.wiphy->flags |= WIPHY_FLAG_TDLS_EXTERNAL_SETUP; 957 local->hw.wiphy->flags |= WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
956 958
959 local->hw.wiphy->max_num_csa_counters = IEEE80211_MAX_CSA_COUNTERS_NUM;
960
957 result = wiphy_register(local->hw.wiphy); 961 result = wiphy_register(local->hw.wiphy);
958 if (result < 0) 962 if (result < 0)
959 goto fail_wiphy_register; 963 goto fail_wiphy_register;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index f70e9cd10552..6495a3f0428d 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -366,20 +366,15 @@ int mesh_add_rsn_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
366 return 0; 366 return 0;
367 367
368 /* find RSN IE */ 368 /* find RSN IE */
369 data = ifmsh->ie; 369 data = cfg80211_find_ie(WLAN_EID_RSN, ifmsh->ie, ifmsh->ie_len);
370 while (data < ifmsh->ie + ifmsh->ie_len) { 370 if (!data)
371 if (*data == WLAN_EID_RSN) { 371 return 0;
372 len = data[1] + 2;
373 break;
374 }
375 data++;
376 }
377 372
378 if (len) { 373 len = data[1] + 2;
379 if (skb_tailroom(skb) < len) 374
380 return -ENOMEM; 375 if (skb_tailroom(skb) < len)
381 memcpy(skb_put(skb, len), data, len); 376 return -ENOMEM;
382 } 377 memcpy(skb_put(skb, len), data, len);
383 378
384 return 0; 379 return 0;
385} 380}
@@ -684,7 +679,7 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
684 *pos++ = 0x0; 679 *pos++ = 0x0;
685 *pos++ = ieee80211_frequency_to_channel( 680 *pos++ = ieee80211_frequency_to_channel(
686 csa->settings.chandef.chan->center_freq); 681 csa->settings.chandef.chan->center_freq);
687 sdata->csa_counter_offset_beacon = hdr_len + 6; 682 sdata->csa_counter_offset_beacon[0] = hdr_len + 6;
688 *pos++ = csa->settings.count; 683 *pos++ = csa->settings.count;
689 *pos++ = WLAN_EID_CHAN_SWITCH_PARAM; 684 *pos++ = WLAN_EID_CHAN_SWITCH_PARAM;
690 *pos++ = 6; 685 *pos++ = 6;
@@ -829,7 +824,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
829 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); 824 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
830 bcn = rcu_dereference_protected(ifmsh->beacon, 825 bcn = rcu_dereference_protected(ifmsh->beacon,
831 lockdep_is_held(&sdata->wdev.mtx)); 826 lockdep_is_held(&sdata->wdev.mtx));
832 rcu_assign_pointer(ifmsh->beacon, NULL); 827 RCU_INIT_POINTER(ifmsh->beacon, NULL);
833 kfree_rcu(bcn, rcu_head); 828 kfree_rcu(bcn, rcu_head);
834 829
835 /* flush STAs and mpaths on this iface */ 830 /* flush STAs and mpaths on this iface */
@@ -903,14 +898,15 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
903 } 898 }
904 899
905 err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy, 900 err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
906 &params.chandef); 901 &params.chandef,
902 NL80211_IFTYPE_MESH_POINT);
907 if (err < 0) 903 if (err < 0)
908 return false; 904 return false;
909 if (err) { 905 if (err > 0)
910 params.radar_required = true;
911 /* TODO: DFS not (yet) supported */ 906 /* TODO: DFS not (yet) supported */
912 return false; 907 return false;
913 } 908
909 params.radar_required = err;
914 910
915 if (cfg80211_chandef_identical(&params.chandef, 911 if (cfg80211_chandef_identical(&params.chandef,
916 &sdata->vif.bss_conf.chandef)) { 912 &sdata->vif.bss_conf.chandef)) {
@@ -1068,7 +1064,7 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata)
1068 1064
1069 /* Remove the CSA and MCSP elements from the beacon */ 1065 /* Remove the CSA and MCSP elements from the beacon */
1070 tmp_csa_settings = rcu_dereference(ifmsh->csa); 1066 tmp_csa_settings = rcu_dereference(ifmsh->csa);
1071 rcu_assign_pointer(ifmsh->csa, NULL); 1067 RCU_INIT_POINTER(ifmsh->csa, NULL);
1072 if (tmp_csa_settings) 1068 if (tmp_csa_settings)
1073 kfree_rcu(tmp_csa_settings, rcu_head); 1069 kfree_rcu(tmp_csa_settings, rcu_head);
1074 ret = ieee80211_mesh_rebuild_beacon(sdata); 1070 ret = ieee80211_mesh_rebuild_beacon(sdata);
@@ -1102,7 +1098,7 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
1102 ret = ieee80211_mesh_rebuild_beacon(sdata); 1098 ret = ieee80211_mesh_rebuild_beacon(sdata);
1103 if (ret) { 1099 if (ret) {
1104 tmp_csa_settings = rcu_dereference(ifmsh->csa); 1100 tmp_csa_settings = rcu_dereference(ifmsh->csa);
1105 rcu_assign_pointer(ifmsh->csa, NULL); 1101 RCU_INIT_POINTER(ifmsh->csa, NULL);
1106 kfree_rcu(tmp_csa_settings, rcu_head); 1102 kfree_rcu(tmp_csa_settings, rcu_head);
1107 return ret; 1103 return ret;
1108 } 1104 }
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index f9514685d45a..94758b9c9ed4 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -37,7 +37,7 @@ static inline u32 u32_field_get(const u8 *preq_elem, int offset, bool ae)
37 return get_unaligned_le32(preq_elem + offset); 37 return get_unaligned_le32(preq_elem + offset);
38} 38}
39 39
40static inline u32 u16_field_get(const u8 *preq_elem, int offset, bool ae) 40static inline u16 u16_field_get(const u8 *preq_elem, int offset, bool ae)
41{ 41{
42 if (ae) 42 if (ae)
43 offset += 6; 43 offset += 6;
@@ -544,9 +544,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
544 if (time_after(jiffies, ifmsh->last_sn_update + 544 if (time_after(jiffies, ifmsh->last_sn_update +
545 net_traversal_jiffies(sdata)) || 545 net_traversal_jiffies(sdata)) ||
546 time_before(jiffies, ifmsh->last_sn_update)) { 546 time_before(jiffies, ifmsh->last_sn_update)) {
547 target_sn = ++ifmsh->sn; 547 ++ifmsh->sn;
548 ifmsh->last_sn_update = jiffies; 548 ifmsh->last_sn_update = jiffies;
549 } 549 }
550 target_sn = ifmsh->sn;
550 } else if (is_broadcast_ether_addr(target_addr) && 551 } else if (is_broadcast_ether_addr(target_addr) &&
551 (target_flags & IEEE80211_PREQ_TO_FLAG)) { 552 (target_flags & IEEE80211_PREQ_TO_FLAG)) {
552 rcu_read_lock(); 553 rcu_read_lock();
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 7d050ed6fe5a..cf032a8db9d7 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -287,8 +287,10 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
287 struct sk_buff_head failq; 287 struct sk_buff_head failq;
288 unsigned long flags; 288 unsigned long flags;
289 289
290 BUG_ON(gate_mpath == from_mpath); 290 if (WARN_ON(gate_mpath == from_mpath))
291 BUG_ON(!gate_mpath->next_hop); 291 return;
292 if (WARN_ON(!gate_mpath->next_hop))
293 return;
292 294
293 __skb_queue_head_init(&failq); 295 __skb_queue_head_init(&failq);
294 296
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
index 2bc5dc25d5ad..09625d6205c3 100644
--- a/net/mac80211/mesh_sync.c
+++ b/net/mac80211/mesh_sync.c
@@ -171,7 +171,7 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata,
171 u8 cap; 171 u8 cap;
172 172
173 WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET); 173 WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);
174 BUG_ON(!rcu_read_lock_held()); 174 WARN_ON(!rcu_read_lock_held());
175 cap = beacon->meshconf->meshconf_cap; 175 cap = beacon->meshconf->meshconf_cap;
176 176
177 spin_lock_bh(&ifmsh->sync_offset_lock); 177 spin_lock_bh(&ifmsh->sync_offset_lock);
diff --git a/net/mac80211/michael.h b/net/mac80211/michael.h
index 3b848dad9587..0e4886f881f1 100644
--- a/net/mac80211/michael.h
+++ b/net/mac80211/michael.h
@@ -11,6 +11,7 @@
11#define MICHAEL_H 11#define MICHAEL_H
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/ieee80211.h>
14 15
15#define MICHAEL_MIC_LEN 8 16#define MICHAEL_MIC_LEN 8
16 17
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 27600a9808ba..3345401be1b3 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -975,16 +975,23 @@ static void ieee80211_chswitch_work(struct work_struct *work)
975 /* XXX: shouldn't really modify cfg80211-owned data! */ 975 /* XXX: shouldn't really modify cfg80211-owned data! */
976 ifmgd->associated->channel = sdata->csa_chandef.chan; 976 ifmgd->associated->channel = sdata->csa_chandef.chan;
977 977
978 ieee80211_bss_info_change_notify(sdata, changed);
979
980 mutex_lock(&local->mtx);
981 sdata->vif.csa_active = false;
978 /* XXX: wait for a beacon first? */ 982 /* XXX: wait for a beacon first? */
979 ieee80211_wake_queues_by_reason(&local->hw, 983 if (!ieee80211_csa_needs_block_tx(local))
984 ieee80211_wake_queues_by_reason(&local->hw,
980 IEEE80211_MAX_QUEUE_MAP, 985 IEEE80211_MAX_QUEUE_MAP,
981 IEEE80211_QUEUE_STOP_REASON_CSA); 986 IEEE80211_QUEUE_STOP_REASON_CSA);
987 mutex_unlock(&local->mtx);
982 988
983 ieee80211_bss_info_change_notify(sdata, changed);
984
985 out:
986 sdata->vif.csa_active = false;
987 ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED; 989 ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
990
991 ieee80211_sta_reset_beacon_monitor(sdata);
992 ieee80211_sta_reset_conn_monitor(sdata);
993
994out:
988 sdata_unlock(sdata); 995 sdata_unlock(sdata);
989} 996}
990 997
@@ -1089,7 +1096,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1089 } 1096 }
1090 chanctx = container_of(rcu_access_pointer(sdata->vif.chanctx_conf), 1097 chanctx = container_of(rcu_access_pointer(sdata->vif.chanctx_conf),
1091 struct ieee80211_chanctx, conf); 1098 struct ieee80211_chanctx, conf);
1092 if (chanctx->refcount > 1) { 1099 if (ieee80211_chanctx_refcount(local, chanctx) > 1) {
1093 sdata_info(sdata, 1100 sdata_info(sdata,
1094 "channel switch with multiple interfaces on the same channel, disconnecting\n"); 1101 "channel switch with multiple interfaces on the same channel, disconnecting\n");
1095 ieee80211_queue_work(&local->hw, 1102 ieee80211_queue_work(&local->hw,
@@ -1100,12 +1107,16 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1100 mutex_unlock(&local->chanctx_mtx); 1107 mutex_unlock(&local->chanctx_mtx);
1101 1108
1102 sdata->csa_chandef = csa_ie.chandef; 1109 sdata->csa_chandef = csa_ie.chandef;
1110
1111 mutex_lock(&local->mtx);
1103 sdata->vif.csa_active = true; 1112 sdata->vif.csa_active = true;
1113 sdata->csa_block_tx = csa_ie.mode;
1104 1114
1105 if (csa_ie.mode) 1115 if (sdata->csa_block_tx)
1106 ieee80211_stop_queues_by_reason(&local->hw, 1116 ieee80211_stop_queues_by_reason(&local->hw,
1107 IEEE80211_MAX_QUEUE_MAP, 1117 IEEE80211_MAX_QUEUE_MAP,
1108 IEEE80211_QUEUE_STOP_REASON_CSA); 1118 IEEE80211_QUEUE_STOP_REASON_CSA);
1119 mutex_unlock(&local->mtx);
1109 1120
1110 if (local->ops->channel_switch) { 1121 if (local->ops->channel_switch) {
1111 /* use driver's channel switch callback */ 1122 /* use driver's channel switch callback */
@@ -1817,6 +1828,12 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1817 ifmgd->flags = 0; 1828 ifmgd->flags = 0;
1818 mutex_lock(&local->mtx); 1829 mutex_lock(&local->mtx);
1819 ieee80211_vif_release_channel(sdata); 1830 ieee80211_vif_release_channel(sdata);
1831
1832 sdata->vif.csa_active = false;
1833 if (!ieee80211_csa_needs_block_tx(local))
1834 ieee80211_wake_queues_by_reason(&local->hw,
1835 IEEE80211_MAX_QUEUE_MAP,
1836 IEEE80211_QUEUE_STOP_REASON_CSA);
1820 mutex_unlock(&local->mtx); 1837 mutex_unlock(&local->mtx);
1821 1838
1822 sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM; 1839 sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
@@ -2045,6 +2062,7 @@ EXPORT_SYMBOL(ieee80211_ap_probereq_get);
2045 2062
2046static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) 2063static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
2047{ 2064{
2065 struct ieee80211_local *local = sdata->local;
2048 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2066 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2049 u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; 2067 u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
2050 2068
@@ -2058,10 +2076,14 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
2058 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, 2076 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
2059 true, frame_buf); 2077 true, frame_buf);
2060 ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED; 2078 ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
2079
2080 mutex_lock(&local->mtx);
2061 sdata->vif.csa_active = false; 2081 sdata->vif.csa_active = false;
2062 ieee80211_wake_queues_by_reason(&sdata->local->hw, 2082 if (!ieee80211_csa_needs_block_tx(local))
2083 ieee80211_wake_queues_by_reason(&local->hw,
2063 IEEE80211_MAX_QUEUE_MAP, 2084 IEEE80211_MAX_QUEUE_MAP,
2064 IEEE80211_QUEUE_STOP_REASON_CSA); 2085 IEEE80211_QUEUE_STOP_REASON_CSA);
2086 mutex_unlock(&local->mtx);
2065 2087
2066 cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, 2088 cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
2067 IEEE80211_DEAUTH_FRAME_LEN); 2089 IEEE80211_DEAUTH_FRAME_LEN);
@@ -3546,6 +3568,9 @@ static void ieee80211_sta_bcn_mon_timer(unsigned long data)
3546 if (local->quiescing) 3568 if (local->quiescing)
3547 return; 3569 return;
3548 3570
3571 if (sdata->vif.csa_active)
3572 return;
3573
3549 sdata->u.mgd.connection_loss = false; 3574 sdata->u.mgd.connection_loss = false;
3550 ieee80211_queue_work(&sdata->local->hw, 3575 ieee80211_queue_work(&sdata->local->hw,
3551 &sdata->u.mgd.beacon_connection_loss_work); 3576 &sdata->u.mgd.beacon_connection_loss_work);
@@ -3561,6 +3586,9 @@ static void ieee80211_sta_conn_mon_timer(unsigned long data)
3561 if (local->quiescing) 3586 if (local->quiescing)
3562 return; 3587 return;
3563 3588
3589 if (sdata->vif.csa_active)
3590 return;
3591
3564 ieee80211_queue_work(&local->hw, &ifmgd->monitor_work); 3592 ieee80211_queue_work(&local->hw, &ifmgd->monitor_work);
3565} 3593}
3566 3594
@@ -3707,7 +3735,7 @@ int ieee80211_max_network_latency(struct notifier_block *nb,
3707 ieee80211_recalc_ps(local, latency_usec); 3735 ieee80211_recalc_ps(local, latency_usec);
3708 mutex_unlock(&local->iflist_mtx); 3736 mutex_unlock(&local->iflist_mtx);
3709 3737
3710 return 0; 3738 return NOTIFY_OK;
3711} 3739}
3712 3740
3713static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata, 3741static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata,
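Note: the hunks above gate queue wake-up on ieee80211_csa_needs_block_tx(), which is defined elsewhere in mac80211 and not part of this excerpt. A minimal sketch of what such a helper has to do, assuming it simply scans all running interfaces for one whose channel switch still blocks TX (field names taken from the hunks above; treat the exact body as an assumption):

    /* sketch only: the real helper lives in another mac80211 file */
    static bool ieee80211_csa_needs_block_tx(struct ieee80211_local *local)
    {
            struct ieee80211_sub_if_data *sdata;

            lockdep_assert_held(&local->mtx);

            rcu_read_lock();
            list_for_each_entry_rcu(sdata, &local->interfaces, list) {
                    if (!ieee80211_sdata_running(sdata))
                            continue;
                    if (!sdata->vif.csa_active)
                            continue;
                    /* csa_block_tx was set from csa_ie.mode above */
                    if (sdata->csa_block_tx) {
                            rcu_read_unlock();
                            return true;
                    }
            }
            rcu_read_unlock();

            return false;
    }

Because the queues are stopped with a shared CSA reason, they may only be woken once no interface on the device still needs TX blocked, hence the !ieee80211_csa_needs_block_tx() checks before each wake above.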
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 26fd94fa0aed..1c1469c36dca 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -657,6 +657,17 @@ minstrel_free(void *priv)
657 kfree(priv); 657 kfree(priv);
658} 658}
659 659
660static u32 minstrel_get_expected_throughput(void *priv_sta)
661{
662 struct minstrel_sta_info *mi = priv_sta;
663 int idx = mi->max_tp_rate[0];
664
 665 /* convert pkt per sec to kbps (1200 is the average pkt size used for
 666 * computing cur_tp)
667 */
668 return MINSTREL_TRUNC(mi->r[idx].cur_tp) * 1200 * 8 / 1024;
669}
670
660const struct rate_control_ops mac80211_minstrel = { 671const struct rate_control_ops mac80211_minstrel = {
661 .name = "minstrel", 672 .name = "minstrel",
662 .tx_status = minstrel_tx_status, 673 .tx_status = minstrel_tx_status,
@@ -670,6 +681,7 @@ const struct rate_control_ops mac80211_minstrel = {
670 .add_sta_debugfs = minstrel_add_sta_debugfs, 681 .add_sta_debugfs = minstrel_add_sta_debugfs,
671 .remove_sta_debugfs = minstrel_remove_sta_debugfs, 682 .remove_sta_debugfs = minstrel_remove_sta_debugfs,
672#endif 683#endif
684 .get_expected_throughput = minstrel_get_expected_throughput,
673}; 685};
674 686
675int __init 687int __init
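Note: minstrel tracks cur_tp as a fixed-point packets-per-second estimate computed for 1200-byte packets, and the new callback converts it to kbps with cur_tp * 1200 * 8 / 1024. A standalone demo of the same arithmetic; the MINSTREL_SCALE value is an assumption modelled on rc80211_minstrel.h:

    #include <stdio.h>
    #include <stdint.h>

    /* fixed-point helpers modelled on rc80211_minstrel.h;
     * the exact MINSTREL_SCALE value is an assumption here */
    #define MINSTREL_SCALE 16
    #define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / (div))
    #define MINSTREL_TRUNC(val) ((val) >> MINSTREL_SCALE)

    int main(void)
    {
            /* pretend the rate's cur_tp says ~400 packets per second */
            uint64_t cur_tp = MINSTREL_FRAC(400ULL, 1);

            /* same conversion as minstrel_get_expected_throughput():
             * pkt/s * 1200 bytes * 8 bits / 1024 = kbps */
            uint64_t kbps = MINSTREL_TRUNC(cur_tp) * 1200 * 8 / 1024;

            printf("%llu pkt/s -> %llu kbps\n",
                   (unsigned long long)MINSTREL_TRUNC(cur_tp),
                   (unsigned long long)kbps);
            return 0;
    }

For 400 pkt/s this prints 3750 kbps, i.e. 400 * 1200 * 8 / 1024.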
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index bccaf854a309..85c1e74b7714 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -22,7 +22,7 @@
22#define MCS_NBITS (AVG_PKT_SIZE << 3) 22#define MCS_NBITS (AVG_PKT_SIZE << 3)
23 23
24/* Number of symbols for a packet with (bps) bits per symbol */ 24/* Number of symbols for a packet with (bps) bits per symbol */
25#define MCS_NSYMS(bps) ((MCS_NBITS + (bps) - 1) / (bps)) 25#define MCS_NSYMS(bps) DIV_ROUND_UP(MCS_NBITS, (bps))
26 26
27/* Transmission time (nanoseconds) for a packet containing (syms) symbols */ 27/* Transmission time (nanoseconds) for a packet containing (syms) symbols */
28#define MCS_SYMBOL_TIME(sgi, syms) \ 28#define MCS_SYMBOL_TIME(sgi, syms) \
@@ -226,8 +226,9 @@ minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
226 nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len); 226 nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len);
227 227
228 nsecs += minstrel_mcs_groups[group].duration[rate]; 228 nsecs += minstrel_mcs_groups[group].duration[rate];
229 tp = 1000000 * ((prob * 1000) / nsecs);
230 229
230 /* prob is scaled - see MINSTREL_FRAC above */
231 tp = 1000000 * ((prob * 1000) / nsecs);
231 mr->cur_tp = MINSTREL_TRUNC(tp); 232 mr->cur_tp = MINSTREL_TRUNC(tp);
232} 233}
233 234
@@ -1031,6 +1032,22 @@ minstrel_ht_free(void *priv)
1031 mac80211_minstrel.free(priv); 1032 mac80211_minstrel.free(priv);
1032} 1033}
1033 1034
1035static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
1036{
1037 struct minstrel_ht_sta_priv *msp = priv_sta;
1038 struct minstrel_ht_sta *mi = &msp->ht;
1039 int i, j;
1040
1041 if (!msp->is_ht)
1042 return mac80211_minstrel.get_expected_throughput(priv_sta);
1043
1044 i = mi->max_tp_rate / MCS_GROUP_RATES;
1045 j = mi->max_tp_rate % MCS_GROUP_RATES;
1046
 1047 /* convert cur_tp from pkt per second to kbps */
1048 return mi->groups[i].rates[j].cur_tp * AVG_PKT_SIZE * 8 / 1024;
1049}
1050
1034static const struct rate_control_ops mac80211_minstrel_ht = { 1051static const struct rate_control_ops mac80211_minstrel_ht = {
1035 .name = "minstrel_ht", 1052 .name = "minstrel_ht",
1036 .tx_status = minstrel_ht_tx_status, 1053 .tx_status = minstrel_ht_tx_status,
@@ -1045,6 +1062,7 @@ static const struct rate_control_ops mac80211_minstrel_ht = {
1045 .add_sta_debugfs = minstrel_ht_add_sta_debugfs, 1062 .add_sta_debugfs = minstrel_ht_add_sta_debugfs,
1046 .remove_sta_debugfs = minstrel_ht_remove_sta_debugfs, 1063 .remove_sta_debugfs = minstrel_ht_remove_sta_debugfs,
1047#endif 1064#endif
1065 .get_expected_throughput = minstrel_ht_get_expected_throughput,
1048}; 1066};
1049 1067
1050 1068
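Two notes on the minstrel_ht hunks. The MCS_NSYMS change is purely cosmetic: DIV_ROUND_UP(a, b) expands to ((a) + (b) - 1) / (b), exactly the open-coded form it replaces. Second, the HT throughput callback decodes the flat max_tp_rate index into a group and a per-group rate with a div/mod pair; a tiny demo (MCS_GROUP_RATES = 8 is assumed from rc80211_minstrel_ht.h):

    #include <stdio.h>

    #define MCS_GROUP_RATES 8   /* assumed, per rc80211_minstrel_ht.h */

    int main(void)
    {
            int max_tp_rate = 27;                        /* hypothetical flat index */
            int group = max_tp_rate / MCS_GROUP_RATES;   /* MCS group -> 3 */
            int rate  = max_tp_rate % MCS_GROUP_RATES;   /* rate within group -> 3 */

            printf("flat index %d = group %d, rate %d\n",
                   max_tp_rate, group, rate);
            return 0;
    }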
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 2b608b2b70ec..394e201cde6d 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -54,24 +54,25 @@ static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
54 return skb; 54 return skb;
55} 55}
56 56
57static inline int should_drop_frame(struct sk_buff *skb, int present_fcs_len) 57static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len)
58{ 58{
59 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 59 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
60 struct ieee80211_hdr *hdr; 60 struct ieee80211_hdr *hdr = (void *)skb->data;
61
62 hdr = (void *)(skb->data);
63 61
64 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | 62 if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
65 RX_FLAG_FAILED_PLCP_CRC | 63 RX_FLAG_FAILED_PLCP_CRC |
66 RX_FLAG_AMPDU_IS_ZEROLEN)) 64 RX_FLAG_AMPDU_IS_ZEROLEN))
67 return 1; 65 return true;
66
68 if (unlikely(skb->len < 16 + present_fcs_len)) 67 if (unlikely(skb->len < 16 + present_fcs_len))
69 return 1; 68 return true;
69
70 if (ieee80211_is_ctl(hdr->frame_control) && 70 if (ieee80211_is_ctl(hdr->frame_control) &&
71 !ieee80211_is_pspoll(hdr->frame_control) && 71 !ieee80211_is_pspoll(hdr->frame_control) &&
72 !ieee80211_is_back_req(hdr->frame_control)) 72 !ieee80211_is_back_req(hdr->frame_control))
73 return 1; 73 return true;
74 return 0; 74
75 return false;
75} 76}
76 77
77static int 78static int
@@ -3191,7 +3192,7 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
3191} 3192}
3192 3193
3193/* 3194/*
3194 * This is the actual Rx frames handler. as it blongs to Rx path it must 3195 * This is the actual Rx frames handler. as it belongs to Rx path it must
3195 * be called with rcu_read_lock protection. 3196 * be called with rcu_read_lock protection.
3196 */ 3197 */
3197static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 3198static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 3ce7f2c8539a..f40661eb75b5 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -309,7 +309,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
309 if (local->scan_req != local->int_scan_req) 309 if (local->scan_req != local->int_scan_req)
310 cfg80211_scan_done(local->scan_req, aborted); 310 cfg80211_scan_done(local->scan_req, aborted);
311 local->scan_req = NULL; 311 local->scan_req = NULL;
312 rcu_assign_pointer(local->scan_sdata, NULL); 312 RCU_INIT_POINTER(local->scan_sdata, NULL);
313 313
314 local->scanning = 0; 314 local->scanning = 0;
315 local->scan_chandef.chan = NULL; 315 local->scan_chandef.chan = NULL;
@@ -559,7 +559,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
559 ieee80211_recalc_idle(local); 559 ieee80211_recalc_idle(local);
560 560
561 local->scan_req = NULL; 561 local->scan_req = NULL;
562 rcu_assign_pointer(local->scan_sdata, NULL); 562 RCU_INIT_POINTER(local->scan_sdata, NULL);
563 } 563 }
564 564
565 return rc; 565 return rc;
@@ -773,7 +773,7 @@ void ieee80211_scan_work(struct work_struct *work)
773 int rc; 773 int rc;
774 774
775 local->scan_req = NULL; 775 local->scan_req = NULL;
776 rcu_assign_pointer(local->scan_sdata, NULL); 776 RCU_INIT_POINTER(local->scan_sdata, NULL);
777 777
778 rc = __ieee80211_start_scan(sdata, req); 778 rc = __ieee80211_start_scan(sdata, req);
779 if (rc) { 779 if (rc) {
@@ -1014,7 +1014,7 @@ out_free:
1014 1014
1015 if (ret) { 1015 if (ret) {
1016 /* Clean in case of failure after HW restart or upon resume. */ 1016 /* Clean in case of failure after HW restart or upon resume. */
1017 rcu_assign_pointer(local->sched_scan_sdata, NULL); 1017 RCU_INIT_POINTER(local->sched_scan_sdata, NULL);
1018 local->sched_scan_req = NULL; 1018 local->sched_scan_req = NULL;
1019 } 1019 }
1020 1020
@@ -1076,12 +1076,8 @@ void ieee80211_sched_scan_results(struct ieee80211_hw *hw)
1076} 1076}
1077EXPORT_SYMBOL(ieee80211_sched_scan_results); 1077EXPORT_SYMBOL(ieee80211_sched_scan_results);
1078 1078
1079void ieee80211_sched_scan_stopped_work(struct work_struct *work) 1079void ieee80211_sched_scan_end(struct ieee80211_local *local)
1080{ 1080{
1081 struct ieee80211_local *local =
1082 container_of(work, struct ieee80211_local,
1083 sched_scan_stopped_work);
1084
1085 mutex_lock(&local->mtx); 1081 mutex_lock(&local->mtx);
1086 1082
1087 if (!rcu_access_pointer(local->sched_scan_sdata)) { 1083 if (!rcu_access_pointer(local->sched_scan_sdata)) {
@@ -1089,7 +1085,7 @@ void ieee80211_sched_scan_stopped_work(struct work_struct *work)
1089 return; 1085 return;
1090 } 1086 }
1091 1087
1092 rcu_assign_pointer(local->sched_scan_sdata, NULL); 1088 RCU_INIT_POINTER(local->sched_scan_sdata, NULL);
1093 1089
1094 /* If sched scan was aborted by the driver. */ 1090 /* If sched scan was aborted by the driver. */
1095 local->sched_scan_req = NULL; 1091 local->sched_scan_req = NULL;
@@ -1099,6 +1095,15 @@ void ieee80211_sched_scan_stopped_work(struct work_struct *work)
1099 cfg80211_sched_scan_stopped(local->hw.wiphy); 1095 cfg80211_sched_scan_stopped(local->hw.wiphy);
1100} 1096}
1101 1097
1098void ieee80211_sched_scan_stopped_work(struct work_struct *work)
1099{
1100 struct ieee80211_local *local =
1101 container_of(work, struct ieee80211_local,
1102 sched_scan_stopped_work);
1103
1104 ieee80211_sched_scan_end(local);
1105}
1106
1102void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw) 1107void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw)
1103{ 1108{
1104 struct ieee80211_local *local = hw_to_local(hw); 1109 struct ieee80211_local *local = hw_to_local(hw);
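All the scan.c hunks are the same substitution: storing NULL publishes no object for readers to traverse, so the write barrier implied by rcu_assign_pointer() buys nothing and RCU_INIT_POINTER() suffices. A contrasting sketch (kernel-style; the struct and globals are hypothetical):

    #include <linux/rcupdate.h>

    struct foo { int x; };
    static struct foo __rcu *gp;

    /* publishing a real object: readers must observe x initialized
     * before they can see the pointer, so the barrier is required */
    static void publish(struct foo *f)
    {
            f->x = 42;
            rcu_assign_pointer(gp, f);
    }

    /* clearing the pointer publishes nothing to order against,
     * so the cheaper RCU_INIT_POINTER() is enough */
    static void clear(void)
    {
            RCU_INIT_POINTER(gp, NULL);
    }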
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 847d92f6bef6..a9b46d8ea22f 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -240,6 +240,7 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
240 240
241 sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr); 241 sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr);
242 242
243 kfree(rcu_dereference_raw(sta->sta.rates));
243 kfree(sta); 244 kfree(sta);
244} 245}
245 246
@@ -552,7 +553,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
552int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU) 553int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
553{ 554{
554 struct ieee80211_local *local = sta->local; 555 struct ieee80211_local *local = sta->local;
555 int err = 0; 556 int err;
556 557
557 might_sleep(); 558 might_sleep();
558 559
@@ -570,7 +571,6 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
570 571
571 return 0; 572 return 0;
572 out_free: 573 out_free:
573 BUG_ON(!err);
574 sta_info_free(local, sta); 574 sta_info_free(local, sta);
575 return err; 575 return err;
576} 576}
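The sta_info.c hunks do two things: sta_info_free() now also releases the RCU-managed rate table, plugging a leak, and the BUG_ON(!err) goes away because err is no longer pre-initialized to 0, so every path reaching out_free must have set it. By the time sta_info_free() runs the station is unreachable, which is what makes the raw dereference of an __rcu pointer legitimate. A minimal illustration of that teardown pattern (struct names hypothetical):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct rates { int n; };
    struct station { struct rates __rcu *rates; };

    static void station_free(struct station *sta)
    {
            /* no RCU readers can reach 'sta' here, so the raw
             * (barrier- and check-free) dereference is safe */
            kfree(rcu_dereference_raw(sta->rates));
            kfree(sta);
    }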
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 60cb7a665976..ba29ebc86141 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -541,6 +541,23 @@ static void ieee80211_tx_latency_end_msrmnt(struct ieee80211_local *local,
541 */ 541 */
542#define STA_LOST_PKT_THRESHOLD 50 542#define STA_LOST_PKT_THRESHOLD 50
543 543
544static void ieee80211_lost_packet(struct sta_info *sta, struct sk_buff *skb)
545{
546 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
547
548 /* This packet was aggregated but doesn't carry status info */
549 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
550 !(info->flags & IEEE80211_TX_STAT_AMPDU))
551 return;
552
553 if (++sta->lost_packets < STA_LOST_PKT_THRESHOLD)
554 return;
555
556 cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr,
557 sta->lost_packets, GFP_ATOMIC);
558 sta->lost_packets = 0;
559}
560
544void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) 561void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
545{ 562{
546 struct sk_buff *skb2; 563 struct sk_buff *skb2;
@@ -680,12 +697,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
680 if (info->flags & IEEE80211_TX_STAT_ACK) { 697 if (info->flags & IEEE80211_TX_STAT_ACK) {
681 if (sta->lost_packets) 698 if (sta->lost_packets)
682 sta->lost_packets = 0; 699 sta->lost_packets = 0;
683 } else if (++sta->lost_packets >= STA_LOST_PKT_THRESHOLD) { 700 } else {
684 cfg80211_cqm_pktloss_notify(sta->sdata->dev, 701 ieee80211_lost_packet(sta, skb);
685 sta->sta.addr,
686 sta->lost_packets,
687 GFP_ATOMIC);
688 sta->lost_packets = 0;
689 } 702 }
690 } 703 }
691 704
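Besides moving code, the status.c refactor changes one behavior: aggregated frames that carry no per-frame status (IEEE80211_TX_CTL_AMPDU set, IEEE80211_TX_STAT_AMPDU clear) no longer count toward the loss threshold. A userspace mirror of the counting policy, runnable as-is:

    #include <stdio.h>
    #include <stdbool.h>

    #define STA_LOST_PKT_THRESHOLD 50      /* same value as in status.c */

    static unsigned int lost_packets;

    /* mirrors ieee80211_lost_packet(): skip aggregated frames without
     * real status; notify (here: print) every 50th consecutive loss */
    static void lost_packet(bool ctl_ampdu, bool stat_ampdu)
    {
            if (ctl_ampdu && !stat_ampdu)
                    return;

            if (++lost_packets < STA_LOST_PKT_THRESHOLD)
                    return;

            printf("cqm notify: %u packets lost\n", lost_packets);
            lost_packets = 0;
    }

    int main(void)
    {
            for (int i = 0; i < 120; i++)
                    lost_packet(false, false);      /* prints at 50 and 100 */
            return 0;
    }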
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
new file mode 100644
index 000000000000..652813b2d3df
--- /dev/null
+++ b/net/mac80211/tdls.c
@@ -0,0 +1,325 @@
1/*
2 * mac80211 TDLS handling code
3 *
4 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
5 * Copyright 2014, Intel Corporation
6 *
7 * This file is GPLv2 as found in COPYING.
8 */
9
10#include <linux/ieee80211.h>
11#include "ieee80211_i.h"
12
13static void ieee80211_tdls_add_ext_capab(struct sk_buff *skb)
14{
15 u8 *pos = (void *)skb_put(skb, 7);
16
17 *pos++ = WLAN_EID_EXT_CAPABILITY;
18 *pos++ = 5; /* len */
19 *pos++ = 0x0;
20 *pos++ = 0x0;
21 *pos++ = 0x0;
22 *pos++ = 0x0;
23 *pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
24}
25
26static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata)
27{
28 struct ieee80211_local *local = sdata->local;
29 u16 capab;
30
31 capab = 0;
32 if (ieee80211_get_sdata_band(sdata) != IEEE80211_BAND_2GHZ)
33 return capab;
34
35 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
36 capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
37 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
38 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
39
40 return capab;
41}
42
43static void ieee80211_tdls_add_link_ie(struct sk_buff *skb, const u8 *src_addr,
44 const u8 *peer, const u8 *bssid)
45{
46 struct ieee80211_tdls_lnkie *lnkid;
47
48 lnkid = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
49
50 lnkid->ie_type = WLAN_EID_LINK_ID;
51 lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) - 2;
52
53 memcpy(lnkid->bssid, bssid, ETH_ALEN);
54 memcpy(lnkid->init_sta, src_addr, ETH_ALEN);
55 memcpy(lnkid->resp_sta, peer, ETH_ALEN);
56}
57
58static int
59ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
60 const u8 *peer, u8 action_code, u8 dialog_token,
61 u16 status_code, struct sk_buff *skb)
62{
63 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
64 enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
65 struct ieee80211_tdls_data *tf;
66
67 tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
68
69 memcpy(tf->da, peer, ETH_ALEN);
70 memcpy(tf->sa, sdata->vif.addr, ETH_ALEN);
71 tf->ether_type = cpu_to_be16(ETH_P_TDLS);
72 tf->payload_type = WLAN_TDLS_SNAP_RFTYPE;
73
74 switch (action_code) {
75 case WLAN_TDLS_SETUP_REQUEST:
76 tf->category = WLAN_CATEGORY_TDLS;
77 tf->action_code = WLAN_TDLS_SETUP_REQUEST;
78
79 skb_put(skb, sizeof(tf->u.setup_req));
80 tf->u.setup_req.dialog_token = dialog_token;
81 tf->u.setup_req.capability =
82 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
83
84 ieee80211_add_srates_ie(sdata, skb, false, band);
85 ieee80211_add_ext_srates_ie(sdata, skb, false, band);
86 ieee80211_tdls_add_ext_capab(skb);
87 break;
88 case WLAN_TDLS_SETUP_RESPONSE:
89 tf->category = WLAN_CATEGORY_TDLS;
90 tf->action_code = WLAN_TDLS_SETUP_RESPONSE;
91
92 skb_put(skb, sizeof(tf->u.setup_resp));
93 tf->u.setup_resp.status_code = cpu_to_le16(status_code);
94 tf->u.setup_resp.dialog_token = dialog_token;
95 tf->u.setup_resp.capability =
96 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
97
98 ieee80211_add_srates_ie(sdata, skb, false, band);
99 ieee80211_add_ext_srates_ie(sdata, skb, false, band);
100 ieee80211_tdls_add_ext_capab(skb);
101 break;
102 case WLAN_TDLS_SETUP_CONFIRM:
103 tf->category = WLAN_CATEGORY_TDLS;
104 tf->action_code = WLAN_TDLS_SETUP_CONFIRM;
105
106 skb_put(skb, sizeof(tf->u.setup_cfm));
107 tf->u.setup_cfm.status_code = cpu_to_le16(status_code);
108 tf->u.setup_cfm.dialog_token = dialog_token;
109 break;
110 case WLAN_TDLS_TEARDOWN:
111 tf->category = WLAN_CATEGORY_TDLS;
112 tf->action_code = WLAN_TDLS_TEARDOWN;
113
114 skb_put(skb, sizeof(tf->u.teardown));
115 tf->u.teardown.reason_code = cpu_to_le16(status_code);
116 break;
117 case WLAN_TDLS_DISCOVERY_REQUEST:
118 tf->category = WLAN_CATEGORY_TDLS;
119 tf->action_code = WLAN_TDLS_DISCOVERY_REQUEST;
120
121 skb_put(skb, sizeof(tf->u.discover_req));
122 tf->u.discover_req.dialog_token = dialog_token;
123 break;
124 default:
125 return -EINVAL;
126 }
127
128 return 0;
129}
130
131static int
132ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
133 const u8 *peer, u8 action_code, u8 dialog_token,
134 u16 status_code, struct sk_buff *skb)
135{
136 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
137 enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
138 struct ieee80211_mgmt *mgmt;
139
140 mgmt = (void *)skb_put(skb, 24);
141 memset(mgmt, 0, 24);
142 memcpy(mgmt->da, peer, ETH_ALEN);
143 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
144 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
145
146 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
147 IEEE80211_STYPE_ACTION);
148
149 switch (action_code) {
150 case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
151 skb_put(skb, 1 + sizeof(mgmt->u.action.u.tdls_discover_resp));
152 mgmt->u.action.category = WLAN_CATEGORY_PUBLIC;
153 mgmt->u.action.u.tdls_discover_resp.action_code =
154 WLAN_PUB_ACTION_TDLS_DISCOVER_RES;
155 mgmt->u.action.u.tdls_discover_resp.dialog_token =
156 dialog_token;
157 mgmt->u.action.u.tdls_discover_resp.capability =
158 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
159
160 ieee80211_add_srates_ie(sdata, skb, false, band);
161 ieee80211_add_ext_srates_ie(sdata, skb, false, band);
162 ieee80211_tdls_add_ext_capab(skb);
163 break;
164 default:
165 return -EINVAL;
166 }
167
168 return 0;
169}
170
171int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
172 const u8 *peer, u8 action_code, u8 dialog_token,
173 u16 status_code, u32 peer_capability,
174 const u8 *extra_ies, size_t extra_ies_len)
175{
176 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
177 struct ieee80211_local *local = sdata->local;
178 struct sk_buff *skb = NULL;
179 bool send_direct;
180 int ret;
181
182 if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
183 return -ENOTSUPP;
184
185 /* make sure we are in managed mode, and associated */
186 if (sdata->vif.type != NL80211_IFTYPE_STATION ||
187 !sdata->u.mgd.associated)
188 return -EINVAL;
189
190 tdls_dbg(sdata, "TDLS mgmt action %d peer %pM\n",
191 action_code, peer);
192
193 skb = dev_alloc_skb(local->hw.extra_tx_headroom +
194 max(sizeof(struct ieee80211_mgmt),
195 sizeof(struct ieee80211_tdls_data)) +
196 50 + /* supported rates */
197 7 + /* ext capab */
198 extra_ies_len +
199 sizeof(struct ieee80211_tdls_lnkie));
200 if (!skb)
201 return -ENOMEM;
202
203 skb_reserve(skb, local->hw.extra_tx_headroom);
204
205 switch (action_code) {
206 case WLAN_TDLS_SETUP_REQUEST:
207 case WLAN_TDLS_SETUP_RESPONSE:
208 case WLAN_TDLS_SETUP_CONFIRM:
209 case WLAN_TDLS_TEARDOWN:
210 case WLAN_TDLS_DISCOVERY_REQUEST:
211 ret = ieee80211_prep_tdls_encap_data(wiphy, dev, peer,
212 action_code, dialog_token,
213 status_code, skb);
214 send_direct = false;
215 break;
216 case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
217 ret = ieee80211_prep_tdls_direct(wiphy, dev, peer, action_code,
218 dialog_token, status_code,
219 skb);
220 send_direct = true;
221 break;
222 default:
223 ret = -ENOTSUPP;
224 break;
225 }
226
227 if (ret < 0)
228 goto fail;
229
230 if (extra_ies_len)
231 memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len);
232
233 /* the TDLS link IE is always added last */
234 switch (action_code) {
235 case WLAN_TDLS_SETUP_REQUEST:
236 case WLAN_TDLS_SETUP_CONFIRM:
237 case WLAN_TDLS_TEARDOWN:
238 case WLAN_TDLS_DISCOVERY_REQUEST:
239 /* we are the initiator */
240 ieee80211_tdls_add_link_ie(skb, sdata->vif.addr, peer,
241 sdata->u.mgd.bssid);
242 break;
243 case WLAN_TDLS_SETUP_RESPONSE:
244 case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
245 /* we are the responder */
246 ieee80211_tdls_add_link_ie(skb, peer, sdata->vif.addr,
247 sdata->u.mgd.bssid);
248 break;
249 default:
250 ret = -ENOTSUPP;
251 goto fail;
252 }
253
254 if (send_direct) {
255 ieee80211_tx_skb(sdata, skb);
256 return 0;
257 }
258
259 /*
260 * According to 802.11z: Setup req/resp are sent in AC_BK, otherwise
261 * we should default to AC_VI.
262 */
263 switch (action_code) {
264 case WLAN_TDLS_SETUP_REQUEST:
265 case WLAN_TDLS_SETUP_RESPONSE:
266 skb_set_queue_mapping(skb, IEEE80211_AC_BK);
267 skb->priority = 2;
268 break;
269 default:
270 skb_set_queue_mapping(skb, IEEE80211_AC_VI);
271 skb->priority = 5;
272 break;
273 }
274
275 /* disable bottom halves when entering the Tx path */
276 local_bh_disable();
277 ret = ieee80211_subif_start_xmit(skb, dev);
278 local_bh_enable();
279
280 return ret;
281
282fail:
283 dev_kfree_skb(skb);
284 return ret;
285}
286
287int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
288 const u8 *peer, enum nl80211_tdls_operation oper)
289{
290 struct sta_info *sta;
291 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
292
293 if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
294 return -ENOTSUPP;
295
296 if (sdata->vif.type != NL80211_IFTYPE_STATION)
297 return -EINVAL;
298
299 tdls_dbg(sdata, "TDLS oper %d peer %pM\n", oper, peer);
300
301 switch (oper) {
302 case NL80211_TDLS_ENABLE_LINK:
303 rcu_read_lock();
304 sta = sta_info_get(sdata, peer);
305 if (!sta) {
306 rcu_read_unlock();
307 return -ENOLINK;
308 }
309
310 set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
311 rcu_read_unlock();
312 break;
313 case NL80211_TDLS_DISABLE_LINK:
314 return sta_info_destroy_addr(sdata, peer);
315 case NL80211_TDLS_TEARDOWN:
316 case NL80211_TDLS_SETUP:
317 case NL80211_TDLS_DISCOVERY_REQ:
318 /* We don't support in-driver setup/teardown/discovery */
319 return -ENOTSUPP;
320 default:
321 return -ENOTSUPP;
322 }
323
324 return 0;
325}
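The "link IE is always added last" switch hinges on who initiated the exchange: for requests we are the initiator, for responses the peer is, so the two ieee80211_tdls_add_link_ie() call sites pass the same addresses with src and peer swapped. For reference, the element it fills is laid out as below (reproduced from include/linux/ieee80211.h from memory, so double-check field order against the header):

    struct ieee80211_tdls_lnkie {
            u8 ie_type;             /* WLAN_EID_LINK_ID */
            u8 ie_len;              /* bytes after this field, i.e. sizeof - 2 */
            u8 bssid[ETH_ALEN];     /* BSS both peers are associated to */
            u8 init_sta[ETH_ALEN];  /* TDLS initiator address */
            u8 resp_sta[ETH_ALEN];  /* TDLS responder address */
    } __packed;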
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index cec5b60487a4..cfe1a0688b5c 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -184,6 +184,20 @@ TRACE_EVENT(drv_return_bool,
184 "true" : "false") 184 "true" : "false")
185); 185);
186 186
187TRACE_EVENT(drv_return_u32,
188 TP_PROTO(struct ieee80211_local *local, u32 ret),
189 TP_ARGS(local, ret),
190 TP_STRUCT__entry(
191 LOCAL_ENTRY
192 __field(u32, ret)
193 ),
194 TP_fast_assign(
195 LOCAL_ASSIGN;
196 __entry->ret = ret;
197 ),
198 TP_printk(LOCAL_PR_FMT " - %u", LOCAL_PR_ARG, __entry->ret)
199);
200
187TRACE_EVENT(drv_return_u64, 201TRACE_EVENT(drv_return_u64,
188 TP_PROTO(struct ieee80211_local *local, u64 ret), 202 TP_PROTO(struct ieee80211_local *local, u64 ret),
189 TP_ARGS(local, ret), 203 TP_ARGS(local, ret),
@@ -1375,6 +1389,91 @@ TRACE_EVENT(drv_change_chanctx,
1375 ) 1389 )
1376); 1390);
1377 1391
1392#if !defined(__TRACE_VIF_ENTRY)
1393#define __TRACE_VIF_ENTRY
1394struct trace_vif_entry {
1395 enum nl80211_iftype vif_type;
1396 bool p2p;
1397 char vif_name[IFNAMSIZ];
1398} __packed;
1399
1400struct trace_chandef_entry {
1401 u32 control_freq;
1402 u32 chan_width;
1403 u32 center_freq1;
1404 u32 center_freq2;
1405} __packed;
1406
1407struct trace_switch_entry {
1408 struct trace_vif_entry vif;
1409 struct trace_chandef_entry old_chandef;
1410 struct trace_chandef_entry new_chandef;
1411} __packed;
1412
1413#define SWITCH_ENTRY_ASSIGN(to, from) local_vifs[i].to = vifs[i].from
1414#endif
1415
1416TRACE_EVENT(drv_switch_vif_chanctx,
1417 TP_PROTO(struct ieee80211_local *local,
1418 struct ieee80211_vif_chanctx_switch *vifs,
1419 int n_vifs, enum ieee80211_chanctx_switch_mode mode),
1420 TP_ARGS(local, vifs, n_vifs, mode),
1421
1422 TP_STRUCT__entry(
1423 LOCAL_ENTRY
1424 __field(int, n_vifs)
1425 __field(u32, mode)
1426 __dynamic_array(u8, vifs,
1427 sizeof(struct trace_switch_entry) * n_vifs)
1428 ),
1429
1430 TP_fast_assign(
1431 LOCAL_ASSIGN;
1432 __entry->n_vifs = n_vifs;
1433 __entry->mode = mode;
1434 {
1435 struct trace_switch_entry *local_vifs =
1436 __get_dynamic_array(vifs);
1437 int i;
1438
1439 for (i = 0; i < n_vifs; i++) {
1440 struct ieee80211_sub_if_data *sdata;
1441
1442 sdata = container_of(vifs[i].vif,
1443 struct ieee80211_sub_if_data,
1444 vif);
1445
1446 SWITCH_ENTRY_ASSIGN(vif.vif_type, vif->type);
1447 SWITCH_ENTRY_ASSIGN(vif.p2p, vif->p2p);
1448 strncpy(local_vifs[i].vif.vif_name,
1449 sdata->name,
1450 sizeof(local_vifs[i].vif.vif_name));
1451 SWITCH_ENTRY_ASSIGN(old_chandef.control_freq,
1452 old_ctx->def.chan->center_freq);
1453 SWITCH_ENTRY_ASSIGN(old_chandef.chan_width,
1454 old_ctx->def.width);
1455 SWITCH_ENTRY_ASSIGN(old_chandef.center_freq1,
1456 old_ctx->def.center_freq1);
1457 SWITCH_ENTRY_ASSIGN(old_chandef.center_freq2,
1458 old_ctx->def.center_freq2);
1459 SWITCH_ENTRY_ASSIGN(new_chandef.control_freq,
1460 new_ctx->def.chan->center_freq);
1461 SWITCH_ENTRY_ASSIGN(new_chandef.chan_width,
1462 new_ctx->def.width);
1463 SWITCH_ENTRY_ASSIGN(new_chandef.center_freq1,
1464 new_ctx->def.center_freq1);
1465 SWITCH_ENTRY_ASSIGN(new_chandef.center_freq2,
1466 new_ctx->def.center_freq2);
1467 }
1468 }
1469 ),
1470
1471 TP_printk(
1472 LOCAL_PR_FMT " n_vifs:%d mode:%d",
1473 LOCAL_PR_ARG, __entry->n_vifs, __entry->mode
1474 )
1475);
1476
1378DECLARE_EVENT_CLASS(local_sdata_chanctx, 1477DECLARE_EVENT_CLASS(local_sdata_chanctx,
1379 TP_PROTO(struct ieee80211_local *local, 1478 TP_PROTO(struct ieee80211_local *local,
1380 struct ieee80211_sub_if_data *sdata, 1479 struct ieee80211_sub_if_data *sdata,
@@ -1499,6 +1598,24 @@ DEFINE_EVENT(local_sdata_evt, drv_leave_ibss,
1499 TP_ARGS(local, sdata) 1598 TP_ARGS(local, sdata)
1500); 1599);
1501 1600
1601TRACE_EVENT(drv_get_expected_throughput,
1602 TP_PROTO(struct ieee80211_sta *sta),
1603
1604 TP_ARGS(sta),
1605
1606 TP_STRUCT__entry(
1607 STA_ENTRY
1608 ),
1609
1610 TP_fast_assign(
1611 STA_ASSIGN;
1612 ),
1613
1614 TP_printk(
1615 STA_PR_FMT, STA_PR_ARG
1616 )
1617);
1618
1502/* 1619/*
1503 * Tracing for API calls that drivers call. 1620 * Tracing for API calls that drivers call.
1504 */ 1621 */
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 19d36d4117e0..5214686d9fd1 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -2328,7 +2328,8 @@ void ieee80211_tx_pending(unsigned long data)
2328/* functions for drivers to get certain frames */ 2328/* functions for drivers to get certain frames */
2329 2329
2330static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata, 2330static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
2331 struct ps_data *ps, struct sk_buff *skb) 2331 struct ps_data *ps, struct sk_buff *skb,
2332 bool is_template)
2332{ 2333{
2333 u8 *pos, *tim; 2334 u8 *pos, *tim;
2334 int aid0 = 0; 2335 int aid0 = 0;
@@ -2341,11 +2342,12 @@ static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
2341 * checking byte-for-byte */ 2342 * checking byte-for-byte */
2342 have_bits = !bitmap_empty((unsigned long *)ps->tim, 2343 have_bits = !bitmap_empty((unsigned long *)ps->tim,
2343 IEEE80211_MAX_AID+1); 2344 IEEE80211_MAX_AID+1);
2344 2345 if (!is_template) {
2345 if (ps->dtim_count == 0) 2346 if (ps->dtim_count == 0)
2346 ps->dtim_count = sdata->vif.bss_conf.dtim_period - 1; 2347 ps->dtim_count = sdata->vif.bss_conf.dtim_period - 1;
2347 else 2348 else
2348 ps->dtim_count--; 2349 ps->dtim_count--;
2350 }
2349 2351
2350 tim = pos = (u8 *) skb_put(skb, 6); 2352 tim = pos = (u8 *) skb_put(skb, 6);
2351 *pos++ = WLAN_EID_TIM; 2353 *pos++ = WLAN_EID_TIM;
@@ -2391,7 +2393,8 @@ static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
2391} 2393}
2392 2394
2393static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata, 2395static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
2394 struct ps_data *ps, struct sk_buff *skb) 2396 struct ps_data *ps, struct sk_buff *skb,
2397 bool is_template)
2395{ 2398{
2396 struct ieee80211_local *local = sdata->local; 2399 struct ieee80211_local *local = sdata->local;
2397 2400
@@ -2403,24 +2406,24 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
2403 * of the tim bitmap in mac80211 and the driver. 2406 * of the tim bitmap in mac80211 and the driver.
2404 */ 2407 */
2405 if (local->tim_in_locked_section) { 2408 if (local->tim_in_locked_section) {
2406 __ieee80211_beacon_add_tim(sdata, ps, skb); 2409 __ieee80211_beacon_add_tim(sdata, ps, skb, is_template);
2407 } else { 2410 } else {
2408 spin_lock_bh(&local->tim_lock); 2411 spin_lock_bh(&local->tim_lock);
2409 __ieee80211_beacon_add_tim(sdata, ps, skb); 2412 __ieee80211_beacon_add_tim(sdata, ps, skb, is_template);
2410 spin_unlock_bh(&local->tim_lock); 2413 spin_unlock_bh(&local->tim_lock);
2411 } 2414 }
2412 2415
2413 return 0; 2416 return 0;
2414} 2417}
2415 2418
2416static void ieee80211_update_csa(struct ieee80211_sub_if_data *sdata, 2419static void ieee80211_set_csa(struct ieee80211_sub_if_data *sdata,
2417 struct beacon_data *beacon) 2420 struct beacon_data *beacon)
2418{ 2421{
2419 struct probe_resp *resp; 2422 struct probe_resp *resp;
2420 int counter_offset_beacon = sdata->csa_counter_offset_beacon;
2421 int counter_offset_presp = sdata->csa_counter_offset_presp;
2422 u8 *beacon_data; 2423 u8 *beacon_data;
2423 size_t beacon_data_len; 2424 size_t beacon_data_len;
2425 int i;
2426 u8 count = sdata->csa_current_counter;
2424 2427
2425 switch (sdata->vif.type) { 2428 switch (sdata->vif.type) {
2426 case NL80211_IFTYPE_AP: 2429 case NL80211_IFTYPE_AP:
@@ -2438,40 +2441,57 @@ static void ieee80211_update_csa(struct ieee80211_sub_if_data *sdata,
2438 default: 2441 default:
2439 return; 2442 return;
2440 } 2443 }
2441 if (WARN_ON(counter_offset_beacon >= beacon_data_len))
2442 return;
2443 2444
2444 /* Warn if the driver did not check for/react to csa 2445 for (i = 0; i < IEEE80211_MAX_CSA_COUNTERS_NUM; ++i) {
2445 * completeness. A beacon with CSA counter set to 0 should 2446 u16 counter_offset_beacon =
2446 * never occur, because a counter of 1 means switch just 2447 sdata->csa_counter_offset_beacon[i];
2447 * before the next beacon. 2448 u16 counter_offset_presp = sdata->csa_counter_offset_presp[i];
2448 */
2449 if (WARN_ON(beacon_data[counter_offset_beacon] == 1))
2450 return;
2451 2449
2452 beacon_data[counter_offset_beacon]--; 2450 if (counter_offset_beacon) {
2451 if (WARN_ON(counter_offset_beacon >= beacon_data_len))
2452 return;
2453 2453
2454 if (sdata->vif.type == NL80211_IFTYPE_AP && counter_offset_presp) { 2454 beacon_data[counter_offset_beacon] = count;
2455 rcu_read_lock(); 2455 }
2456 resp = rcu_dereference(sdata->u.ap.probe_resp); 2456
2457 if (sdata->vif.type == NL80211_IFTYPE_AP &&
2458 counter_offset_presp) {
2459 rcu_read_lock();
2460 resp = rcu_dereference(sdata->u.ap.probe_resp);
2457 2461
2458 /* if nl80211 accepted the offset, this should not happen. */ 2462 /* If nl80211 accepted the offset, this should
2459 if (WARN_ON(!resp)) { 2463 * not happen.
2464 */
2465 if (WARN_ON(!resp)) {
2466 rcu_read_unlock();
2467 return;
2468 }
2469 resp->data[counter_offset_presp] = count;
2460 rcu_read_unlock(); 2470 rcu_read_unlock();
2461 return;
2462 } 2471 }
2463 resp->data[counter_offset_presp]--;
2464 rcu_read_unlock();
2465 } 2472 }
2466} 2473}
2467 2474
2475u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif)
2476{
2477 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
2478
2479 sdata->csa_current_counter--;
2480
2481 /* the counter should never reach 0 */
2482 WARN_ON(!sdata->csa_current_counter);
2483
2484 return sdata->csa_current_counter;
2485}
2486EXPORT_SYMBOL(ieee80211_csa_update_counter);
2487
2468bool ieee80211_csa_is_complete(struct ieee80211_vif *vif) 2488bool ieee80211_csa_is_complete(struct ieee80211_vif *vif)
2469{ 2489{
2470 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 2490 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
2471 struct beacon_data *beacon = NULL; 2491 struct beacon_data *beacon = NULL;
2472 u8 *beacon_data; 2492 u8 *beacon_data;
2473 size_t beacon_data_len; 2493 size_t beacon_data_len;
2474 int counter_beacon = sdata->csa_counter_offset_beacon; 2494 int counter_beacon = sdata->csa_counter_offset_beacon[0];
2475 int ret = false; 2495 int ret = false;
2476 2496
2477 if (!ieee80211_sdata_running(sdata)) 2497 if (!ieee80211_sdata_running(sdata))
@@ -2521,9 +2541,11 @@ bool ieee80211_csa_is_complete(struct ieee80211_vif *vif)
2521} 2541}
2522EXPORT_SYMBOL(ieee80211_csa_is_complete); 2542EXPORT_SYMBOL(ieee80211_csa_is_complete);
2523 2543
2524struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, 2544static struct sk_buff *
2525 struct ieee80211_vif *vif, 2545__ieee80211_beacon_get(struct ieee80211_hw *hw,
2526 u16 *tim_offset, u16 *tim_length) 2546 struct ieee80211_vif *vif,
2547 struct ieee80211_mutable_offsets *offs,
2548 bool is_template)
2527{ 2549{
2528 struct ieee80211_local *local = hw_to_local(hw); 2550 struct ieee80211_local *local = hw_to_local(hw);
2529 struct sk_buff *skb = NULL; 2551 struct sk_buff *skb = NULL;
@@ -2532,6 +2554,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2532 enum ieee80211_band band; 2554 enum ieee80211_band band;
2533 struct ieee80211_tx_rate_control txrc; 2555 struct ieee80211_tx_rate_control txrc;
2534 struct ieee80211_chanctx_conf *chanctx_conf; 2556 struct ieee80211_chanctx_conf *chanctx_conf;
2557 int csa_off_base = 0;
2535 2558
2536 rcu_read_lock(); 2559 rcu_read_lock();
2537 2560
@@ -2541,18 +2564,20 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2541 if (!ieee80211_sdata_running(sdata) || !chanctx_conf) 2564 if (!ieee80211_sdata_running(sdata) || !chanctx_conf)
2542 goto out; 2565 goto out;
2543 2566
2544 if (tim_offset) 2567 if (offs)
2545 *tim_offset = 0; 2568 memset(offs, 0, sizeof(*offs));
2546 if (tim_length)
2547 *tim_length = 0;
2548 2569
2549 if (sdata->vif.type == NL80211_IFTYPE_AP) { 2570 if (sdata->vif.type == NL80211_IFTYPE_AP) {
2550 struct ieee80211_if_ap *ap = &sdata->u.ap; 2571 struct ieee80211_if_ap *ap = &sdata->u.ap;
2551 struct beacon_data *beacon = rcu_dereference(ap->beacon); 2572 struct beacon_data *beacon = rcu_dereference(ap->beacon);
2552 2573
2553 if (beacon) { 2574 if (beacon) {
2554 if (sdata->vif.csa_active) 2575 if (sdata->vif.csa_active) {
2555 ieee80211_update_csa(sdata, beacon); 2576 if (!is_template)
2577 ieee80211_csa_update_counter(vif);
2578
2579 ieee80211_set_csa(sdata, beacon);
2580 }
2556 2581
2557 /* 2582 /*
2558 * headroom, head length, 2583 * headroom, head length,
@@ -2569,12 +2594,16 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2569 memcpy(skb_put(skb, beacon->head_len), beacon->head, 2594 memcpy(skb_put(skb, beacon->head_len), beacon->head,
2570 beacon->head_len); 2595 beacon->head_len);
2571 2596
2572 ieee80211_beacon_add_tim(sdata, &ap->ps, skb); 2597 ieee80211_beacon_add_tim(sdata, &ap->ps, skb,
2598 is_template);
2573 2599
2574 if (tim_offset) 2600 if (offs) {
2575 *tim_offset = beacon->head_len; 2601 offs->tim_offset = beacon->head_len;
2576 if (tim_length) 2602 offs->tim_length = skb->len - beacon->head_len;
2577 *tim_length = skb->len - beacon->head_len; 2603
2604 /* for AP the csa offsets are from tail */
2605 csa_off_base = skb->len;
2606 }
2578 2607
2579 if (beacon->tail) 2608 if (beacon->tail)
2580 memcpy(skb_put(skb, beacon->tail_len), 2609 memcpy(skb_put(skb, beacon->tail_len),
@@ -2589,9 +2618,12 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2589 if (!presp) 2618 if (!presp)
2590 goto out; 2619 goto out;
2591 2620
2592 if (sdata->vif.csa_active) 2621 if (sdata->vif.csa_active) {
2593 ieee80211_update_csa(sdata, presp); 2622 if (!is_template)
2623 ieee80211_csa_update_counter(vif);
2594 2624
2625 ieee80211_set_csa(sdata, presp);
2626 }
2595 2627
2596 skb = dev_alloc_skb(local->tx_headroom + presp->head_len + 2628 skb = dev_alloc_skb(local->tx_headroom + presp->head_len +
2597 local->hw.extra_beacon_tailroom); 2629 local->hw.extra_beacon_tailroom);
@@ -2611,8 +2643,17 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2611 if (!bcn) 2643 if (!bcn)
2612 goto out; 2644 goto out;
2613 2645
2614 if (sdata->vif.csa_active) 2646 if (sdata->vif.csa_active) {
2615 ieee80211_update_csa(sdata, bcn); 2647 if (!is_template)
2648 /* TODO: For mesh csa_counter is in TU, so
2649 * decrementing it by one isn't correct, but
 2650 * for now we leave it consistent with
 2651 * mac80211's overall behavior.
2652 */
2653 ieee80211_csa_update_counter(vif);
2654
2655 ieee80211_set_csa(sdata, bcn);
2656 }
2616 2657
2617 if (ifmsh->sync_ops) 2658 if (ifmsh->sync_ops)
2618 ifmsh->sync_ops->adjust_tbtt(sdata, bcn); 2659 ifmsh->sync_ops->adjust_tbtt(sdata, bcn);
@@ -2626,13 +2667,33 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2626 goto out; 2667 goto out;
2627 skb_reserve(skb, local->tx_headroom); 2668 skb_reserve(skb, local->tx_headroom);
2628 memcpy(skb_put(skb, bcn->head_len), bcn->head, bcn->head_len); 2669 memcpy(skb_put(skb, bcn->head_len), bcn->head, bcn->head_len);
2629 ieee80211_beacon_add_tim(sdata, &ifmsh->ps, skb); 2670 ieee80211_beacon_add_tim(sdata, &ifmsh->ps, skb, is_template);
2671
2672 if (offs) {
2673 offs->tim_offset = bcn->head_len;
2674 offs->tim_length = skb->len - bcn->head_len;
2675 }
2676
2630 memcpy(skb_put(skb, bcn->tail_len), bcn->tail, bcn->tail_len); 2677 memcpy(skb_put(skb, bcn->tail_len), bcn->tail, bcn->tail_len);
2631 } else { 2678 } else {
2632 WARN_ON(1); 2679 WARN_ON(1);
2633 goto out; 2680 goto out;
2634 } 2681 }
2635 2682
2683 /* CSA offsets */
2684 if (offs) {
2685 int i;
2686
2687 for (i = 0; i < IEEE80211_MAX_CSA_COUNTERS_NUM; i++) {
2688 u16 csa_off = sdata->csa_counter_offset_beacon[i];
2689
2690 if (!csa_off)
2691 continue;
2692
2693 offs->csa_counter_offs[i] = csa_off_base + csa_off;
2694 }
2695 }
2696
2636 band = chanctx_conf->def.chan->band; 2697 band = chanctx_conf->def.chan->band;
2637 2698
2638 info = IEEE80211_SKB_CB(skb); 2699 info = IEEE80211_SKB_CB(skb);
@@ -2663,6 +2724,32 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2663 out: 2724 out:
2664 rcu_read_unlock(); 2725 rcu_read_unlock();
2665 return skb; 2726 return skb;
2727
2728}
2729
2730struct sk_buff *
2731ieee80211_beacon_get_template(struct ieee80211_hw *hw,
2732 struct ieee80211_vif *vif,
2733 struct ieee80211_mutable_offsets *offs)
2734{
2735 return __ieee80211_beacon_get(hw, vif, offs, true);
2736}
2737EXPORT_SYMBOL(ieee80211_beacon_get_template);
2738
2739struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2740 struct ieee80211_vif *vif,
2741 u16 *tim_offset, u16 *tim_length)
2742{
2743 struct ieee80211_mutable_offsets offs = {};
2744 struct sk_buff *bcn = __ieee80211_beacon_get(hw, vif, &offs, false);
2745
2746 if (tim_offset)
2747 *tim_offset = offs.tim_offset;
2748
2749 if (tim_length)
2750 *tim_length = offs.tim_length;
2751
2752 return bcn;
2666} 2753}
2667EXPORT_SYMBOL(ieee80211_beacon_get_tim); 2754EXPORT_SYMBOL(ieee80211_beacon_get_tim);
2668 2755
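The tx.c split gives beacon-offloading drivers a stable template: with is_template=true, __ieee80211_beacon_get() decrements neither the DTIM count nor the CSA counter, and reports where the mutable bytes live. A hedged driver-side sketch of consuming the new export; only the two mac80211 symbols are from this patch, everything else (function name, the download step) is hypothetical:

    /* sketch only: how a beacon-offload driver might use the API */
    static int drv_refresh_beacon(struct ieee80211_hw *hw,
                                  struct ieee80211_vif *vif)
    {
            struct ieee80211_mutable_offsets offs = {};
            struct sk_buff *skb;

            skb = ieee80211_beacon_get_template(hw, vif, &offs);
            if (!skb)
                    return -ENOENT;

            /* offs.tim_offset/offs.tim_length locate the TIM element;
             * offs.csa_counter_offs[] locate CSA countdown bytes the
             * device must update itself, e.g. by mirroring
             * ieee80211_csa_update_counter() in firmware */

            /* ... push skb->data/skb->len plus the offsets to the device ... */

            dev_kfree_skb(skb);
            return 0;
    }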
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 3c365837e910..6886601afe1c 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -554,7 +554,7 @@ void ieee80211_flush_queues(struct ieee80211_local *local,
554 ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP, 554 ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
555 IEEE80211_QUEUE_STOP_REASON_FLUSH); 555 IEEE80211_QUEUE_STOP_REASON_FLUSH);
556 556
557 drv_flush(local, queues, false); 557 drv_flush(local, sdata, queues, false);
558 558
559 ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP, 559 ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
560 IEEE80211_QUEUE_STOP_REASON_FLUSH); 560 IEEE80211_QUEUE_STOP_REASON_FLUSH);
@@ -1457,6 +1457,44 @@ void ieee80211_stop_device(struct ieee80211_local *local)
1457 drv_stop(local); 1457 drv_stop(local);
1458} 1458}
1459 1459
1460static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local)
1461{
1462 struct ieee80211_sub_if_data *sdata;
1463 struct ieee80211_chanctx *ctx;
1464
1465 /*
1466 * We get here if during resume the device can't be restarted properly.
1467 * We might also get here if this happens during HW reset, which is a
1468 * slightly different situation and we need to drop all connections in
1469 * the latter case.
1470 *
1471 * Ask cfg80211 to turn off all interfaces, this will result in more
1472 * warnings but at least we'll then get into a clean stopped state.
1473 */
1474
1475 local->resuming = false;
1476 local->suspended = false;
1477 local->started = false;
1478
1479 /* scheduled scan clearly can't be running any more, but tell
1480 * cfg80211 and clear local state
1481 */
1482 ieee80211_sched_scan_end(local);
1483
1484 list_for_each_entry(sdata, &local->interfaces, list)
1485 sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER;
1486
1487 /* Mark channel contexts as not being in the driver any more to avoid
1488 * removing them from the driver during the shutdown process...
1489 */
1490 mutex_lock(&local->chanctx_mtx);
1491 list_for_each_entry(ctx, &local->chanctx_list, list)
1492 ctx->driver_present = false;
1493 mutex_unlock(&local->chanctx_mtx);
1494
1495 cfg80211_shutdown_all_interfaces(local->hw.wiphy);
1496}
1497
1460static void ieee80211_assign_chanctx(struct ieee80211_local *local, 1498static void ieee80211_assign_chanctx(struct ieee80211_local *local,
1461 struct ieee80211_sub_if_data *sdata) 1499 struct ieee80211_sub_if_data *sdata)
1462{ 1500{
@@ -1520,9 +1558,11 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1520 */ 1558 */
1521 res = drv_start(local); 1559 res = drv_start(local);
1522 if (res) { 1560 if (res) {
1523 WARN(local->suspended, "Hardware became unavailable " 1561 if (local->suspended)
1524 "upon resume. This could be a software issue " 1562 WARN(1, "Hardware became unavailable upon resume. This could be a software issue prior to suspend or a hardware issue.\n");
1525 "prior to suspend or a hardware issue.\n"); 1563 else
1564 WARN(1, "Hardware became unavailable during restart.\n");
1565 ieee80211_handle_reconfig_failure(local);
1526 return res; 1566 return res;
1527 } 1567 }
1528 1568
@@ -1546,7 +1586,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1546 WARN_ON(local->resuming); 1586 WARN_ON(local->resuming);
1547 res = drv_add_interface(local, sdata); 1587 res = drv_add_interface(local, sdata);
1548 if (WARN_ON(res)) { 1588 if (WARN_ON(res)) {
1549 rcu_assign_pointer(local->monitor_sdata, NULL); 1589 RCU_INIT_POINTER(local->monitor_sdata, NULL);
1550 synchronize_net(); 1590 synchronize_net();
1551 kfree(sdata); 1591 kfree(sdata);
1552 } 1592 }
@@ -1565,17 +1605,17 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1565 list_for_each_entry(ctx, &local->chanctx_list, list) 1605 list_for_each_entry(ctx, &local->chanctx_list, list)
1566 WARN_ON(drv_add_chanctx(local, ctx)); 1606 WARN_ON(drv_add_chanctx(local, ctx));
1567 mutex_unlock(&local->chanctx_mtx); 1607 mutex_unlock(&local->chanctx_mtx);
1568 }
1569 1608
1570 list_for_each_entry(sdata, &local->interfaces, list) { 1609 list_for_each_entry(sdata, &local->interfaces, list) {
1571 if (!ieee80211_sdata_running(sdata)) 1610 if (!ieee80211_sdata_running(sdata))
1572 continue; 1611 continue;
1573 ieee80211_assign_chanctx(local, sdata); 1612 ieee80211_assign_chanctx(local, sdata);
1574 } 1613 }
1575 1614
1576 sdata = rtnl_dereference(local->monitor_sdata); 1615 sdata = rtnl_dereference(local->monitor_sdata);
1577 if (sdata && ieee80211_sdata_running(sdata)) 1616 if (sdata && ieee80211_sdata_running(sdata))
1578 ieee80211_assign_chanctx(local, sdata); 1617 ieee80211_assign_chanctx(local, sdata);
1618 }
1579 1619
1580 /* add STAs back */ 1620 /* add STAs back */
1581 mutex_lock(&local->sta_mtx); 1621 mutex_lock(&local->sta_mtx);
@@ -1671,13 +1711,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1671 } 1711 }
1672 break; 1712 break;
1673 case NL80211_IFTYPE_WDS: 1713 case NL80211_IFTYPE_WDS:
1674 break;
1675 case NL80211_IFTYPE_AP_VLAN: 1714 case NL80211_IFTYPE_AP_VLAN:
1676 case NL80211_IFTYPE_MONITOR: 1715 case NL80211_IFTYPE_MONITOR:
1677 /* ignore virtual */
1678 break;
1679 case NL80211_IFTYPE_P2P_DEVICE: 1716 case NL80211_IFTYPE_P2P_DEVICE:
1680 changed = BSS_CHANGED_IDLE; 1717 /* nothing to do */
1681 break; 1718 break;
1682 case NL80211_IFTYPE_UNSPECIFIED: 1719 case NL80211_IFTYPE_UNSPECIFIED:
1683 case NUM_NL80211_IFTYPES: 1720 case NUM_NL80211_IFTYPES:
@@ -2797,3 +2834,121 @@ void ieee80211_recalc_dtim(struct ieee80211_local *local,
2797 2834
2798 ps->dtim_count = dtim_count; 2835 ps->dtim_count = dtim_count;
2799} 2836}
2837
2838int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
2839 const struct cfg80211_chan_def *chandef,
2840 enum ieee80211_chanctx_mode chanmode,
2841 u8 radar_detect)
2842{
2843 struct ieee80211_local *local = sdata->local;
2844 struct ieee80211_sub_if_data *sdata_iter;
2845 enum nl80211_iftype iftype = sdata->wdev.iftype;
2846 int num[NUM_NL80211_IFTYPES];
2847 struct ieee80211_chanctx *ctx;
2848 int num_different_channels = 0;
2849 int total = 1;
2850
2851 lockdep_assert_held(&local->chanctx_mtx);
2852
2853 if (WARN_ON(hweight32(radar_detect) > 1))
2854 return -EINVAL;
2855
2856 if (WARN_ON(chandef && chanmode == IEEE80211_CHANCTX_SHARED &&
2857 !chandef->chan))
2858 return -EINVAL;
2859
2860 if (chandef)
2861 num_different_channels = 1;
2862
2863 if (WARN_ON(iftype >= NUM_NL80211_IFTYPES))
2864 return -EINVAL;
2865
2866 /* Always allow software iftypes */
2867 if (local->hw.wiphy->software_iftypes & BIT(iftype)) {
2868 if (radar_detect)
2869 return -EINVAL;
2870 return 0;
2871 }
2872
2873 memset(num, 0, sizeof(num));
2874
2875 if (iftype != NL80211_IFTYPE_UNSPECIFIED)
2876 num[iftype] = 1;
2877
2878 list_for_each_entry(ctx, &local->chanctx_list, list) {
2879 if (ctx->conf.radar_enabled)
2880 radar_detect |= BIT(ctx->conf.def.width);
2881 if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) {
2882 num_different_channels++;
2883 continue;
2884 }
2885 if (chandef && chanmode == IEEE80211_CHANCTX_SHARED &&
2886 cfg80211_chandef_compatible(chandef,
2887 &ctx->conf.def))
2888 continue;
2889 num_different_channels++;
2890 }
2891
2892 list_for_each_entry_rcu(sdata_iter, &local->interfaces, list) {
2893 struct wireless_dev *wdev_iter;
2894
2895 wdev_iter = &sdata_iter->wdev;
2896
2897 if (sdata_iter == sdata ||
2898 rcu_access_pointer(sdata_iter->vif.chanctx_conf) == NULL ||
2899 local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype))
2900 continue;
2901
2902 num[wdev_iter->iftype]++;
2903 total++;
2904 }
2905
2906 if (total == 1 && !radar_detect)
2907 return 0;
2908
2909 return cfg80211_check_combinations(local->hw.wiphy,
2910 num_different_channels,
2911 radar_detect, num);
2912}
2913
2914static void
2915ieee80211_iter_max_chans(const struct ieee80211_iface_combination *c,
2916 void *data)
2917{
2918 u32 *max_num_different_channels = data;
2919
2920 *max_num_different_channels = max(*max_num_different_channels,
2921 c->num_different_channels);
2922}
2923
2924int ieee80211_max_num_channels(struct ieee80211_local *local)
2925{
2926 struct ieee80211_sub_if_data *sdata;
2927 int num[NUM_NL80211_IFTYPES] = {};
2928 struct ieee80211_chanctx *ctx;
2929 int num_different_channels = 0;
2930 u8 radar_detect = 0;
2931 u32 max_num_different_channels = 1;
2932 int err;
2933
2934 lockdep_assert_held(&local->chanctx_mtx);
2935
2936 list_for_each_entry(ctx, &local->chanctx_list, list) {
2937 num_different_channels++;
2938
2939 if (ctx->conf.radar_enabled)
2940 radar_detect |= BIT(ctx->conf.def.width);
2941 }
2942
2943 list_for_each_entry_rcu(sdata, &local->interfaces, list)
2944 num[sdata->wdev.iftype]++;
2945
2946 err = cfg80211_iter_combinations(local->hw.wiphy,
2947 num_different_channels, radar_detect,
2948 num, ieee80211_iter_max_chans,
2949 &max_num_different_channels);
2950 if (err < 0)
2951 return err;
2952
2953 return max_num_different_channels;
2954}
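ieee80211_check_combinations() tallies the channel count and per-iftype interface counts, then defers the actual policy decision to cfg80211_check_combinations() against the wiphy's advertised combinations. A hedged sketch of a caller vetting a prospective channel use; the locking is shown because the function lockdep-asserts chanctx_mtx, and the wrapper name is hypothetical:

    /* sketch only: caller-side use of the new helper */
    static int can_use_channel(struct ieee80211_sub_if_data *sdata,
                               struct cfg80211_chan_def *chandef)
    {
            struct ieee80211_local *local = sdata->local;
            int ret;

            mutex_lock(&local->chanctx_mtx);
            ret = ieee80211_check_combinations(sdata, chandef,
                                               IEEE80211_CHANCTX_SHARED,
                                               0 /* no radar detection */);
            mutex_unlock(&local->chanctx_mtx);

            return ret;     /* 0 if the combination is allowed */
    }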
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index b8600e3c29c8..9b3dcc201145 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -406,7 +406,10 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
406 406
407 if (info->control.hw_key && 407 if (info->control.hw_key &&
408 !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) && 408 !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
409 !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) { 409 !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
410 !((info->control.hw_key->flags &
411 IEEE80211_KEY_FLAG_GENERATE_IV_MGMT) &&
412 ieee80211_is_mgmt(hdr->frame_control))) {
410 /* 413 /*
411 * hwaccel has no need for preallocated room for CCMP 414 * hwaccel has no need for preallocated room for CCMP
412 * header or MIC fields 415 * header or MIC fields
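The extra CCMP condition lets a device offload IV generation for data frames while mac80211 still allocates headroom and generates IVs for protected management frames. To request that, the driver sets the new flag when accepting the key; a hedged sketch (callback shape per mac80211's set_key op, driver name hypothetical):

    /* sketch only: driver accepting a CCMP key whose IVs the hardware
     * generates for data frames but not for MFP management frames */
    static int drv_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                           struct ieee80211_vif *vif, struct ieee80211_sta *sta,
                           struct ieee80211_key_conf *key)
    {
            if (cmd != SET_KEY)
                    return 0;

            if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
                    key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;

            return 0;
    }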
diff --git a/net/mac802154/Kconfig b/net/mac802154/Kconfig
index b33dd76d4307..1818a99b3081 100644
--- a/net/mac802154/Kconfig
+++ b/net/mac802154/Kconfig
@@ -2,6 +2,10 @@ config MAC802154
2 tristate "Generic IEEE 802.15.4 Soft Networking Stack (mac802154)" 2 tristate "Generic IEEE 802.15.4 Soft Networking Stack (mac802154)"
3 depends on IEEE802154 3 depends on IEEE802154
4 select CRC_CCITT 4 select CRC_CCITT
5 select CRYPTO_AUTHENC
6 select CRYPTO_CCM
7 select CRYPTO_CTR
8 select CRYPTO_AES
5 ---help--- 9 ---help---
6 This option enables the hardware independent IEEE 802.15.4 10 This option enables the hardware independent IEEE 802.15.4
7 networking stack for SoftMAC devices (the ones implementing 11 networking stack for SoftMAC devices (the ones implementing
diff --git a/net/mac802154/Makefile b/net/mac802154/Makefile
index 15d62df52182..9723d6f3f3e5 100644
--- a/net/mac802154/Makefile
+++ b/net/mac802154/Makefile
@@ -1,4 +1,5 @@
1obj-$(CONFIG_MAC802154) += mac802154.o 1obj-$(CONFIG_MAC802154) += mac802154.o
2mac802154-objs := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o monitor.o wpan.o 2mac802154-objs := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o \
3 monitor.o wpan.o llsec.o
3 4
4ccflags-y += -D__CHECK_ENDIAN__ 5ccflags-y += -D__CHECK_ENDIAN__
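The four new selects exist for llsec.c below, which allocates "ccm(aes)" AEAD transforms (one per authentication tag size) plus a "ctr(aes)" block cipher for unauthenticated encryption. A minimal sketch of the allocation calls involved, assuming the 2014-era crypto API; note that crypto_alloc_aead() reports failure via ERR_PTR(), so IS_ERR() is the canonical check:

    #include <linux/err.h>
    #include <crypto/aead.h>

    /* sketch only: allocate and key one CCM transform the way
     * llsec_key_alloc() does for each of the 4/8/16-byte tag sizes */
    static struct crypto_aead *alloc_ccm(const u8 *key, unsigned int authsize)
    {
            struct crypto_aead *tfm;

            tfm = crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(tfm))
                    return tfm;

            if (crypto_aead_setkey(tfm, key, 16) ||
                crypto_aead_setauthsize(tfm, authsize)) {
                    crypto_free_aead(tfm);
                    return ERR_PTR(-EINVAL);
            }

            return tfm;
    }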
diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
new file mode 100644
index 000000000000..1456f73b02b9
--- /dev/null
+++ b/net/mac802154/llsec.c
@@ -0,0 +1,1070 @@
1/*
2 * Copyright (C) 2014 Fraunhofer ITWM
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * Written by:
14 * Phoebe Buckheister <phoebe.buckheister@itwm.fraunhofer.de>
15 */
16
17#include <linux/err.h>
18#include <linux/bug.h>
19#include <linux/completion.h>
20#include <net/ieee802154.h>
21#include <crypto/algapi.h>
22
23#include "mac802154.h"
24#include "llsec.h"
25
26static void llsec_key_put(struct mac802154_llsec_key *key);
27static bool llsec_key_id_equal(const struct ieee802154_llsec_key_id *a,
28 const struct ieee802154_llsec_key_id *b);
29
30static void llsec_dev_free(struct mac802154_llsec_device *dev);
31
32void mac802154_llsec_init(struct mac802154_llsec *sec)
33{
34 memset(sec, 0, sizeof(*sec));
35
36 memset(&sec->params.default_key_source, 0xFF, IEEE802154_ADDR_LEN);
37
38 INIT_LIST_HEAD(&sec->table.security_levels);
39 INIT_LIST_HEAD(&sec->table.devices);
40 INIT_LIST_HEAD(&sec->table.keys);
41 hash_init(sec->devices_short);
42 hash_init(sec->devices_hw);
43 rwlock_init(&sec->lock);
44}
45
46void mac802154_llsec_destroy(struct mac802154_llsec *sec)
47{
48 struct ieee802154_llsec_seclevel *sl, *sn;
49 struct ieee802154_llsec_device *dev, *dn;
50 struct ieee802154_llsec_key_entry *key, *kn;
51
52 list_for_each_entry_safe(sl, sn, &sec->table.security_levels, list) {
53 struct mac802154_llsec_seclevel *msl;
54
55 msl = container_of(sl, struct mac802154_llsec_seclevel, level);
56 list_del(&sl->list);
57 kfree(msl);
58 }
59
60 list_for_each_entry_safe(dev, dn, &sec->table.devices, list) {
61 struct mac802154_llsec_device *mdev;
62
63 mdev = container_of(dev, struct mac802154_llsec_device, dev);
64 list_del(&dev->list);
65 llsec_dev_free(mdev);
66 }
67
68 list_for_each_entry_safe(key, kn, &sec->table.keys, list) {
69 struct mac802154_llsec_key *mkey;
70
71 mkey = container_of(key->key, struct mac802154_llsec_key, key);
72 list_del(&key->list);
73 llsec_key_put(mkey);
74 kfree(key);
75 }
76}
77
78
79
80int mac802154_llsec_get_params(struct mac802154_llsec *sec,
81 struct ieee802154_llsec_params *params)
82{
83 read_lock_bh(&sec->lock);
84 *params = sec->params;
85 read_unlock_bh(&sec->lock);
86
87 return 0;
88}
89
90int mac802154_llsec_set_params(struct mac802154_llsec *sec,
91 const struct ieee802154_llsec_params *params,
92 int changed)
93{
94 write_lock_bh(&sec->lock);
95
96 if (changed & IEEE802154_LLSEC_PARAM_ENABLED)
97 sec->params.enabled = params->enabled;
98 if (changed & IEEE802154_LLSEC_PARAM_FRAME_COUNTER)
99 sec->params.frame_counter = params->frame_counter;
100 if (changed & IEEE802154_LLSEC_PARAM_OUT_LEVEL)
101 sec->params.out_level = params->out_level;
102 if (changed & IEEE802154_LLSEC_PARAM_OUT_KEY)
103 sec->params.out_key = params->out_key;
104 if (changed & IEEE802154_LLSEC_PARAM_KEY_SOURCE)
105 sec->params.default_key_source = params->default_key_source;
106 if (changed & IEEE802154_LLSEC_PARAM_PAN_ID)
107 sec->params.pan_id = params->pan_id;
108 if (changed & IEEE802154_LLSEC_PARAM_HWADDR)
109 sec->params.hwaddr = params->hwaddr;
110 if (changed & IEEE802154_LLSEC_PARAM_COORD_HWADDR)
111 sec->params.coord_hwaddr = params->coord_hwaddr;
112 if (changed & IEEE802154_LLSEC_PARAM_COORD_SHORTADDR)
113 sec->params.coord_shortaddr = params->coord_shortaddr;
114
115 write_unlock_bh(&sec->lock);
116
117 return 0;
118}
119
120
121
122static struct mac802154_llsec_key*
123llsec_key_alloc(const struct ieee802154_llsec_key *template)
124{
125 const int authsizes[3] = { 4, 8, 16 };
126 struct mac802154_llsec_key *key;
127 int i;
128
129 key = kzalloc(sizeof(*key), GFP_KERNEL);
130 if (!key)
131 return NULL;
132
133 kref_init(&key->ref);
134 key->key = *template;
135
136 BUILD_BUG_ON(ARRAY_SIZE(authsizes) != ARRAY_SIZE(key->tfm));
137
138 for (i = 0; i < ARRAY_SIZE(key->tfm); i++) {
139 key->tfm[i] = crypto_alloc_aead("ccm(aes)", 0,
140 CRYPTO_ALG_ASYNC);
141 if (!key->tfm[i])
142 goto err_tfm;
143 if (crypto_aead_setkey(key->tfm[i], template->key,
144 IEEE802154_LLSEC_KEY_SIZE))
145 goto err_tfm;
146 if (crypto_aead_setauthsize(key->tfm[i], authsizes[i]))
147 goto err_tfm;
148 }
149
150 key->tfm0 = crypto_alloc_blkcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
151 if (!key->tfm0)
152 goto err_tfm;
153
154 if (crypto_blkcipher_setkey(key->tfm0, template->key,
155 IEEE802154_LLSEC_KEY_SIZE))
156 goto err_tfm0;
157
158 return key;
159
160err_tfm0:
161 crypto_free_blkcipher(key->tfm0);
162err_tfm:
163 for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
164 if (key->tfm[i])
165 crypto_free_aead(key->tfm[i]);
166
167 kfree(key);
168 return NULL;
169}
170
171static void llsec_key_release(struct kref *ref)
172{
173 struct mac802154_llsec_key *key;
174 int i;
175
176 key = container_of(ref, struct mac802154_llsec_key, ref);
177
178 for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
179 crypto_free_aead(key->tfm[i]);
180
181 crypto_free_blkcipher(key->tfm0);
182 kfree(key);
183}
184
185static struct mac802154_llsec_key*
186llsec_key_get(struct mac802154_llsec_key *key)
187{
188 kref_get(&key->ref);
189 return key;
190}
191
192static void llsec_key_put(struct mac802154_llsec_key *key)
193{
194 kref_put(&key->ref, llsec_key_release);
195}
196
197static bool llsec_key_id_equal(const struct ieee802154_llsec_key_id *a,
198 const struct ieee802154_llsec_key_id *b)
199{
200 if (a->mode != b->mode)
201 return false;
202
203 if (a->mode == IEEE802154_SCF_KEY_IMPLICIT)
204 return ieee802154_addr_equal(&a->device_addr, &b->device_addr);
205
206 if (a->id != b->id)
207 return false;
208
209 switch (a->mode) {
210 case IEEE802154_SCF_KEY_INDEX:
211 return true;
212 case IEEE802154_SCF_KEY_SHORT_INDEX:
213 return a->short_source == b->short_source;
214 case IEEE802154_SCF_KEY_HW_INDEX:
215 return a->extended_source == b->extended_source;
216 }
217
218 return false;
219}
220
221int mac802154_llsec_key_add(struct mac802154_llsec *sec,
222 const struct ieee802154_llsec_key_id *id,
223 const struct ieee802154_llsec_key *key)
224{
225 struct mac802154_llsec_key *mkey = NULL;
226 struct ieee802154_llsec_key_entry *pos, *new;
227
228 if (!(key->frame_types & (1 << IEEE802154_FC_TYPE_MAC_CMD)) &&
229 key->cmd_frame_ids)
230 return -EINVAL;
231
232 list_for_each_entry(pos, &sec->table.keys, list) {
233 if (llsec_key_id_equal(&pos->id, id))
234 return -EEXIST;
235
236 if (memcmp(pos->key->key, key->key,
237 IEEE802154_LLSEC_KEY_SIZE))
238 continue;
239
240 mkey = container_of(pos->key, struct mac802154_llsec_key, key);
241
242 /* Don't allow multiple instances of the same AES key to have
243 * different allowed frame types/command frame ids, as this is
244 * not possible in the 802.15.4 PIB.
245 */
246 if (pos->key->frame_types != key->frame_types ||
247 pos->key->cmd_frame_ids != key->cmd_frame_ids)
248 return -EEXIST;
249
250 break;
251 }
252
253 new = kzalloc(sizeof(*new), GFP_KERNEL);
254 if (!new)
255 return -ENOMEM;
256
257 if (!mkey)
258 mkey = llsec_key_alloc(key);
259 else
260 mkey = llsec_key_get(mkey);
261
262 if (!mkey)
263 goto fail;
264
265 new->id = *id;
266 new->key = &mkey->key;
267
268 list_add_rcu(&new->list, &sec->table.keys);
269
270 return 0;
271
272fail:
273 kfree(new);
274 return -ENOMEM;
275}
276
277int mac802154_llsec_key_del(struct mac802154_llsec *sec,
278 const struct ieee802154_llsec_key_id *key)
279{
280 struct ieee802154_llsec_key_entry *pos;
281
282 list_for_each_entry(pos, &sec->table.keys, list) {
283 struct mac802154_llsec_key *mkey;
284
285 mkey = container_of(pos->key, struct mac802154_llsec_key, key);
286
287 if (llsec_key_id_equal(&pos->id, key)) {
288 list_del_rcu(&pos->list);
289 llsec_key_put(mkey);
290 return 0;
291 }
292 }
293
294 return -ENOENT;
295}
296
297
298
299static bool llsec_dev_use_shortaddr(__le16 short_addr)
300{
301 return short_addr != cpu_to_le16(IEEE802154_ADDR_UNDEF) &&
302 short_addr != cpu_to_le16(0xffff);
303}
304
305static u32 llsec_dev_hash_short(__le16 short_addr, __le16 pan_id)
306{
307 return ((__force u16) short_addr) << 16 | (__force u16) pan_id;
308}
309
310static u64 llsec_dev_hash_long(__le64 hwaddr)
311{
312 return (__force u64) hwaddr;
313}
314
315static struct mac802154_llsec_device*
316llsec_dev_find_short(struct mac802154_llsec *sec, __le16 short_addr,
317 __le16 pan_id)
318{
319 struct mac802154_llsec_device *dev;
320 u32 key = llsec_dev_hash_short(short_addr, pan_id);
321
322 hash_for_each_possible_rcu(sec->devices_short, dev, bucket_s, key) {
323 if (dev->dev.short_addr == short_addr &&
324 dev->dev.pan_id == pan_id)
325 return dev;
326 }
327
328 return NULL;
329}
330
331static struct mac802154_llsec_device*
332llsec_dev_find_long(struct mac802154_llsec *sec, __le64 hwaddr)
333{
334 struct mac802154_llsec_device *dev;
335 u64 key = llsec_dev_hash_long(hwaddr);
336
337 hash_for_each_possible_rcu(sec->devices_hw, dev, bucket_hw, key) {
338 if (dev->dev.hwaddr == hwaddr)
339 return dev;
340 }
341
342 return NULL;
343}
344
345static void llsec_dev_free(struct mac802154_llsec_device *dev)
346{
347 struct ieee802154_llsec_device_key *pos, *pn;
348 struct mac802154_llsec_device_key *devkey;
349
350 list_for_each_entry_safe(pos, pn, &dev->dev.keys, list) {
351 devkey = container_of(pos, struct mac802154_llsec_device_key,
352 devkey);
353
354 list_del(&pos->list);
355 kfree(devkey);
356 }
357
358 kfree(dev);
359}
360
361int mac802154_llsec_dev_add(struct mac802154_llsec *sec,
362 const struct ieee802154_llsec_device *dev)
363{
364 struct mac802154_llsec_device *entry;
365 u32 skey = llsec_dev_hash_short(dev->short_addr, dev->pan_id);
366 u64 hwkey = llsec_dev_hash_long(dev->hwaddr);
367
368 BUILD_BUG_ON(sizeof(hwkey) != IEEE802154_ADDR_LEN);
369
370 if ((llsec_dev_use_shortaddr(dev->short_addr) &&
371 llsec_dev_find_short(sec, dev->short_addr, dev->pan_id)) ||
372 llsec_dev_find_long(sec, dev->hwaddr))
373 return -EEXIST;
374
375 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
376 if (!entry)
377 return -ENOMEM;
378
379 entry->dev = *dev;
380 spin_lock_init(&entry->lock);
381 INIT_LIST_HEAD(&entry->dev.keys);
382
383 if (llsec_dev_use_shortaddr(dev->short_addr))
384 hash_add_rcu(sec->devices_short, &entry->bucket_s, skey);
385 else
386 INIT_HLIST_NODE(&entry->bucket_s);
387
388 hash_add_rcu(sec->devices_hw, &entry->bucket_hw, hwkey);
389 list_add_tail_rcu(&entry->dev.list, &sec->table.devices);
390
391 return 0;
392}
393
394static void llsec_dev_free_rcu(struct rcu_head *rcu)
395{
396 llsec_dev_free(container_of(rcu, struct mac802154_llsec_device, rcu));
397}
398
399int mac802154_llsec_dev_del(struct mac802154_llsec *sec, __le64 device_addr)
400{
401 struct mac802154_llsec_device *pos;
402
403 pos = llsec_dev_find_long(sec, device_addr);
404 if (!pos)
405 return -ENOENT;
406
407 hash_del_rcu(&pos->bucket_s);
408 hash_del_rcu(&pos->bucket_hw);
409 call_rcu(&pos->rcu, llsec_dev_free_rcu);
410
411 return 0;
412}
413
414
415
416static struct mac802154_llsec_device_key*
417llsec_devkey_find(struct mac802154_llsec_device *dev,
418 const struct ieee802154_llsec_key_id *key)
419{
420 struct ieee802154_llsec_device_key *devkey;
421
422 list_for_each_entry_rcu(devkey, &dev->dev.keys, list) {
423 if (!llsec_key_id_equal(key, &devkey->key_id))
424 continue;
425
426 return container_of(devkey, struct mac802154_llsec_device_key,
427 devkey);
428 }
429
430 return NULL;
431}
432
433int mac802154_llsec_devkey_add(struct mac802154_llsec *sec,
434 __le64 dev_addr,
435 const struct ieee802154_llsec_device_key *key)
436{
437 struct mac802154_llsec_device *dev;
438 struct mac802154_llsec_device_key *devkey;
439
440 dev = llsec_dev_find_long(sec, dev_addr);
441
442 if (!dev)
443 return -ENOENT;
444
445 if (llsec_devkey_find(dev, &key->key_id))
446 return -EEXIST;
447
448 devkey = kmalloc(sizeof(*devkey), GFP_KERNEL);
449 if (!devkey)
450 return -ENOMEM;
451
452 devkey->devkey = *key;
453 list_add_tail_rcu(&devkey->devkey.list, &dev->dev.keys);
454 return 0;
455}
456
457int mac802154_llsec_devkey_del(struct mac802154_llsec *sec,
458 __le64 dev_addr,
459 const struct ieee802154_llsec_device_key *key)
460{
461 struct mac802154_llsec_device *dev;
462 struct mac802154_llsec_device_key *devkey;
463
464 dev = llsec_dev_find_long(sec, dev_addr);
465
466 if (!dev)
467 return -ENOENT;
468
469 devkey = llsec_devkey_find(dev, &key->key_id);
470 if (!devkey)
471 return -ENOENT;
472
473 list_del_rcu(&devkey->devkey.list);
474 kfree_rcu(devkey, rcu);
475 return 0;
476}
477
478
479
480static struct mac802154_llsec_seclevel*
481llsec_find_seclevel(const struct mac802154_llsec *sec,
482 const struct ieee802154_llsec_seclevel *sl)
483{
484 struct ieee802154_llsec_seclevel *pos;
485
486 list_for_each_entry(pos, &sec->table.security_levels, list) {
487 if (pos->frame_type != sl->frame_type ||
488 (pos->frame_type == IEEE802154_FC_TYPE_MAC_CMD &&
489 pos->cmd_frame_id != sl->cmd_frame_id) ||
490 pos->device_override != sl->device_override ||
491 pos->sec_levels != sl->sec_levels)
492 continue;
493
494 return container_of(pos, struct mac802154_llsec_seclevel,
495 level);
496 }
497
498 return NULL;
499}
500
501int mac802154_llsec_seclevel_add(struct mac802154_llsec *sec,
502 const struct ieee802154_llsec_seclevel *sl)
503{
504 struct mac802154_llsec_seclevel *entry;
505
506 if (llsec_find_seclevel(sec, sl))
507 return -EEXIST;
508
509 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
510 if (!entry)
511 return -ENOMEM;
512
513 entry->level = *sl;
514
515 list_add_tail_rcu(&entry->level.list, &sec->table.security_levels);
516
517 return 0;
518}
519
520int mac802154_llsec_seclevel_del(struct mac802154_llsec *sec,
521 const struct ieee802154_llsec_seclevel *sl)
522{
523 struct mac802154_llsec_seclevel *pos;
524
525 pos = llsec_find_seclevel(sec, sl);
526 if (!pos)
527 return -ENOENT;
528
529 list_del_rcu(&pos->level.list);
530 kfree_rcu(pos, rcu);
531
532 return 0;
533}
534
535
536
537static int llsec_recover_addr(struct mac802154_llsec *sec,
538 struct ieee802154_addr *addr)
539{
540 __le16 caddr = sec->params.coord_shortaddr;
541 addr->pan_id = sec->params.pan_id;
542
543 if (caddr == cpu_to_le16(IEEE802154_ADDR_BROADCAST)) {
544 return -EINVAL;
545 } else if (caddr == cpu_to_le16(IEEE802154_ADDR_UNDEF)) {
546 addr->extended_addr = sec->params.coord_hwaddr;
547 addr->mode = IEEE802154_ADDR_LONG;
548 } else {
549 addr->short_addr = sec->params.coord_shortaddr;
550 addr->mode = IEEE802154_ADDR_SHORT;
551 }
552
553 return 0;
554}
555
556static struct mac802154_llsec_key*
557llsec_lookup_key(struct mac802154_llsec *sec,
558 const struct ieee802154_hdr *hdr,
559 const struct ieee802154_addr *addr,
560 struct ieee802154_llsec_key_id *key_id)
561{
562 struct ieee802154_addr devaddr = *addr;
563 u8 key_id_mode = hdr->sec.key_id_mode;
564 struct ieee802154_llsec_key_entry *key_entry;
565 struct mac802154_llsec_key *key;
566
567 if (key_id_mode == IEEE802154_SCF_KEY_IMPLICIT &&
568 devaddr.mode == IEEE802154_ADDR_NONE) {
569 if (hdr->fc.type == IEEE802154_FC_TYPE_BEACON) {
570 devaddr.extended_addr = sec->params.coord_hwaddr;
571 devaddr.mode = IEEE802154_ADDR_LONG;
572 } else if (llsec_recover_addr(sec, &devaddr) < 0) {
573 return NULL;
574 }
575 }
576
577 list_for_each_entry_rcu(key_entry, &sec->table.keys, list) {
578 const struct ieee802154_llsec_key_id *id = &key_entry->id;
579
580 if (!(key_entry->key->frame_types & BIT(hdr->fc.type)))
581 continue;
582
583 if (id->mode != key_id_mode)
584 continue;
585
586 if (key_id_mode == IEEE802154_SCF_KEY_IMPLICIT) {
587 if (ieee802154_addr_equal(&devaddr, &id->device_addr))
588 goto found;
589 } else {
590 if (id->id != hdr->sec.key_id)
591 continue;
592
593 if ((key_id_mode == IEEE802154_SCF_KEY_INDEX) ||
594 (key_id_mode == IEEE802154_SCF_KEY_SHORT_INDEX &&
595 id->short_source == hdr->sec.short_src) ||
596 (key_id_mode == IEEE802154_SCF_KEY_HW_INDEX &&
597 id->extended_source == hdr->sec.extended_src))
598 goto found;
599 }
600 }
601
602 return NULL;
603
604found:
605 key = container_of(key_entry->key, struct mac802154_llsec_key, key);
606 if (key_id)
607 *key_id = key_entry->id;
608 return llsec_key_get(key);
609}
610
611
612static void llsec_geniv(u8 iv[16], __le64 addr,
613 const struct ieee802154_sechdr *sec)
614{
615 __be64 addr_bytes = (__force __be64) swab64((__force u64) addr);
616 __be32 frame_counter = (__force __be32) swab32((__force u32) sec->frame_counter);
617
618 iv[0] = 1; /* L' = L - 1 = 1 */
619 memcpy(iv + 1, &addr_bytes, sizeof(addr_bytes));
620 memcpy(iv + 9, &frame_counter, sizeof(frame_counter));
621 iv[13] = sec->level;
622 iv[14] = 0;
623 iv[15] = 1;
624}
625
626static int
627llsec_do_encrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
628 const struct ieee802154_hdr *hdr,
629 struct mac802154_llsec_key *key)
630{
631 u8 iv[16];
632 struct scatterlist src;
633 struct blkcipher_desc req = {
634 .tfm = key->tfm0,
635 .info = iv,
636 .flags = 0,
637 };
638
639 llsec_geniv(iv, sec->params.hwaddr, &hdr->sec);
640 sg_init_one(&src, skb->data, skb->len);
641 return crypto_blkcipher_encrypt_iv(&req, &src, &src, skb->len);
642}
643
644static struct crypto_aead*
645llsec_tfm_by_len(struct mac802154_llsec_key *key, int authlen)
646{
647 int i;
648
649 for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
650 if (crypto_aead_authsize(key->tfm[i]) == authlen)
651 return key->tfm[i];
652
653 BUG();
654}
655
656static int
657llsec_do_encrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,
658 const struct ieee802154_hdr *hdr,
659 struct mac802154_llsec_key *key)
660{
661 u8 iv[16];
662 unsigned char *data;
663 int authlen, assoclen, datalen, rc;
664 struct scatterlist src, assoc[2], dst[2];
665 struct aead_request *req;
666
667 authlen = ieee802154_sechdr_authtag_len(&hdr->sec);
668 llsec_geniv(iv, sec->params.hwaddr, &hdr->sec);
669
670 req = aead_request_alloc(llsec_tfm_by_len(key, authlen), GFP_ATOMIC);
671 if (!req)
672 return -ENOMEM;
673
674 sg_init_table(assoc, 2);
675 sg_set_buf(&assoc[0], skb_mac_header(skb), skb->mac_len);
676 assoclen = skb->mac_len;
677
678 data = skb_mac_header(skb) + skb->mac_len;
679 datalen = skb_tail_pointer(skb) - data;
680
681 if (hdr->sec.level & IEEE802154_SCF_SECLEVEL_ENC) {
682 sg_set_buf(&assoc[1], data, 0);
683 } else {
684 sg_set_buf(&assoc[1], data, datalen);
685 assoclen += datalen;
686 datalen = 0;
687 }
688
689 sg_init_one(&src, data, datalen);
690
691 sg_init_table(dst, 2);
692 sg_set_buf(&dst[0], data, datalen);
693 sg_set_buf(&dst[1], skb_put(skb, authlen), authlen);
694
695 aead_request_set_callback(req, 0, NULL, NULL);
696 aead_request_set_assoc(req, assoc, assoclen);
697 aead_request_set_crypt(req, &src, dst, datalen, iv);
698
699 rc = crypto_aead_encrypt(req);
700
701 kfree(req);
702
703 return rc;
704}
705
706static int llsec_do_encrypt(struct sk_buff *skb,
707 const struct mac802154_llsec *sec,
708 const struct ieee802154_hdr *hdr,
709 struct mac802154_llsec_key *key)
710{
711 if (hdr->sec.level == IEEE802154_SCF_SECLEVEL_ENC)
712 return llsec_do_encrypt_unauth(skb, sec, hdr, key);
713 else
714 return llsec_do_encrypt_auth(skb, sec, hdr, key);
715}
716
717int mac802154_llsec_encrypt(struct mac802154_llsec *sec, struct sk_buff *skb)
718{
719 struct ieee802154_hdr hdr;
720 int rc, authlen, hlen;
721 struct mac802154_llsec_key *key;
722 u32 frame_ctr;
723
724 hlen = ieee802154_hdr_pull(skb, &hdr);
725
726 if (hlen < 0 || hdr.fc.type != IEEE802154_FC_TYPE_DATA)
727 return -EINVAL;
728
729 if (!hdr.fc.security_enabled || hdr.sec.level == 0) {
730 skb_push(skb, hlen);
731 return 0;
732 }
733
734 authlen = ieee802154_sechdr_authtag_len(&hdr.sec);
735
736 if (skb->len + hlen + authlen + IEEE802154_MFR_SIZE > IEEE802154_MTU)
737 return -EMSGSIZE;
738
739 rcu_read_lock();
740
741 read_lock_bh(&sec->lock);
742
743 if (!sec->params.enabled) {
744 rc = -EINVAL;
745 goto fail_read;
746 }
747
748 key = llsec_lookup_key(sec, &hdr, &hdr.dest, NULL);
749 if (!key) {
750 rc = -ENOKEY;
751 goto fail_read;
752 }
753
754 read_unlock_bh(&sec->lock);
755
756 write_lock_bh(&sec->lock);
757
758 frame_ctr = be32_to_cpu(sec->params.frame_counter);
759 hdr.sec.frame_counter = cpu_to_le32(frame_ctr);
760 if (frame_ctr == 0xFFFFFFFF) {
761 write_unlock_bh(&sec->lock);
762 llsec_key_put(key);
763 rc = -EOVERFLOW;
764 goto fail;
765 }
766
767 sec->params.frame_counter = cpu_to_be32(frame_ctr + 1);
768
769 write_unlock_bh(&sec->lock);
770
771 rcu_read_unlock();
772
773 skb->mac_len = ieee802154_hdr_push(skb, &hdr);
774 skb_reset_mac_header(skb);
775
776 rc = llsec_do_encrypt(skb, sec, &hdr, key);
777 llsec_key_put(key);
778
779 return rc;
780
781fail_read:
782 read_unlock_bh(&sec->lock);
783fail:
784 rcu_read_unlock();
785 return rc;
786}
787
788
789
790static struct mac802154_llsec_device*
791llsec_lookup_dev(struct mac802154_llsec *sec,
792 const struct ieee802154_addr *addr)
793{
794 struct ieee802154_addr devaddr = *addr;
795 struct mac802154_llsec_device *dev = NULL;
796
797 if (devaddr.mode == IEEE802154_ADDR_NONE &&
798 llsec_recover_addr(sec, &devaddr) < 0)
799 return NULL;
800
801 if (devaddr.mode == IEEE802154_ADDR_SHORT) {
802 u32 key = llsec_dev_hash_short(devaddr.short_addr,
803 devaddr.pan_id);
804
805 hash_for_each_possible_rcu(sec->devices_short, dev,
806 bucket_s, key) {
807 if (dev->dev.pan_id == devaddr.pan_id &&
808 dev->dev.short_addr == devaddr.short_addr)
809 return dev;
810 }
811 } else {
812 u64 key = llsec_dev_hash_long(devaddr.extended_addr);
813
814 hash_for_each_possible_rcu(sec->devices_hw, dev,
815 bucket_hw, key) {
816 if (dev->dev.hwaddr == devaddr.extended_addr)
817 return dev;
818 }
819 }
820
821 return NULL;
822}
823
824static int
825llsec_lookup_seclevel(const struct mac802154_llsec *sec,
826 u8 frame_type, u8 cmd_frame_id,
827 struct ieee802154_llsec_seclevel *rlevel)
828{
829 struct ieee802154_llsec_seclevel *level;
830
831 list_for_each_entry_rcu(level, &sec->table.security_levels, list) {
832 if (level->frame_type == frame_type &&
833 (frame_type != IEEE802154_FC_TYPE_MAC_CMD ||
834 level->cmd_frame_id == cmd_frame_id)) {
835 *rlevel = *level;
836 return 0;
837 }
838 }
839
840 return -EINVAL;
841}
842
843static int
844llsec_do_decrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
845 const struct ieee802154_hdr *hdr,
846 struct mac802154_llsec_key *key, __le64 dev_addr)
847{
848 u8 iv[16];
849 unsigned char *data;
850 int datalen;
851 struct scatterlist src;
852 struct blkcipher_desc req = {
853 .tfm = key->tfm0,
854 .info = iv,
855 .flags = 0,
856 };
857
858 llsec_geniv(iv, dev_addr, &hdr->sec);
859 data = skb_mac_header(skb) + skb->mac_len;
860 datalen = skb_tail_pointer(skb) - data;
861
862 sg_init_one(&src, data, datalen);
863
864 return crypto_blkcipher_decrypt_iv(&req, &src, &src, datalen);
865}
866
867static int
868llsec_do_decrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,
869 const struct ieee802154_hdr *hdr,
870 struct mac802154_llsec_key *key, __le64 dev_addr)
871{
872 u8 iv[16];
873 unsigned char *data;
874 int authlen, datalen, assoclen, rc;
875 struct scatterlist src, assoc[2];
876 struct aead_request *req;
877
878 authlen = ieee802154_sechdr_authtag_len(&hdr->sec);
879 llsec_geniv(iv, dev_addr, &hdr->sec);
880
881 req = aead_request_alloc(llsec_tfm_by_len(key, authlen), GFP_ATOMIC);
882 if (!req)
883 return -ENOMEM;
884
885 sg_init_table(assoc, 2);
886 sg_set_buf(&assoc[0], skb_mac_header(skb), skb->mac_len);
887 assoclen = skb->mac_len;
888
889 data = skb_mac_header(skb) + skb->mac_len;
890 datalen = skb_tail_pointer(skb) - data;
891
892 if (hdr->sec.level & IEEE802154_SCF_SECLEVEL_ENC) {
893 sg_set_buf(&assoc[1], data, 0);
894 } else {
895 sg_set_buf(&assoc[1], data, datalen - authlen);
896 assoclen += datalen - authlen;
897 data += datalen - authlen;
898 datalen = authlen;
899 }
900
901 sg_init_one(&src, data, datalen);
902
903 aead_request_set_callback(req, 0, NULL, NULL);
904 aead_request_set_assoc(req, assoc, assoclen);
905 aead_request_set_crypt(req, &src, &src, datalen, iv);
906
907 rc = crypto_aead_decrypt(req);
908
909 kfree(req);
910 skb_trim(skb, skb->len - authlen);
911
912 return rc;
913}
914
915static int
916llsec_do_decrypt(struct sk_buff *skb, const struct mac802154_llsec *sec,
917 const struct ieee802154_hdr *hdr,
918 struct mac802154_llsec_key *key, __le64 dev_addr)
919{
920 if (hdr->sec.level == IEEE802154_SCF_SECLEVEL_ENC)
921 return llsec_do_decrypt_unauth(skb, sec, hdr, key, dev_addr);
922 else
923 return llsec_do_decrypt_auth(skb, sec, hdr, key, dev_addr);
924}
925
926static int
927llsec_update_devkey_record(struct mac802154_llsec_device *dev,
928 const struct ieee802154_llsec_key_id *in_key)
929{
930 struct mac802154_llsec_device_key *devkey;
931
932 devkey = llsec_devkey_find(dev, in_key);
933
934 if (!devkey) {
935 struct mac802154_llsec_device_key *next;
936
937 next = kzalloc(sizeof(*devkey), GFP_ATOMIC);
938 if (!next)
939 return -ENOMEM;
940
941 next->devkey.key_id = *in_key;
942
943 spin_lock_bh(&dev->lock);
944
945 devkey = llsec_devkey_find(dev, in_key);
946 if (!devkey)
947 list_add_rcu(&next->devkey.list, &dev->dev.keys);
948 else
949 kfree(next);
950
951 spin_unlock_bh(&dev->lock);
952 }
953
954 return 0;
955}
956
957static int
958llsec_update_devkey_info(struct mac802154_llsec_device *dev,
959 const struct ieee802154_llsec_key_id *in_key,
960 u32 frame_counter)
961{
962 struct mac802154_llsec_device_key *devkey = NULL;
963
964 if (dev->dev.key_mode == IEEE802154_LLSEC_DEVKEY_RESTRICT) {
965 devkey = llsec_devkey_find(dev, in_key);
966 if (!devkey)
967 return -ENOENT;
968 }
969
970 if (dev->dev.key_mode == IEEE802154_LLSEC_DEVKEY_RECORD) {
971 int rc = llsec_update_devkey_record(dev, in_key);
972
973 if (rc < 0)
974 return rc;
975 }
976
977 spin_lock_bh(&dev->lock);
978
979 if ((!devkey && frame_counter < dev->dev.frame_counter) ||
980 (devkey && frame_counter < devkey->devkey.frame_counter)) {
981 spin_unlock_bh(&dev->lock);
982 return -EINVAL;
983 }
984
985 if (devkey)
986 devkey->devkey.frame_counter = frame_counter + 1;
987 else
988 dev->dev.frame_counter = frame_counter + 1;
989
990 spin_unlock_bh(&dev->lock);
991
992 return 0;
993}
994
995int mac802154_llsec_decrypt(struct mac802154_llsec *sec, struct sk_buff *skb)
996{
997 struct ieee802154_hdr hdr;
998 struct mac802154_llsec_key *key;
999 struct ieee802154_llsec_key_id key_id;
1000 struct mac802154_llsec_device *dev;
1001 struct ieee802154_llsec_seclevel seclevel;
1002 int err;
1003 __le64 dev_addr;
1004 u32 frame_ctr;
1005
1006 if (ieee802154_hdr_peek(skb, &hdr) < 0)
1007 return -EINVAL;
1008 if (!hdr.fc.security_enabled)
1009 return 0;
1010 if (hdr.fc.version == 0)
1011 return -EINVAL;
1012
1013 read_lock_bh(&sec->lock);
1014 if (!sec->params.enabled) {
1015 read_unlock_bh(&sec->lock);
1016 return -EINVAL;
1017 }
1018 read_unlock_bh(&sec->lock);
1019
1020 rcu_read_lock();
1021
1022 key = llsec_lookup_key(sec, &hdr, &hdr.source, &key_id);
1023 if (!key) {
1024 err = -ENOKEY;
1025 goto fail;
1026 }
1027
1028 dev = llsec_lookup_dev(sec, &hdr.source);
1029 if (!dev) {
1030 err = -EINVAL;
1031 goto fail_dev;
1032 }
1033
1034 if (llsec_lookup_seclevel(sec, hdr.fc.type, 0, &seclevel) < 0) {
1035 err = -EINVAL;
1036 goto fail_dev;
1037 }
1038
1039 if (!(seclevel.sec_levels & BIT(hdr.sec.level)) &&
1040 (hdr.sec.level == 0 && seclevel.device_override &&
1041 !dev->dev.seclevel_exempt)) {
1042 err = -EINVAL;
1043 goto fail_dev;
1044 }
1045
1046 frame_ctr = le32_to_cpu(hdr.sec.frame_counter);
1047
1048 if (frame_ctr == 0xffffffff) {
1049 err = -EOVERFLOW;
1050 goto fail_dev;
1051 }
1052
1053 err = llsec_update_devkey_info(dev, &key_id, frame_ctr);
1054 if (err)
1055 goto fail_dev;
1056
1057 dev_addr = dev->dev.hwaddr;
1058
1059 rcu_read_unlock();
1060
1061 err = llsec_do_decrypt(skb, sec, &hdr, key, dev_addr);
1062 llsec_key_put(key);
1063 return err;
1064
1065fail_dev:
1066 llsec_key_put(key);
1067fail:
1068 rcu_read_unlock();
1069 return err;
1070}
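
The 13-byte CCM* nonce that llsec_geniv() above packs into a 16-byte CTR IV can be modelled outside the kernel. Below is a minimal standalone C99 sketch of that layout; the name geniv_sketch and the sample address/counter/level values are illustrative assumptions, not part of this patch. Compiled with any C99 compiler it prints 01 01 23 45 67 89 ab cd ef 00 00 00 02 05 00 01: flags byte, big-endian device address, big-endian frame counter, security level, and the initial 2-byte block counter.

#include <stdint.h>
#include <stdio.h>

/* Mirrors llsec_geniv(): iv = flags | addr (BE) | frame counter (BE) |
 * security level | 2-byte CTR block counter starting at 1.
 */
static void geniv_sketch(uint8_t iv[16], uint64_t hwaddr,
			 uint32_t frame_counter, uint8_t level)
{
	int i;

	iv[0] = 1;				/* L' = L - 1 = 1 */
	for (i = 0; i < 8; i++)			/* device address, big-endian */
		iv[1 + i] = (uint8_t)(hwaddr >> (56 - 8 * i));
	for (i = 0; i < 4; i++)			/* frame counter, big-endian */
		iv[9 + i] = (uint8_t)(frame_counter >> (24 - 8 * i));
	iv[13] = level;				/* security level octet */
	iv[14] = 0;				/* block counter, starts at 1 */
	iv[15] = 1;
}

int main(void)
{
	uint8_t iv[16];
	int i;

	geniv_sketch(iv, 0x0123456789abcdefULL, 2, 5);
	for (i = 0; i < 16; i++)
		printf("%02x%c", iv[i], i == 15 ? '\n' : ' ');
	return 0;
}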
diff --git a/net/mac802154/llsec.h b/net/mac802154/llsec.h
new file mode 100644
index 000000000000..950578e1d7be
--- /dev/null
+++ b/net/mac802154/llsec.h
@@ -0,0 +1,108 @@
1/*
2 * Copyright (C) 2014 Fraunhofer ITWM
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * Written by:
14 * Phoebe Buckheister <phoebe.buckheister@itwm.fraunhofer.de>
15 */
16
17#ifndef MAC802154_LLSEC_H
18#define MAC802154_LLSEC_H
19
20#include <linux/slab.h>
21#include <linux/hashtable.h>
22#include <linux/crypto.h>
23#include <linux/kref.h>
24#include <linux/spinlock.h>
25#include <net/af_ieee802154.h>
26#include <net/ieee802154_netdev.h>
27
28struct mac802154_llsec_key {
29 struct ieee802154_llsec_key key;
30
31 /* one tfm for each authsize (4/8/16) */
32 struct crypto_aead *tfm[3];
33 struct crypto_blkcipher *tfm0;
34
35 struct kref ref;
36};
37
38struct mac802154_llsec_device_key {
39 struct ieee802154_llsec_device_key devkey;
40
41 struct rcu_head rcu;
42};
43
44struct mac802154_llsec_device {
45 struct ieee802154_llsec_device dev;
46
47 struct hlist_node bucket_s;
48 struct hlist_node bucket_hw;
49
50 /* protects dev.frame_counter and the elements of dev.keys */
51 spinlock_t lock;
52
53 struct rcu_head rcu;
54};
55
56struct mac802154_llsec_seclevel {
57 struct ieee802154_llsec_seclevel level;
58
59 struct rcu_head rcu;
60};
61
62struct mac802154_llsec {
63 struct ieee802154_llsec_params params;
64 struct ieee802154_llsec_table table;
65
66 DECLARE_HASHTABLE(devices_short, 6);
67 DECLARE_HASHTABLE(devices_hw, 6);
68
69 /* protects params, all other fields are fine with RCU */
70 rwlock_t lock;
71};
72
73void mac802154_llsec_init(struct mac802154_llsec *sec);
74void mac802154_llsec_destroy(struct mac802154_llsec *sec);
75
76int mac802154_llsec_get_params(struct mac802154_llsec *sec,
77 struct ieee802154_llsec_params *params);
78int mac802154_llsec_set_params(struct mac802154_llsec *sec,
79 const struct ieee802154_llsec_params *params,
80 int changed);
81
82int mac802154_llsec_key_add(struct mac802154_llsec *sec,
83 const struct ieee802154_llsec_key_id *id,
84 const struct ieee802154_llsec_key *key);
85int mac802154_llsec_key_del(struct mac802154_llsec *sec,
86 const struct ieee802154_llsec_key_id *key);
87
88int mac802154_llsec_dev_add(struct mac802154_llsec *sec,
89 const struct ieee802154_llsec_device *dev);
90int mac802154_llsec_dev_del(struct mac802154_llsec *sec,
91 __le64 device_addr);
92
93int mac802154_llsec_devkey_add(struct mac802154_llsec *sec,
94 __le64 dev_addr,
95 const struct ieee802154_llsec_device_key *key);
96int mac802154_llsec_devkey_del(struct mac802154_llsec *sec,
97 __le64 dev_addr,
98 const struct ieee802154_llsec_device_key *key);
99
100int mac802154_llsec_seclevel_add(struct mac802154_llsec *sec,
101 const struct ieee802154_llsec_seclevel *sl);
102int mac802154_llsec_seclevel_del(struct mac802154_llsec *sec,
103 const struct ieee802154_llsec_seclevel *sl);
104
105int mac802154_llsec_encrypt(struct mac802154_llsec *sec, struct sk_buff *skb);
106int mac802154_llsec_decrypt(struct mac802154_llsec *sec, struct sk_buff *skb);
107
108#endif /* MAC802154_LLSEC_H */
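
For illustration, the hash keys fed into the two device tables declared above are plain packings of the 802.15.4 addressing fields: short_addr << 16 | pan_id for devices_short, and the raw hardware address for devices_hw. A hypothetical host-byte-order check of the short-address packing, mirroring llsec_dev_hash_short() from llsec.c (the kernel version operates on __le16 raw values, so this is a sketch, not the in-tree code):

#include <assert.h>
#include <stdint.h>

/* Assumed standalone model of llsec_dev_hash_short(). */
static uint32_t hash_short_sketch(uint16_t short_addr, uint16_t pan_id)
{
	return ((uint32_t)short_addr << 16) | pan_id;
}

int main(void)
{
	/* device 0x1234 on PAN 0xbeef hashes under key 0x1234beef */
	assert(hash_short_sketch(0x1234, 0xbeef) == 0x1234beefu);
	return 0;
}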
diff --git a/net/mac802154/mac802154.h b/net/mac802154/mac802154.h
index 28ef59c566e6..762a6f849c6b 100644
--- a/net/mac802154/mac802154.h
+++ b/net/mac802154/mac802154.h
@@ -23,8 +23,12 @@
 #ifndef MAC802154_H
 #define MAC802154_H
 
+#include <linux/mutex.h>
+#include <net/mac802154.h>
 #include <net/ieee802154_netdev.h>
 
+#include "llsec.h"
+
 /* mac802154 device private data */
 struct mac802154_priv {
 	struct ieee802154_dev hw;
@@ -90,6 +94,13 @@ struct mac802154_sub_if_data {
 	u8 bsn;
 	/* MAC DSN field */
 	u8 dsn;
+
+	/* protects sec from concurrent access by netlink. access by
+	 * encrypt/decrypt/header_create safe without additional protection.
+	 */
+	struct mutex sec_mtx;
+
+	struct mac802154_llsec sec;
 };
 
 #define mac802154_to_priv(_hw) container_of(_hw, struct mac802154_priv, hw)
@@ -125,4 +136,37 @@ int mac802154_set_mac_params(struct net_device *dev,
 void mac802154_get_mac_params(struct net_device *dev,
 			      struct ieee802154_mac_params *params);
 
+int mac802154_get_params(struct net_device *dev,
+			 struct ieee802154_llsec_params *params);
+int mac802154_set_params(struct net_device *dev,
+			 const struct ieee802154_llsec_params *params,
+			 int changed);
+
+int mac802154_add_key(struct net_device *dev,
+		      const struct ieee802154_llsec_key_id *id,
+		      const struct ieee802154_llsec_key *key);
+int mac802154_del_key(struct net_device *dev,
+		      const struct ieee802154_llsec_key_id *id);
+
+int mac802154_add_dev(struct net_device *dev,
+		      const struct ieee802154_llsec_device *llsec_dev);
+int mac802154_del_dev(struct net_device *dev, __le64 dev_addr);
+
+int mac802154_add_devkey(struct net_device *dev,
+			 __le64 device_addr,
+			 const struct ieee802154_llsec_device_key *key);
+int mac802154_del_devkey(struct net_device *dev,
+			 __le64 device_addr,
+			 const struct ieee802154_llsec_device_key *key);
+
+int mac802154_add_seclevel(struct net_device *dev,
+			   const struct ieee802154_llsec_seclevel *sl);
+int mac802154_del_seclevel(struct net_device *dev,
+			   const struct ieee802154_llsec_seclevel *sl);
+
+void mac802154_lock_table(struct net_device *dev);
+void mac802154_get_table(struct net_device *dev,
+			 struct ieee802154_llsec_table **t);
+void mac802154_unlock_table(struct net_device *dev);
+
 #endif /* MAC802154_H */
diff --git a/net/mac802154/mac_cmd.c b/net/mac802154/mac_cmd.c
index d40c0928bc62..bf809131eef7 100644
--- a/net/mac802154/mac_cmd.c
+++ b/net/mac802154/mac_cmd.c
@@ -40,6 +40,9 @@ static int mac802154_mlme_start_req(struct net_device *dev,
 				    u8 pan_coord, u8 blx,
 				    u8 coord_realign)
 {
+	struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+	int rc = 0;
+
 	BUG_ON(addr->mode != IEEE802154_ADDR_SHORT);
 
 	mac802154_dev_set_pan_id(dev, addr->pan_id);
@@ -47,12 +50,31 @@ static int mac802154_mlme_start_req(struct net_device *dev,
 	mac802154_dev_set_ieee_addr(dev);
 	mac802154_dev_set_page_channel(dev, page, channel);
 
+	if (ops->llsec) {
+		struct ieee802154_llsec_params params;
+		int changed = 0;
+
+		params.coord_shortaddr = addr->short_addr;
+		changed |= IEEE802154_LLSEC_PARAM_COORD_SHORTADDR;
+
+		params.pan_id = addr->pan_id;
+		changed |= IEEE802154_LLSEC_PARAM_PAN_ID;
+
+		params.hwaddr = ieee802154_devaddr_from_raw(dev->dev_addr);
+		changed |= IEEE802154_LLSEC_PARAM_HWADDR;
+
+		params.coord_hwaddr = params.hwaddr;
+		changed |= IEEE802154_LLSEC_PARAM_COORD_HWADDR;
+
+		rc = ops->llsec->set_params(dev, &params, changed);
+	}
+
 	/* FIXME: add validation for unused parameters to be sane
 	 * for SoftMAC
 	 */
 	ieee802154_nl_start_confirm(dev, IEEE802154_SUCCESS);
 
-	return 0;
+	return rc;
 }
 
 static struct wpan_phy *mac802154_get_phy(const struct net_device *dev)
@@ -64,6 +86,22 @@ static struct wpan_phy *mac802154_get_phy(const struct net_device *dev)
 	return to_phy(get_device(&priv->hw->phy->dev));
 }
 
+static struct ieee802154_llsec_ops mac802154_llsec_ops = {
+	.get_params = mac802154_get_params,
+	.set_params = mac802154_set_params,
+	.add_key = mac802154_add_key,
+	.del_key = mac802154_del_key,
+	.add_dev = mac802154_add_dev,
+	.del_dev = mac802154_del_dev,
+	.add_devkey = mac802154_add_devkey,
+	.del_devkey = mac802154_del_devkey,
+	.add_seclevel = mac802154_add_seclevel,
+	.del_seclevel = mac802154_del_seclevel,
+	.lock_table = mac802154_lock_table,
+	.get_table = mac802154_get_table,
+	.unlock_table = mac802154_unlock_table,
+};
+
 struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced = {
 	.get_phy = mac802154_get_phy,
 };
@@ -75,6 +113,8 @@ struct ieee802154_mlme_ops mac802154_mlme_wpan = {
 	.get_short_addr = mac802154_dev_get_short_addr,
 	.get_dsn = mac802154_dev_get_dsn,
 
+	.llsec = &mac802154_llsec_ops,
+
 	.set_mac_params = mac802154_set_mac_params,
 	.get_mac_params = mac802154_get_mac_params,
 };
diff --git a/net/mac802154/mib.c b/net/mac802154/mib.c
index f0991f2344d4..15aa2f2b03a7 100644
--- a/net/mac802154/mib.c
+++ b/net/mac802154/mib.c
@@ -213,3 +213,190 @@ void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan)
 	} else
 		mutex_unlock(&priv->hw->phy->pib_lock);
 }
+
+
+int mac802154_get_params(struct net_device *dev,
+			 struct ieee802154_llsec_params *params)
+{
+	struct mac802154_sub_if_data *priv = netdev_priv(dev);
+	int res;
+
+	BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+	mutex_lock(&priv->sec_mtx);
+	res = mac802154_llsec_get_params(&priv->sec, params);
+	mutex_unlock(&priv->sec_mtx);
+
+	return res;
+}
+
+int mac802154_set_params(struct net_device *dev,
+			 const struct ieee802154_llsec_params *params,
+			 int changed)
+{
+	struct mac802154_sub_if_data *priv = netdev_priv(dev);
+	int res;
+
+	BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+	mutex_lock(&priv->sec_mtx);
+	res = mac802154_llsec_set_params(&priv->sec, params, changed);
+	mutex_unlock(&priv->sec_mtx);
+
+	return res;
+}
+
+
+int mac802154_add_key(struct net_device *dev,
+		      const struct ieee802154_llsec_key_id *id,
+		      const struct ieee802154_llsec_key *key)
+{
+	struct mac802154_sub_if_data *priv = netdev_priv(dev);
+	int res;
+
+	BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+	mutex_lock(&priv->sec_mtx);
+	res = mac802154_llsec_key_add(&priv->sec, id, key);
+	mutex_unlock(&priv->sec_mtx);
+
+	return res;
+}
+
+int mac802154_del_key(struct net_device *dev,
+		      const struct ieee802154_llsec_key_id *id)
+{
+	struct mac802154_sub_if_data *priv = netdev_priv(dev);
+	int res;
+
+	BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+	mutex_lock(&priv->sec_mtx);
+	res = mac802154_llsec_key_del(&priv->sec, id);
+	mutex_unlock(&priv->sec_mtx);
+
+	return res;
+}
+
+
+int mac802154_add_dev(struct net_device *dev,
+		      const struct ieee802154_llsec_device *llsec_dev)
+{
+	struct mac802154_sub_if_data *priv = netdev_priv(dev);
+	int res;
+
+	BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+	mutex_lock(&priv->sec_mtx);
+	res = mac802154_llsec_dev_add(&priv->sec, llsec_dev);
+	mutex_unlock(&priv->sec_mtx);
+
+	return res;
+}
+
+int mac802154_del_dev(struct net_device *dev, __le64 dev_addr)
+{
+	struct mac802154_sub_if_data *priv = netdev_priv(dev);
+	int res;
+
+	BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+	mutex_lock(&priv->sec_mtx);
+	res = mac802154_llsec_dev_del(&priv->sec, dev_addr);
+	mutex_unlock(&priv->sec_mtx);
+
+	return res;
+}
+
+
+int mac802154_add_devkey(struct net_device *dev,
+			 __le64 device_addr,
+			 const struct ieee802154_llsec_device_key *key)
+{
+	struct mac802154_sub_if_data *priv = netdev_priv(dev);
+	int res;
+
+	BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+	mutex_lock(&priv->sec_mtx);
+	res = mac802154_llsec_devkey_add(&priv->sec, device_addr, key);
+	mutex_unlock(&priv->sec_mtx);
+
+	return res;
+}
+
+int mac802154_del_devkey(struct net_device *dev,
+			 __le64 device_addr,
+			 const struct ieee802154_llsec_device_key *key)
+{
+	struct mac802154_sub_if_data *priv = netdev_priv(dev);
+	int res;
+
+	BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+	mutex_lock(&priv->sec_mtx);
+	res = mac802154_llsec_devkey_del(&priv->sec, device_addr, key);
+	mutex_unlock(&priv->sec_mtx);
+
+	return res;
+}
+
+
+int mac802154_add_seclevel(struct net_device *dev,
+			   const struct ieee802154_llsec_seclevel *sl)
+{
+	struct mac802154_sub_if_data *priv = netdev_priv(dev);
+	int res;
+
+	BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+	mutex_lock(&priv->sec_mtx);
+	res = mac802154_llsec_seclevel_add(&priv->sec, sl);
+	mutex_unlock(&priv->sec_mtx);
+
+	return res;
+}
+
+int mac802154_del_seclevel(struct net_device *dev,
+			   const struct ieee802154_llsec_seclevel *sl)
+{
+	struct mac802154_sub_if_data *priv = netdev_priv(dev);
+	int res;
+
+	BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+	mutex_lock(&priv->sec_mtx);
+	res = mac802154_llsec_seclevel_del(&priv->sec, sl);
+	mutex_unlock(&priv->sec_mtx);
+
+	return res;
+}
+
+
+void mac802154_lock_table(struct net_device *dev)
+{
+	struct mac802154_sub_if_data *priv = netdev_priv(dev);
+
+	BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+	mutex_lock(&priv->sec_mtx);
+}
+
+void mac802154_get_table(struct net_device *dev,
+			 struct ieee802154_llsec_table **t)
+{
+	struct mac802154_sub_if_data *priv = netdev_priv(dev);
+
+	BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+	*t = &priv->sec.table;
+}
+
+void mac802154_unlock_table(struct net_device *dev)
+{
+	struct mac802154_sub_if_data *priv = netdev_priv(dev);
+
+	BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+	mutex_unlock(&priv->sec_mtx);
+}
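
The lock_table/get_table/unlock_table helpers added above exist so that callers outside this file (the ieee802154 netlink code, via the llsec ops table) can walk the tables under priv->sec_mtx. A hedged sketch of the expected calling pattern follows; example_count_llsec_keys is a made-up name, and the fragment assumes the mac802154 internal headers rather than being a standalone program:

/* Hypothetical consumer of the table-locking helpers above. */
static int example_count_llsec_keys(struct net_device *dev)
{
	struct ieee802154_llsec_table *table;
	struct ieee802154_llsec_key_entry *pos;
	int count = 0;

	mac802154_lock_table(dev);		/* takes priv->sec_mtx */
	mac802154_get_table(dev, &table);

	list_for_each_entry(pos, &table->keys, list)	/* stable under sec_mtx */
		count++;

	mac802154_unlock_table(dev);		/* drops priv->sec_mtx */
	return count;
}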
diff --git a/net/mac802154/monitor.c b/net/mac802154/monitor.c
index 434a26f76a80..a68230e2b25f 100644
--- a/net/mac802154/monitor.c
+++ b/net/mac802154/monitor.c
@@ -70,7 +70,8 @@ void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(sdata, &priv->slaves, list) {
-		if (sdata->type != IEEE802154_DEV_MONITOR)
+		if (sdata->type != IEEE802154_DEV_MONITOR ||
+		    !netif_running(sdata->dev))
 			continue;
 
 		skb2 = skb_clone(skb, GFP_ATOMIC);
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index 03855b0677cc..7f820a108a9c 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -59,27 +59,28 @@ mac802154_subif_rx(struct ieee802154_dev *hw, struct sk_buff *skb, u8 lqi)
 	skb->protocol = htons(ETH_P_IEEE802154);
 	skb_reset_mac_header(skb);
 
-	BUILD_BUG_ON(sizeof(struct ieee802154_mac_cb) > sizeof(skb->cb));
-
 	if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
 		u16 crc;
 
 		if (skb->len < 2) {
 			pr_debug("got invalid frame\n");
-			goto out;
+			goto fail;
 		}
 		crc = crc_ccitt(0, skb->data, skb->len);
 		if (crc) {
 			pr_debug("CRC mismatch\n");
-			goto out;
+			goto fail;
 		}
 		skb_trim(skb, skb->len - 2); /* CRC */
 	}
 
 	mac802154_monitors_rx(priv, skb);
 	mac802154_wpans_rx(priv, skb);
-out:
-	dev_kfree_skb(skb);
+
+	return;
+
+fail:
+	kfree_skb(skb);
 }
 
 static void mac802154_rx_worker(struct work_struct *work)
diff --git a/net/mac802154/wpan.c b/net/mac802154/wpan.c
index 1df7a6a57386..3c3069fd6971 100644
--- a/net/mac802154/wpan.c
+++ b/net/mac802154/wpan.c
@@ -35,6 +35,28 @@
 
 #include "mac802154.h"
 
+static int mac802154_wpan_update_llsec(struct net_device *dev)
+{
+	struct mac802154_sub_if_data *priv = netdev_priv(dev);
+	struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+	int rc = 0;
+
+	if (ops->llsec) {
+		struct ieee802154_llsec_params params;
+		int changed = 0;
+
+		params.pan_id = priv->pan_id;
+		changed |= IEEE802154_LLSEC_PARAM_PAN_ID;
+
+		params.hwaddr = priv->extended_addr;
+		changed |= IEEE802154_LLSEC_PARAM_HWADDR;
+
+		rc = ops->llsec->set_params(dev, &params, changed);
+	}
+
+	return rc;
+}
+
 static int
 mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
@@ -81,7 +103,7 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		priv->pan_id = cpu_to_le16(sa->addr.pan_id);
 		priv->short_addr = cpu_to_le16(sa->addr.short_addr);
 
-		err = 0;
+		err = mac802154_wpan_update_llsec(dev);
 		break;
 	}
 
@@ -99,7 +121,7 @@ static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
 	/* FIXME: validate addr */
 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 	mac802154_dev_set_ieee_addr(dev);
-	return 0;
+	return mac802154_wpan_update_llsec(dev);
 }
 
 int mac802154_set_mac_params(struct net_device *dev,
@@ -124,7 +146,7 @@ void mac802154_get_mac_params(struct net_device *dev,
 	mutex_unlock(&priv->hw->slaves_mtx);
 }
 
-int mac802154_wpan_open(struct net_device *dev)
+static int mac802154_wpan_open(struct net_device *dev)
 {
 	int rc;
 	struct mac802154_sub_if_data *priv = netdev_priv(dev);
@@ -183,6 +205,38 @@ out:
 	return rc;
 }
 
+static int mac802154_set_header_security(struct mac802154_sub_if_data *priv,
+					 struct ieee802154_hdr *hdr,
+					 const struct ieee802154_mac_cb *cb)
+{
+	struct ieee802154_llsec_params params;
+	u8 level;
+
+	mac802154_llsec_get_params(&priv->sec, &params);
+
+	if (!params.enabled && cb->secen_override && cb->secen)
+		return -EINVAL;
+	if (!params.enabled ||
+	    (cb->secen_override && !cb->secen) ||
+	    !params.out_level)
+		return 0;
+	if (cb->seclevel_override && !cb->seclevel)
+		return -EINVAL;
+
+	level = cb->seclevel_override ? cb->seclevel : params.out_level;
+
+	hdr->fc.security_enabled = 1;
+	hdr->sec.level = level;
+	hdr->sec.key_id_mode = params.out_key.mode;
+	if (params.out_key.mode == IEEE802154_SCF_KEY_SHORT_INDEX)
+		hdr->sec.short_src = params.out_key.short_source;
+	else if (params.out_key.mode == IEEE802154_SCF_KEY_HW_INDEX)
+		hdr->sec.extended_src = params.out_key.extended_source;
+	hdr->sec.key_id = params.out_key.id;
+
+	return 0;
+}
+
 static int mac802154_header_create(struct sk_buff *skb,
 				   struct net_device *dev,
 				   unsigned short type,
@@ -192,15 +246,20 @@ static int mac802154_header_create(struct sk_buff *skb,
 {
 	struct ieee802154_hdr hdr;
 	struct mac802154_sub_if_data *priv = netdev_priv(dev);
+	struct ieee802154_mac_cb *cb = mac_cb(skb);
 	int hlen;
 
 	if (!daddr)
 		return -EINVAL;
 
 	memset(&hdr.fc, 0, sizeof(hdr.fc));
-	hdr.fc.type = mac_cb_type(skb);
-	hdr.fc.security_enabled = mac_cb_is_secen(skb);
-	hdr.fc.ack_request = mac_cb_is_ackreq(skb);
+	hdr.fc.type = cb->type;
+	hdr.fc.security_enabled = cb->secen;
+	hdr.fc.ack_request = cb->ackreq;
+	hdr.seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
+
+	if (mac802154_set_header_security(priv, &hdr, cb) < 0)
+		return -EINVAL;
 
 	if (!saddr) {
 		spin_lock_bh(&priv->mib_lock);
@@ -231,7 +290,7 @@ static int mac802154_header_create(struct sk_buff *skb,
 	skb_reset_mac_header(skb);
 	skb->mac_len = hlen;
 
-	if (hlen + len + 2 > dev->mtu)
+	if (len > ieee802154_max_payload(&hdr))
 		return -EMSGSIZE;
 
 	return hlen;
@@ -257,6 +316,7 @@ mac802154_wpan_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mac802154_sub_if_data *priv;
 	u8 chan, page;
+	int rc;
 
 	priv = netdev_priv(dev);
 
@@ -272,6 +332,13 @@ mac802154_wpan_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_OK;
 	}
 
+	rc = mac802154_llsec_encrypt(&priv->sec, skb);
+	if (rc) {
+		pr_warn("encryption failed: %i\n", rc);
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
 	skb->skb_iif = dev->ifindex;
 	dev->stats.tx_packets++;
 	dev->stats.tx_bytes += skb->len;
@@ -292,6 +359,15 @@ static const struct net_device_ops mac802154_wpan_ops = {
 	.ndo_set_mac_address = mac802154_wpan_mac_addr,
 };
 
+static void mac802154_wpan_free(struct net_device *dev)
+{
+	struct mac802154_sub_if_data *priv = netdev_priv(dev);
+
+	mac802154_llsec_destroy(&priv->sec);
+
+	free_netdev(dev);
+}
+
 void mac802154_wpan_setup(struct net_device *dev)
 {
 	struct mac802154_sub_if_data *priv;
@@ -301,14 +377,14 @@ void mac802154_wpan_setup(struct net_device *dev)
 
 	dev->hard_header_len = MAC802154_FRAME_HARD_HEADER_LEN;
 	dev->header_ops = &mac802154_header_ops;
-	dev->needed_tailroom = 2; /* FCS */
+	dev->needed_tailroom = 2 + 16; /* FCS + MIC */
 	dev->mtu = IEEE802154_MTU;
 	dev->tx_queue_len = 300;
 	dev->type = ARPHRD_IEEE802154;
 	dev->flags = IFF_NOARP | IFF_BROADCAST;
 	dev->watchdog_timeo = 0;
 
-	dev->destructor = free_netdev;
+	dev->destructor = mac802154_wpan_free;
 	dev->netdev_ops = &mac802154_wpan_ops;
 	dev->ml_priv = &mac802154_mlme_wpan;
 
@@ -319,6 +395,7 @@ void mac802154_wpan_setup(struct net_device *dev)
 	priv->page = 0;
 
 	spin_lock_init(&priv->mib_lock);
+	mutex_init(&priv->sec_mtx);
 
 	get_random_bytes(&priv->bsn, 1);
 	get_random_bytes(&priv->dsn, 1);
@@ -331,6 +408,8 @@ void mac802154_wpan_setup(struct net_device *dev)
 
 	priv->pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
 	priv->short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
+
+	mac802154_llsec_init(&priv->sec);
 }
 
 static int mac802154_process_data(struct net_device *dev, struct sk_buff *skb)
@@ -339,9 +418,11 @@ static int mac802154_process_data(struct net_device *dev, struct sk_buff *skb)
 }
 
 static int
-mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb)
+mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb,
+		      const struct ieee802154_hdr *hdr)
 {
 	__le16 span, sshort;
+	int rc;
 
 	pr_debug("getting packet via slave interface %s\n", sdata->dev->name);
 
@@ -388,15 +469,22 @@ mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb)
 
 	skb->dev = sdata->dev;
 
+	rc = mac802154_llsec_decrypt(&sdata->sec, skb);
+	if (rc) {
+		pr_debug("decryption failed: %i\n", rc);
+		kfree_skb(skb);
+		return NET_RX_DROP;
+	}
+
 	sdata->dev->stats.rx_packets++;
 	sdata->dev->stats.rx_bytes += skb->len;
 
-	switch (mac_cb_type(skb)) {
+	switch (mac_cb(skb)->type) {
 	case IEEE802154_FC_TYPE_DATA:
 		return mac802154_process_data(sdata->dev, skb);
 	default:
 		pr_warn("ieee802154: bad frame received (type = %d)\n",
-			mac_cb_type(skb));
+			mac_cb(skb)->type);
 		kfree_skb(skb);
 		return NET_RX_DROP;
 	}
@@ -419,62 +507,58 @@ static void mac802154_print_addr(const char *name,
 	}
 }
 
-static int mac802154_parse_frame_start(struct sk_buff *skb)
+static int mac802154_parse_frame_start(struct sk_buff *skb,
+				       struct ieee802154_hdr *hdr)
 {
 	int hlen;
-	struct ieee802154_hdr hdr;
+	struct ieee802154_mac_cb *cb = mac_cb_init(skb);
 
-	hlen = ieee802154_hdr_pull(skb, &hdr);
+	hlen = ieee802154_hdr_pull(skb, hdr);
 	if (hlen < 0)
 		return -EINVAL;
 
 	skb->mac_len = hlen;
 
-	pr_debug("fc: %04x dsn: %02x\n", le16_to_cpup((__le16 *)&hdr.fc),
-		 hdr.seq);
-
-	mac_cb(skb)->flags = hdr.fc.type;
+	pr_debug("fc: %04x dsn: %02x\n", le16_to_cpup((__le16 *)&hdr->fc),
+		 hdr->seq);
 
-	if (hdr.fc.ack_request)
-		mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
-	if (hdr.fc.security_enabled)
-		mac_cb(skb)->flags |= MAC_CB_FLAG_SECEN;
+	cb->type = hdr->fc.type;
+	cb->ackreq = hdr->fc.ack_request;
+	cb->secen = hdr->fc.security_enabled;
 
-	mac802154_print_addr("destination", &hdr.dest);
-	mac802154_print_addr("source", &hdr.source);
+	mac802154_print_addr("destination", &hdr->dest);
+	mac802154_print_addr("source", &hdr->source);
 
-	mac_cb(skb)->source = hdr.source;
-	mac_cb(skb)->dest = hdr.dest;
+	cb->source = hdr->source;
+	cb->dest = hdr->dest;
 
-	if (hdr.fc.security_enabled) {
+	if (hdr->fc.security_enabled) {
 		u64 key;
 
-		pr_debug("seclevel %i\n", hdr.sec.level);
+		pr_debug("seclevel %i\n", hdr->sec.level);
 
-		switch (hdr.sec.key_id_mode) {
+		switch (hdr->sec.key_id_mode) {
 		case IEEE802154_SCF_KEY_IMPLICIT:
 			pr_debug("implicit key\n");
 			break;
 
 		case IEEE802154_SCF_KEY_INDEX:
-			pr_debug("key %02x\n", hdr.sec.key_id);
+			pr_debug("key %02x\n", hdr->sec.key_id);
 			break;
 
 		case IEEE802154_SCF_KEY_SHORT_INDEX:
 			pr_debug("key %04x:%04x %02x\n",
-				 le32_to_cpu(hdr.sec.short_src) >> 16,
-				 le32_to_cpu(hdr.sec.short_src) & 0xffff,
-				 hdr.sec.key_id);
+				 le32_to_cpu(hdr->sec.short_src) >> 16,
+				 le32_to_cpu(hdr->sec.short_src) & 0xffff,
+				 hdr->sec.key_id);
 			break;
 
 		case IEEE802154_SCF_KEY_HW_INDEX:
-			key = swab64((__force u64) hdr.sec.extended_src);
+			key = swab64((__force u64) hdr->sec.extended_src);
 			pr_debug("key source %8phC %02x\n", &key,
-				 hdr.sec.key_id);
+				 hdr->sec.key_id);
 			break;
 		}
-
-		return -EINVAL;
 	}
 
 	return 0;
@@ -483,10 +567,10 @@ static int mac802154_parse_frame_start(struct sk_buff *skb)
 void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb)
 {
 	int ret;
-	struct sk_buff *sskb;
 	struct mac802154_sub_if_data *sdata;
+	struct ieee802154_hdr hdr;
 
-	ret = mac802154_parse_frame_start(skb);
+	ret = mac802154_parse_frame_start(skb, &hdr);
 	if (ret) {
 		pr_debug("got invalid frame\n");
 		return;
@@ -494,12 +578,16 @@ void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(sdata, &priv->slaves, list) {
-		if (sdata->type != IEEE802154_DEV_WPAN)
+		if (sdata->type != IEEE802154_DEV_WPAN ||
+		    !netif_running(sdata->dev))
 			continue;
 
-		sskb = skb_clone(skb, GFP_ATOMIC);
-		if (sskb)
-			mac802154_subif_frame(sdata, sskb);
+		mac802154_subif_frame(sdata, skb, &hdr);
+		skb = NULL;
+		break;
 	}
 	rcu_read_unlock();
+
+	if (skb)
+		kfree_skb(skb);
 }
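
The interplay in mac802154_set_header_security() above between the per-frame override flags (cb->secen_override, cb->seclevel_override) and the PIB defaults (params.enabled, params.out_level) condenses to a small decision function. The following userspace model is a sketch only; decide() and its -1/0/1 result encoding are illustrative, not kernel API:

#include <stdio.h>

/* Returns -1 to reject the frame, 0 to send it unsecured, 1 to secure it
 * with *level, mirroring the checks in mac802154_set_header_security().
 */
static int decide(int enabled, int secen_override, int secen,
		  int out_level, int seclevel_override, int seclevel,
		  int *level)
{
	if (!enabled && secen_override && secen)
		return -1;		/* security requested but disabled */
	if (!enabled || (secen_override && !secen) || !out_level)
		return 0;		/* nothing to do, send in the clear */
	if (seclevel_override && !seclevel)
		return -1;		/* an explicit level of 0 is rejected */
	*level = seclevel_override ? seclevel : out_level;
	return 1;
}

int main(void)
{
	int level = 0;

	/* socket forces security on; PIB default out_level is 5 */
	printf("%d level=%d\n", decide(1, 1, 1, 5, 0, 0, &level), level);
	return 0;
}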
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index 851cd880b0c0..6b38d083e1c9 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -33,6 +33,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
 				  SKB_GSO_DODGY |
 				  SKB_GSO_TCP_ECN |
 				  SKB_GSO_GRE |
+				  SKB_GSO_GRE_CSUM |
 				  SKB_GSO_IPIP |
 				  SKB_GSO_MPLS)))
 		goto out;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 117208321f16..ec8114fae50b 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -271,10 +271,7 @@ ip_set_free(void *members)
 {
 	pr_debug("%p: free with %s\n", members,
 		 is_vmalloc_addr(members) ? "vfree" : "kfree");
-	if (is_vmalloc_addr(members))
-		vfree(members);
-	else
-		kfree(members);
+	kvfree(members);
 }
 EXPORT_SYMBOL_GPL(ip_set_free);
 
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 3d2d2c8108ca..e6836755c45d 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -97,7 +97,7 @@ const char *ip_vs_proto_name(unsigned int proto)
 		return "ICMPv6";
 #endif
 	default:
-		sprintf(buf, "IP_%d", proto);
+		sprintf(buf, "IP_%u", proto);
 		return buf;
 	}
 }
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index c47444e4cf8c..73ba1cc7a88d 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -562,7 +562,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	ip_send_check(iph);
 
 	/* Another hack: avoid icmp_send in ip_fragment */
-	skb->local_df = 1;
+	skb->ignore_df = 1;
 
 	ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
 	rcu_read_unlock();
@@ -590,7 +590,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 		goto tx_error;
 
 	/* Another hack: avoid icmp_send in ip_fragment */
-	skb->local_df = 1;
+	skb->ignore_df = 1;
 
 	ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0);
 	rcu_read_unlock();
@@ -684,7 +684,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	   MTU problem. */
 
 	/* Another hack: avoid icmp_send in ip_fragment */
-	skb->local_df = 1;
+	skb->ignore_df = 1;
 
 	rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
 	rcu_read_unlock();
@@ -774,7 +774,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
774 MTU problem. */ 774 MTU problem. */
775 775
776 /* Another hack: avoid icmp_send in ip_fragment */ 776 /* Another hack: avoid icmp_send in ip_fragment */
777 skb->local_df = 1; 777 skb->ignore_df = 1;
778 778
779 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local); 779 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);
780 rcu_read_unlock(); 780 rcu_read_unlock();
@@ -883,10 +883,10 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
883 iph->daddr = cp->daddr.ip; 883 iph->daddr = cp->daddr.ip;
884 iph->saddr = saddr; 884 iph->saddr = saddr;
885 iph->ttl = old_iph->ttl; 885 iph->ttl = old_iph->ttl;
886 ip_select_ident(skb, &rt->dst, NULL); 886 ip_select_ident(skb, NULL);
887 887
888 /* Another hack: avoid icmp_send in ip_fragment */ 888 /* Another hack: avoid icmp_send in ip_fragment */
889 skb->local_df = 1; 889 skb->ignore_df = 1;
890 890
891 ret = ip_vs_tunnel_xmit_prepare(skb, cp); 891 ret = ip_vs_tunnel_xmit_prepare(skb, cp);
892 if (ret == NF_ACCEPT) 892 if (ret == NF_ACCEPT)
@@ -974,7 +974,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
974 iph->hop_limit = old_iph->hop_limit; 974 iph->hop_limit = old_iph->hop_limit;
975 975
976 /* Another hack: avoid icmp_send in ip_fragment */ 976 /* Another hack: avoid icmp_send in ip_fragment */
977 skb->local_df = 1; 977 skb->ignore_df = 1;
978 978
979 ret = ip_vs_tunnel_xmit_prepare(skb, cp); 979 ret = ip_vs_tunnel_xmit_prepare(skb, cp);
980 if (ret == NF_ACCEPT) 980 if (ret == NF_ACCEPT)
@@ -1023,7 +1023,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1023 ip_send_check(ip_hdr(skb)); 1023 ip_send_check(ip_hdr(skb));
1024 1024
1025 /* Another hack: avoid icmp_send in ip_fragment */ 1025 /* Another hack: avoid icmp_send in ip_fragment */
1026 skb->local_df = 1; 1026 skb->ignore_df = 1;
1027 1027
1028 ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0); 1028 ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
1029 rcu_read_unlock(); 1029 rcu_read_unlock();
@@ -1060,7 +1060,7 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1060 } 1060 }
1061 1061
1062 /* Another hack: avoid icmp_send in ip_fragment */ 1062 /* Another hack: avoid icmp_send in ip_fragment */
1063 skb->local_df = 1; 1063 skb->ignore_df = 1;
1064 1064
1065 ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0); 1065 ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0);
1066 rcu_read_unlock(); 1066 rcu_read_unlock();
@@ -1157,7 +1157,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1157 ip_vs_nat_icmp(skb, pp, cp, 0); 1157 ip_vs_nat_icmp(skb, pp, cp, 0);
1158 1158
1159 /* Another hack: avoid icmp_send in ip_fragment */ 1159 /* Another hack: avoid icmp_send in ip_fragment */
1160 skb->local_df = 1; 1160 skb->ignore_df = 1;
1161 1161
1162 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local); 1162 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
1163 rcu_read_unlock(); 1163 rcu_read_unlock();
@@ -1249,7 +1249,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1249 ip_vs_nat_icmp_v6(skb, pp, cp, 0); 1249 ip_vs_nat_icmp_v6(skb, pp, cp, 0);
1250 1250
1251 /* Another hack: avoid icmp_send in ip_fragment */ 1251 /* Another hack: avoid icmp_send in ip_fragment */
1252 skb->local_df = 1; 1252 skb->ignore_df = 1;
1253 1253
1254 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local); 1254 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);
1255 rcu_read_unlock(); 1255 rcu_read_unlock();
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 52ca952b802c..09096a670c45 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -358,6 +358,19 @@ out:
 	rcu_read_unlock();
 }
 
+struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
+{
+	struct nf_conn_nat *nat = nfct_nat(ct);
+	if (nat)
+		return nat;
+
+	if (!nf_ct_is_confirmed(ct))
+		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
+
+	return nat;
+}
+EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add);
+
 unsigned int
 nf_nat_setup_info(struct nf_conn *ct,
 		  const struct nf_nat_range *range,
@@ -368,14 +381,9 @@ nf_nat_setup_info(struct nf_conn *ct,
 	struct nf_conn_nat *nat;
 
 	/* nat helper or nfctnetlink also setup binding */
-	nat = nfct_nat(ct);
-	if (!nat) {
-		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
-		if (nat == NULL) {
-			pr_debug("failed to add NAT extension\n");
-			return NF_ACCEPT;
-		}
-	}
+	nat = nf_ct_nat_ext_add(ct);
+	if (nat == NULL)
+		return NF_ACCEPT;
 
 	NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC ||
 		     maniptype == NF_NAT_MANIP_DST);
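nf_ct_nat_ext_add() above is a get-or-create helper with one invariant: a NAT extension may only be attached while the conntrack entry is still unconfirmed, i.e. before it is published to other CPUs. A userspace sketch of that guard (types and names are illustrative):

#include <stdbool.h>
#include <stdlib.h>

struct ext { int data; };

struct conn {
	bool confirmed;		/* published and visible to others? */
	struct ext *nat;
};

/* Return the existing extension, or allocate one only while the object
 * is still private to its creator; NULL means no extension available. */
static struct ext *nat_ext_add(struct conn *ct)
{
	if (ct->nat)
		return ct->nat;
	if (!ct->confirmed)
		ct->nat = calloc(1, sizeof(*ct->nat));
	return ct->nat;
}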
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 3fd159db9f06..624e083125b9 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -88,6 +88,45 @@ nf_tables_afinfo_lookup(struct net *net, int family, bool autoload)
 	return ERR_PTR(-EAFNOSUPPORT);
 }
 
+static void nft_ctx_init(struct nft_ctx *ctx,
+			 const struct sk_buff *skb,
+			 const struct nlmsghdr *nlh,
+			 struct nft_af_info *afi,
+			 struct nft_table *table,
+			 struct nft_chain *chain,
+			 const struct nlattr * const *nla)
+{
+	ctx->net = sock_net(skb->sk);
+	ctx->afi = afi;
+	ctx->table = table;
+	ctx->chain = chain;
+	ctx->nla = nla;
+	ctx->portid = NETLINK_CB(skb).portid;
+	ctx->report = nlmsg_report(nlh);
+	ctx->seq = nlh->nlmsg_seq;
+}
+
+static struct nft_trans *nft_trans_alloc(struct nft_ctx *ctx, int msg_type,
+					 u32 size)
+{
+	struct nft_trans *trans;
+
+	trans = kzalloc(sizeof(struct nft_trans) + size, GFP_KERNEL);
+	if (trans == NULL)
+		return NULL;
+
+	trans->msg_type = msg_type;
+	trans->ctx = *ctx;
+
+	return trans;
+}
+
+static void nft_trans_destroy(struct nft_trans *trans)
+{
+	list_del(&trans->list);
+	kfree(trans);
+}
+
 /*
  * Tables
  */
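The two helpers introduced above are the core of the new nf_tables transaction machinery: nft_ctx_init() snapshots everything a later notification will need (net, portid, seq, report flag), and nft_trans_alloc() embeds that snapshot in a variable-sized transaction record queued until commit or abort. A compact userspace model of the same layout (all names here are illustrative):

#include <stdint.h>
#include <stdlib.h>

struct ctx {			/* snapshot taken at request time */
	uint32_t portid, seq;
	int report;
};

struct trans {			/* lives on a commit list until resolved */
	struct trans *next;
	int msg_type;
	struct ctx ctx;
	char data[];		/* type-specific payload follows */
};

static struct trans *trans_alloc(const struct ctx *ctx, int msg_type,
				 size_t size)
{
	struct trans *t = calloc(1, sizeof(*t) + size);

	if (!t)
		return NULL;
	t->msg_type = msg_type;
	t->ctx = *ctx;		/* copied, so the request buffer can go away */
	return t;
}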
@@ -197,20 +236,13 @@ nla_put_failure:
 	return -1;
 }
 
-static int nf_tables_table_notify(const struct sk_buff *oskb,
-				  const struct nlmsghdr *nlh,
-				  const struct nft_table *table,
-				  int event, int family)
+static int nf_tables_table_notify(const struct nft_ctx *ctx, int event)
 {
 	struct sk_buff *skb;
-	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
-	u32 seq = nlh ? nlh->nlmsg_seq : 0;
-	struct net *net = oskb ? sock_net(oskb->sk) : &init_net;
-	bool report;
 	int err;
 
-	report = nlh ? nlmsg_report(nlh) : false;
-	if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+	if (!ctx->report &&
+	    !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
 		return 0;
 
 	err = -ENOBUFS;
@@ -218,18 +250,20 @@ static int nf_tables_table_notify(const struct sk_buff *oskb,
 	if (skb == NULL)
 		goto err;
 
-	err = nf_tables_fill_table_info(skb, portid, seq, event, 0,
-					family, table);
+	err = nf_tables_fill_table_info(skb, ctx->portid, ctx->seq, event, 0,
+					ctx->afi->family, ctx->table);
 	if (err < 0) {
 		kfree_skb(skb);
 		goto err;
 	}
 
-	err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report,
-			     GFP_KERNEL);
+	err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
+			     ctx->report, GFP_KERNEL);
 err:
-	if (err < 0)
-		nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
+	if (err < 0) {
+		nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES,
+				  err);
+	}
 	return err;
 }
 
@@ -269,6 +303,9 @@ done:
 	return skb->len;
 }
 
+/* Internal table flags */
+#define NFT_TABLE_INACTIVE	(1 << 15)
+
 static int nf_tables_gettable(struct sock *nlsk, struct sk_buff *skb,
 			      const struct nlmsghdr *nlh,
 			      const struct nlattr * const nla[])
@@ -295,6 +332,8 @@ static int nf_tables_gettable(struct sock *nlsk, struct sk_buff *skb,
 	table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]);
 	if (IS_ERR(table))
 		return PTR_ERR(table);
+	if (table->flags & NFT_TABLE_INACTIVE)
+		return -ENOENT;
 
 	skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
 	if (!skb2)
@@ -343,7 +382,7 @@ err:
 	return err;
 }
 
-static int nf_tables_table_disable(const struct nft_af_info *afi,
+static void nf_tables_table_disable(const struct nft_af_info *afi,
 				   struct nft_table *table)
 {
 	struct nft_chain *chain;
@@ -353,45 +392,63 @@ static int nf_tables_table_disable(const struct nft_af_info *afi,
 			nf_unregister_hooks(nft_base_chain(chain)->ops,
 					    afi->nops);
 	}
-
-	return 0;
 }
 
-static int nf_tables_updtable(struct sock *nlsk, struct sk_buff *skb,
-			      const struct nlmsghdr *nlh,
-			      const struct nlattr * const nla[],
-			      struct nft_af_info *afi, struct nft_table *table)
+static int nf_tables_updtable(struct nft_ctx *ctx)
 {
-	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
-	int family = nfmsg->nfgen_family, ret = 0;
+	struct nft_trans *trans;
+	u32 flags;
+	int ret = 0;
 
-	if (nla[NFTA_TABLE_FLAGS]) {
-		u32 flags;
+	if (!ctx->nla[NFTA_TABLE_FLAGS])
+		return 0;
 
-		flags = ntohl(nla_get_be32(nla[NFTA_TABLE_FLAGS]));
-		if (flags & ~NFT_TABLE_F_DORMANT)
-			return -EINVAL;
+	flags = ntohl(nla_get_be32(ctx->nla[NFTA_TABLE_FLAGS]));
+	if (flags & ~NFT_TABLE_F_DORMANT)
+		return -EINVAL;
+
+	trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
+				sizeof(struct nft_trans_table));
+	if (trans == NULL)
+		return -ENOMEM;
 
-		if ((flags & NFT_TABLE_F_DORMANT) &&
-		    !(table->flags & NFT_TABLE_F_DORMANT)) {
-			ret = nf_tables_table_disable(afi, table);
-			if (ret >= 0)
-				table->flags |= NFT_TABLE_F_DORMANT;
-		} else if (!(flags & NFT_TABLE_F_DORMANT) &&
-			   table->flags & NFT_TABLE_F_DORMANT) {
-			ret = nf_tables_table_enable(afi, table);
-			if (ret >= 0)
-				table->flags &= ~NFT_TABLE_F_DORMANT;
-		}
-		if (ret < 0)
-			goto err;
+	if ((flags & NFT_TABLE_F_DORMANT) &&
+	    !(ctx->table->flags & NFT_TABLE_F_DORMANT)) {
+		nft_trans_table_enable(trans) = false;
+	} else if (!(flags & NFT_TABLE_F_DORMANT) &&
+		   ctx->table->flags & NFT_TABLE_F_DORMANT) {
+		ret = nf_tables_table_enable(ctx->afi, ctx->table);
+		if (ret >= 0) {
+			ctx->table->flags &= ~NFT_TABLE_F_DORMANT;
+			nft_trans_table_enable(trans) = true;
+		}
 	}
+	if (ret < 0)
+		goto err;
 
-	nf_tables_table_notify(skb, nlh, table, NFT_MSG_NEWTABLE, family);
+	nft_trans_table_update(trans) = true;
+	list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+	return 0;
 err:
+	nft_trans_destroy(trans);
 	return ret;
 }
 
+static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type)
+{
+	struct nft_trans *trans;
+
+	trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_table));
+	if (trans == NULL)
+		return -ENOMEM;
+
+	if (msg_type == NFT_MSG_NEWTABLE)
+		ctx->table->flags |= NFT_TABLE_INACTIVE;
+
+	list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+	return 0;
+}
+
 static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
 			      const struct nlmsghdr *nlh,
 			      const struct nlattr * const nla[])
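With the rewrite above, nf_tables_updtable() no longer flips the dormant flag and notifies directly; it validates, records the intended state in a transaction, and lets a later commit make the change visible (or an abort discard it). A self-contained userspace sketch of that deferred-update shape (all names illustrative):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#define KNOWN_FLAGS 0x1u	/* stand-in for NFT_TABLE_F_DORMANT */

struct table { uint32_t flags; };
struct trans { struct trans *next; uint32_t want_flags; struct table *table; };

static struct trans *commit_list;

/* Deferred update: validate now, queue the intent, apply at commit. */
static int updtable(struct table *table, uint32_t flags)
{
	struct trans *t;

	if (flags & ~KNOWN_FLAGS)
		return -EINVAL;		/* fail early, before queueing */
	t = calloc(1, sizeof(*t));
	if (!t)
		return -ENOMEM;
	t->table = table;
	t->want_flags = flags;
	t->next = commit_list;
	commit_list = t;		/* nothing visible changes yet */
	return 0;
}

static void commit(void)
{
	for (struct trans *t = commit_list; t; ) {
		struct trans *next = t->next;
		t->table->flags = t->want_flags;	/* now visible */
		free(t);
		t = next;
	}
	commit_list = NULL;
}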
@@ -403,6 +460,8 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
 	struct net *net = sock_net(skb->sk);
 	int family = nfmsg->nfgen_family;
 	u32 flags = 0;
+	struct nft_ctx ctx;
+	int err;
 
 	afi = nf_tables_afinfo_lookup(net, family, true);
 	if (IS_ERR(afi))
@@ -417,11 +476,15 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
 	}
 
 	if (table != NULL) {
+		if (table->flags & NFT_TABLE_INACTIVE)
+			return -ENOENT;
 		if (nlh->nlmsg_flags & NLM_F_EXCL)
 			return -EEXIST;
 		if (nlh->nlmsg_flags & NLM_F_REPLACE)
 			return -EOPNOTSUPP;
-		return nf_tables_updtable(nlsk, skb, nlh, nla, afi, table);
+
+		nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
+		return nf_tables_updtable(&ctx);
 	}
 
 	if (nla[NFTA_TABLE_FLAGS]) {
@@ -444,8 +507,14 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
 	INIT_LIST_HEAD(&table->sets);
 	table->flags = flags;
 
+	nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
+	err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE);
+	if (err < 0) {
+		kfree(table);
+		module_put(afi->owner);
+		return err;
+	}
 	list_add_tail(&table->list, &afi->tables);
-	nf_tables_table_notify(skb, nlh, table, NFT_MSG_NEWTABLE, family);
 	return 0;
 }
 
@@ -457,7 +526,8 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
 	struct nft_af_info *afi;
 	struct nft_table *table;
 	struct net *net = sock_net(skb->sk);
-	int family = nfmsg->nfgen_family;
+	int family = nfmsg->nfgen_family, err;
+	struct nft_ctx ctx;
 
 	afi = nf_tables_afinfo_lookup(net, family, false);
 	if (IS_ERR(afi))
@@ -466,17 +536,28 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
 	table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]);
 	if (IS_ERR(table))
 		return PTR_ERR(table);
-
-	if (!list_empty(&table->chains) || !list_empty(&table->sets))
+	if (table->flags & NFT_TABLE_INACTIVE)
+		return -ENOENT;
+	if (table->use > 0)
 		return -EBUSY;
 
+	nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
+	err = nft_trans_table_add(&ctx, NFT_MSG_DELTABLE);
+	if (err < 0)
+		return err;
+
 	list_del(&table->list);
-	nf_tables_table_notify(skb, nlh, table, NFT_MSG_DELTABLE, family);
-	kfree(table);
-	module_put(afi->owner);
 	return 0;
 }
 
+static void nf_tables_table_destroy(struct nft_ctx *ctx)
+{
+	BUG_ON(ctx->table->use > 0);
+
+	kfree(ctx->table);
+	module_put(ctx->afi->owner);
+}
+
 int nft_register_chain_type(const struct nf_chain_type *ctype)
 {
 	int err = 0;
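The NFT_TABLE_INACTIVE bit and the deltable rewrite above implement two-phase object lifetime: a new table is linked immediately so the transaction can find it, but lookups treat it as nonexistent until commit clears the bit, and deletion only unlinks, with nf_tables_table_destroy() freeing memory after commit. A sketch of the lookup side of that pattern (illustrative types):

#include <errno.h>
#include <string.h>

#define OBJ_INACTIVE (1u << 15)	/* mirrors the internal flag above */

struct obj { const char *name; unsigned int flags; struct obj *next; };

/* Queued-but-uncommitted objects look nonexistent to ordinary lookups. */
static struct obj *lookup(struct obj *head, const char *name, int *err)
{
	for (struct obj *o = head; o; o = o->next) {
		if (strcmp(o->name, name))
			continue;
		if (o->flags & OBJ_INACTIVE) {
			*err = -ENOENT;
			return NULL;
		}
		return o;
	}
	*err = -ENOENT;
	return NULL;
}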
@@ -541,7 +622,7 @@ static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = {
 				    .len = NFT_CHAIN_MAXNAMELEN - 1 },
 	[NFTA_CHAIN_HOOK]	= { .type = NLA_NESTED },
 	[NFTA_CHAIN_POLICY]	= { .type = NLA_U32 },
-	[NFTA_CHAIN_TYPE]	= { .type = NLA_NUL_STRING },
+	[NFTA_CHAIN_TYPE]	= { .type = NLA_STRING },
 	[NFTA_CHAIN_COUNTERS]	= { .type = NLA_NESTED },
 };
 
@@ -637,21 +718,13 @@ nla_put_failure:
 	return -1;
 }
 
-static int nf_tables_chain_notify(const struct sk_buff *oskb,
-				  const struct nlmsghdr *nlh,
-				  const struct nft_table *table,
-				  const struct nft_chain *chain,
-				  int event, int family)
+static int nf_tables_chain_notify(const struct nft_ctx *ctx, int event)
 {
 	struct sk_buff *skb;
-	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
-	struct net *net = oskb ? sock_net(oskb->sk) : &init_net;
-	u32 seq = nlh ? nlh->nlmsg_seq : 0;
-	bool report;
 	int err;
 
-	report = nlh ? nlmsg_report(nlh) : false;
-	if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+	if (!ctx->report &&
+	    !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
 		return 0;
 
 	err = -ENOBUFS;
@@ -659,18 +732,21 @@ static int nf_tables_chain_notify(const struct sk_buff *oskb,
 	if (skb == NULL)
 		goto err;
 
-	err = nf_tables_fill_chain_info(skb, portid, seq, event, 0, family,
-					table, chain);
+	err = nf_tables_fill_chain_info(skb, ctx->portid, ctx->seq, event, 0,
+					ctx->afi->family, ctx->table,
+					ctx->chain);
 	if (err < 0) {
 		kfree_skb(skb);
 		goto err;
 	}
 
-	err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report,
-			     GFP_KERNEL);
+	err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
+			     ctx->report, GFP_KERNEL);
 err:
-	if (err < 0)
-		nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
+	if (err < 0) {
+		nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES,
+				  err);
+	}
 	return err;
 }
 
@@ -740,10 +816,14 @@ static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb,
 	table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
 	if (IS_ERR(table))
 		return PTR_ERR(table);
+	if (table->flags & NFT_TABLE_INACTIVE)
+		return -ENOENT;
 
 	chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]);
 	if (IS_ERR(chain))
 		return PTR_ERR(chain);
+	if (chain->flags & NFT_CHAIN_INACTIVE)
+		return -ENOENT;
 
 	skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
 	if (!skb2)
@@ -767,8 +847,7 @@ static const struct nla_policy nft_counter_policy[NFTA_COUNTER_MAX + 1] = {
 	[NFTA_COUNTER_BYTES]	= { .type = NLA_U64 },
 };
 
-static int
-nf_tables_counters(struct nft_base_chain *chain, const struct nlattr *attr)
+static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
 {
 	struct nlattr *tb[NFTA_COUNTER_MAX+1];
 	struct nft_stats __percpu *newstats;
@@ -777,14 +856,14 @@ nf_tables_counters(struct nft_base_chain *chain, const struct nlattr *attr)
 
 	err = nla_parse_nested(tb, NFTA_COUNTER_MAX, attr, nft_counter_policy);
 	if (err < 0)
-		return err;
+		return ERR_PTR(err);
 
 	if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 
 	newstats = alloc_percpu(struct nft_stats);
 	if (newstats == NULL)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	/* Restore old counters on this cpu, no problem. Per-cpu statistics
 	 * are not exposed to userspace.
@@ -793,6 +872,12 @@ nf_tables_counters(struct nft_base_chain *chain, const struct nlattr *attr)
 	stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
 	stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
 
+	return newstats;
+}
+
+static void nft_chain_stats_replace(struct nft_base_chain *chain,
+				    struct nft_stats __percpu *newstats)
+{
 	if (chain->stats) {
 		struct nft_stats __percpu *oldstats =
 			nft_dereference(chain->stats);
@@ -802,17 +887,43 @@ nf_tables_counters(struct nft_base_chain *chain, const struct nlattr *attr)
 		free_percpu(oldstats);
 	} else
 		rcu_assign_pointer(chain->stats, newstats);
+}
+
+static int nft_trans_chain_add(struct nft_ctx *ctx, int msg_type)
+{
+	struct nft_trans *trans;
 
+	trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_chain));
+	if (trans == NULL)
+		return -ENOMEM;
+
+	if (msg_type == NFT_MSG_NEWCHAIN)
+		ctx->chain->flags |= NFT_CHAIN_INACTIVE;
+
+	list_add_tail(&trans->list, &ctx->net->nft.commit_list);
 	return 0;
 }
 
+static void nf_tables_chain_destroy(struct nft_chain *chain)
+{
+	BUG_ON(chain->use > 0);
+
+	if (chain->flags & NFT_BASE_CHAIN) {
+		module_put(nft_base_chain(chain)->type->owner);
+		free_percpu(nft_base_chain(chain)->stats);
+		kfree(nft_base_chain(chain));
+	} else {
+		kfree(chain);
+	}
+}
+
 static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
 			      const struct nlmsghdr *nlh,
 			      const struct nlattr * const nla[])
 {
 	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	const struct nlattr * uninitialized_var(name);
-	const struct nft_af_info *afi;
+	struct nft_af_info *afi;
 	struct nft_table *table;
 	struct nft_chain *chain;
 	struct nft_base_chain *basechain = NULL;
@@ -822,8 +933,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
 	u8 policy = NF_ACCEPT;
 	u64 handle = 0;
 	unsigned int i;
+	struct nft_stats __percpu *stats;
 	int err;
 	bool create;
+	struct nft_ctx ctx;
 
 	create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
 
@@ -869,6 +982,11 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
 	}
 
 	if (chain != NULL) {
+		struct nft_stats *stats = NULL;
+		struct nft_trans *trans;
+
+		if (chain->flags & NFT_CHAIN_INACTIVE)
+			return -ENOENT;
 		if (nlh->nlmsg_flags & NLM_F_EXCL)
 			return -EEXIST;
 		if (nlh->nlmsg_flags & NLM_F_REPLACE)
@@ -882,19 +1000,31 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
 			if (!(chain->flags & NFT_BASE_CHAIN))
 				return -EOPNOTSUPP;
 
-			err = nf_tables_counters(nft_base_chain(chain),
-						 nla[NFTA_CHAIN_COUNTERS]);
-			if (err < 0)
-				return err;
+			stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
+			if (IS_ERR(stats))
+				return PTR_ERR(stats);
 		}
 
-		if (nla[NFTA_CHAIN_POLICY])
-			nft_base_chain(chain)->policy = policy;
+		nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+		trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN,
+					sizeof(struct nft_trans_chain));
+		if (trans == NULL)
+			return -ENOMEM;
+
+		nft_trans_chain_stats(trans) = stats;
+		nft_trans_chain_update(trans) = true;
 
-		if (nla[NFTA_CHAIN_HANDLE] && name)
-			nla_strlcpy(chain->name, name, NFT_CHAIN_MAXNAMELEN);
+		if (nla[NFTA_CHAIN_POLICY])
+			nft_trans_chain_policy(trans) = policy;
+		else
+			nft_trans_chain_policy(trans) = -1;
 
-		goto notify;
+		if (nla[NFTA_CHAIN_HANDLE] && name) {
+			nla_strlcpy(nft_trans_chain_name(trans), name,
+				    NFT_CHAIN_MAXNAMELEN);
+		}
+		list_add_tail(&trans->list, &net->nft.commit_list);
+		return 0;
 	}
 
 	if (table->use == UINT_MAX)
@@ -939,23 +1069,21 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
 			return -ENOMEM;
 
 		if (nla[NFTA_CHAIN_COUNTERS]) {
-			err = nf_tables_counters(basechain,
-						 nla[NFTA_CHAIN_COUNTERS]);
-			if (err < 0) {
+			stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
+			if (IS_ERR(stats)) {
 				module_put(type->owner);
 				kfree(basechain);
-				return err;
+				return PTR_ERR(stats);
 			}
+			basechain->stats = stats;
 		} else {
-			struct nft_stats __percpu *newstats;
-
-			newstats = alloc_percpu(struct nft_stats);
-			if (newstats == NULL) {
+			stats = alloc_percpu(struct nft_stats);
+			if (IS_ERR(stats)) {
 				module_put(type->owner);
 				kfree(basechain);
-				return -ENOMEM;
+				return PTR_ERR(stats);
 			}
-			rcu_assign_pointer(basechain->stats, newstats);
+			rcu_assign_pointer(basechain->stats, stats);
 		}
 
 		basechain->type = type;
@@ -992,31 +1120,27 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
 	if (!(table->flags & NFT_TABLE_F_DORMANT) &&
 	    chain->flags & NFT_BASE_CHAIN) {
 		err = nf_register_hooks(nft_base_chain(chain)->ops, afi->nops);
-		if (err < 0) {
-			module_put(basechain->type->owner);
-			free_percpu(basechain->stats);
-			kfree(basechain);
-			return err;
-		}
+		if (err < 0)
+			goto err1;
 	}
-	list_add_tail(&chain->list, &table->chains);
-	table->use++;
-notify:
-	nf_tables_chain_notify(skb, nlh, table, chain, NFT_MSG_NEWCHAIN,
-			       family);
-	return 0;
-}
 
-static void nf_tables_chain_destroy(struct nft_chain *chain)
-{
-	BUG_ON(chain->use > 0);
+	nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+	err = nft_trans_chain_add(&ctx, NFT_MSG_NEWCHAIN);
+	if (err < 0)
+		goto err2;
 
-	if (chain->flags & NFT_BASE_CHAIN) {
-		module_put(nft_base_chain(chain)->type->owner);
-		free_percpu(nft_base_chain(chain)->stats);
-		kfree(nft_base_chain(chain));
-	} else
-		kfree(chain);
+	table->use++;
+	list_add_tail(&chain->list, &table->chains);
+	return 0;
+err2:
+	if (!(table->flags & NFT_TABLE_F_DORMANT) &&
+	    chain->flags & NFT_BASE_CHAIN) {
+		nf_unregister_hooks(nft_base_chain(chain)->ops,
+				    afi->nops);
+	}
+err1:
+	nf_tables_chain_destroy(chain);
+	return err;
 }
 
 static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
@@ -1024,11 +1148,13 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
 			      const struct nlattr * const nla[])
 {
 	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
-	const struct nft_af_info *afi;
+	struct nft_af_info *afi;
 	struct nft_table *table;
 	struct nft_chain *chain;
 	struct net *net = sock_net(skb->sk);
 	int family = nfmsg->nfgen_family;
+	struct nft_ctx ctx;
+	int err;
 
 	afi = nf_tables_afinfo_lookup(net, family, false);
 	if (IS_ERR(afi))
@@ -1037,48 +1163,27 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
 	table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
 	if (IS_ERR(table))
 		return PTR_ERR(table);
+	if (table->flags & NFT_TABLE_INACTIVE)
+		return -ENOENT;
 
 	chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]);
 	if (IS_ERR(chain))
 		return PTR_ERR(chain);
-
-	if (!list_empty(&chain->rules) || chain->use > 0)
+	if (chain->flags & NFT_CHAIN_INACTIVE)
+		return -ENOENT;
+	if (chain->use > 0)
 		return -EBUSY;
 
-	list_del(&chain->list);
-	table->use--;
-
-	if (!(table->flags & NFT_TABLE_F_DORMANT) &&
-	    chain->flags & NFT_BASE_CHAIN)
-		nf_unregister_hooks(nft_base_chain(chain)->ops, afi->nops);
-
-	nf_tables_chain_notify(skb, nlh, table, chain, NFT_MSG_DELCHAIN,
-			       family);
-
-	/* Make sure all rule references are gone before this is released */
-	synchronize_rcu();
+	nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+	err = nft_trans_chain_add(&ctx, NFT_MSG_DELCHAIN);
+	if (err < 0)
+		return err;
 
-	nf_tables_chain_destroy(chain);
+	table->use--;
+	list_del(&chain->list);
 	return 0;
 }
 
-static void nft_ctx_init(struct nft_ctx *ctx,
-			 const struct sk_buff *skb,
-			 const struct nlmsghdr *nlh,
-			 const struct nft_af_info *afi,
-			 const struct nft_table *table,
-			 const struct nft_chain *chain,
-			 const struct nlattr * const *nla)
-{
-	ctx->net = sock_net(skb->sk);
-	ctx->skb = skb;
-	ctx->nlh = nlh;
-	ctx->afi = afi;
-	ctx->table = table;
-	ctx->chain = chain;
-	ctx->nla = nla;
-}
-
 /*
  * Expressions
  */
@@ -1093,7 +1198,10 @@ static void nft_ctx_init(struct nft_ctx *ctx,
 int nft_register_expr(struct nft_expr_type *type)
 {
 	nfnl_lock(NFNL_SUBSYS_NFTABLES);
-	list_add_tail(&type->list, &nf_tables_expressions);
+	if (type->family == NFPROTO_UNSPEC)
+		list_add_tail(&type->list, &nf_tables_expressions);
+	else
+		list_add(&type->list, &nf_tables_expressions);
 	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 	return 0;
 }
@@ -1361,22 +1469,15 @@ nla_put_failure:
 	return -1;
 }
 
-static int nf_tables_rule_notify(const struct sk_buff *oskb,
-				 const struct nlmsghdr *nlh,
-				 const struct nft_table *table,
-				 const struct nft_chain *chain,
+static int nf_tables_rule_notify(const struct nft_ctx *ctx,
 				 const struct nft_rule *rule,
-				 int event, u32 flags, int family)
+				 int event)
 {
 	struct sk_buff *skb;
-	u32 portid = NETLINK_CB(oskb).portid;
-	struct net *net = oskb ? sock_net(oskb->sk) : &init_net;
-	u32 seq = nlh->nlmsg_seq;
-	bool report;
 	int err;
 
-	report = nlmsg_report(nlh);
-	if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+	if (!ctx->report &&
+	    !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
 		return 0;
 
 	err = -ENOBUFS;
@@ -1384,18 +1485,21 @@ static int nf_tables_rule_notify(const struct sk_buff *oskb,
 	if (skb == NULL)
 		goto err;
 
-	err = nf_tables_fill_rule_info(skb, portid, seq, event, flags,
-				       family, table, chain, rule);
+	err = nf_tables_fill_rule_info(skb, ctx->portid, ctx->seq, event, 0,
+				       ctx->afi->family, ctx->table,
+				       ctx->chain, rule);
 	if (err < 0) {
 		kfree_skb(skb);
 		goto err;
 	}
 
-	err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report,
-			     GFP_KERNEL);
+	err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
+			     ctx->report, GFP_KERNEL);
 err:
-	if (err < 0)
-		nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
+	if (err < 0) {
+		nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES,
+				  err);
+	}
 	return err;
 }
 
@@ -1511,10 +1615,14 @@ static int nf_tables_getrule(struct sock *nlsk, struct sk_buff *skb,
 	table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
 	if (IS_ERR(table))
 		return PTR_ERR(table);
+	if (table->flags & NFT_TABLE_INACTIVE)
+		return -ENOENT;
 
 	chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
 	if (IS_ERR(chain))
 		return PTR_ERR(chain);
+	if (chain->flags & NFT_CHAIN_INACTIVE)
+		return -ENOENT;
 
 	rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
 	if (IS_ERR(rule))
@@ -1554,37 +1662,36 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
 	kfree(rule);
 }
 
-#define NFT_RULE_MAXEXPRS 128
-
-static struct nft_expr_info *info;
-
-static struct nft_rule_trans *
-nf_tables_trans_add(struct nft_ctx *ctx, struct nft_rule *rule)
+static struct nft_trans *nft_trans_rule_add(struct nft_ctx *ctx, int msg_type,
+					    struct nft_rule *rule)
 {
-	struct nft_rule_trans *rupd;
+	struct nft_trans *trans;
 
-	rupd = kmalloc(sizeof(struct nft_rule_trans), GFP_KERNEL);
-	if (rupd == NULL)
+	trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_rule));
+	if (trans == NULL)
 		return NULL;
 
-	rupd->ctx = *ctx;
-	rupd->rule = rule;
-	list_add_tail(&rupd->list, &ctx->net->nft.commit_list);
+	nft_trans_rule(trans) = rule;
+	list_add_tail(&trans->list, &ctx->net->nft.commit_list);
 
-	return rupd;
+	return trans;
 }
 
+#define NFT_RULE_MAXEXPRS 128
+
+static struct nft_expr_info *info;
+
 static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
 			     const struct nlmsghdr *nlh,
 			     const struct nlattr * const nla[])
 {
 	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
-	const struct nft_af_info *afi;
+	struct nft_af_info *afi;
 	struct net *net = sock_net(skb->sk);
 	struct nft_table *table;
 	struct nft_chain *chain;
 	struct nft_rule *rule, *old_rule = NULL;
-	struct nft_rule_trans *repl = NULL;
+	struct nft_trans *trans = NULL;
 	struct nft_expr *expr;
 	struct nft_ctx ctx;
 	struct nlattr *tmp;
@@ -1682,8 +1789,9 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
 
 	if (nlh->nlmsg_flags & NLM_F_REPLACE) {
 		if (nft_rule_is_active_next(net, old_rule)) {
-			repl = nf_tables_trans_add(&ctx, old_rule);
-			if (repl == NULL) {
+			trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE,
+						   old_rule);
+			if (trans == NULL) {
 				err = -ENOMEM;
 				goto err2;
 			}
@@ -1705,19 +1813,19 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
 		list_add_rcu(&rule->list, &chain->rules);
 	}
 
-	if (nf_tables_trans_add(&ctx, rule) == NULL) {
+	if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
 		err = -ENOMEM;
 		goto err3;
 	}
+	chain->use++;
 	return 0;
 
 err3:
 	list_del_rcu(&rule->list);
-	if (repl) {
-		list_del_rcu(&repl->rule->list);
-		list_del(&repl->list);
-		nft_rule_clear(net, repl->rule);
-		kfree(repl);
+	if (trans) {
+		list_del_rcu(&nft_trans_rule(trans)->list);
+		nft_rule_clear(net, nft_trans_rule(trans));
+		nft_trans_destroy(trans);
 	}
 err2:
 	nf_tables_rule_destroy(&ctx, rule);
@@ -1734,9 +1842,10 @@ nf_tables_delrule_one(struct nft_ctx *ctx, struct nft_rule *rule)
 {
 	/* You cannot delete the same rule twice */
 	if (nft_rule_is_active_next(ctx->net, rule)) {
-		if (nf_tables_trans_add(ctx, rule) == NULL)
+		if (nft_trans_rule_add(ctx, NFT_MSG_DELRULE, rule) == NULL)
 			return -ENOMEM;
 		nft_rule_disactivate_next(ctx->net, rule);
+		ctx->chain->use--;
 		return 0;
 	}
 	return -ENOENT;
@@ -1760,9 +1869,9 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
 			     const struct nlattr * const nla[])
 {
 	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
-	const struct nft_af_info *afi;
+	struct nft_af_info *afi;
 	struct net *net = sock_net(skb->sk);
-	const struct nft_table *table;
+	struct nft_table *table;
 	struct nft_chain *chain = NULL;
 	struct nft_rule *rule;
 	int family = nfmsg->nfgen_family, err = 0;
@@ -1775,6 +1884,8 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
 	table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
 	if (IS_ERR(table))
 		return PTR_ERR(table);
+	if (table->flags & NFT_TABLE_INACTIVE)
+		return -ENOENT;
 
 	if (nla[NFTA_RULE_CHAIN]) {
 		chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
@@ -1807,88 +1918,6 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
 	return err;
 }
 
-static int nf_tables_commit(struct sk_buff *skb)
-{
-	struct net *net = sock_net(skb->sk);
-	struct nft_rule_trans *rupd, *tmp;
-
-	/* Bump generation counter, invalidate any dump in progress */
-	net->nft.genctr++;
-
-	/* A new generation has just started */
-	net->nft.gencursor = gencursor_next(net);
-
-	/* Make sure all packets have left the previous generation before
-	 * purging old rules.
-	 */
-	synchronize_rcu();
-
-	list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
-		/* This rule was inactive in the past and just became active.
-		 * Clear the next bit of the genmask since its meaning has
-		 * changed, now it is the future.
-		 */
-		if (nft_rule_is_active(net, rupd->rule)) {
-			nft_rule_clear(net, rupd->rule);
-			nf_tables_rule_notify(skb, rupd->ctx.nlh,
-					      rupd->ctx.table, rupd->ctx.chain,
-					      rupd->rule, NFT_MSG_NEWRULE, 0,
-					      rupd->ctx.afi->family);
-			list_del(&rupd->list);
-			kfree(rupd);
-			continue;
-		}
-
-		/* This rule is in the past, get rid of it */
-		list_del_rcu(&rupd->rule->list);
-		nf_tables_rule_notify(skb, rupd->ctx.nlh,
-				      rupd->ctx.table, rupd->ctx.chain,
-				      rupd->rule, NFT_MSG_DELRULE, 0,
-				      rupd->ctx.afi->family);
-	}
-
-	/* Make sure we don't see any packet traversing old rules */
-	synchronize_rcu();
-
-	/* Now we can safely release unused old rules */
-	list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
-		nf_tables_rule_destroy(&rupd->ctx, rupd->rule);
-		list_del(&rupd->list);
-		kfree(rupd);
-	}
-
-	return 0;
-}
-
-static int nf_tables_abort(struct sk_buff *skb)
-{
-	struct net *net = sock_net(skb->sk);
-	struct nft_rule_trans *rupd, *tmp;
-
-	list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
-		if (!nft_rule_is_active_next(net, rupd->rule)) {
-			nft_rule_clear(net, rupd->rule);
-			list_del(&rupd->list);
-			kfree(rupd);
-			continue;
-		}
-
-		/* This rule is inactive, get rid of it */
-		list_del_rcu(&rupd->rule->list);
-	}
-
-	/* Make sure we don't see any packet accessing aborted rules */
-	synchronize_rcu();
-
-	list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
-		nf_tables_rule_destroy(&rupd->ctx, rupd->rule);
-		list_del(&rupd->list);
-		kfree(rupd);
-	}
-
-	return 0;
-}
-
 /*
  * Sets
  */
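The rule-only nf_tables_commit()/nf_tables_abort() pair deleted above is exactly what the nft_trans records replace: once every object type queues a typed transaction, one generic commit walk can switch on trans->msg_type instead of assuming everything is a rule. A minimal sketch of that dispatch (message names are illustrative):

#include <stdlib.h>

enum msg { MSG_NEWTABLE, MSG_DELTABLE, MSG_NEWRULE, MSG_DELRULE };
struct trans { struct trans *next; enum msg msg_type; void *obj; };

/* One generic commit walk replaces the per-type commit functions. */
static void commit_all(struct trans **list)
{
	struct trans *t = *list, *next;

	for (; t; t = next) {
		next = t->next;
		switch (t->msg_type) {
		case MSG_NEWTABLE:	/* clear INACTIVE, send notification */
		case MSG_NEWRULE:	/* clear genmask bit, send notification */
			break;
		case MSG_DELTABLE:	/* object already unlinked: free it */
		case MSG_DELRULE:
			free(t->obj);
			break;
		}
		free(t);
	}
	*list = NULL;
}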
@@ -1912,9 +1941,18 @@ void nft_unregister_set(struct nft_set_ops *ops)
 }
 EXPORT_SYMBOL_GPL(nft_unregister_set);
 
-static const struct nft_set_ops *nft_select_set_ops(const struct nlattr * const nla[])
+/*
+ * Select a set implementation based on the data characteristics and the
+ * given policy. The total memory use might not be known if no size is
+ * given, in that case the amount of memory per element is used.
+ */
+static const struct nft_set_ops *
+nft_select_set_ops(const struct nlattr * const nla[],
+		   const struct nft_set_desc *desc,
+		   enum nft_set_policies policy)
 {
-	const struct nft_set_ops *ops;
+	const struct nft_set_ops *ops, *bops;
+	struct nft_set_estimate est, best;
 	u32 features;
 
 #ifdef CONFIG_MODULES
@@ -1932,15 +1970,45 @@ static const struct nft_set_ops *nft_select_set_ops(const struct nlattr * const
 		features &= NFT_SET_INTERVAL | NFT_SET_MAP;
 	}
 
-	// FIXME: implement selection properly
+	bops = NULL;
+	best.size = ~0;
+	best.class = ~0;
+
 	list_for_each_entry(ops, &nf_tables_set_ops, list) {
 		if ((ops->features & features) != features)
 			continue;
+		if (!ops->estimate(desc, features, &est))
+			continue;
+
+		switch (policy) {
+		case NFT_SET_POL_PERFORMANCE:
+			if (est.class < best.class)
+				break;
+			if (est.class == best.class && est.size < best.size)
+				break;
+			continue;
+		case NFT_SET_POL_MEMORY:
+			if (est.size < best.size)
+				break;
+			if (est.size == best.size && est.class < best.class)
+				break;
+			continue;
+		default:
+			break;
+		}
+
 		if (!try_module_get(ops->owner))
 			continue;
-		return ops;
+		if (bops != NULL)
+			module_put(bops->owner);
+
+		bops = ops;
+		best = est;
 	}
 
+	if (bops != NULL)
+		return bops;
+
 	return ERR_PTR(-EOPNOTSUPP);
 }
 
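nft_select_set_ops() above replaces the old FIXME with a real selection loop: each backend estimates size and lookup class for the described set, and the policy decides whether a candidate beats the current best (performance prefers a better lookup class, memory prefers a smaller footprint, ties fall back to the other criterion). The comparison in isolation, as a userspace sketch:

#include <stdbool.h>

struct est { unsigned int size, class; };

/* Does the candidate estimate beat the current best under the policy? */
static bool better(const struct est *cand, const struct est *best,
		   bool prefer_performance)
{
	if (prefer_performance)
		return cand->class < best->class ||
		       (cand->class == best->class && cand->size < best->size);
	return cand->size < best->size ||
	       (cand->size == best->size && cand->class < best->class);
}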
@@ -1953,6 +2021,13 @@ static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
 	[NFTA_SET_KEY_LEN]	= { .type = NLA_U32 },
 	[NFTA_SET_DATA_TYPE]	= { .type = NLA_U32 },
 	[NFTA_SET_DATA_LEN]	= { .type = NLA_U32 },
+	[NFTA_SET_POLICY]	= { .type = NLA_U32 },
+	[NFTA_SET_DESC]		= { .type = NLA_NESTED },
+	[NFTA_SET_ID]		= { .type = NLA_U32 },
+};
+
+static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = {
+	[NFTA_SET_DESC_SIZE]	= { .type = NLA_U32 },
 };
 
 static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
@@ -1962,8 +2037,8 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
 {
 	struct net *net = sock_net(skb->sk);
 	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
-	const struct nft_af_info *afi = NULL;
-	const struct nft_table *table = NULL;
+	struct nft_af_info *afi = NULL;
+	struct nft_table *table = NULL;
 
 	if (nfmsg->nfgen_family != NFPROTO_UNSPEC) {
 		afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
@@ -1978,6 +2053,8 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
 		table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]);
 		if (IS_ERR(table))
 			return PTR_ERR(table);
+		if (table->flags & NFT_TABLE_INACTIVE)
+			return -ENOENT;
 	}
 
 	nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla);
@@ -1999,13 +2076,27 @@ struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
 	return ERR_PTR(-ENOENT);
 }
 
+struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
+					  const struct nlattr *nla)
+{
+	struct nft_trans *trans;
+	u32 id = ntohl(nla_get_be32(nla));
+
+	list_for_each_entry(trans, &net->nft.commit_list, list) {
+		if (trans->msg_type == NFT_MSG_NEWSET &&
+		    id == nft_trans_set_id(trans))
+			return nft_trans_set(trans);
+	}
+	return ERR_PTR(-ENOENT);
+}
+
 static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
 				    const char *name)
 {
 	const struct nft_set *i;
 	const char *p;
 	unsigned long *inuse;
-	unsigned int n = 0;
+	unsigned int n = 0, min = 0;
 
 	p = strnchr(name, IFNAMSIZ, '%');
 	if (p != NULL) {
@@ -2015,23 +2106,28 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
 		inuse = (unsigned long *)get_zeroed_page(GFP_KERNEL);
 		if (inuse == NULL)
 			return -ENOMEM;
-
+cont:
 		list_for_each_entry(i, &ctx->table->sets, list) {
 			int tmp;
 
 			if (!sscanf(i->name, name, &tmp))
 				continue;
-			if (tmp < 0 || tmp >= BITS_PER_BYTE * PAGE_SIZE)
+			if (tmp < min || tmp >= min + BITS_PER_BYTE * PAGE_SIZE)
 				continue;
 
-			set_bit(tmp, inuse);
+			set_bit(tmp - min, inuse);
 		}
 
 		n = find_first_zero_bit(inuse, BITS_PER_BYTE * PAGE_SIZE);
+		if (n >= BITS_PER_BYTE * PAGE_SIZE) {
+			min += BITS_PER_BYTE * PAGE_SIZE;
+			memset(inuse, 0, PAGE_SIZE);
+			goto cont;
+		}
 		free_page((unsigned long)inuse);
 	}
 
-	snprintf(set->name, sizeof(set->name), name, n);
+	snprintf(set->name, sizeof(set->name), name, min + n);
 	list_for_each_entry(i, &ctx->table->sets, list) {
 		if (!strcmp(set->name, i->name))
 			return -ENFILE;
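nf_tables_set_alloc_name() above auto-numbers printf-style templates such as "map%d": it scans existing names into a one-page bitmap and, new in this version, slides the window forward (min += BITS_PER_BYTE * PAGE_SIZE) when the first page of numbers is exhausted, instead of failing. The windowed scan in miniature (a sketch; WINDOW stands in for the page-sized bitmap):

#include <stdbool.h>

#define WINDOW 64	/* stand-in for BITS_PER_BYTE * PAGE_SIZE */

/* Find the lowest free suffix >= 0, scanning WINDOW numbers at a time. */
static unsigned int first_free(const unsigned int *used, int nused)
{
	unsigned int min = 0;

	for (;;) {
		bool bitmap[WINDOW] = { false };

		for (int i = 0; i < nused; i++)
			if (used[i] >= min && used[i] < min + WINDOW)
				bitmap[used[i] - min] = true;
		for (unsigned int n = 0; n < WINDOW; n++)
			if (!bitmap[n])
				return min + n;
		min += WINDOW;	/* window full: slide and rescan */
	}
}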
@@ -2044,8 +2140,9 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
 {
 	struct nfgenmsg *nfmsg;
 	struct nlmsghdr *nlh;
-	u32 portid = NETLINK_CB(ctx->skb).portid;
-	u32 seq = ctx->nlh->nlmsg_seq;
+	struct nlattr *desc;
+	u32 portid = ctx->portid;
+	u32 seq = ctx->seq;
 
 	event |= NFNL_SUBSYS_NFTABLES << 8;
 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
@@ -2077,6 +2174,14 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
 			goto nla_put_failure;
 	}
 
+	desc = nla_nest_start(skb, NFTA_SET_DESC);
+	if (desc == NULL)
+		goto nla_put_failure;
+	if (set->size &&
+	    nla_put_be32(skb, NFTA_SET_DESC_SIZE, htonl(set->size)))
+		goto nla_put_failure;
+	nla_nest_end(skb, desc);
+
 	return nlmsg_end(skb, nlh);
 
 nla_put_failure:
@@ -2086,19 +2191,18 @@ nla_put_failure:
 
 static int nf_tables_set_notify(const struct nft_ctx *ctx,
 				const struct nft_set *set,
-				int event)
+				int event, gfp_t gfp_flags)
 {
 	struct sk_buff *skb;
-	u32 portid = NETLINK_CB(ctx->skb).portid;
-	bool report;
+	u32 portid = ctx->portid;
 	int err;
 
-	report = nlmsg_report(ctx->nlh);
-	if (!report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
+	if (!ctx->report &&
+	    !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
 		return 0;
 
 	err = -ENOBUFS;
-	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	skb = nlmsg_new(NLMSG_GOODSIZE, gfp_flags);
 	if (skb == NULL)
 		goto err;
 
@@ -2108,8 +2212,8 @@ static int nf_tables_set_notify(const struct nft_ctx *ctx,
 		goto err;
 	}
 
-	err = nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES, report,
-			     GFP_KERNEL);
+	err = nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES,
+			     ctx->report, gfp_flags);
 err:
 	if (err < 0)
 		nfnetlink_set_err(ctx->net, portid, NFNLGRP_NFTABLES, err);
@@ -2183,7 +2287,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
 {
 	const struct nft_set *set;
 	unsigned int idx, s_idx = cb->args[0];
-	const struct nft_af_info *afi;
+	struct nft_af_info *afi;
 	struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
 	struct net *net = sock_net(skb->sk);
 	int cur_family = cb->args[3];
@@ -2260,6 +2364,8 @@ static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
 	return ret;
 }
 
+#define NFT_SET_INACTIVE	(1 << 15)	/* Internal set flag */
+
 static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
 			    const struct nlmsghdr *nlh,
 			    const struct nlattr * const nla[])
@@ -2289,6 +2395,8 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
 	set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
 	if (IS_ERR(set))
 		return PTR_ERR(set);
+	if (set->flags & NFT_SET_INACTIVE)
+		return -ENOENT;
 
 	skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
 	if (skb2 == NULL)
@@ -2305,13 +2413,50 @@ err:
2305 return err; 2413 return err;
2306} 2414}
2307 2415
2416static int nf_tables_set_desc_parse(const struct nft_ctx *ctx,
2417 struct nft_set_desc *desc,
2418 const struct nlattr *nla)
2419{
2420 struct nlattr *da[NFTA_SET_DESC_MAX + 1];
2421 int err;
2422
2423 err = nla_parse_nested(da, NFTA_SET_DESC_MAX, nla, nft_set_desc_policy);
2424 if (err < 0)
2425 return err;
2426
2427 if (da[NFTA_SET_DESC_SIZE] != NULL)
2428 desc->size = ntohl(nla_get_be32(da[NFTA_SET_DESC_SIZE]));
2429
2430 return 0;
2431}
2432
2433static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type,
2434 struct nft_set *set)
2435{
2436 struct nft_trans *trans;
2437
2438 trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_set));
2439 if (trans == NULL)
2440 return -ENOMEM;
2441
2442 if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] != NULL) {
2443 nft_trans_set_id(trans) =
2444 ntohl(nla_get_be32(ctx->nla[NFTA_SET_ID]));
2445 set->flags |= NFT_SET_INACTIVE;
2446 }
2447 nft_trans_set(trans) = set;
2448 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
2449
2450 return 0;
2451}
2452
2308static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb, 2453static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
2309 const struct nlmsghdr *nlh, 2454 const struct nlmsghdr *nlh,
2310 const struct nlattr * const nla[]) 2455 const struct nlattr * const nla[])
2311{ 2456{
2312 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 2457 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2313 const struct nft_set_ops *ops; 2458 const struct nft_set_ops *ops;
2314 const struct nft_af_info *afi; 2459 struct nft_af_info *afi;
2315 struct net *net = sock_net(skb->sk); 2460 struct net *net = sock_net(skb->sk);
2316 struct nft_table *table; 2461 struct nft_table *table;
2317 struct nft_set *set; 2462 struct nft_set *set;
@@ -2319,14 +2464,18 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
2319 char name[IFNAMSIZ]; 2464 char name[IFNAMSIZ];
2320 unsigned int size; 2465 unsigned int size;
2321 bool create; 2466 bool create;
2322 u32 ktype, klen, dlen, dtype, flags; 2467 u32 ktype, dtype, flags, policy;
2468 struct nft_set_desc desc;
2323 int err; 2469 int err;
2324 2470
2325 if (nla[NFTA_SET_TABLE] == NULL || 2471 if (nla[NFTA_SET_TABLE] == NULL ||
2326 nla[NFTA_SET_NAME] == NULL || 2472 nla[NFTA_SET_NAME] == NULL ||
2327 nla[NFTA_SET_KEY_LEN] == NULL) 2473 nla[NFTA_SET_KEY_LEN] == NULL ||
2474 nla[NFTA_SET_ID] == NULL)
2328 return -EINVAL; 2475 return -EINVAL;
2329 2476
2477 memset(&desc, 0, sizeof(desc));
2478
2330 ktype = NFT_DATA_VALUE; 2479 ktype = NFT_DATA_VALUE;
2331 if (nla[NFTA_SET_KEY_TYPE] != NULL) { 2480 if (nla[NFTA_SET_KEY_TYPE] != NULL) {
2332 ktype = ntohl(nla_get_be32(nla[NFTA_SET_KEY_TYPE])); 2481 ktype = ntohl(nla_get_be32(nla[NFTA_SET_KEY_TYPE]));
@@ -2334,8 +2483,8 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
2334 return -EINVAL; 2483 return -EINVAL;
2335 } 2484 }
2336 2485
2337 klen = ntohl(nla_get_be32(nla[NFTA_SET_KEY_LEN])); 2486 desc.klen = ntohl(nla_get_be32(nla[NFTA_SET_KEY_LEN]));
2338 if (klen == 0 || klen > FIELD_SIZEOF(struct nft_data, data)) 2487 if (desc.klen == 0 || desc.klen > FIELD_SIZEOF(struct nft_data, data))
2339 return -EINVAL; 2488 return -EINVAL;
2340 2489
2341 flags = 0; 2490 flags = 0;
@@ -2347,7 +2496,6 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
2347 } 2496 }
2348 2497
2349 dtype = 0; 2498 dtype = 0;
2350 dlen = 0;
2351 if (nla[NFTA_SET_DATA_TYPE] != NULL) { 2499 if (nla[NFTA_SET_DATA_TYPE] != NULL) {
2352 if (!(flags & NFT_SET_MAP)) 2500 if (!(flags & NFT_SET_MAP))
2353 return -EINVAL; 2501 return -EINVAL;
@@ -2360,15 +2508,25 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
2360 if (dtype != NFT_DATA_VERDICT) { 2508 if (dtype != NFT_DATA_VERDICT) {
2361 if (nla[NFTA_SET_DATA_LEN] == NULL) 2509 if (nla[NFTA_SET_DATA_LEN] == NULL)
2362 return -EINVAL; 2510 return -EINVAL;
2363 dlen = ntohl(nla_get_be32(nla[NFTA_SET_DATA_LEN])); 2511 desc.dlen = ntohl(nla_get_be32(nla[NFTA_SET_DATA_LEN]));
2364 if (dlen == 0 || 2512 if (desc.dlen == 0 ||
2365 dlen > FIELD_SIZEOF(struct nft_data, data)) 2513 desc.dlen > FIELD_SIZEOF(struct nft_data, data))
2366 return -EINVAL; 2514 return -EINVAL;
2367 } else 2515 } else
2368 dlen = sizeof(struct nft_data); 2516 desc.dlen = sizeof(struct nft_data);
2369 } else if (flags & NFT_SET_MAP) 2517 } else if (flags & NFT_SET_MAP)
2370 return -EINVAL; 2518 return -EINVAL;
2371 2519
2520 policy = NFT_SET_POL_PERFORMANCE;
2521 if (nla[NFTA_SET_POLICY] != NULL)
2522 policy = ntohl(nla_get_be32(nla[NFTA_SET_POLICY]));
2523
2524 if (nla[NFTA_SET_DESC] != NULL) {
2525 err = nf_tables_set_desc_parse(&ctx, &desc, nla[NFTA_SET_DESC]);
2526 if (err < 0)
2527 return err;
2528 }
2529
2372 create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false; 2530 create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
2373 2531
2374 afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, create); 2532 afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, create);
@@ -2399,7 +2557,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
2399 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) 2557 if (!(nlh->nlmsg_flags & NLM_F_CREATE))
2400 return -ENOENT; 2558 return -ENOENT;
2401 2559
2402 ops = nft_select_set_ops(nla); 2560 ops = nft_select_set_ops(nla, &desc, policy);
2403 if (IS_ERR(ops)) 2561 if (IS_ERR(ops))
2404 return PTR_ERR(ops); 2562 return PTR_ERR(ops);
2405 2563
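
nft_select_set_ops() now receives the parsed description and the NFTA_SET_POLICY preference, so candidate backends can compete on their ->estimate() numbers (nft_hash_estimate() further down reports an O(1) lookup class and a memory size derived from desc->size). The selection logic itself is outside this hunk; the following is only a plausible sketch of choosing by policy, with invented names:

    #include <stddef.h>
    #include <stdio.h>

    enum pol   { POL_PERFORMANCE, POL_MEMORY };
    enum klass { CLASS_O_1, CLASS_O_LOG_N, CLASS_O_N };

    struct estimate { size_t size; enum klass class; };
    struct ops {
        const char *name;
        int (*estimate)(size_t nelems, struct estimate *est);
    };

    /* Pick the backend with the best metric for the requested policy:
     * lookup class for performance, memory footprint otherwise.
     */
    static const struct ops *select_ops(const struct ops **cand, int n,
                                        size_t nelems, enum pol policy)
    {
        const struct ops *best = NULL;
        struct estimate est, best_est = { 0 };
        int i;

        for (i = 0; i < n; i++) {
            if (cand[i]->estimate(nelems, &est) < 0)
                continue;
            if (!best ||
                (policy == POL_PERFORMANCE ? est.class < best_est.class
                                           : est.size < best_est.size)) {
                best = cand[i];
                best_est = est;
            }
        }
        return best;
    }

    static int hash_est(size_t n, struct estimate *e)
    { e->size = n * 2; e->class = CLASS_O_1; return 0; }
    static int tree_est(size_t n, struct estimate *e)
    { e->size = n; e->class = CLASS_O_LOG_N; return 0; }

    int main(void)
    {
        const struct ops hash = { "hash", hash_est };
        const struct ops tree = { "tree", tree_est };
        const struct ops *cand[] = { &hash, &tree };

        puts(select_ops(cand, 2, 1000, POL_PERFORMANCE)->name); /* hash */
        puts(select_ops(cand, 2, 1000, POL_MEMORY)->name);      /* tree */
        return 0;
    }
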
@@ -2420,17 +2578,22 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
2420 INIT_LIST_HEAD(&set->bindings); 2578 INIT_LIST_HEAD(&set->bindings);
2421 set->ops = ops; 2579 set->ops = ops;
2422 set->ktype = ktype; 2580 set->ktype = ktype;
2423 set->klen = klen; 2581 set->klen = desc.klen;
2424 set->dtype = dtype; 2582 set->dtype = dtype;
2425 set->dlen = dlen; 2583 set->dlen = desc.dlen;
2426 set->flags = flags; 2584 set->flags = flags;
2585 set->size = desc.size;
2586
2587 err = ops->init(set, &desc, nla);
2588 if (err < 0)
2589 goto err2;
2427 2590
2428 err = ops->init(set, nla); 2591 err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
2429 if (err < 0) 2592 if (err < 0)
2430 goto err2; 2593 goto err2;
2431 2594
2432 list_add_tail(&set->list, &table->sets); 2595 list_add_tail(&set->list, &table->sets);
2433 nf_tables_set_notify(&ctx, set, NFT_MSG_NEWSET); 2596 table->use++;
2434 return 0; 2597 return 0;
2435 2598
2436err2: 2599err2:
@@ -2440,16 +2603,20 @@ err1:
2440 return err; 2603 return err;
2441} 2604}
2442 2605
2443static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set) 2606static void nft_set_destroy(struct nft_set *set)
2444{ 2607{
2445 list_del(&set->list);
2446 nf_tables_set_notify(ctx, set, NFT_MSG_DELSET);
2447
2448 set->ops->destroy(set); 2608 set->ops->destroy(set);
2449 module_put(set->ops->owner); 2609 module_put(set->ops->owner);
2450 kfree(set); 2610 kfree(set);
2451} 2611}
2452 2612
2613static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
2614{
2615 list_del(&set->list);
2616 nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
2617 nft_set_destroy(set);
2618}
2619
2453static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb, 2620static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
2454 const struct nlmsghdr *nlh, 2621 const struct nlmsghdr *nlh,
2455 const struct nlattr * const nla[]) 2622 const struct nlattr * const nla[])
@@ -2471,10 +2638,17 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
2471 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]); 2638 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
2472 if (IS_ERR(set)) 2639 if (IS_ERR(set))
2473 return PTR_ERR(set); 2640 return PTR_ERR(set);
2641 if (set->flags & NFT_SET_INACTIVE)
2642 return -ENOENT;
2474 if (!list_empty(&set->bindings)) 2643 if (!list_empty(&set->bindings))
2475 return -EBUSY; 2644 return -EBUSY;
2476 2645
2477 nf_tables_set_destroy(&ctx, set); 2646 err = nft_trans_set_add(&ctx, NFT_MSG_DELSET, set);
2647 if (err < 0)
2648 return err;
2649
2650 list_del(&set->list);
2651 ctx.table->use--;
2478 return 0; 2652 return 0;
2479} 2653}
2480 2654
@@ -2534,7 +2708,8 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
2534{ 2708{
2535 list_del(&binding->list); 2709 list_del(&binding->list);
2536 2710
2537 if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS) 2711 if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS &&
2712 !(set->flags & NFT_SET_INACTIVE))
2538 nf_tables_set_destroy(ctx, set); 2713 nf_tables_set_destroy(ctx, set);
2539} 2714}
2540 2715
@@ -2552,16 +2727,18 @@ static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX +
2552 [NFTA_SET_ELEM_LIST_TABLE] = { .type = NLA_STRING }, 2727 [NFTA_SET_ELEM_LIST_TABLE] = { .type = NLA_STRING },
2553 [NFTA_SET_ELEM_LIST_SET] = { .type = NLA_STRING }, 2728 [NFTA_SET_ELEM_LIST_SET] = { .type = NLA_STRING },
2554 [NFTA_SET_ELEM_LIST_ELEMENTS] = { .type = NLA_NESTED }, 2729 [NFTA_SET_ELEM_LIST_ELEMENTS] = { .type = NLA_NESTED },
2730 [NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 },
2555}; 2731};
2556 2732
2557static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, 2733static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx,
2558 const struct sk_buff *skb, 2734 const struct sk_buff *skb,
2559 const struct nlmsghdr *nlh, 2735 const struct nlmsghdr *nlh,
2560 const struct nlattr * const nla[]) 2736 const struct nlattr * const nla[],
2737 bool trans)
2561{ 2738{
2562 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 2739 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2563 const struct nft_af_info *afi; 2740 struct nft_af_info *afi;
2564 const struct nft_table *table; 2741 struct nft_table *table;
2565 struct net *net = sock_net(skb->sk); 2742 struct net *net = sock_net(skb->sk);
2566 2743
2567 afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false); 2744 afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
@@ -2571,6 +2748,8 @@ static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx,
2571 table = nf_tables_table_lookup(afi, nla[NFTA_SET_ELEM_LIST_TABLE]); 2748 table = nf_tables_table_lookup(afi, nla[NFTA_SET_ELEM_LIST_TABLE]);
2572 if (IS_ERR(table)) 2749 if (IS_ERR(table))
2573 return PTR_ERR(table); 2750 return PTR_ERR(table);
2751 if (!trans && (table->flags & NFT_TABLE_INACTIVE))
2752 return -ENOENT;
2574 2753
2575 nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla); 2754 nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla);
2576 return 0; 2755 return 0;
@@ -2644,13 +2823,16 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
2644 if (err < 0) 2823 if (err < 0)
2645 return err; 2824 return err;
2646 2825
2647 err = nft_ctx_init_from_elemattr(&ctx, cb->skb, cb->nlh, (void *)nla); 2826 err = nft_ctx_init_from_elemattr(&ctx, cb->skb, cb->nlh, (void *)nla,
2827 false);
2648 if (err < 0) 2828 if (err < 0)
2649 return err; 2829 return err;
2650 2830
2651 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]); 2831 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
2652 if (IS_ERR(set)) 2832 if (IS_ERR(set))
2653 return PTR_ERR(set); 2833 return PTR_ERR(set);
2834 if (set->flags & NFT_SET_INACTIVE)
2835 return -ENOENT;
2654 2836
2655 event = NFT_MSG_NEWSETELEM; 2837 event = NFT_MSG_NEWSETELEM;
2656 event |= NFNL_SUBSYS_NFTABLES << 8; 2838 event |= NFNL_SUBSYS_NFTABLES << 8;
@@ -2707,13 +2889,15 @@ static int nf_tables_getsetelem(struct sock *nlsk, struct sk_buff *skb,
2707 struct nft_ctx ctx; 2889 struct nft_ctx ctx;
2708 int err; 2890 int err;
2709 2891
2710 err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla); 2892 err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false);
2711 if (err < 0) 2893 if (err < 0)
2712 return err; 2894 return err;
2713 2895
2714 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]); 2896 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
2715 if (IS_ERR(set)) 2897 if (IS_ERR(set))
2716 return PTR_ERR(set); 2898 return PTR_ERR(set);
2899 if (set->flags & NFT_SET_INACTIVE)
2900 return -ENOENT;
2717 2901
2718 if (nlh->nlmsg_flags & NLM_F_DUMP) { 2902 if (nlh->nlmsg_flags & NLM_F_DUMP) {
2719 struct netlink_dump_control c = { 2903 struct netlink_dump_control c = {
@@ -2724,7 +2908,98 @@ static int nf_tables_getsetelem(struct sock *nlsk, struct sk_buff *skb,
2724 return -EOPNOTSUPP; 2908 return -EOPNOTSUPP;
2725} 2909}
2726 2910
2727static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set, 2911static int nf_tables_fill_setelem_info(struct sk_buff *skb,
2912 const struct nft_ctx *ctx, u32 seq,
2913 u32 portid, int event, u16 flags,
2914 const struct nft_set *set,
2915 const struct nft_set_elem *elem)
2916{
2917 struct nfgenmsg *nfmsg;
2918 struct nlmsghdr *nlh;
2919 struct nlattr *nest;
2920 int err;
2921
2922 event |= NFNL_SUBSYS_NFTABLES << 8;
2923 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
2924 flags);
2925 if (nlh == NULL)
2926 goto nla_put_failure;
2927
2928 nfmsg = nlmsg_data(nlh);
2929 nfmsg->nfgen_family = ctx->afi->family;
2930 nfmsg->version = NFNETLINK_V0;
2931 nfmsg->res_id = 0;
2932
2933 if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name))
2934 goto nla_put_failure;
2935 if (nla_put_string(skb, NFTA_SET_NAME, set->name))
2936 goto nla_put_failure;
2937
2938 nest = nla_nest_start(skb, NFTA_SET_ELEM_LIST_ELEMENTS);
2939 if (nest == NULL)
2940 goto nla_put_failure;
2941
2942 err = nf_tables_fill_setelem(skb, set, elem);
2943 if (err < 0)
2944 goto nla_put_failure;
2945
2946 nla_nest_end(skb, nest);
2947
2948 return nlmsg_end(skb, nlh);
2949
2950nla_put_failure:
2951 nlmsg_trim(skb, nlh);
2952 return -1;
2953}
2954
2955static int nf_tables_setelem_notify(const struct nft_ctx *ctx,
2956 const struct nft_set *set,
2957 const struct nft_set_elem *elem,
2958 int event, u16 flags)
2959{
2960 struct net *net = ctx->net;
2961 u32 portid = ctx->portid;
2962 struct sk_buff *skb;
2963 int err;
2964
2965 if (!ctx->report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
2966 return 0;
2967
2968 err = -ENOBUFS;
2969 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2970 if (skb == NULL)
2971 goto err;
2972
2973 err = nf_tables_fill_setelem_info(skb, ctx, 0, portid, event, flags,
2974 set, elem);
2975 if (err < 0) {
2976 kfree_skb(skb);
2977 goto err;
2978 }
2979
2980 err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, ctx->report,
2981 GFP_KERNEL);
2982err:
2983 if (err < 0)
2984 nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
2985 return err;
2986}
2987
2988static struct nft_trans *nft_trans_elem_alloc(struct nft_ctx *ctx,
2989 int msg_type,
2990 struct nft_set *set)
2991{
2992 struct nft_trans *trans;
2993
2994 trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_elem));
2995 if (trans == NULL)
2996 return NULL;
2997
2998 nft_trans_elem_set(trans) = set;
2999 return trans;
3000}
3001
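
The new nf_tables_fill_setelem_info() above follows the standard netlink construction pattern: nlmsg_put() reserves the header, string attributes and one nested NFTA_SET_ELEM_LIST_ELEMENTS block are appended, and on any failure nlmsg_trim() rolls the buffer back so a partially built message never escapes. The same all-or-nothing idea, reduced to a toy buffer builder:

    #include <stdio.h>
    #include <string.h>

    struct msg { char buf[256]; size_t len; };

    static int put(struct msg *m, const char *s)
    {
        size_t n = strlen(s);

        if (m->len + n > sizeof(m->buf))
            return -1;                  /* would overflow */
        memcpy(m->buf + m->len, s, n);
        m->len += n;
        return 0;
    }

    /* Build the whole record or roll back to 'mark', like nlmsg_trim(). */
    static int fill_record(struct msg *m, const char *table, const char *elem)
    {
        size_t mark = m->len;

        if (put(m, "table=") < 0 || put(m, table) < 0 ||
            put(m, " elem=") < 0 || put(m, elem) < 0 || put(m, ";") < 0) {
            m->len = mark;
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct msg m = { .len = 0 };

        fill_record(&m, "filter", "10.0.0.1");
        printf("%.*s\n", (int)m.len, m.buf);
        return 0;
    }
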
3002static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
2728 const struct nlattr *attr) 3003 const struct nlattr *attr)
2729{ 3004{
2730 struct nlattr *nla[NFTA_SET_ELEM_MAX + 1]; 3005 struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
@@ -2732,8 +3007,12 @@ static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set,
2732 struct nft_set_elem elem; 3007 struct nft_set_elem elem;
2733 struct nft_set_binding *binding; 3008 struct nft_set_binding *binding;
2734 enum nft_registers dreg; 3009 enum nft_registers dreg;
3010 struct nft_trans *trans;
2735 int err; 3011 int err;
2736 3012
3013 if (set->size && set->nelems == set->size)
3014 return -ENFILE;
3015
2737 err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr, 3016 err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr,
2738 nft_set_elem_policy); 3017 nft_set_elem_policy);
2739 if (err < 0) 3018 if (err < 0)
@@ -2786,7 +3065,7 @@ static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set,
2786 struct nft_ctx bind_ctx = { 3065 struct nft_ctx bind_ctx = {
2787 .afi = ctx->afi, 3066 .afi = ctx->afi,
2788 .table = ctx->table, 3067 .table = ctx->table,
2789 .chain = binding->chain, 3068 .chain = (struct nft_chain *)binding->chain,
2790 }; 3069 };
2791 3070
2792 err = nft_validate_data_load(&bind_ctx, dreg, 3071 err = nft_validate_data_load(&bind_ctx, dreg,
@@ -2796,12 +3075,20 @@ static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set,
2796 } 3075 }
2797 } 3076 }
2798 3077
3078 trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
3079 if (trans == NULL)
3080 goto err3;
3081
2799 err = set->ops->insert(set, &elem); 3082 err = set->ops->insert(set, &elem);
2800 if (err < 0) 3083 if (err < 0)
2801 goto err3; 3084 goto err4;
2802 3085
3086 nft_trans_elem(trans) = elem;
3087 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
2803 return 0; 3088 return 0;
2804 3089
3090err4:
3091 kfree(trans);
2805err3: 3092err3:
2806 if (nla[NFTA_SET_ELEM_DATA] != NULL) 3093 if (nla[NFTA_SET_ELEM_DATA] != NULL)
2807 nft_data_uninit(&elem.data, d2.type); 3094 nft_data_uninit(&elem.data, d2.type);
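
Note the unwind ordering in nft_add_set_elem(): the transaction is allocated before ->insert(), presumably so a successful insert never has to be unwound because a later allocation failed; if the insert itself fails, err4 frees only the transaction and falls through to err3 for the data the earlier steps initialized. The classic kernel goto-unwind shape, as a standalone example:

    #include <stdlib.h>

    static int do_three_steps(void)
    {
        void *a, *b, *c;
        int err = -1;

        a = malloc(16);
        if (!a)
            goto err1;
        b = malloc(16);
        if (!b)
            goto err2;
        c = malloc(16);
        if (!c)
            goto err3;

        free(c); free(b); free(a);      /* success path */
        return 0;

    err3:
        free(b);                        /* undo step 2 only */
    err2:
        free(a);                        /* then step 1 */
    err1:
        return err;
    }

    int main(void) { return do_three_steps(); }
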
@@ -2815,35 +3102,46 @@ static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb,
2815 const struct nlmsghdr *nlh, 3102 const struct nlmsghdr *nlh,
2816 const struct nlattr * const nla[]) 3103 const struct nlattr * const nla[])
2817{ 3104{
3105 struct net *net = sock_net(skb->sk);
2818 const struct nlattr *attr; 3106 const struct nlattr *attr;
2819 struct nft_set *set; 3107 struct nft_set *set;
2820 struct nft_ctx ctx; 3108 struct nft_ctx ctx;
2821 int rem, err; 3109 int rem, err = 0;
2822 3110
2823 err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla); 3111 err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, true);
2824 if (err < 0) 3112 if (err < 0)
2825 return err; 3113 return err;
2826 3114
2827 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]); 3115 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
2828 if (IS_ERR(set)) 3116 if (IS_ERR(set)) {
2829 return PTR_ERR(set); 3117 if (nla[NFTA_SET_ELEM_LIST_SET_ID]) {
3118 set = nf_tables_set_lookup_byid(net,
3119 nla[NFTA_SET_ELEM_LIST_SET_ID]);
3120 }
3121 if (IS_ERR(set))
3122 return PTR_ERR(set);
3123 }
3124
2830 if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT) 3125 if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
2831 return -EBUSY; 3126 return -EBUSY;
2832 3127
2833 nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) { 3128 nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
2834 err = nft_add_set_elem(&ctx, set, attr); 3129 err = nft_add_set_elem(&ctx, set, attr);
2835 if (err < 0) 3130 if (err < 0)
2836 return err; 3131 break;
3132
3133 set->nelems++;
2837 } 3134 }
2838 return 0; 3135 return err;
2839} 3136}
2840 3137
2841static int nft_del_setelem(const struct nft_ctx *ctx, struct nft_set *set, 3138static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
2842 const struct nlattr *attr) 3139 const struct nlattr *attr)
2843{ 3140{
2844 struct nlattr *nla[NFTA_SET_ELEM_MAX + 1]; 3141 struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
2845 struct nft_data_desc desc; 3142 struct nft_data_desc desc;
2846 struct nft_set_elem elem; 3143 struct nft_set_elem elem;
3144 struct nft_trans *trans;
2847 int err; 3145 int err;
2848 3146
2849 err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr, 3147 err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr,
@@ -2867,7 +3165,12 @@ static int nft_del_setelem(const struct nft_ctx *ctx, struct nft_set *set,
2867 if (err < 0) 3165 if (err < 0)
2868 goto err2; 3166 goto err2;
2869 3167
2870 set->ops->remove(set, &elem); 3168 trans = nft_trans_elem_alloc(ctx, NFT_MSG_DELSETELEM, set);
3169 if (trans == NULL)
3170 goto err2;
3171
3172 nft_trans_elem(trans) = elem;
3173 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
2871 3174
2872 nft_data_uninit(&elem.key, NFT_DATA_VALUE); 3175 nft_data_uninit(&elem.key, NFT_DATA_VALUE);
2873 if (set->flags & NFT_SET_MAP) 3176 if (set->flags & NFT_SET_MAP)
@@ -2886,9 +3189,9 @@ static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb,
2886 const struct nlattr *attr; 3189 const struct nlattr *attr;
2887 struct nft_set *set; 3190 struct nft_set *set;
2888 struct nft_ctx ctx; 3191 struct nft_ctx ctx;
2889 int rem, err; 3192 int rem, err = 0;
2890 3193
2891 err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla); 3194 err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false);
2892 if (err < 0) 3195 if (err < 0)
2893 return err; 3196 return err;
2894 3197
@@ -2901,14 +3204,16 @@ static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb,
2901 nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) { 3204 nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
2902 err = nft_del_setelem(&ctx, set, attr); 3205 err = nft_del_setelem(&ctx, set, attr);
2903 if (err < 0) 3206 if (err < 0)
2904 return err; 3207 break;
3208
3209 set->nelems--;
2905 } 3210 }
2906 return 0; 3211 return err;
2907} 3212}
2908 3213
2909static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = { 3214static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
2910 [NFT_MSG_NEWTABLE] = { 3215 [NFT_MSG_NEWTABLE] = {
2911 .call = nf_tables_newtable, 3216 .call_batch = nf_tables_newtable,
2912 .attr_count = NFTA_TABLE_MAX, 3217 .attr_count = NFTA_TABLE_MAX,
2913 .policy = nft_table_policy, 3218 .policy = nft_table_policy,
2914 }, 3219 },
@@ -2918,12 +3223,12 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
2918 .policy = nft_table_policy, 3223 .policy = nft_table_policy,
2919 }, 3224 },
2920 [NFT_MSG_DELTABLE] = { 3225 [NFT_MSG_DELTABLE] = {
2921 .call = nf_tables_deltable, 3226 .call_batch = nf_tables_deltable,
2922 .attr_count = NFTA_TABLE_MAX, 3227 .attr_count = NFTA_TABLE_MAX,
2923 .policy = nft_table_policy, 3228 .policy = nft_table_policy,
2924 }, 3229 },
2925 [NFT_MSG_NEWCHAIN] = { 3230 [NFT_MSG_NEWCHAIN] = {
2926 .call = nf_tables_newchain, 3231 .call_batch = nf_tables_newchain,
2927 .attr_count = NFTA_CHAIN_MAX, 3232 .attr_count = NFTA_CHAIN_MAX,
2928 .policy = nft_chain_policy, 3233 .policy = nft_chain_policy,
2929 }, 3234 },
@@ -2933,7 +3238,7 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
2933 .policy = nft_chain_policy, 3238 .policy = nft_chain_policy,
2934 }, 3239 },
2935 [NFT_MSG_DELCHAIN] = { 3240 [NFT_MSG_DELCHAIN] = {
2936 .call = nf_tables_delchain, 3241 .call_batch = nf_tables_delchain,
2937 .attr_count = NFTA_CHAIN_MAX, 3242 .attr_count = NFTA_CHAIN_MAX,
2938 .policy = nft_chain_policy, 3243 .policy = nft_chain_policy,
2939 }, 3244 },
@@ -2953,7 +3258,7 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
2953 .policy = nft_rule_policy, 3258 .policy = nft_rule_policy,
2954 }, 3259 },
2955 [NFT_MSG_NEWSET] = { 3260 [NFT_MSG_NEWSET] = {
2956 .call = nf_tables_newset, 3261 .call_batch = nf_tables_newset,
2957 .attr_count = NFTA_SET_MAX, 3262 .attr_count = NFTA_SET_MAX,
2958 .policy = nft_set_policy, 3263 .policy = nft_set_policy,
2959 }, 3264 },
@@ -2963,12 +3268,12 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
2963 .policy = nft_set_policy, 3268 .policy = nft_set_policy,
2964 }, 3269 },
2965 [NFT_MSG_DELSET] = { 3270 [NFT_MSG_DELSET] = {
2966 .call = nf_tables_delset, 3271 .call_batch = nf_tables_delset,
2967 .attr_count = NFTA_SET_MAX, 3272 .attr_count = NFTA_SET_MAX,
2968 .policy = nft_set_policy, 3273 .policy = nft_set_policy,
2969 }, 3274 },
2970 [NFT_MSG_NEWSETELEM] = { 3275 [NFT_MSG_NEWSETELEM] = {
2971 .call = nf_tables_newsetelem, 3276 .call_batch = nf_tables_newsetelem,
2972 .attr_count = NFTA_SET_ELEM_LIST_MAX, 3277 .attr_count = NFTA_SET_ELEM_LIST_MAX,
2973 .policy = nft_set_elem_list_policy, 3278 .policy = nft_set_elem_list_policy,
2974 }, 3279 },
@@ -2978,12 +3283,282 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
2978 .policy = nft_set_elem_list_policy, 3283 .policy = nft_set_elem_list_policy,
2979 }, 3284 },
2980 [NFT_MSG_DELSETELEM] = { 3285 [NFT_MSG_DELSETELEM] = {
2981 .call = nf_tables_delsetelem, 3286 .call_batch = nf_tables_delsetelem,
2982 .attr_count = NFTA_SET_ELEM_LIST_MAX, 3287 .attr_count = NFTA_SET_ELEM_LIST_MAX,
2983 .policy = nft_set_elem_list_policy, 3288 .policy = nft_set_elem_list_policy,
2984 }, 3289 },
2985}; 3290};
2986 3291
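
In the callback table, every mutating command moves from .call to .call_batch while the GET handlers keep .call. Assuming nfnetlink invokes .call_batch only from its batched receive path, where commit or abort runs afterwards, this is what forces all state changes through the transaction machinery. A toy dispatcher with the same split, names invented:

    #include <stdio.h>

    struct callback {
        int (*call)(void);        /* allowed outside a batch */
        int (*call_batch)(void);  /* only valid inside a batch */
    };

    static int get_handler(void) { puts("get");    return 0; }
    static int new_handler(void) { puts("newset"); return 0; }

    static const struct callback cbs[] = {
        { .call = get_handler },
        { .call_batch = new_handler },
    };

    static int dispatch(int type, int in_batch)
    {
        const struct callback *cb = &cbs[type];

        if (in_batch && cb->call_batch)
            return cb->call_batch();
        if (!in_batch && cb->call)
            return cb->call();
        return -95;               /* -EOPNOTSUPP: wrong context */
    }

    int main(void)
    {
        dispatch(0, 0);           /* GET outside a batch: ok */
        dispatch(1, 1);           /* NEWSET inside a batch: ok */
        return dispatch(1, 0) < 0 ? 0 : 1;  /* NEWSET outside: refused */
    }
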
3292static void nft_chain_commit_update(struct nft_trans *trans)
3293{
3294 struct nft_base_chain *basechain;
3295
3296 if (nft_trans_chain_name(trans)[0])
3297 strcpy(trans->ctx.chain->name, nft_trans_chain_name(trans));
3298
3299 if (!(trans->ctx.chain->flags & NFT_BASE_CHAIN))
3300 return;
3301
3302 basechain = nft_base_chain(trans->ctx.chain);
3303 nft_chain_stats_replace(basechain, nft_trans_chain_stats(trans));
3304
3305 switch (nft_trans_chain_policy(trans)) {
3306 case NF_DROP:
3307 case NF_ACCEPT:
3308 basechain->policy = nft_trans_chain_policy(trans);
3309 break;
3310 }
3311}
3312
 3313/* Schedule objects for release via rcu to make sure no packets are accessing
3314 * removed rules.
3315 */
3316static void nf_tables_commit_release_rcu(struct rcu_head *rt)
3317{
3318 struct nft_trans *trans = container_of(rt, struct nft_trans, rcu_head);
3319
3320 switch (trans->msg_type) {
3321 case NFT_MSG_DELTABLE:
3322 nf_tables_table_destroy(&trans->ctx);
3323 break;
3324 case NFT_MSG_DELCHAIN:
3325 nf_tables_chain_destroy(trans->ctx.chain);
3326 break;
3327 case NFT_MSG_DELRULE:
3328 nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
3329 break;
3330 case NFT_MSG_DELSET:
3331 nft_set_destroy(nft_trans_set(trans));
3332 break;
3333 }
3334 kfree(trans);
3335}
3336
3337static int nf_tables_commit(struct sk_buff *skb)
3338{
3339 struct net *net = sock_net(skb->sk);
3340 struct nft_trans *trans, *next;
3341 struct nft_set *set;
3342
3343 /* Bump generation counter, invalidate any dump in progress */
3344 net->nft.genctr++;
3345
3346 /* A new generation has just started */
3347 net->nft.gencursor = gencursor_next(net);
3348
3349 /* Make sure all packets have left the previous generation before
3350 * purging old rules.
3351 */
3352 synchronize_rcu();
3353
3354 list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
3355 switch (trans->msg_type) {
3356 case NFT_MSG_NEWTABLE:
3357 if (nft_trans_table_update(trans)) {
3358 if (!nft_trans_table_enable(trans)) {
3359 nf_tables_table_disable(trans->ctx.afi,
3360 trans->ctx.table);
3361 trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
3362 }
3363 } else {
3364 trans->ctx.table->flags &= ~NFT_TABLE_INACTIVE;
3365 }
3366 nf_tables_table_notify(&trans->ctx, NFT_MSG_NEWTABLE);
3367 nft_trans_destroy(trans);
3368 break;
3369 case NFT_MSG_DELTABLE:
3370 nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE);
3371 break;
3372 case NFT_MSG_NEWCHAIN:
3373 if (nft_trans_chain_update(trans))
3374 nft_chain_commit_update(trans);
3375 else
3376 trans->ctx.chain->flags &= ~NFT_CHAIN_INACTIVE;
3377
3378 nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
3379 nft_trans_destroy(trans);
3380 break;
3381 case NFT_MSG_DELCHAIN:
3382 nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN);
3383 if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) &&
3384 trans->ctx.chain->flags & NFT_BASE_CHAIN) {
3385 nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops,
3386 trans->ctx.afi->nops);
3387 }
3388 break;
3389 case NFT_MSG_NEWRULE:
3390 nft_rule_clear(trans->ctx.net, nft_trans_rule(trans));
3391 nf_tables_rule_notify(&trans->ctx,
3392 nft_trans_rule(trans),
3393 NFT_MSG_NEWRULE);
3394 nft_trans_destroy(trans);
3395 break;
3396 case NFT_MSG_DELRULE:
3397 list_del_rcu(&nft_trans_rule(trans)->list);
3398 nf_tables_rule_notify(&trans->ctx,
3399 nft_trans_rule(trans),
3400 NFT_MSG_DELRULE);
3401 break;
3402 case NFT_MSG_NEWSET:
3403 nft_trans_set(trans)->flags &= ~NFT_SET_INACTIVE;
3404 /* This avoids hitting -EBUSY when deleting the table
3405 * from the transaction.
3406 */
3407 if (nft_trans_set(trans)->flags & NFT_SET_ANONYMOUS &&
3408 !list_empty(&nft_trans_set(trans)->bindings))
3409 trans->ctx.table->use--;
3410
3411 nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
3412 NFT_MSG_NEWSET, GFP_KERNEL);
3413 nft_trans_destroy(trans);
3414 break;
3415 case NFT_MSG_DELSET:
3416 nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
3417 NFT_MSG_DELSET, GFP_KERNEL);
3418 break;
3419 case NFT_MSG_NEWSETELEM:
3420 nf_tables_setelem_notify(&trans->ctx,
3421 nft_trans_elem_set(trans),
3422 &nft_trans_elem(trans),
3423 NFT_MSG_NEWSETELEM, 0);
3424 nft_trans_destroy(trans);
3425 break;
3426 case NFT_MSG_DELSETELEM:
3427 nf_tables_setelem_notify(&trans->ctx,
3428 nft_trans_elem_set(trans),
3429 &nft_trans_elem(trans),
3430 NFT_MSG_DELSETELEM, 0);
3431 set = nft_trans_elem_set(trans);
3432 set->ops->get(set, &nft_trans_elem(trans));
3433 set->ops->remove(set, &nft_trans_elem(trans));
3434 nft_trans_destroy(trans);
3435 break;
3436 }
3437 }
3438
3439 list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
3440 list_del(&trans->list);
3441 trans->ctx.nla = NULL;
3442 call_rcu(&trans->rcu_head, nf_tables_commit_release_rcu);
3443 }
3444
3445 return 0;
3446}
3447
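
nf_tables_commit() is two passes over the same list: after the generation cursor flips and synchronize_rcu() lets old-generation readers drain, the first pass applies every queued change and sends notifications; the second detaches each transaction and hands objects the old generation was still using to call_rcu() for freeing. A compressed model of that apply-then-release structure (no real RCU here; the plain free stands in for call_rcu()):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    enum op { OP_NEW, OP_DEL };
    struct trans { struct trans *next; enum op op; char *obj; };

    /* The kernel flips the generation cursor and calls synchronize_rcu()
     * before this walk, so no packet still sees the old generation.
     */
    static void commit(struct trans *list)
    {
        struct trans *t, *next;

        /* Pass 1: apply every queued change and notify userspace. */
        for (t = list; t; t = t->next)
            printf("%s %s\n",
                   t->op == OP_NEW ? "activate" : "unlink", t->obj);

        /* Pass 2: detach transactions and free what the old generation
         * used; the kernel defers these frees via call_rcu().
         */
        for (t = list; t; t = next) {
            next = t->next;
            free(t->obj);
            free(t);
        }
    }

    int main(void)
    {
        struct trans *del = malloc(sizeof(*del));
        struct trans *new = malloc(sizeof(*new));

        new->op = OP_NEW; new->obj = strdup("set blackhole"); new->next = del;
        del->op = OP_DEL; del->obj = strdup("rule 42");       del->next = NULL;
        commit(new);
        return 0;
    }
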
 3448/* Schedule objects for release via rcu to make sure no packets are accessing
3449 * aborted rules.
3450 */
3451static void nf_tables_abort_release_rcu(struct rcu_head *rt)
3452{
3453 struct nft_trans *trans = container_of(rt, struct nft_trans, rcu_head);
3454
3455 switch (trans->msg_type) {
3456 case NFT_MSG_NEWTABLE:
3457 nf_tables_table_destroy(&trans->ctx);
3458 break;
3459 case NFT_MSG_NEWCHAIN:
3460 nf_tables_chain_destroy(trans->ctx.chain);
3461 break;
3462 case NFT_MSG_NEWRULE:
3463 nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
3464 break;
3465 case NFT_MSG_NEWSET:
3466 nft_set_destroy(nft_trans_set(trans));
3467 break;
3468 }
3469 kfree(trans);
3470}
3471
3472static int nf_tables_abort(struct sk_buff *skb)
3473{
3474 struct net *net = sock_net(skb->sk);
3475 struct nft_trans *trans, *next;
3476 struct nft_set *set;
3477
3478 list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
3479 switch (trans->msg_type) {
3480 case NFT_MSG_NEWTABLE:
3481 if (nft_trans_table_update(trans)) {
3482 if (nft_trans_table_enable(trans)) {
3483 nf_tables_table_disable(trans->ctx.afi,
3484 trans->ctx.table);
3485 trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
3486 }
3487 nft_trans_destroy(trans);
3488 } else {
3489 list_del(&trans->ctx.table->list);
3490 }
3491 break;
3492 case NFT_MSG_DELTABLE:
3493 list_add_tail(&trans->ctx.table->list,
3494 &trans->ctx.afi->tables);
3495 nft_trans_destroy(trans);
3496 break;
3497 case NFT_MSG_NEWCHAIN:
3498 if (nft_trans_chain_update(trans)) {
3499 if (nft_trans_chain_stats(trans))
3500 free_percpu(nft_trans_chain_stats(trans));
3501
3502 nft_trans_destroy(trans);
3503 } else {
3504 trans->ctx.table->use--;
3505 list_del(&trans->ctx.chain->list);
3506 if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) &&
3507 trans->ctx.chain->flags & NFT_BASE_CHAIN) {
3508 nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops,
3509 trans->ctx.afi->nops);
3510 }
3511 }
3512 break;
3513 case NFT_MSG_DELCHAIN:
3514 trans->ctx.table->use++;
3515 list_add_tail(&trans->ctx.chain->list,
3516 &trans->ctx.table->chains);
3517 nft_trans_destroy(trans);
3518 break;
3519 case NFT_MSG_NEWRULE:
3520 trans->ctx.chain->use--;
3521 list_del_rcu(&nft_trans_rule(trans)->list);
3522 break;
3523 case NFT_MSG_DELRULE:
3524 trans->ctx.chain->use++;
3525 nft_rule_clear(trans->ctx.net, nft_trans_rule(trans));
3526 nft_trans_destroy(trans);
3527 break;
3528 case NFT_MSG_NEWSET:
3529 trans->ctx.table->use--;
3530 list_del(&nft_trans_set(trans)->list);
3531 break;
3532 case NFT_MSG_DELSET:
3533 trans->ctx.table->use++;
3534 list_add_tail(&nft_trans_set(trans)->list,
3535 &trans->ctx.table->sets);
3536 nft_trans_destroy(trans);
3537 break;
3538 case NFT_MSG_NEWSETELEM:
3539 nft_trans_elem_set(trans)->nelems--;
3540 set = nft_trans_elem_set(trans);
3541 set->ops->get(set, &nft_trans_elem(trans));
3542 set->ops->remove(set, &nft_trans_elem(trans));
3543 nft_trans_destroy(trans);
3544 break;
3545 case NFT_MSG_DELSETELEM:
3546 nft_trans_elem_set(trans)->nelems++;
3547 nft_trans_destroy(trans);
3548 break;
3549 }
3550 }
3551
3552 list_for_each_entry_safe_reverse(trans, next,
3553 &net->nft.commit_list, list) {
3554 list_del(&trans->list);
3555 trans->ctx.nla = NULL;
3556 call_rcu(&trans->rcu_head, nf_tables_abort_release_rcu);
3557 }
3558
3559 return 0;
3560}
3561
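
The abort path is the mirror image: each transaction is undone in place (use counters restored, list insertions reverted), and the final release walk uses list_for_each_entry_safe_reverse() so objects are torn down in the opposite order of creation; a rule added on top of a new chain in the same batch is released before the chain it depends on. The LIFO idea in miniature:

    #include <stdio.h>

    /* Undo a batch of N dependent steps strictly last-in, first-out. */
    static void abort_batch(const char *steps[], int n)
    {
        while (n-- > 0)
            printf("undo %s\n", steps[n]);
    }

    int main(void)
    {
        const char *steps[] = { "new table", "new chain", "new rule" };

        abort_batch(steps, 3);   /* rule, then chain, then table */
        return 0;
    }
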
2987static const struct nfnetlink_subsystem nf_tables_subsys = { 3562static const struct nfnetlink_subsystem nf_tables_subsys = {
2988 .name = "nf_tables", 3563 .name = "nf_tables",
2989 .subsys_id = NFNL_SUBSYS_NFTABLES, 3564 .subsys_id = NFNL_SUBSYS_NFTABLES,
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 23ef77c60fff..c138b8fbe280 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -399,19 +399,17 @@ static void nfnetlink_rcv(struct sk_buff *skb)
399} 399}
400 400
401#ifdef CONFIG_MODULES 401#ifdef CONFIG_MODULES
402static void nfnetlink_bind(int group) 402static int nfnetlink_bind(int group)
403{ 403{
404 const struct nfnetlink_subsystem *ss; 404 const struct nfnetlink_subsystem *ss;
405 int type = nfnl_group2type[group]; 405 int type = nfnl_group2type[group];
406 406
407 rcu_read_lock(); 407 rcu_read_lock();
408 ss = nfnetlink_get_subsys(type); 408 ss = nfnetlink_get_subsys(type);
409 if (!ss) {
410 rcu_read_unlock();
411 request_module("nfnetlink-subsys-%d", type);
412 return;
413 }
414 rcu_read_unlock(); 409 rcu_read_unlock();
410 if (!ss)
411 request_module("nfnetlink-subsys-%d", type);
412 return 0;
415} 413}
416#endif 414#endif
417 415
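
Besides returning int so the netlink core can propagate bind errors, the reworked nfnetlink_bind() fixes a sleep-under-RCU hazard: request_module() can sleep, so the subsystem lookup result is captured inside rcu_read_lock() and the module load happens only after the unlock. The general shape, decide under the lock, perform the blocking action outside it, in a userspace analogy with a mutex (the kernel constraint is stricter: no sleeping at all in an RCU read-side section):

    #include <pthread.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int registered;            /* protected by 'lock' */

    static void slow_load(void) { sleep(1); }  /* stands in for request_module() */

    static void bind_group(void)
    {
        int need_load;

        pthread_mutex_lock(&lock);
        need_load = !registered;      /* decide while protected */
        pthread_mutex_unlock(&lock);

        if (need_load)
            slow_load();              /* block only after dropping the lock */
    }

    int main(void) { bind_group(); return 0; }
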
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index c7b6d466a662..54af9853e2cd 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -32,18 +32,24 @@ static LIST_HEAD(nfnl_acct_list);
32struct nf_acct { 32struct nf_acct {
33 atomic64_t pkts; 33 atomic64_t pkts;
34 atomic64_t bytes; 34 atomic64_t bytes;
35 unsigned long flags;
35 struct list_head head; 36 struct list_head head;
36 atomic_t refcnt; 37 atomic_t refcnt;
37 char name[NFACCT_NAME_MAX]; 38 char name[NFACCT_NAME_MAX];
38 struct rcu_head rcu_head; 39 struct rcu_head rcu_head;
40 char data[0];
39}; 41};
40 42
43#define NFACCT_F_QUOTA (NFACCT_F_QUOTA_PKTS | NFACCT_F_QUOTA_BYTES)
44
41static int 45static int
42nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb, 46nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
43 const struct nlmsghdr *nlh, const struct nlattr * const tb[]) 47 const struct nlmsghdr *nlh, const struct nlattr * const tb[])
44{ 48{
45 struct nf_acct *nfacct, *matching = NULL; 49 struct nf_acct *nfacct, *matching = NULL;
46 char *acct_name; 50 char *acct_name;
51 unsigned int size = 0;
52 u32 flags = 0;
47 53
48 if (!tb[NFACCT_NAME]) 54 if (!tb[NFACCT_NAME])
49 return -EINVAL; 55 return -EINVAL;
@@ -68,15 +74,38 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
68 /* reset counters if you request a replacement. */ 74 /* reset counters if you request a replacement. */
69 atomic64_set(&matching->pkts, 0); 75 atomic64_set(&matching->pkts, 0);
70 atomic64_set(&matching->bytes, 0); 76 atomic64_set(&matching->bytes, 0);
77 smp_mb__before_clear_bit();
78 /* reset overquota flag if quota is enabled. */
79 if ((matching->flags & NFACCT_F_QUOTA))
80 clear_bit(NFACCT_F_OVERQUOTA, &matching->flags);
71 return 0; 81 return 0;
72 } 82 }
73 return -EBUSY; 83 return -EBUSY;
74 } 84 }
75 85
76 nfacct = kzalloc(sizeof(struct nf_acct), GFP_KERNEL); 86 if (tb[NFACCT_FLAGS]) {
87 flags = ntohl(nla_get_be32(tb[NFACCT_FLAGS]));
88 if (flags & ~NFACCT_F_QUOTA)
89 return -EOPNOTSUPP;
90 if ((flags & NFACCT_F_QUOTA) == NFACCT_F_QUOTA)
91 return -EINVAL;
92 if (flags & NFACCT_F_OVERQUOTA)
93 return -EINVAL;
94
95 size += sizeof(u64);
96 }
97
98 nfacct = kzalloc(sizeof(struct nf_acct) + size, GFP_KERNEL);
77 if (nfacct == NULL) 99 if (nfacct == NULL)
78 return -ENOMEM; 100 return -ENOMEM;
79 101
102 if (flags & NFACCT_F_QUOTA) {
103 u64 *quota = (u64 *)nfacct->data;
104
105 *quota = be64_to_cpu(nla_get_be64(tb[NFACCT_QUOTA]));
106 nfacct->flags = flags;
107 }
108
80 strncpy(nfacct->name, nla_data(tb[NFACCT_NAME]), NFACCT_NAME_MAX); 109 strncpy(nfacct->name, nla_data(tb[NFACCT_NAME]), NFACCT_NAME_MAX);
81 110
82 if (tb[NFACCT_BYTES]) { 111 if (tb[NFACCT_BYTES]) {
@@ -117,6 +146,9 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
117 if (type == NFNL_MSG_ACCT_GET_CTRZERO) { 146 if (type == NFNL_MSG_ACCT_GET_CTRZERO) {
118 pkts = atomic64_xchg(&acct->pkts, 0); 147 pkts = atomic64_xchg(&acct->pkts, 0);
119 bytes = atomic64_xchg(&acct->bytes, 0); 148 bytes = atomic64_xchg(&acct->bytes, 0);
149 smp_mb__before_clear_bit();
150 if (acct->flags & NFACCT_F_QUOTA)
151 clear_bit(NFACCT_F_OVERQUOTA, &acct->flags);
120 } else { 152 } else {
121 pkts = atomic64_read(&acct->pkts); 153 pkts = atomic64_read(&acct->pkts);
122 bytes = atomic64_read(&acct->bytes); 154 bytes = atomic64_read(&acct->bytes);
@@ -125,7 +157,13 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
125 nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes)) || 157 nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes)) ||
126 nla_put_be32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt)))) 158 nla_put_be32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt))))
127 goto nla_put_failure; 159 goto nla_put_failure;
160 if (acct->flags & NFACCT_F_QUOTA) {
161 u64 *quota = (u64 *)acct->data;
128 162
163 if (nla_put_be32(skb, NFACCT_FLAGS, htonl(acct->flags)) ||
164 nla_put_be64(skb, NFACCT_QUOTA, cpu_to_be64(*quota)))
165 goto nla_put_failure;
166 }
129 nlmsg_end(skb, nlh); 167 nlmsg_end(skb, nlh);
130 return skb->len; 168 return skb->len;
131 169
@@ -270,6 +308,8 @@ static const struct nla_policy nfnl_acct_policy[NFACCT_MAX+1] = {
270 [NFACCT_NAME] = { .type = NLA_NUL_STRING, .len = NFACCT_NAME_MAX-1 }, 308 [NFACCT_NAME] = { .type = NLA_NUL_STRING, .len = NFACCT_NAME_MAX-1 },
271 [NFACCT_BYTES] = { .type = NLA_U64 }, 309 [NFACCT_BYTES] = { .type = NLA_U64 },
272 [NFACCT_PKTS] = { .type = NLA_U64 }, 310 [NFACCT_PKTS] = { .type = NLA_U64 },
311 [NFACCT_FLAGS] = { .type = NLA_U32 },
312 [NFACCT_QUOTA] = { .type = NLA_U64 },
273}; 313};
274 314
275static const struct nfnl_callback nfnl_acct_cb[NFNL_MSG_ACCT_MAX] = { 315static const struct nfnl_callback nfnl_acct_cb[NFNL_MSG_ACCT_MAX] = {
@@ -336,6 +376,50 @@ void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct)
336} 376}
337EXPORT_SYMBOL_GPL(nfnl_acct_update); 377EXPORT_SYMBOL_GPL(nfnl_acct_update);
338 378
379static void nfnl_overquota_report(struct nf_acct *nfacct)
380{
381 int ret;
382 struct sk_buff *skb;
383
384 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
385 if (skb == NULL)
386 return;
387
388 ret = nfnl_acct_fill_info(skb, 0, 0, NFNL_MSG_ACCT_OVERQUOTA, 0,
389 nfacct);
390 if (ret <= 0) {
391 kfree_skb(skb);
392 return;
393 }
394 netlink_broadcast(init_net.nfnl, skb, 0, NFNLGRP_ACCT_QUOTA,
395 GFP_ATOMIC);
396}
397
398int nfnl_acct_overquota(const struct sk_buff *skb, struct nf_acct *nfacct)
399{
400 u64 now;
401 u64 *quota;
402 int ret = NFACCT_UNDERQUOTA;
403
404 /* no place here if we don't have a quota */
405 if (!(nfacct->flags & NFACCT_F_QUOTA))
406 return NFACCT_NO_QUOTA;
407
408 quota = (u64 *)nfacct->data;
409 now = (nfacct->flags & NFACCT_F_QUOTA_PKTS) ?
410 atomic64_read(&nfacct->pkts) : atomic64_read(&nfacct->bytes);
411
412 ret = now > *quota;
413
414 if (now >= *quota &&
415 !test_and_set_bit(NFACCT_F_OVERQUOTA, &nfacct->flags)) {
416 nfnl_overquota_report(nfacct);
417 }
418
419 return ret;
420}
421EXPORT_SYMBOL_GPL(nfnl_acct_overquota);
422
339static int __init nfnl_acct_init(void) 423static int __init nfnl_acct_init(void)
340{ 424{
341 int ret; 425 int ret;
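
The accounting object grows a flags word and a trailing char data[0] flexible array that carries the 64-bit quota only when NFACCT_F_QUOTA_PKTS or NFACCT_F_QUOTA_BYTES is set, so unquoted counters pay no extra memory. nfnl_acct_overquota() compares the live counter against the quota and relies on test_and_set_bit() so the NFNLGRP_ACCT_QUOTA broadcast fires exactly once per crossing; clearing the bit on counter reset re-arms it. The once-only edge detection, modeled with C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct counter {
        _Atomic uint64_t bytes;
        atomic_flag over;             /* set once when quota first exceeded */
        uint64_t quota;
    };

    /* Returns true only for the call that first crosses the quota. */
    static bool account(struct counter *c, uint64_t n)
    {
        uint64_t now = atomic_fetch_add(&c->bytes, n) + n;

        if (now >= c->quota && !atomic_flag_test_and_set(&c->over))
            return true;    /* kernel: broadcast NFNL_MSG_ACCT_OVERQUOTA */
        return false;
    }

    int main(void)
    {
        struct counter c = { .quota = 100 };

        atomic_flag_clear(&c.over);
        for (int i = 0; i < 5; i++)
            if (account(&c, 30))
                printf("over quota at packet %d\n", i);
        return 0;
    }
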
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index bd0d41e69341..cc5603016242 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -215,22 +215,14 @@ static void nft_ct_l3proto_module_put(uint8_t family)
215 nf_ct_l3proto_module_put(family); 215 nf_ct_l3proto_module_put(family);
216} 216}
217 217
218static int nft_ct_init_validate_get(const struct nft_expr *expr, 218static int nft_ct_get_init(const struct nft_ctx *ctx,
219 const struct nlattr * const tb[]) 219 const struct nft_expr *expr,
220 const struct nlattr * const tb[])
220{ 221{
221 struct nft_ct *priv = nft_expr_priv(expr); 222 struct nft_ct *priv = nft_expr_priv(expr);
223 int err;
222 224
223 if (tb[NFTA_CT_DIRECTION] != NULL) { 225 priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
224 priv->dir = nla_get_u8(tb[NFTA_CT_DIRECTION]);
225 switch (priv->dir) {
226 case IP_CT_DIR_ORIGINAL:
227 case IP_CT_DIR_REPLY:
228 break;
229 default:
230 return -EINVAL;
231 }
232 }
233
234 switch (priv->key) { 226 switch (priv->key) {
235 case NFT_CT_STATE: 227 case NFT_CT_STATE:
236 case NFT_CT_DIRECTION: 228 case NFT_CT_DIRECTION:
@@ -262,55 +254,55 @@ static int nft_ct_init_validate_get(const struct nft_expr *expr,
262 return -EOPNOTSUPP; 254 return -EOPNOTSUPP;
263 } 255 }
264 256
265 return 0; 257 if (tb[NFTA_CT_DIRECTION] != NULL) {
266} 258 priv->dir = nla_get_u8(tb[NFTA_CT_DIRECTION]);
267 259 switch (priv->dir) {
268static int nft_ct_init_validate_set(uint32_t key) 260 case IP_CT_DIR_ORIGINAL:
269{ 261 case IP_CT_DIR_REPLY:
270 switch (key) { 262 break;
271 case NFT_CT_MARK: 263 default:
272 break; 264 return -EINVAL;
273 default: 265 }
274 return -EOPNOTSUPP;
275 } 266 }
276 267
268 priv->dreg = ntohl(nla_get_be32(tb[NFTA_CT_DREG]));
269 err = nft_validate_output_register(priv->dreg);
270 if (err < 0)
271 return err;
272
273 err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
274 if (err < 0)
275 return err;
276
277 err = nft_ct_l3proto_try_module_get(ctx->afi->family);
278 if (err < 0)
279 return err;
280
277 return 0; 281 return 0;
278} 282}
279 283
280static int nft_ct_init(const struct nft_ctx *ctx, 284static int nft_ct_set_init(const struct nft_ctx *ctx,
281 const struct nft_expr *expr, 285 const struct nft_expr *expr,
282 const struct nlattr * const tb[]) 286 const struct nlattr * const tb[])
283{ 287{
284 struct nft_ct *priv = nft_expr_priv(expr); 288 struct nft_ct *priv = nft_expr_priv(expr);
285 int err; 289 int err;
286 290
287 priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY])); 291 priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
288 292 switch (priv->key) {
289 if (tb[NFTA_CT_DREG]) { 293#ifdef CONFIG_NF_CONNTRACK_MARK
290 err = nft_ct_init_validate_get(expr, tb); 294 case NFT_CT_MARK:
291 if (err < 0) 295 break;
292 return err; 296#endif
293 297 default:
294 priv->dreg = ntohl(nla_get_be32(tb[NFTA_CT_DREG])); 298 return -EOPNOTSUPP;
295 err = nft_validate_output_register(priv->dreg);
296 if (err < 0)
297 return err;
298
299 err = nft_validate_data_load(ctx, priv->dreg, NULL,
300 NFT_DATA_VALUE);
301 if (err < 0)
302 return err;
303 } else {
304 err = nft_ct_init_validate_set(priv->key);
305 if (err < 0)
306 return err;
307
308 priv->sreg = ntohl(nla_get_be32(tb[NFTA_CT_SREG]));
309 err = nft_validate_input_register(priv->sreg);
310 if (err < 0)
311 return err;
312 } 299 }
313 300
301 priv->sreg = ntohl(nla_get_be32(tb[NFTA_CT_SREG]));
302 err = nft_validate_input_register(priv->sreg);
303 if (err < 0)
304 return err;
305
314 err = nft_ct_l3proto_try_module_get(ctx->afi->family); 306 err = nft_ct_l3proto_try_module_get(ctx->afi->family);
315 if (err < 0) 307 if (err < 0)
316 return err; 308 return err;
@@ -370,7 +362,7 @@ static const struct nft_expr_ops nft_ct_get_ops = {
370 .type = &nft_ct_type, 362 .type = &nft_ct_type,
371 .size = NFT_EXPR_SIZE(sizeof(struct nft_ct)), 363 .size = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
372 .eval = nft_ct_get_eval, 364 .eval = nft_ct_get_eval,
373 .init = nft_ct_init, 365 .init = nft_ct_get_init,
374 .destroy = nft_ct_destroy, 366 .destroy = nft_ct_destroy,
375 .dump = nft_ct_get_dump, 367 .dump = nft_ct_get_dump,
376}; 368};
@@ -379,7 +371,7 @@ static const struct nft_expr_ops nft_ct_set_ops = {
379 .type = &nft_ct_type, 371 .type = &nft_ct_type,
380 .size = NFT_EXPR_SIZE(sizeof(struct nft_ct)), 372 .size = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
381 .eval = nft_ct_set_eval, 373 .eval = nft_ct_set_eval,
382 .init = nft_ct_init, 374 .init = nft_ct_set_init,
383 .destroy = nft_ct_destroy, 375 .destroy = nft_ct_destroy,
384 .dump = nft_ct_set_dump, 376 .dump = nft_ct_set_dump,
385}; 377};
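
nft_ct loses its combined nft_ct_init(), which branched on whether NFTA_CT_DREG was present, in favour of dedicated nft_ct_get_init() and nft_ct_set_init() wired directly into the two ops tables; the set path also compiles its only supported key, NFT_CT_MARK, in only under CONFIG_NF_CONNTRACK_MARK. The same pattern stripped down:

    #include <stdio.h>

    struct expr_ops { const char *name; int (*init)(void); };

    static int get_init(void) { puts("validate dreg + key"); return 0; }
    static int set_init(void) { puts("validate sreg + key"); return 0; }

    /* Two ops tables, each with its own init: no runtime branching on
     * which attribute happened to be present.
     */
    static const struct expr_ops get_ops = { .name = "ct get", .init = get_init };
    static const struct expr_ops set_ops = { .name = "ct set", .init = set_init };

    int main(void)
    {
        return get_ops.init() || set_ops.init();
    }
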
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 3b1ad876d6b0..4080ed6a072b 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -12,6 +12,7 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/list.h> 14#include <linux/list.h>
15#include <linux/log2.h>
15#include <linux/jhash.h> 16#include <linux/jhash.h>
16#include <linux/netlink.h> 17#include <linux/netlink.h>
17#include <linux/vmalloc.h> 18#include <linux/vmalloc.h>
@@ -19,7 +20,7 @@
19#include <linux/netfilter/nf_tables.h> 20#include <linux/netfilter/nf_tables.h>
20#include <net/netfilter/nf_tables.h> 21#include <net/netfilter/nf_tables.h>
21 22
22#define NFT_HASH_MIN_SIZE 4 23#define NFT_HASH_MIN_SIZE 4UL
23 24
24struct nft_hash { 25struct nft_hash {
25 struct nft_hash_table __rcu *tbl; 26 struct nft_hash_table __rcu *tbl;
@@ -27,7 +28,6 @@ struct nft_hash {
27 28
28struct nft_hash_table { 29struct nft_hash_table {
29 unsigned int size; 30 unsigned int size;
30 unsigned int elements;
31 struct nft_hash_elem __rcu *buckets[]; 31 struct nft_hash_elem __rcu *buckets[];
32}; 32};
33 33
@@ -76,10 +76,12 @@ static bool nft_hash_lookup(const struct nft_set *set,
76 76
77static void nft_hash_tbl_free(const struct nft_hash_table *tbl) 77static void nft_hash_tbl_free(const struct nft_hash_table *tbl)
78{ 78{
79 if (is_vmalloc_addr(tbl)) 79 kvfree(tbl);
80 vfree(tbl); 80}
81 else 81
82 kfree(tbl); 82static unsigned int nft_hash_tbl_size(unsigned int nelem)
83{
84 return max(roundup_pow_of_two(nelem * 4 / 3), NFT_HASH_MIN_SIZE);
83} 85}
84 86
85static struct nft_hash_table *nft_hash_tbl_alloc(unsigned int nbuckets) 87static struct nft_hash_table *nft_hash_tbl_alloc(unsigned int nbuckets)
@@ -161,7 +163,6 @@ static int nft_hash_tbl_expand(const struct nft_set *set, struct nft_hash *priv)
161 break; 163 break;
162 } 164 }
163 } 165 }
164 ntbl->elements = tbl->elements;
165 166
166 /* Publish new table */ 167 /* Publish new table */
167 rcu_assign_pointer(priv->tbl, ntbl); 168 rcu_assign_pointer(priv->tbl, ntbl);
@@ -201,7 +202,6 @@ static int nft_hash_tbl_shrink(const struct nft_set *set, struct nft_hash *priv)
201 ; 202 ;
202 RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]); 203 RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
203 } 204 }
204 ntbl->elements = tbl->elements;
205 205
206 /* Publish new table */ 206 /* Publish new table */
207 rcu_assign_pointer(priv->tbl, ntbl); 207 rcu_assign_pointer(priv->tbl, ntbl);
@@ -237,10 +237,9 @@ static int nft_hash_insert(const struct nft_set *set,
237 h = nft_hash_data(&he->key, tbl->size, set->klen); 237 h = nft_hash_data(&he->key, tbl->size, set->klen);
238 RCU_INIT_POINTER(he->next, tbl->buckets[h]); 238 RCU_INIT_POINTER(he->next, tbl->buckets[h]);
239 rcu_assign_pointer(tbl->buckets[h], he); 239 rcu_assign_pointer(tbl->buckets[h], he);
240 tbl->elements++;
241 240
242 /* Expand table when exceeding 75% load */ 241 /* Expand table when exceeding 75% load */
243 if (tbl->elements > tbl->size / 4 * 3) 242 if (set->nelems + 1 > tbl->size / 4 * 3)
244 nft_hash_tbl_expand(set, priv); 243 nft_hash_tbl_expand(set, priv);
245 244
246 return 0; 245 return 0;
@@ -268,10 +267,9 @@ static void nft_hash_remove(const struct nft_set *set,
268 RCU_INIT_POINTER(*pprev, he->next); 267 RCU_INIT_POINTER(*pprev, he->next);
269 synchronize_rcu(); 268 synchronize_rcu();
270 kfree(he); 269 kfree(he);
271 tbl->elements--;
272 270
273 /* Shrink table beneath 30% load */ 271 /* Shrink table beneath 30% load */
274 if (tbl->elements < tbl->size * 3 / 10 && 272 if (set->nelems - 1 < tbl->size * 3 / 10 &&
275 tbl->size > NFT_HASH_MIN_SIZE) 273 tbl->size > NFT_HASH_MIN_SIZE)
276 nft_hash_tbl_shrink(set, priv); 274 nft_hash_tbl_shrink(set, priv);
277} 275}
@@ -335,17 +333,23 @@ static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
335} 333}
336 334
337static int nft_hash_init(const struct nft_set *set, 335static int nft_hash_init(const struct nft_set *set,
336 const struct nft_set_desc *desc,
338 const struct nlattr * const tb[]) 337 const struct nlattr * const tb[])
339{ 338{
340 struct nft_hash *priv = nft_set_priv(set); 339 struct nft_hash *priv = nft_set_priv(set);
341 struct nft_hash_table *tbl; 340 struct nft_hash_table *tbl;
341 unsigned int size;
342 342
343 if (unlikely(!nft_hash_rnd_initted)) { 343 if (unlikely(!nft_hash_rnd_initted)) {
344 get_random_bytes(&nft_hash_rnd, 4); 344 get_random_bytes(&nft_hash_rnd, 4);
345 nft_hash_rnd_initted = true; 345 nft_hash_rnd_initted = true;
346 } 346 }
347 347
348 tbl = nft_hash_tbl_alloc(NFT_HASH_MIN_SIZE); 348 size = NFT_HASH_MIN_SIZE;
349 if (desc->size)
350 size = nft_hash_tbl_size(desc->size);
351
352 tbl = nft_hash_tbl_alloc(size);
349 if (tbl == NULL) 353 if (tbl == NULL)
350 return -ENOMEM; 354 return -ENOMEM;
351 RCU_INIT_POINTER(priv->tbl, tbl); 355 RCU_INIT_POINTER(priv->tbl, tbl);
@@ -369,8 +373,37 @@ static void nft_hash_destroy(const struct nft_set *set)
369 kfree(tbl); 373 kfree(tbl);
370} 374}
371 375
376static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
377 struct nft_set_estimate *est)
378{
379 unsigned int esize;
380
381 esize = sizeof(struct nft_hash_elem);
382 if (features & NFT_SET_MAP)
383 esize += FIELD_SIZEOF(struct nft_hash_elem, data[0]);
384
385 if (desc->size) {
386 est->size = sizeof(struct nft_hash) +
387 nft_hash_tbl_size(desc->size) *
388 sizeof(struct nft_hash_elem *) +
389 desc->size * esize;
390 } else {
391 /* Resizing happens when the load drops below 30% or goes
392 * above 75%. The average of 52.5% load (approximated by 50%)
393 * is used for the size estimation of the hash buckets,
394 * meaning we calculate two buckets per element.
395 */
396 est->size = esize + 2 * sizeof(struct nft_hash_elem *);
397 }
398
399 est->class = NFT_SET_CLASS_O_1;
400
401 return true;
402}
403
372static struct nft_set_ops nft_hash_ops __read_mostly = { 404static struct nft_set_ops nft_hash_ops __read_mostly = {
373 .privsize = nft_hash_privsize, 405 .privsize = nft_hash_privsize,
406 .estimate = nft_hash_estimate,
374 .init = nft_hash_init, 407 .init = nft_hash_init,
375 .destroy = nft_hash_destroy, 408 .destroy = nft_hash_destroy,
376 .get = nft_hash_get, 409 .get = nft_hash_get,
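
With the element count now tracked in set->nelems, nft_hash can size its table from the descriptor up front: nft_hash_tbl_size() rounds 4/3 of the expected element count up to a power of two, so a full table sits near the same 75% load that triggers expansion, clamped to NFT_HASH_MIN_SIZE. The arithmetic, checked in userspace:

    #include <stdio.h>

    #define MIN_SIZE 4UL

    static unsigned long roundup_pow_of_two(unsigned long n)
    {
        unsigned long r = 1;

        while (r < n)
            r <<= 1;
        return r;
    }

    static unsigned long tbl_size(unsigned long nelem)
    {
        unsigned long want = roundup_pow_of_two(nelem * 4 / 3);

        return want > MIN_SIZE ? want : MIN_SIZE;
    }

    int main(void)
    {
        unsigned long sizes[] = { 1, 100, 1000, 65536 };

        for (int i = 0; i < 4; i++)
            printf("%6lu elements -> %6lu buckets\n",
                   sizes[i], tbl_size(sizes[i]));
        return 0;
    }
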
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 7fd2bea8aa23..6404a726d17b 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -56,8 +56,14 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
56 return -EINVAL; 56 return -EINVAL;
57 57
58 set = nf_tables_set_lookup(ctx->table, tb[NFTA_LOOKUP_SET]); 58 set = nf_tables_set_lookup(ctx->table, tb[NFTA_LOOKUP_SET]);
59 if (IS_ERR(set)) 59 if (IS_ERR(set)) {
60 return PTR_ERR(set); 60 if (tb[NFTA_LOOKUP_SET_ID]) {
61 set = nf_tables_set_lookup_byid(ctx->net,
62 tb[NFTA_LOOKUP_SET_ID]);
63 }
64 if (IS_ERR(set))
65 return PTR_ERR(set);
66 }
61 67
62 priv->sreg = ntohl(nla_get_be32(tb[NFTA_LOOKUP_SREG])); 68 priv->sreg = ntohl(nla_get_be32(tb[NFTA_LOOKUP_SREG]));
63 err = nft_validate_input_register(priv->sreg); 69 err = nft_validate_input_register(priv->sreg);
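
An anonymous set created earlier in the same batch has no name a later message could look up, so NFTA_LOOKUP_SET_ID (like NFTA_SET_ELEM_LIST_SET_ID above) carries the batch-local id recorded by nft_trans_set_add(), and the by-name lookup falls back to nf_tables_set_lookup_byid() against the pending transactions. A sketch of the two-step resolution, with toy data:

    #include <stdio.h>
    #include <string.h>

    struct set { const char *name; unsigned int id; };

    static struct set pending[] = {          /* sets queued in this batch */
        { .name = NULL, .id = 7 },           /* anonymous: only the id works */
        { .name = "allowed", .id = 8 },
    };

    static struct set *lookup(const char *name, unsigned int id)
    {
        for (size_t i = 0; i < 2; i++) {
            if (name && pending[i].name &&
                strcmp(pending[i].name, name) == 0)
                return &pending[i];
            if (!name && pending[i].id == id)
                return &pending[i];
        }
        return NULL;
    }

    int main(void)
    {
        printf("by name: %s\n", lookup("allowed", 0) ? "found" : "missing");
        printf("by id:   %s\n", lookup(NULL, 7) ? "found" : "missing");
        return 0;
    }
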
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 425cf39af890..852b178c6ae7 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -18,18 +18,11 @@
18#include <net/sock.h> 18#include <net/sock.h>
19#include <net/tcp_states.h> /* for TCP_TIME_WAIT */ 19#include <net/tcp_states.h> /* for TCP_TIME_WAIT */
20#include <net/netfilter/nf_tables.h> 20#include <net/netfilter/nf_tables.h>
21#include <net/netfilter/nft_meta.h>
21 22
22struct nft_meta { 23void nft_meta_get_eval(const struct nft_expr *expr,
23 enum nft_meta_keys key:8; 24 struct nft_data data[NFT_REG_MAX + 1],
24 union { 25 const struct nft_pktinfo *pkt)
25 enum nft_registers dreg:8;
26 enum nft_registers sreg:8;
27 };
28};
29
30static void nft_meta_get_eval(const struct nft_expr *expr,
31 struct nft_data data[NFT_REG_MAX + 1],
32 const struct nft_pktinfo *pkt)
33{ 26{
34 const struct nft_meta *priv = nft_expr_priv(expr); 27 const struct nft_meta *priv = nft_expr_priv(expr);
35 const struct sk_buff *skb = pkt->skb; 28 const struct sk_buff *skb = pkt->skb;
@@ -140,10 +133,11 @@ static void nft_meta_get_eval(const struct nft_expr *expr,
140err: 133err:
141 data[NFT_REG_VERDICT].verdict = NFT_BREAK; 134 data[NFT_REG_VERDICT].verdict = NFT_BREAK;
142} 135}
136EXPORT_SYMBOL_GPL(nft_meta_get_eval);
143 137
144static void nft_meta_set_eval(const struct nft_expr *expr, 138void nft_meta_set_eval(const struct nft_expr *expr,
145 struct nft_data data[NFT_REG_MAX + 1], 139 struct nft_data data[NFT_REG_MAX + 1],
146 const struct nft_pktinfo *pkt) 140 const struct nft_pktinfo *pkt)
147{ 141{
148 const struct nft_meta *meta = nft_expr_priv(expr); 142 const struct nft_meta *meta = nft_expr_priv(expr);
149 struct sk_buff *skb = pkt->skb; 143 struct sk_buff *skb = pkt->skb;
@@ -163,28 +157,24 @@ static void nft_meta_set_eval(const struct nft_expr *expr,
163 WARN_ON(1); 157 WARN_ON(1);
164 } 158 }
165} 159}
160EXPORT_SYMBOL_GPL(nft_meta_set_eval);
166 161
167static const struct nla_policy nft_meta_policy[NFTA_META_MAX + 1] = { 162const struct nla_policy nft_meta_policy[NFTA_META_MAX + 1] = {
168 [NFTA_META_DREG] = { .type = NLA_U32 }, 163 [NFTA_META_DREG] = { .type = NLA_U32 },
169 [NFTA_META_KEY] = { .type = NLA_U32 }, 164 [NFTA_META_KEY] = { .type = NLA_U32 },
170 [NFTA_META_SREG] = { .type = NLA_U32 }, 165 [NFTA_META_SREG] = { .type = NLA_U32 },
171}; 166};
167EXPORT_SYMBOL_GPL(nft_meta_policy);
172 168
173static int nft_meta_init_validate_set(uint32_t key) 169int nft_meta_get_init(const struct nft_ctx *ctx,
170 const struct nft_expr *expr,
171 const struct nlattr * const tb[])
174{ 172{
175 switch (key) { 173 struct nft_meta *priv = nft_expr_priv(expr);
176 case NFT_META_MARK: 174 int err;
177 case NFT_META_PRIORITY:
178 case NFT_META_NFTRACE:
179 return 0;
180 default:
181 return -EOPNOTSUPP;
182 }
183}
184 175
185static int nft_meta_init_validate_get(uint32_t key) 176 priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
186{ 177 switch (priv->key) {
187 switch (key) {
188 case NFT_META_LEN: 178 case NFT_META_LEN:
189 case NFT_META_PROTOCOL: 179 case NFT_META_PROTOCOL:
190 case NFT_META_NFPROTO: 180 case NFT_META_NFPROTO:
@@ -205,39 +195,41 @@ static int nft_meta_init_validate_get(uint32_t key)
205#ifdef CONFIG_NETWORK_SECMARK 195#ifdef CONFIG_NETWORK_SECMARK
206 case NFT_META_SECMARK: 196 case NFT_META_SECMARK:
207#endif 197#endif
208 return 0; 198 break;
209 default: 199 default:
210 return -EOPNOTSUPP; 200 return -EOPNOTSUPP;
211 } 201 }
212 202
203 priv->dreg = ntohl(nla_get_be32(tb[NFTA_META_DREG]));
204 err = nft_validate_output_register(priv->dreg);
205 if (err < 0)
206 return err;
207
208 err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
209 if (err < 0)
210 return err;
211
212 return 0;
213} 213}
214EXPORT_SYMBOL_GPL(nft_meta_get_init);
214 215
215static int nft_meta_init(const struct nft_ctx *ctx, const struct nft_expr *expr, 216int nft_meta_set_init(const struct nft_ctx *ctx,
216 const struct nlattr * const tb[]) 217 const struct nft_expr *expr,
218 const struct nlattr * const tb[])
217{ 219{
218 struct nft_meta *priv = nft_expr_priv(expr); 220 struct nft_meta *priv = nft_expr_priv(expr);
219 int err; 221 int err;
220 222
221 priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY])); 223 priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
222 224 switch (priv->key) {
223 if (tb[NFTA_META_DREG]) { 225 case NFT_META_MARK:
224 err = nft_meta_init_validate_get(priv->key); 226 case NFT_META_PRIORITY:
225 if (err < 0) 227 case NFT_META_NFTRACE:
226 return err; 228 break;
227 229 default:
228 priv->dreg = ntohl(nla_get_be32(tb[NFTA_META_DREG])); 230 return -EOPNOTSUPP;
229 err = nft_validate_output_register(priv->dreg);
230 if (err < 0)
231 return err;
232
233 return nft_validate_data_load(ctx, priv->dreg, NULL,
234 NFT_DATA_VALUE);
235 } 231 }
236 232
237 err = nft_meta_init_validate_set(priv->key);
238 if (err < 0)
239 return err;
240
241 priv->sreg = ntohl(nla_get_be32(tb[NFTA_META_SREG])); 233 priv->sreg = ntohl(nla_get_be32(tb[NFTA_META_SREG]));
242 err = nft_validate_input_register(priv->sreg); 234 err = nft_validate_input_register(priv->sreg);
243 if (err < 0) 235 if (err < 0)
@@ -245,9 +237,10 @@ static int nft_meta_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
245 237
246 return 0; 238 return 0;
247} 239}
240EXPORT_SYMBOL_GPL(nft_meta_set_init);
248 241
249static int nft_meta_get_dump(struct sk_buff *skb, 242int nft_meta_get_dump(struct sk_buff *skb,
250 const struct nft_expr *expr) 243 const struct nft_expr *expr)
251{ 244{
252 const struct nft_meta *priv = nft_expr_priv(expr); 245 const struct nft_meta *priv = nft_expr_priv(expr);
253 246
@@ -260,9 +253,10 @@ static int nft_meta_get_dump(struct sk_buff *skb,
260nla_put_failure: 253nla_put_failure:
261 return -1; 254 return -1;
262} 255}
256EXPORT_SYMBOL_GPL(nft_meta_get_dump);
263 257
264static int nft_meta_set_dump(struct sk_buff *skb, 258int nft_meta_set_dump(struct sk_buff *skb,
265 const struct nft_expr *expr) 259 const struct nft_expr *expr)
266{ 260{
267 const struct nft_meta *priv = nft_expr_priv(expr); 261 const struct nft_meta *priv = nft_expr_priv(expr);
268 262
@@ -276,13 +270,14 @@ static int nft_meta_set_dump(struct sk_buff *skb,
276nla_put_failure: 270nla_put_failure:
277 return -1; 271 return -1;
278} 272}
273EXPORT_SYMBOL_GPL(nft_meta_set_dump);
279 274
280static struct nft_expr_type nft_meta_type; 275static struct nft_expr_type nft_meta_type;
281static const struct nft_expr_ops nft_meta_get_ops = { 276static const struct nft_expr_ops nft_meta_get_ops = {
282 .type = &nft_meta_type, 277 .type = &nft_meta_type,
283 .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)), 278 .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
284 .eval = nft_meta_get_eval, 279 .eval = nft_meta_get_eval,
285 .init = nft_meta_init, 280 .init = nft_meta_get_init,
286 .dump = nft_meta_get_dump, 281 .dump = nft_meta_get_dump,
287}; 282};
288 283
@@ -290,7 +285,7 @@ static const struct nft_expr_ops nft_meta_set_ops = {
290 .type = &nft_meta_type, 285 .type = &nft_meta_type,
291 .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)), 286 .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
292 .eval = nft_meta_set_eval, 287 .eval = nft_meta_set_eval,
293 .init = nft_meta_init, 288 .init = nft_meta_set_init,
294 .dump = nft_meta_set_dump, 289 .dump = nft_meta_set_dump,
295}; 290};
296 291
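
Every nft_meta helper goes from static to EXPORT_SYMBOL_GPL and the private struct nft_meta moves into <net/netfilter/nft_meta.h>, so another module can reuse the eval, init and dump routines instead of duplicating them. The plain-C shape of that refactor, split into files as the comments indicate:

    /* shared.h: declarations other translation units can link against */
    #ifndef SHARED_H
    #define SHARED_H
    struct meta { int key; };
    int meta_get_init(struct meta *m, int key);
    #endif

    /* shared.c: was 'static'; now a helper others can call */
    #include "shared.h"
    int meta_get_init(struct meta *m, int key)
    {
        m->key = key;
        return 0;
    }

    /* user.c: a second module reusing the helper instead of copying it */
    #include "shared.h"
    int other_meta_init(struct meta *m) { return meta_get_init(m, 42); }
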
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
index e21d69d13506..e1836ff88199 100644
--- a/net/netfilter/nft_rbtree.c
+++ b/net/netfilter/nft_rbtree.c
@@ -18,6 +18,8 @@
 #include <linux/netfilter/nf_tables.h>
 #include <net/netfilter/nf_tables.h>
 
+static DEFINE_SPINLOCK(nft_rbtree_lock);
+
 struct nft_rbtree {
 	struct rb_root	root;
 };
@@ -38,6 +40,7 @@ static bool nft_rbtree_lookup(const struct nft_set *set,
 	const struct rb_node *parent = priv->root.rb_node;
 	int d;
 
+	spin_lock_bh(&nft_rbtree_lock);
 	while (parent != NULL) {
 		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
 
@@ -53,6 +56,8 @@ found:
 				goto out;
 			if (set->flags & NFT_SET_MAP)
 				nft_data_copy(data, rbe->data);
+
+			spin_unlock_bh(&nft_rbtree_lock);
 			return true;
 		}
 	}
@@ -62,6 +67,7 @@ found:
 			goto found;
 	}
 out:
+	spin_unlock_bh(&nft_rbtree_lock);
 	return false;
 }
 
@@ -124,9 +130,12 @@ static int nft_rbtree_insert(const struct nft_set *set,
 	    !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
 		nft_data_copy(rbe->data, &elem->data);
 
+	spin_lock_bh(&nft_rbtree_lock);
 	err = __nft_rbtree_insert(set, rbe);
 	if (err < 0)
 		kfree(rbe);
+
+	spin_unlock_bh(&nft_rbtree_lock);
 	return err;
 }
 
@@ -136,7 +145,9 @@ static void nft_rbtree_remove(const struct nft_set *set,
 	struct nft_rbtree *priv = nft_set_priv(set);
 	struct nft_rbtree_elem *rbe = elem->cookie;
 
+	spin_lock_bh(&nft_rbtree_lock);
 	rb_erase(&rbe->node, &priv->root);
+	spin_unlock_bh(&nft_rbtree_lock);
 	kfree(rbe);
 }
 
@@ -147,6 +158,7 @@ static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem)
 	struct nft_rbtree_elem *rbe;
 	int d;
 
+	spin_lock_bh(&nft_rbtree_lock);
 	while (parent != NULL) {
 		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
 
@@ -161,9 +173,11 @@ static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem)
 			    !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
 				nft_data_copy(&elem->data, rbe->data);
 			elem->flags = rbe->flags;
+			spin_unlock_bh(&nft_rbtree_lock);
 			return 0;
 		}
 	}
+	spin_unlock_bh(&nft_rbtree_lock);
 	return -ENOENT;
 }
 
@@ -176,6 +190,7 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
 	struct nft_set_elem elem;
 	struct rb_node *node;
 
+	spin_lock_bh(&nft_rbtree_lock);
 	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
 		if (iter->count < iter->skip)
 			goto cont;
@@ -188,11 +203,14 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
 		elem.flags = rbe->flags;
 
 		iter->err = iter->fn(ctx, set, iter, &elem);
-		if (iter->err < 0)
+		if (iter->err < 0) {
+			spin_unlock_bh(&nft_rbtree_lock);
 			return;
+		}
cont:
 		iter->count++;
 	}
+	spin_unlock_bh(&nft_rbtree_lock);
 }
 
 static unsigned int nft_rbtree_privsize(const struct nlattr * const nla[])
@@ -201,6 +219,7 @@ static unsigned int nft_rbtree_privsize(const struct nlattr * const nla[])
 }
 
 static int nft_rbtree_init(const struct nft_set *set,
+			   const struct nft_set_desc *desc,
 			   const struct nlattr * const nla[])
 {
 	struct nft_rbtree *priv = nft_set_priv(set);
@@ -215,15 +234,37 @@ static void nft_rbtree_destroy(const struct nft_set *set)
 	struct nft_rbtree_elem *rbe;
 	struct rb_node *node;
 
+	spin_lock_bh(&nft_rbtree_lock);
 	while ((node = priv->root.rb_node) != NULL) {
 		rb_erase(node, &priv->root);
 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
 		nft_rbtree_elem_destroy(set, rbe);
 	}
+	spin_unlock_bh(&nft_rbtree_lock);
+}
+
+static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
+				struct nft_set_estimate *est)
+{
+	unsigned int nsize;
+
+	nsize = sizeof(struct nft_rbtree_elem);
+	if (features & NFT_SET_MAP)
+		nsize += FIELD_SIZEOF(struct nft_rbtree_elem, data[0]);
+
+	if (desc->size)
+		est->size = sizeof(struct nft_rbtree) + desc->size * nsize;
+	else
+		est->size = nsize;
+
+	est->class = NFT_SET_CLASS_O_LOG_N;
+
+	return true;
 }
 
 static struct nft_set_ops nft_rbtree_ops __read_mostly = {
 	.privsize	= nft_rbtree_privsize,
+	.estimate	= nft_rbtree_estimate,
 	.init		= nft_rbtree_init,
 	.destroy	= nft_rbtree_destroy,
 	.insert		= nft_rbtree_insert,
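
A quick note on the nft_rbtree change above: struct rb_root has no locking of its own, and set lookups run from the packet path in softirq context, so every reader and writer now takes the one global nft_rbtree_lock with the _bh variants. A minimal skeleton of the pattern (kernel context assumed; the names below are illustrative, not from the patch):

    /* All instances share one lock; taking it with spin_lock_bh()
     * also blocks local softirqs, so a packet-path lookup on this
     * CPU cannot race with the modification below. */
    static DEFINE_SPINLOCK(example_lock);
    static struct rb_root example_root = RB_ROOT;

    static void example_remove(struct rb_node *node)
    {
            spin_lock_bh(&example_lock);
            rb_erase(node, &example_root);
            spin_unlock_bh(&example_lock);
            /* the erased element may be freed outside the lock */
    }

One global lock serializes all rbtree sets, which is coarse but keeps the fix small.
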
diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c
index 12d4da8e6c77..bbffdbdaf603 100644
--- a/net/netfilter/xt_bpf.c
+++ b/net/netfilter/xt_bpf.c
@@ -23,10 +23,11 @@ MODULE_ALIAS("ip6t_bpf");
 static int bpf_mt_check(const struct xt_mtchk_param *par)
 {
 	struct xt_bpf_info *info = par->matchinfo;
-	struct sock_fprog program;
+	struct sock_fprog_kern program;
 
 	program.len = info->bpf_program_num_elem;
-	program.filter = (struct sock_filter __user *) info->bpf_program;
+	program.filter = info->bpf_program;
+
 	if (sk_unattached_filter_create(&info->filter, &program)) {
 		pr_info("bpf: check failed: parse error\n");
 		return -EINVAL;
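
The xt_bpf fix above is worth a note: struct sock_fprog declares its ->filter as a __user pointer, which this kernel-resident program never was, hence the old cast. struct sock_fprog_kern has the same layout with a plain kernel pointer. A hedged sketch of how such a kernel-held classic BPF program is described (kernel context assumed; the program itself is illustrative):

    /* One-instruction classic BPF program: accept up to 64k bytes. */
    static struct sock_filter accept_all[] = {
            BPF_STMT(BPF_RET | BPF_K, 0xffff),
    };

    static struct sock_fprog_kern accept_prog = {
            .len    = ARRAY_SIZE(accept_all),
            .filter = accept_all,   /* plain kernel pointer, no __user cast */
    };

    /* struct sk_filter *fp;
     * err = sk_unattached_filter_create(&fp, &accept_prog); */
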
diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
index b3be0ef21f19..8c646ed9c921 100644
--- a/net/netfilter/xt_nfacct.c
+++ b/net/netfilter/xt_nfacct.c
@@ -21,11 +21,14 @@ MODULE_ALIAS("ip6t_nfacct");
 
 static bool nfacct_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
+	int overquota;
 	const struct xt_nfacct_match_info *info = par->targinfo;
 
 	nfnl_acct_update(skb, info->nfacct);
 
-	return true;
+	overquota = nfnl_acct_overquota(skb, info->nfacct);
+
+	return overquota == NFACCT_UNDERQUOTA ? false : true;
 }
 
 static int
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 1e657cf715c4..a9faae89f955 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -313,10 +313,7 @@ out:
 
 static void recent_table_free(void *addr)
 {
-	if (is_vmalloc_addr(addr))
-		vfree(addr);
-	else
-		kfree(addr);
+	kvfree(addr);
 }
 
 static int recent_mt_check(const struct xt_mtchk_param *par,
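
kvfree() makes the is_vmalloc_addr() dance unnecessary: it frees memory regardless of whether it came from kmalloc() or vmalloc(), and ignores NULL. It pairs naturally with the common fall-back allocation pattern, sketched here under the assumption of a kernel context (names illustrative):

    void *table_alloc(size_t sz)
    {
            /* try physically contiguous first, fall back for large tables */
            void *p = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN);

            return p ? p : vmalloc(sz);
    }

    /* either allocation path is undone by a single kvfree(table); */
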
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index f22757a29cd0..15c731f03fa6 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1206,7 +1206,8 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
 	struct module *module = NULL;
 	struct mutex *cb_mutex;
 	struct netlink_sock *nlk;
-	void (*bind)(int group);
+	int (*bind)(int group);
+	void (*unbind)(int group);
 	int err = 0;
 
 	sock->state = SS_UNCONNECTED;
@@ -1232,6 +1233,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
 		err = -EPROTONOSUPPORT;
 	cb_mutex = nl_table[protocol].cb_mutex;
 	bind = nl_table[protocol].bind;
+	unbind = nl_table[protocol].unbind;
 	netlink_unlock_table();
 
 	if (err < 0)
@@ -1248,6 +1250,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
 	nlk = nlk_sk(sock->sk);
 	nlk->module = module;
 	nlk->netlink_bind = bind;
+	nlk->netlink_unbind = unbind;
out:
 	return err;
 
@@ -1301,6 +1304,7 @@ static int netlink_release(struct socket *sock)
 			kfree_rcu(old, rcu);
 			nl_table[sk->sk_protocol].module = NULL;
 			nl_table[sk->sk_protocol].bind = NULL;
+			nl_table[sk->sk_protocol].unbind = NULL;
 			nl_table[sk->sk_protocol].flags = 0;
 			nl_table[sk->sk_protocol].registered = 0;
 		}
@@ -1478,6 +1482,19 @@ static int netlink_realloc_groups(struct sock *sk)
 	return err;
 }
 
+static void netlink_unbind(int group, long unsigned int groups,
+			   struct netlink_sock *nlk)
+{
+	int undo;
+
+	if (!nlk->netlink_unbind)
+		return;
+
+	for (undo = 0; undo < group; undo++)
+		if (test_bit(undo, &groups))
+			nlk->netlink_unbind(undo);
+}
+
 static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 			int addr_len)
 {
@@ -1486,6 +1503,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 	struct netlink_sock *nlk = nlk_sk(sk);
 	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
 	int err;
+	long unsigned int groups = nladdr->nl_groups;
 
 	if (addr_len < sizeof(struct sockaddr_nl))
 		return -EINVAL;
@@ -1494,7 +1512,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 		return -EINVAL;
 
 	/* Only superuser is allowed to listen multicasts */
-	if (nladdr->nl_groups) {
+	if (groups) {
 		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
 			return -EPERM;
 		err = netlink_realloc_groups(sk);
@@ -1502,37 +1520,45 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 			return err;
 	}
 
-	if (nlk->portid) {
+	if (nlk->portid)
 		if (nladdr->nl_pid != nlk->portid)
 			return -EINVAL;
-	} else {
+
+	if (nlk->netlink_bind && groups) {
+		int group;
+
+		for (group = 0; group < nlk->ngroups; group++) {
+			if (!test_bit(group, &groups))
+				continue;
+			err = nlk->netlink_bind(group);
+			if (!err)
+				continue;
+			netlink_unbind(group, groups, nlk);
+			return err;
+		}
+	}
+
+	if (!nlk->portid) {
 		err = nladdr->nl_pid ?
 			netlink_insert(sk, net, nladdr->nl_pid) :
 			netlink_autobind(sock);
-		if (err)
+		if (err) {
+			netlink_unbind(nlk->ngroups - 1, groups, nlk);
 			return err;
+		}
 	}
 
-	if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
+	if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
 		return 0;
 
 	netlink_table_grab();
 	netlink_update_subscriptions(sk, nlk->subscriptions +
-				     hweight32(nladdr->nl_groups) -
+				     hweight32(groups) -
 				     hweight32(nlk->groups[0]));
-	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
+	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
 	netlink_update_listeners(sk);
 	netlink_table_ungrab();
 
-	if (nlk->netlink_bind && nlk->groups[0]) {
-		int i;
-
-		for (i = 0; i < nlk->ngroups; i++) {
-			if (test_bit(i, nlk->groups))
-				nlk->netlink_bind(i);
-		}
-	}
-
 	return 0;
 }
 
@@ -2170,13 +2196,17 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
 			return err;
 		if (!val || val - 1 >= nlk->ngroups)
 			return -EINVAL;
+		if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
+			err = nlk->netlink_bind(val);
+			if (err)
+				return err;
+		}
 		netlink_table_grab();
 		netlink_update_socket_mc(nlk, val,
 					 optname == NETLINK_ADD_MEMBERSHIP);
 		netlink_table_ungrab();
-
-		if (nlk->netlink_bind)
-			nlk->netlink_bind(val);
+		if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
+			nlk->netlink_unbind(val);
 
 		err = 0;
 		break;
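
The af_netlink.c change gives the per-protocol bind() callback a return value and adds an unbind() counterpart, so joining a multicast group can now fail cleanly: if binding group N fails, every lower-numbered group that was already bound is unbound again. A runnable toy model of that unwind (plain userspace C, nothing here is kernel API):

    #include <stdio.h>

    static unsigned long groups = 0x0b;     /* groups 0, 1 and 3 requested */

    static void unbind(int g) { printf("unbind %d\n", g); }

    int main(void)
    {
            int group, undo;

            for (group = 0; group < 8; group++) {
                    if (!(groups & (1UL << group)))
                            continue;
                    if (group == 3)         /* simulated bind() failure */
                            break;
                    printf("bind %d\n", group);
            }
            /* mirror of netlink_unbind(): undo everything below 'group' */
            for (undo = 0; undo < group; undo++)
                    if (groups & (1UL << undo))
                            unbind(undo);
            return 0;
    }

With group 3 failing, this prints bind 0, bind 1, unbind 0, unbind 1. Note that the undo loop has to test the loop variable, not the group that failed; the hunk above reads test_bit(undo, &groups) accordingly.
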
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index ed13a790b00e..0b59d441f5b6 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -38,7 +38,8 @@ struct netlink_sock {
 	struct mutex		*cb_mutex;
 	struct mutex		cb_def_mutex;
 	void			(*netlink_rcv)(struct sk_buff *skb);
-	void			(*netlink_bind)(int group);
+	int			(*netlink_bind)(int group);
+	void			(*netlink_unbind)(int group);
 	struct module		*module;
 #ifdef CONFIG_NETLINK_MMAP
 	struct mutex		pg_vec_lock;
@@ -74,7 +75,8 @@ struct netlink_table {
 	unsigned int		groups;
 	struct mutex		*cb_mutex;
 	struct module		*module;
-	void			(*bind)(int group);
+	int			(*bind)(int group);
+	void			(*unbind)(int group);
 	bool			(*compare)(struct net *net, struct sock *sock);
 	int			registered;
 };
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index a3ba3ca0ff92..76393f2f4b22 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -317,7 +317,7 @@ static void genl_unregister_mc_groups(struct genl_family *family)
 	}
 }
 
-static int genl_validate_ops(struct genl_family *family)
+static int genl_validate_ops(const struct genl_family *family)
 {
 	const struct genl_ops *ops = family->ops;
 	unsigned int n_ops = family->n_ops;
@@ -337,10 +337,6 @@ static int genl_validate_ops(const struct genl_family *family)
 			return -EINVAL;
 	}
 
-	/* family is not registered yet, so no locking needed */
-	family->ops = ops;
-	family->n_ops = n_ops;
-
 	return 0;
 }
 
diff --git a/net/nfc/digital.h b/net/nfc/digital.h
index 3759add68b1b..71ad7eefddd4 100644
--- a/net/nfc/digital.h
+++ b/net/nfc/digital.h
@@ -71,6 +71,7 @@ static inline int digital_in_send_cmd(struct nfc_digital_dev *ddev,
 void digital_poll_next_tech(struct nfc_digital_dev *ddev);
 
 int digital_in_send_sens_req(struct nfc_digital_dev *ddev, u8 rf_tech);
+int digital_in_send_sensb_req(struct nfc_digital_dev *ddev, u8 rf_tech);
 int digital_in_send_sensf_req(struct nfc_digital_dev *ddev, u8 rf_tech);
 int digital_in_send_iso15693_inv_req(struct nfc_digital_dev *ddev, u8 rf_tech);
 
diff --git a/net/nfc/digital_core.c b/net/nfc/digital_core.c
index e01e15dbf1ab..a6ce3c627e4e 100644
--- a/net/nfc/digital_core.c
+++ b/net/nfc/digital_core.c
@@ -22,6 +22,8 @@
22#define DIGITAL_PROTO_NFCA_RF_TECH \ 22#define DIGITAL_PROTO_NFCA_RF_TECH \
23 (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK | NFC_PROTO_NFC_DEP_MASK) 23 (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK | NFC_PROTO_NFC_DEP_MASK)
24 24
25#define DIGITAL_PROTO_NFCB_RF_TECH NFC_PROTO_ISO14443_B_MASK
26
25#define DIGITAL_PROTO_NFCF_RF_TECH \ 27#define DIGITAL_PROTO_NFCF_RF_TECH \
26 (NFC_PROTO_FELICA_MASK | NFC_PROTO_NFC_DEP_MASK) 28 (NFC_PROTO_FELICA_MASK | NFC_PROTO_NFC_DEP_MASK)
27 29
@@ -345,6 +347,12 @@ int digital_target_found(struct nfc_digital_dev *ddev,
345 add_crc = digital_skb_add_crc_a; 347 add_crc = digital_skb_add_crc_a;
346 break; 348 break;
347 349
350 case NFC_PROTO_ISO14443_B:
351 framing = NFC_DIGITAL_FRAMING_NFCB_T4T;
352 check_crc = digital_skb_check_crc_b;
353 add_crc = digital_skb_add_crc_b;
354 break;
355
348 default: 356 default:
349 pr_err("Invalid protocol %d\n", protocol); 357 pr_err("Invalid protocol %d\n", protocol);
350 return -EINVAL; 358 return -EINVAL;
@@ -378,6 +386,8 @@ int digital_target_found(struct nfc_digital_dev *ddev,
378 386
379void digital_poll_next_tech(struct nfc_digital_dev *ddev) 387void digital_poll_next_tech(struct nfc_digital_dev *ddev)
380{ 388{
389 u8 rand_mod;
390
381 digital_switch_rf(ddev, 0); 391 digital_switch_rf(ddev, 0);
382 392
383 mutex_lock(&ddev->poll_lock); 393 mutex_lock(&ddev->poll_lock);
@@ -387,8 +397,8 @@ void digital_poll_next_tech(struct nfc_digital_dev *ddev)
387 return; 397 return;
388 } 398 }
389 399
390 ddev->poll_tech_index = (ddev->poll_tech_index + 1) % 400 get_random_bytes(&rand_mod, sizeof(rand_mod));
391 ddev->poll_tech_count; 401 ddev->poll_tech_index = rand_mod % ddev->poll_tech_count;
392 402
393 mutex_unlock(&ddev->poll_lock); 403 mutex_unlock(&ddev->poll_lock);
394 404
@@ -475,6 +485,10 @@ static int digital_start_poll(struct nfc_dev *nfc_dev, __u32 im_protocols,
475 digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_106A, 485 digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_106A,
476 digital_in_send_sens_req); 486 digital_in_send_sens_req);
477 487
488 if (matching_im_protocols & DIGITAL_PROTO_NFCB_RF_TECH)
489 digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_106B,
490 digital_in_send_sensb_req);
491
478 if (matching_im_protocols & DIGITAL_PROTO_NFCF_RF_TECH) { 492 if (matching_im_protocols & DIGITAL_PROTO_NFCF_RF_TECH) {
479 digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_212F, 493 digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_212F,
480 digital_in_send_sensf_req); 494 digital_in_send_sensf_req);
@@ -635,7 +649,8 @@ static void digital_in_send_complete(struct nfc_digital_dev *ddev, void *arg,
635 goto done; 649 goto done;
636 } 650 }
637 651
638 if (ddev->curr_protocol == NFC_PROTO_ISO14443) { 652 if ((ddev->curr_protocol == NFC_PROTO_ISO14443) ||
653 (ddev->curr_protocol == NFC_PROTO_ISO14443_B)) {
639 rc = digital_in_iso_dep_pull_sod(ddev, resp); 654 rc = digital_in_iso_dep_pull_sod(ddev, resp);
640 if (rc) 655 if (rc)
641 goto done; 656 goto done;
@@ -676,7 +691,8 @@ static int digital_in_send(struct nfc_dev *nfc_dev, struct nfc_target *target,
676 goto exit; 691 goto exit;
677 } 692 }
678 693
679 if (ddev->curr_protocol == NFC_PROTO_ISO14443) { 694 if ((ddev->curr_protocol == NFC_PROTO_ISO14443) ||
695 (ddev->curr_protocol == NFC_PROTO_ISO14443_B)) {
680 rc = digital_in_iso_dep_push_sod(ddev, skb); 696 rc = digital_in_iso_dep_push_sod(ddev, skb);
681 if (rc) 697 if (rc)
682 goto exit; 698 goto exit;
@@ -747,6 +763,8 @@ struct nfc_digital_dev *nfc_digital_allocate_device(struct nfc_digital_ops *ops,
747 ddev->protocols |= NFC_PROTO_ISO15693_MASK; 763 ddev->protocols |= NFC_PROTO_ISO15693_MASK;
748 if (supported_protocols & NFC_PROTO_ISO14443_MASK) 764 if (supported_protocols & NFC_PROTO_ISO14443_MASK)
749 ddev->protocols |= NFC_PROTO_ISO14443_MASK; 765 ddev->protocols |= NFC_PROTO_ISO14443_MASK;
766 if (supported_protocols & NFC_PROTO_ISO14443_B_MASK)
767 ddev->protocols |= NFC_PROTO_ISO14443_B_MASK;
750 768
751 ddev->tx_headroom = tx_headroom + DIGITAL_MAX_HEADER_LEN; 769 ddev->tx_headroom = tx_headroom + DIGITAL_MAX_HEADER_LEN;
752 ddev->tx_tailroom = tx_tailroom + DIGITAL_CRC_LEN; 770 ddev->tx_tailroom = tx_tailroom + DIGITAL_CRC_LEN;
diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c
index d4ed25ff723f..171cb9949ab5 100644
--- a/net/nfc/digital_dep.c
+++ b/net/nfc/digital_dep.c
@@ -224,9 +224,8 @@ int digital_in_send_atr_req(struct nfc_digital_dev *ddev,
 
 	ddev->skb_add_crc(skb);
 
-	digital_in_send_cmd(ddev, skb, 500, digital_in_recv_atr_res, target);
-
-	return 0;
+	return digital_in_send_cmd(ddev, skb, 500, digital_in_recv_atr_res,
+				   target);
 }
 
 static int digital_in_send_rtox(struct nfc_digital_dev *ddev,
diff --git a/net/nfc/digital_technology.c b/net/nfc/digital_technology.c
index 278c3fed27e0..c2c1c0189b7c 100644
--- a/net/nfc/digital_technology.c
+++ b/net/nfc/digital_technology.c
@@ -41,6 +41,24 @@
 #define DIGITAL_MIFARE_READ_RES_LEN 16
 #define DIGITAL_MIFARE_ACK_RES	0x0A
 
+#define DIGITAL_CMD_SENSB_REQ			0x05
+#define DIGITAL_SENSB_ADVANCED			BIT(5)
+#define DIGITAL_SENSB_EXTENDED			BIT(4)
+#define DIGITAL_SENSB_ALLB_REQ			BIT(3)
+#define DIGITAL_SENSB_N(n)			((n) & 0x7)
+
+#define DIGITAL_CMD_SENSB_RES			0x50
+
+#define DIGITAL_CMD_ATTRIB_REQ			0x1D
+#define DIGITAL_ATTRIB_P1_TR0_DEFAULT		(0x0 << 6)
+#define DIGITAL_ATTRIB_P1_TR1_DEFAULT		(0x0 << 4)
+#define DIGITAL_ATTRIB_P1_SUPRESS_EOS		BIT(3)
+#define DIGITAL_ATTRIB_P1_SUPRESS_SOS		BIT(2)
+#define DIGITAL_ATTRIB_P2_LISTEN_POLL_1		(0x0 << 6)
+#define DIGITAL_ATTRIB_P2_POLL_LISTEN_1		(0x0 << 4)
+#define DIGITAL_ATTRIB_P2_MAX_FRAME_256		0x8
+#define DIGITAL_ATTRIB_P4_DID(n)		((n) & 0xf)
+
 #define DIGITAL_CMD_SENSF_REQ	0x00
 #define DIGITAL_CMD_SENSF_RES	0x01
 
@@ -75,6 +93,7 @@ static const u8 digital_ats_fsc[] = {
 };
 
 #define DIGITAL_ATS_FSCI(t0) ((t0) & 0x0F)
+#define DIGITAL_SENSB_FSCI(pi2) (((pi2) & 0xF0) >> 4)
 #define DIGITAL_ATS_MAX_FSC  256
 
 #define DIGITAL_RATS_BYTE1 0xE0
@@ -92,6 +111,32 @@ struct digital_sel_req {
 	u8	bcc;
 } __packed;
 
+struct digital_sensb_req {
+	u8	cmd;
+	u8	afi;
+	u8	param;
+} __packed;
+
+struct digital_sensb_res {
+	u8	cmd;
+	u8	nfcid0[4];
+	u8	app_data[4];
+	u8	proto_info[3];
+} __packed;
+
+struct digital_attrib_req {
+	u8	cmd;
+	u8	nfcid0[4];
+	u8	param1;
+	u8	param2;
+	u8	param3;
+	u8	param4;
+} __packed;
+
+struct digital_attrib_res {
+	u8	mbli_did;
+} __packed;
+
 struct digital_sensf_req {
 	u8	cmd;
 	u8	sc1;
@@ -531,6 +576,175 @@ int digital_in_recv_mifare_res(struct sk_buff *resp)
 	return -EIO;
 }
 
+static void digital_in_recv_attrib_res(struct nfc_digital_dev *ddev, void *arg,
+				       struct sk_buff *resp)
+{
+	struct nfc_target *target = arg;
+	struct digital_attrib_res *attrib_res;
+	int rc;
+
+	if (IS_ERR(resp)) {
+		rc = PTR_ERR(resp);
+		resp = NULL;
+		goto exit;
+	}
+
+	if (resp->len < sizeof(*attrib_res)) {
+		PROTOCOL_ERR("12.6.2");
+		rc = -EIO;
+		goto exit;
+	}
+
+	attrib_res = (struct digital_attrib_res *)resp->data;
+
+	if (attrib_res->mbli_did & 0x0f) {
+		PROTOCOL_ERR("12.6.2.1");
+		rc = -EIO;
+		goto exit;
+	}
+
+	rc = digital_target_found(ddev, target, NFC_PROTO_ISO14443_B);
+
+exit:
+	dev_kfree_skb(resp);
+	kfree(target);
+
+	if (rc)
+		digital_poll_next_tech(ddev);
+}
+
+static int digital_in_send_attrib_req(struct nfc_digital_dev *ddev,
+				      struct nfc_target *target,
+				      struct digital_sensb_res *sensb_res)
+{
+	struct digital_attrib_req *attrib_req;
+	struct sk_buff *skb;
+	int rc;
+
+	skb = digital_skb_alloc(ddev, sizeof(*attrib_req));
+	if (!skb)
+		return -ENOMEM;
+
+	attrib_req = (struct digital_attrib_req *)skb_put(skb,
+							  sizeof(*attrib_req));
+
+	attrib_req->cmd = DIGITAL_CMD_ATTRIB_REQ;
+	memcpy(attrib_req->nfcid0, sensb_res->nfcid0,
+	       sizeof(attrib_req->nfcid0));
+	attrib_req->param1 = DIGITAL_ATTRIB_P1_TR0_DEFAULT |
+			     DIGITAL_ATTRIB_P1_TR1_DEFAULT;
+	attrib_req->param2 = DIGITAL_ATTRIB_P2_LISTEN_POLL_1 |
+			     DIGITAL_ATTRIB_P2_POLL_LISTEN_1 |
+			     DIGITAL_ATTRIB_P2_MAX_FRAME_256;
+	attrib_req->param3 = sensb_res->proto_info[1] & 0x07;
+	attrib_req->param4 = DIGITAL_ATTRIB_P4_DID(0);
+
+	rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_attrib_res,
+				 target);
+	if (rc)
+		kfree_skb(skb);
+
+	return rc;
+}
+
+static void digital_in_recv_sensb_res(struct nfc_digital_dev *ddev, void *arg,
+				      struct sk_buff *resp)
+{
+	struct nfc_target *target = NULL;
+	struct digital_sensb_res *sensb_res;
+	u8 fsci;
+	int rc;
+
+	if (IS_ERR(resp)) {
+		rc = PTR_ERR(resp);
+		resp = NULL;
+		goto exit;
+	}
+
+	if (resp->len != sizeof(*sensb_res)) {
+		PROTOCOL_ERR("5.6.2.1");
+		rc = -EIO;
+		goto exit;
+	}
+
+	sensb_res = (struct digital_sensb_res *)resp->data;
+
+	if (sensb_res->cmd != DIGITAL_CMD_SENSB_RES) {
+		PROTOCOL_ERR("5.6.2");
+		rc = -EIO;
+		goto exit;
+	}
+
+	if (!(sensb_res->proto_info[1] & BIT(0))) {
+		PROTOCOL_ERR("5.6.2.12");
+		rc = -EIO;
+		goto exit;
+	}
+
+	if (sensb_res->proto_info[1] & BIT(3)) {
+		PROTOCOL_ERR("5.6.2.16");
+		rc = -EIO;
+		goto exit;
+	}
+
+	fsci = DIGITAL_SENSB_FSCI(sensb_res->proto_info[1]);
+	if (fsci >= 8)
+		ddev->target_fsc = DIGITAL_ATS_MAX_FSC;
+	else
+		ddev->target_fsc = digital_ats_fsc[fsci];
+
+	target = kzalloc(sizeof(struct nfc_target), GFP_KERNEL);
+	if (!target) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	rc = digital_in_send_attrib_req(ddev, target, sensb_res);
+
+exit:
+	dev_kfree_skb(resp);
+
+	if (rc) {
+		kfree(target);
+		digital_poll_next_tech(ddev);
+	}
+}
+
+int digital_in_send_sensb_req(struct nfc_digital_dev *ddev, u8 rf_tech)
+{
+	struct digital_sensb_req *sensb_req;
+	struct sk_buff *skb;
+	int rc;
+
+	rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH,
+				     NFC_DIGITAL_RF_TECH_106B);
+	if (rc)
+		return rc;
+
+	rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+				     NFC_DIGITAL_FRAMING_NFCB);
+	if (rc)
+		return rc;
+
+	skb = digital_skb_alloc(ddev, sizeof(*sensb_req));
+	if (!skb)
+		return -ENOMEM;
+
+	sensb_req = (struct digital_sensb_req *)skb_put(skb,
+							sizeof(*sensb_req));
+
+	sensb_req->cmd = DIGITAL_CMD_SENSB_REQ;
+	sensb_req->afi = 0x00; /* All families and sub-families */
+	sensb_req->param = DIGITAL_SENSB_N(0);
+
+	rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sensb_res,
+				 NULL);
+	if (rc)
+		kfree_skb(skb);
+
+	return rc;
+}
+
 static void digital_in_recv_sensf_res(struct nfc_digital_dev *ddev, void *arg,
 				      struct sk_buff *resp)
 {
@@ -877,6 +1091,18 @@ exit:
 	dev_kfree_skb(resp);
 }
 
+static void digital_tg_recv_atr_or_sensf_req(struct nfc_digital_dev *ddev,
+					     void *arg, struct sk_buff *resp)
+{
+	if (!IS_ERR(resp) && (resp->len >= 2) &&
+	    (resp->data[1] == DIGITAL_CMD_SENSF_REQ))
+		digital_tg_recv_sensf_req(ddev, arg, resp);
+	else
+		digital_tg_recv_atr_req(ddev, arg, resp);
+
+	return;
+}
+
 static int digital_tg_send_sensf_res(struct nfc_digital_dev *ddev,
 				     struct digital_sensf_req *sensf_req)
 {
@@ -887,7 +1113,7 @@ static int digital_tg_send_sensf_res(struct nfc_digital_dev *ddev,
 
 	size = sizeof(struct digital_sensf_res);
 
-	if (sensf_req->rc != DIGITAL_SENSF_REQ_RC_NONE)
+	if (sensf_req->rc == DIGITAL_SENSF_REQ_RC_NONE)
 		size -= sizeof(sensf_res->rd);
 
 	skb = digital_skb_alloc(ddev, size);
@@ -922,7 +1148,7 @@ static int digital_tg_send_sensf_res(struct nfc_digital_dev *ddev,
 	digital_skb_add_crc_f(skb);
 
 	rc = digital_tg_send_cmd(ddev, skb, 300,
-				 digital_tg_recv_atr_req, NULL);
+				 digital_tg_recv_atr_or_sensf_req, NULL);
 	if (rc)
 		kfree_skb(skb);
 
diff --git a/net/nfc/hci/command.c b/net/nfc/hci/command.c
index a9f4d2e62d8d..677d24bb70f8 100644
--- a/net/nfc/hci/command.c
+++ b/net/nfc/hci/command.c
@@ -26,6 +26,8 @@
 
 #include "hci.h"
 
+#define MAX_FWI 4949
+
 static int nfc_hci_execute_cmd_async(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
 				     const u8 *param, size_t param_len,
 				     data_exchange_cb_t cb, void *cb_context)
@@ -37,7 +39,7 @@ static int nfc_hci_execute_cmd_async(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
 	 * for all commands?
 	 */
 	return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_COMMAND, cmd,
				      param, param_len, cb, cb_context, MAX_FWI);
 }
 
 /*
@@ -82,7 +84,7 @@ static int nfc_hci_execute_cmd(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
 				      NFC_HCI_HCP_COMMAND, cmd,
 				      param, param_len,
 				      nfc_hci_execute_cb, &hcp_ew,
-				      3000);
+				      MAX_FWI);
 	if (hcp_ew.exec_result < 0)
 		return hcp_ew.exec_result;
 
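
Presumably the new MAX_FWI value is the ISO/IEC 14443-4 maximum frame waiting time rather than an arbitrary timeout: FWT = (256 * 16 / fc) * 2^FWI, with fc = 13.56 MHz and FWI capped at 14, comes to roughly 302 us * 16384, i.e. about 4949 ms. A quick check (plain C, illustrative only):

    #include <stdio.h>

    int main(void)
    {
            const double fc = 13.56e6;              /* carrier frequency, Hz */
            const double fwt0 = 256.0 * 16.0 / fc;  /* base waiting time, s */
            const int fwi_max = 14;                 /* largest FWI allowed */

            /* FWT = fwt0 * 2^FWI, printed in milliseconds */
            printf("max FWT = %.0f ms\n", fwt0 * (1 << fwi_max) * 1e3);
            return 0;
    }

which prints "max FWT = 4949 ms".
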
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index d45b638e77c7..47403705197e 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -225,7 +225,7 @@ int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
 			goto exit;
 		}
 
-		targets->sens_res = be16_to_cpu(*(u16 *)atqa_skb->data);
+		targets->sens_res = be16_to_cpu(*(__be16 *)atqa_skb->data);
 		targets->sel_res = sak_skb->data[0];
 
 		r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE,
@@ -380,34 +380,31 @@ static int hci_dev_session_init(struct nfc_hci_dev *hdev)
 	if (r < 0)
 		goto disconnect_all;
 
-	if (skb->len && skb->len == strlen(hdev->init_data.session_id))
-		if (memcmp(hdev->init_data.session_id, skb->data,
-			   skb->len) == 0) {
-			/* TODO ELa: restore gate<->pipe table from
-			 * some TBD location.
-			 * note: it doesn't seem possible to get the chip
-			 * currently open gate/pipe table.
-			 * It is only possible to obtain the supported
-			 * gate list.
-			 */
+	if (skb->len && skb->len == strlen(hdev->init_data.session_id) &&
+	    (memcmp(hdev->init_data.session_id, skb->data,
+		    skb->len) == 0) && hdev->ops->load_session) {
+		/* Restore gate<->pipe table from some proprietary location. */
 
-			/* goto exit
-			 * For now, always do a full initialization */
-		}
+		r = hdev->ops->load_session(hdev);
 
-	r = nfc_hci_disconnect_all_gates(hdev);
-	if (r < 0)
-		goto exit;
+		if (r < 0)
+			goto disconnect_all;
+	} else {
 
-	r = hci_dev_connect_gates(hdev, hdev->init_data.gate_count,
-				  hdev->init_data.gates);
-	if (r < 0)
-		goto disconnect_all;
+		r = nfc_hci_disconnect_all_gates(hdev);
+		if (r < 0)
+			goto exit;
 
-	r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE,
-			      NFC_HCI_ADMIN_SESSION_IDENTITY,
-			      hdev->init_data.session_id,
-			      strlen(hdev->init_data.session_id));
+		r = hci_dev_connect_gates(hdev, hdev->init_data.gate_count,
+					  hdev->init_data.gates);
+		if (r < 0)
+			goto disconnect_all;
+
+		r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE,
+				      NFC_HCI_ADMIN_SESSION_IDENTITY,
+				      hdev->init_data.session_id,
+				      strlen(hdev->init_data.session_id));
+	}
 	if (r == 0)
 		goto exit;
 
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
index bec6ed15f503..a3ad69a4c648 100644
--- a/net/nfc/llcp_commands.c
+++ b/net/nfc/llcp_commands.c
@@ -387,7 +387,7 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
 
 	__net_timestamp(skb);
 
-	nfc_llcp_send_to_raw_sock(local, skb, NFC_LLCP_DIRECTION_TX);
+	nfc_llcp_send_to_raw_sock(local, skb, NFC_DIRECTION_TX);
 
 	return nfc_data_exchange(dev, local->target_idx, skb,
 				 nfc_llcp_recv, local);
diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
index b4671958fcf9..51e788797317 100644
--- a/net/nfc/llcp_core.c
+++ b/net/nfc/llcp_core.c
@@ -680,16 +680,17 @@ void nfc_llcp_send_to_raw_sock(struct nfc_llcp_local *local,
 			continue;
 
 		if (skb_copy == NULL) {
-			skb_copy = __pskb_copy(skb, NFC_LLCP_RAW_HEADER_SIZE,
-					       GFP_ATOMIC);
+			skb_copy = __pskb_copy_fclone(skb, NFC_RAW_HEADER_SIZE,
+						      GFP_ATOMIC, true);
 
 			if (skb_copy == NULL)
 				continue;
 
-			data = skb_push(skb_copy, NFC_LLCP_RAW_HEADER_SIZE);
+			data = skb_push(skb_copy, NFC_RAW_HEADER_SIZE);
 
 			data[0] = local->dev ? local->dev->idx : 0xFF;
-			data[1] = direction;
+			data[1] = direction & 0x01;
+			data[1] |= (RAW_PAYLOAD_LLCP << 1);
 		}
 
 		nskb = skb_clone(skb_copy, GFP_ATOMIC);
@@ -747,7 +748,7 @@ static void nfc_llcp_tx_work(struct work_struct *work)
 			__net_timestamp(skb);
 
 			nfc_llcp_send_to_raw_sock(local, skb,
-						  NFC_LLCP_DIRECTION_TX);
+						  NFC_DIRECTION_TX);
 
 			ret = nfc_data_exchange(local->dev, local->target_idx,
 						skb, nfc_llcp_recv, local);
@@ -1476,7 +1477,7 @@ static void nfc_llcp_rx_work(struct work_struct *work)
 
 	__net_timestamp(skb);
 
-	nfc_llcp_send_to_raw_sock(local, skb, NFC_LLCP_DIRECTION_RX);
+	nfc_llcp_send_to_raw_sock(local, skb, NFC_DIRECTION_RX);
 
 	nfc_llcp_rx_skb(local, skb);
 
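
The llcp_core.c hunk also changes the second header byte fed to raw sockets: instead of carrying only the direction, it now packs the direction in bit 0 and a payload-type identifier in the remaining bits. A runnable demonstration of the packing and unpacking (the RAW_PAYLOAD_LLCP value below is assumed for illustration):

    #include <stdio.h>

    #define RAW_PAYLOAD_LLCP 0x01   /* assumed value, for illustration */

    int main(void)
    {
            unsigned char direction = 1;            /* 0 = RX, 1 = TX */
            unsigned char hdr1;

            hdr1  = direction & 0x01;               /* bit 0: direction */
            hdr1 |= RAW_PAYLOAD_LLCP << 1;          /* bits 1..7: type */

            printf("dir=%u type=%u\n", hdr1 & 0x01, hdr1 >> 1);
            return 0;
    }
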
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 6c34ac978501..2b400e1a8695 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -861,6 +861,10 @@ static int nci_send_frame(struct nci_dev *ndev, struct sk_buff *skb)
 	/* Get rid of skb owner, prior to sending to the driver. */
 	skb_orphan(skb);
 
+	/* Send copy to sniffer */
+	nfc_send_to_raw_sock(ndev->nfc_dev, skb,
+			     RAW_PAYLOAD_NCI, NFC_DIRECTION_TX);
+
 	return ndev->ops->send(ndev, skb);
 }
 
@@ -935,6 +939,11 @@ static void nci_rx_work(struct work_struct *work)
 	struct sk_buff *skb;
 
 	while ((skb = skb_dequeue(&ndev->rx_q))) {
+
+		/* Send copy to sniffer */
+		nfc_send_to_raw_sock(ndev->nfc_dev, skb,
+				     RAW_PAYLOAD_NCI, NFC_DIRECTION_RX);
+
 		/* Process frame */
 		switch (nci_mt(skb->data)) {
 		case NCI_MT_RSP_PKT:
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index 1e905097456b..f8f6af231381 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -366,7 +366,6 @@ static int nci_extract_activation_params_nfc_dep(struct nci_dev *ndev,
 	struct nci_rf_intf_activated_ntf *ntf, __u8 *data)
 {
 	struct activation_params_poll_nfc_dep *poll;
-	int i;
 
 	switch (ntf->activation_rf_tech_and_mode) {
 	case NCI_NFC_A_PASSIVE_POLL_MODE:
@@ -374,10 +373,8 @@ static int nci_extract_activation_params_nfc_dep(struct nci_dev *ndev,
 		poll = &ntf->activation_params.poll_nfc_dep;
 		poll->atr_res_len = min_t(__u8, *data++, 63);
 		pr_debug("atr_res_len %d\n", poll->atr_res_len);
-		if (poll->atr_res_len > 0) {
-			for (i = 0; i < poll->atr_res_len; i++)
-				poll->atr_res[poll->atr_res_len-1-i] = data[i];
-		}
+		if (poll->atr_res_len > 0)
+			memcpy(poll->atr_res, data, poll->atr_res_len);
 		break;
 
 	default:
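
The nci/ntf.c hunk is a behaviour change, not just a cleanup: the old loop stored the ATR_RES bytes reversed, while the memcpy() keeps them in wire order. A runnable before/after comparison (plain C, illustrative):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            unsigned char data[4] = { 0x01, 0x02, 0x03, 0x04 };
            unsigned char rev[4], lin[4];
            int i, n = 4;

            for (i = 0; i < n; i++)         /* old: reversed copy */
                    rev[n - 1 - i] = data[i];
            memcpy(lin, data, n);           /* new: wire order kept */

            printf("old first byte %02x, new first byte %02x\n",
                   rev[0], lin[0]);         /* 04 vs 01 */
            return 0;
    }
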
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index 9d6e74f7e6b3..88d60064890e 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -40,6 +40,12 @@ struct nfc_rawsock {
40 struct work_struct tx_work; 40 struct work_struct tx_work;
41 bool tx_work_scheduled; 41 bool tx_work_scheduled;
42}; 42};
43
44struct nfc_sock_list {
45 struct hlist_head head;
46 rwlock_t lock;
47};
48
43#define nfc_rawsock(sk) ((struct nfc_rawsock *) sk) 49#define nfc_rawsock(sk) ((struct nfc_rawsock *) sk)
44#define to_rawsock_sk(_tx_work) \ 50#define to_rawsock_sk(_tx_work) \
45 ((struct sock *) container_of(_tx_work, struct nfc_rawsock, tx_work)) 51 ((struct sock *) container_of(_tx_work, struct nfc_rawsock, tx_work))
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index c27a6e86cae4..11c3544ea546 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -27,6 +27,24 @@
 
 #include "nfc.h"
 
+static struct nfc_sock_list raw_sk_list = {
+	.lock = __RW_LOCK_UNLOCKED(raw_sk_list.lock)
+};
+
+static void nfc_sock_link(struct nfc_sock_list *l, struct sock *sk)
+{
+	write_lock(&l->lock);
+	sk_add_node(sk, &l->head);
+	write_unlock(&l->lock);
+}
+
+static void nfc_sock_unlink(struct nfc_sock_list *l, struct sock *sk)
+{
+	write_lock(&l->lock);
+	sk_del_node_init(sk);
+	write_unlock(&l->lock);
+}
+
 static void rawsock_write_queue_purge(struct sock *sk)
 {
 	pr_debug("sk=%p\n", sk);
@@ -57,6 +75,9 @@ static int rawsock_release(struct socket *sock)
 	if (!sk)
 		return 0;
 
+	if (sock->type == SOCK_RAW)
+		nfc_sock_unlink(&raw_sk_list, sk);
+
 	sock_orphan(sk);
 	sock_put(sk);
 
@@ -275,6 +296,26 @@ static const struct proto_ops rawsock_ops = {
 	.mmap           = sock_no_mmap,
 };
 
+static const struct proto_ops rawsock_raw_ops = {
+	.family         = PF_NFC,
+	.owner          = THIS_MODULE,
+	.release        = rawsock_release,
+	.bind           = sock_no_bind,
+	.connect        = sock_no_connect,
+	.socketpair     = sock_no_socketpair,
+	.accept         = sock_no_accept,
+	.getname        = sock_no_getname,
+	.poll           = datagram_poll,
+	.ioctl          = sock_no_ioctl,
+	.listen         = sock_no_listen,
+	.shutdown       = sock_no_shutdown,
+	.setsockopt     = sock_no_setsockopt,
+	.getsockopt     = sock_no_getsockopt,
+	.sendmsg        = sock_no_sendmsg,
+	.recvmsg        = rawsock_recvmsg,
+	.mmap           = sock_no_mmap,
+};
+
 static void rawsock_destruct(struct sock *sk)
 {
 	pr_debug("sk=%p\n", sk);
@@ -300,10 +341,13 @@ static int rawsock_create(struct net *net, struct socket *sock,
 
 	pr_debug("sock=%p\n", sock);
 
-	if (sock->type != SOCK_SEQPACKET)
+	if ((sock->type != SOCK_SEQPACKET) && (sock->type != SOCK_RAW))
 		return -ESOCKTNOSUPPORT;
 
-	sock->ops = &rawsock_ops;
+	if (sock->type == SOCK_RAW)
+		sock->ops = &rawsock_raw_ops;
+	else
+		sock->ops = &rawsock_ops;
 
 	sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto);
 	if (!sk)
@@ -313,13 +357,53 @@ static int rawsock_create(struct net *net, struct socket *sock,
 	sk->sk_protocol = nfc_proto->id;
 	sk->sk_destruct = rawsock_destruct;
 	sock->state = SS_UNCONNECTED;
-
-	INIT_WORK(&nfc_rawsock(sk)->tx_work, rawsock_tx_work);
-	nfc_rawsock(sk)->tx_work_scheduled = false;
+	if (sock->type == SOCK_RAW)
+		nfc_sock_link(&raw_sk_list, sk);
+	else {
+		INIT_WORK(&nfc_rawsock(sk)->tx_work, rawsock_tx_work);
+		nfc_rawsock(sk)->tx_work_scheduled = false;
+	}
 
 	return 0;
 }
 
+void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb,
+			  u8 payload_type, u8 direction)
+{
+	struct sk_buff *skb_copy = NULL, *nskb;
+	struct sock *sk;
+	u8 *data;
+
+	read_lock(&raw_sk_list.lock);
+
+	sk_for_each(sk, &raw_sk_list.head) {
+		if (!skb_copy) {
+			skb_copy = __pskb_copy_fclone(skb, NFC_RAW_HEADER_SIZE,
+						      GFP_ATOMIC, true);
+			if (!skb_copy)
+				continue;
+
+			data = skb_push(skb_copy, NFC_RAW_HEADER_SIZE);
+
+			data[0] = dev ? dev->idx : 0xFF;
+			data[1] = direction & 0x01;
+			data[1] |= (payload_type << 1);
+		}
+
+		nskb = skb_clone(skb_copy, GFP_ATOMIC);
+		if (!nskb)
+			continue;
+
+		if (sock_queue_rcv_skb(sk, nskb))
+			kfree_skb(nskb);
+	}
+
+	read_unlock(&raw_sk_list.lock);
+
+	kfree_skb(skb_copy);
+}
+EXPORT_SYMBOL(nfc_send_to_raw_sock);
+
 static struct proto rawsock_proto = {
 	.name     = "NFC_RAW",
 	.owner    = THIS_MODULE,
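
nfc_send_to_raw_sock() above uses the usual sniffer fan-out scheme: the frame is deep-copied once (with headroom for the two header bytes) and each listening socket then gets an skb_clone(), which shares the data. A runnable userspace analogue of the one-copy/many-clones idea, using a refcount in place of the skb internals (toy code, not kernel API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct buf { int refs; char *data; };

    static struct buf *clone_buf(struct buf *b) { b->refs++; return b; }

    static void put_buf(struct buf *b)
    {
            if (--b->refs == 0) {           /* last reference frees data */
                    free(b->data);
                    free(b);
            }
    }

    int main(void)
    {
            struct buf *copy = malloc(sizeof(*copy));
            int i;

            copy->refs = 1;
            copy->data = strdup("frame");   /* the single deep copy */

            for (i = 0; i < 3; i++)         /* three listeners, no re-copy */
                    put_buf(clone_buf(copy));

            put_buf(copy);                  /* drop our own reference */
            return 0;
    }
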
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 2c77e7b1a913..c36856a457ca 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -134,8 +134,8 @@ static int set_eth_addr(struct sk_buff *skb,
 
 	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
 
-	memcpy(eth_hdr(skb)->h_source, eth_key->eth_src, ETH_ALEN);
-	memcpy(eth_hdr(skb)->h_dest, eth_key->eth_dst, ETH_ALEN);
+	ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src);
+	ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst);
 
 	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
 
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index a3276e3c4feb..0d407bca81e3 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -44,11 +44,11 @@
 #include <linux/netfilter_ipv4.h>
 #include <linux/inetdevice.h>
 #include <linux/list.h>
-#include <linux/lockdep.h>
 #include <linux/openvswitch.h>
 #include <linux/rculist.h>
 #include <linux/dmi.h>
-#include <linux/workqueue.h>
+#include <linux/genetlink.h>
+#include <net/genetlink.h>
 #include <net/genetlink.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
@@ -62,6 +62,31 @@
 
 int ovs_net_id __read_mostly;
 
+static struct genl_family dp_packet_genl_family;
+static struct genl_family dp_flow_genl_family;
+static struct genl_family dp_datapath_genl_family;
+
+static struct genl_multicast_group ovs_dp_flow_multicast_group = {
+	.name = OVS_FLOW_MCGROUP
+};
+
+static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
+	.name = OVS_DATAPATH_MCGROUP
+};
+
+struct genl_multicast_group ovs_dp_vport_multicast_group = {
+	.name = OVS_VPORT_MCGROUP
+};
+
+/* Check if need to build a reply message.
+ * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
+static bool ovs_must_notify(struct genl_info *info,
+			    const struct genl_multicast_group *grp)
+{
+	return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
+		netlink_has_listeners(genl_info_net(info)->genl_sock, 0);
+}
+
 static void ovs_notify(struct genl_family *family,
 		       struct sk_buff *skb, struct genl_info *info)
 {
@@ -173,6 +198,7 @@ static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
 	return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
 }
 
+/* Called with ovs_mutex or RCU read lock. */
 struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
 {
 	struct vport *vport;
@@ -262,16 +288,6 @@ out:
 	u64_stats_update_end(&stats->syncp);
 }
 
-static struct genl_family dp_packet_genl_family = {
-	.id = GENL_ID_GENERATE,
-	.hdrsize = sizeof(struct ovs_header),
-	.name = OVS_PACKET_FAMILY,
-	.version = OVS_PACKET_VERSION,
-	.maxattr = OVS_PACKET_ATTR_MAX,
-	.netnsok = true,
-	.parallel_ops = true,
-};
-
 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
 		  const struct dp_upcall_info *upcall_info)
 {
@@ -524,7 +540,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
 	packet->protocol = htons(ETH_P_802_2);
 
 	/* Build an sw_flow for sending this packet. */
-	flow = ovs_flow_alloc(false);
+	flow = ovs_flow_alloc();
 	err = PTR_ERR(flow);
 	if (IS_ERR(flow))
 		goto err_kfree_skb;
@@ -590,6 +606,18 @@ static const struct genl_ops dp_packet_genl_ops[] = {
 	}
 };
 
+static struct genl_family dp_packet_genl_family = {
+	.id = GENL_ID_GENERATE,
+	.hdrsize = sizeof(struct ovs_header),
+	.name = OVS_PACKET_FAMILY,
+	.version = OVS_PACKET_VERSION,
+	.maxattr = OVS_PACKET_ATTR_MAX,
+	.netnsok = true,
+	.parallel_ops = true,
+	.ops = dp_packet_genl_ops,
+	.n_ops = ARRAY_SIZE(dp_packet_genl_ops),
+};
+
 static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
 			 struct ovs_dp_megaflow_stats *mega_stats)
 {
@@ -621,26 +649,6 @@ static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
 	}
 }
 
-static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
-	[OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
-	[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
-	[OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
-};
-
-static struct genl_family dp_flow_genl_family = {
-	.id = GENL_ID_GENERATE,
-	.hdrsize = sizeof(struct ovs_header),
-	.name = OVS_FLOW_FAMILY,
-	.version = OVS_FLOW_VERSION,
-	.maxattr = OVS_FLOW_ATTR_MAX,
-	.netnsok = true,
-	.parallel_ops = true,
-};
-
-static struct genl_multicast_group ovs_dp_flow_multicast_group = {
-	.name = OVS_FLOW_MCGROUP
-};
-
 static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
 {
 	return NLMSG_ALIGN(sizeof(struct ovs_header))
@@ -652,8 +660,8 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
 		+ nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
 }
 
-/* Called with ovs_mutex. */
-static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
+/* Called with ovs_mutex or RCU read lock. */
+static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
 				  struct sk_buff *skb, u32 portid,
 				  u32 seq, u32 flags, u8 cmd)
 {
@@ -670,7 +678,7 @@ static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
 	if (!ovs_header)
 		return -EMSGSIZE;
 
-	ovs_header->dp_ifindex = get_dpifindex(dp);
+	ovs_header->dp_ifindex = dp_ifindex;
 
 	/* Fill flow key. */
 	nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
@@ -693,6 +701,7 @@ static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
 	nla_nest_end(skb, nla);
 
 	ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
+
 	if (used &&
 	    nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
 		goto nla_put_failure;
@@ -720,9 +729,9 @@ static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
 		const struct sw_flow_actions *sf_acts;
 
 		sf_acts = rcu_dereference_ovsl(flow->sf_acts);
-
 		err = ovs_nla_put_actions(sf_acts->actions,
 					  sf_acts->actions_len, skb);
+
 		if (!err)
 			nla_nest_end(skb, start);
 		else {
@@ -743,113 +752,128 @@ error:
 	return err;
 }
 
-static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow,
-					       struct genl_info *info)
+/* May not be called with RCU read lock. */
+static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
+					       struct genl_info *info,
+					       bool always)
 {
-	size_t len;
+	struct sk_buff *skb;
+
+	if (!always && !ovs_must_notify(info, &ovs_dp_flow_multicast_group))
+		return NULL;
 
-	len = ovs_flow_cmd_msg_size(ovsl_dereference(flow->sf_acts));
+	skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
 
-	return genlmsg_new_unicast(len, info, GFP_KERNEL);
+	return skb;
 }
 
-static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
-					       struct datapath *dp,
-					       struct genl_info *info,
-					       u8 cmd)
+/* Called with ovs_mutex. */
+static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
+					       int dp_ifindex,
+					       struct genl_info *info, u8 cmd,
+					       bool always)
 {
 	struct sk_buff *skb;
 	int retval;
 
-	skb = ovs_flow_cmd_alloc_info(flow, info);
-	if (!skb)
-		return ERR_PTR(-ENOMEM);
+	skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), info,
+				      always);
+	if (!skb || IS_ERR(skb))
+		return skb;
 
-	retval = ovs_flow_cmd_fill_info(flow, dp, skb, info->snd_portid,
-					info->snd_seq, 0, cmd);
+	retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
					info->snd_portid, info->snd_seq, 0,
					cmd);
 	BUG_ON(retval < 0);
 	return skb;
 }
 
-static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
+static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
 {
 	struct nlattr **a = info->attrs;
 	struct ovs_header *ovs_header = info->userhdr;
-	struct sw_flow_key key, masked_key;
-	struct sw_flow *flow = NULL;
+	struct sw_flow *flow, *new_flow;
 	struct sw_flow_mask mask;
 	struct sk_buff *reply;
 	struct datapath *dp;
-	struct sw_flow_actions *acts = NULL;
+	struct sw_flow_actions *acts;
 	struct sw_flow_match match;
-	bool exact_5tuple;
 	int error;
 
-	/* Extract key. */
+	/* Must have key and actions. */
 	error = -EINVAL;
 	if (!a[OVS_FLOW_ATTR_KEY])
 		goto error;
+	if (!a[OVS_FLOW_ATTR_ACTIONS])
+		goto error;
 
-	ovs_match_init(&match, &key, &mask);
-	error = ovs_nla_get_match(&match, &exact_5tuple,
+	/* Most of the time we need to allocate a new flow, do it before
+	 * locking.
+	 */
+	new_flow = ovs_flow_alloc();
+	if (IS_ERR(new_flow)) {
+		error = PTR_ERR(new_flow);
+		goto error;
+	}
+
+	/* Extract key. */
+	ovs_match_init(&match, &new_flow->unmasked_key, &mask);
+	error = ovs_nla_get_match(&match,
 				  a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
 	if (error)
-		goto error;
+		goto err_kfree_flow;
+
+	ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask);
 
 	/* Validate actions. */
-	if (a[OVS_FLOW_ATTR_ACTIONS]) {
-		acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
-		error = PTR_ERR(acts);
-		if (IS_ERR(acts))
-			goto error;
+	acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
+	error = PTR_ERR(acts);
+	if (IS_ERR(acts))
+		goto err_kfree_flow;
 
-		ovs_flow_mask_key(&masked_key, &key, &mask);
-		error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
-					     &masked_key, 0, &acts);
-		if (error) {
-			OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
-			goto err_kfree;
-		}
-	} else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
-		error = -EINVAL;
-		goto error;
+	error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key,
				     0, &acts);
+	if (error) {
+		OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
+		goto err_kfree_acts;
+	}
+
+	reply = ovs_flow_cmd_alloc_info(acts, info, false);
+	if (IS_ERR(reply)) {
+		error = PTR_ERR(reply);
+		goto err_kfree_acts;
 	}
 
 	ovs_lock();
 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
-	error = -ENODEV;
-	if (!dp)
+	if (unlikely(!dp)) {
+		error = -ENODEV;
 		goto err_unlock_ovs;
-
+	}
 	/* Check if this is a duplicate flow */
-	flow = ovs_flow_tbl_lookup(&dp->table, &key);
-	if (!flow) {
-		/* Bail out if we're not allowed to create a new flow. */
-		error = -ENOENT;
-		if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
-			goto err_unlock_ovs;
-
-		/* Allocate flow. */
-		flow = ovs_flow_alloc(!exact_5tuple);
-		if (IS_ERR(flow)) {
-			error = PTR_ERR(flow);
-			goto err_unlock_ovs;
-		}
-
-		flow->key = masked_key;
-		flow->unmasked_key = key;
-		rcu_assign_pointer(flow->sf_acts, acts);
+	flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->unmasked_key);
+	if (likely(!flow)) {
+		rcu_assign_pointer(new_flow->sf_acts, acts);
 
 		/* Put flow in bucket. */
-		error = ovs_flow_tbl_insert(&dp->table, flow, &mask);
-		if (error) {
+		error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
+		if (unlikely(error)) {
 			acts = NULL;
-			goto err_flow_free;
+			goto err_unlock_ovs;
 		}
 
-		reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW);
+		if (unlikely(reply)) {
+			error = ovs_flow_cmd_fill_info(new_flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_NEW);
+			BUG_ON(error < 0);
+		}
+		ovs_unlock();
 	} else {
-		/* We found a matching flow. */
 		struct sw_flow_actions *old_acts;
 
 		/* Bail out if we're not allowed to modify an existing flow.
@@ -858,40 +882,154 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 	 * request. We also accept NLM_F_EXCL in case that bug ever
	 * gets fixed.
	 */
-		error = -EEXIST;
-		if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
-		    info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
-			goto err_unlock_ovs;
-
+		if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
							 | NLM_F_EXCL))) {
+			error = -EEXIST;
+			goto err_unlock_ovs;
+		}
 		/* The unmasked key has to be the same for flow updates. */
-		if (!ovs_flow_cmp_unmasked_key(flow, &match))
+		if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
892 error = -EEXIST;
868 goto err_unlock_ovs; 893 goto err_unlock_ovs;
869 894 }
870 /* Update actions. */ 895 /* Update actions. */
871 old_acts = ovsl_dereference(flow->sf_acts); 896 old_acts = ovsl_dereference(flow->sf_acts);
872 rcu_assign_pointer(flow->sf_acts, acts); 897 rcu_assign_pointer(flow->sf_acts, acts);
898
899 if (unlikely(reply)) {
900 error = ovs_flow_cmd_fill_info(flow,
901 ovs_header->dp_ifindex,
902 reply, info->snd_portid,
903 info->snd_seq, 0,
904 OVS_FLOW_CMD_NEW);
905 BUG_ON(error < 0);
906 }
907 ovs_unlock();
908
873 ovs_nla_free_flow_actions(old_acts); 909 ovs_nla_free_flow_actions(old_acts);
910 ovs_flow_free(new_flow, false);
911 }
912
913 if (reply)
914 ovs_notify(&dp_flow_genl_family, reply, info);
915 return 0;
916
917err_unlock_ovs:
918 ovs_unlock();
919 kfree_skb(reply);
920err_kfree_acts:
921 kfree(acts);
922err_kfree_flow:
923 ovs_flow_free(new_flow, false);
924error:
925 return error;
926}
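
[Editorial sketch] The new ovs_flow_cmd_new() above deliberately performs every fallible allocation -- the flow, the actions, and the reply skb -- before ovs_lock(), so the mutex only covers the lookup and table insert. A self-contained user-space analog of that ordering (all names here are illustrative, not OVS symbols):

#include <pthread.h>
#include <stdlib.h>

struct flow { int key; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Trivial stub standing in for ovs_flow_tbl_insert(). */
static int table_insert(struct flow *f) { (void)f; return 0; }

static int cmd_new(int key)
{
	/* Allocate everything that can fail before taking the lock. */
	struct flow *f = malloc(sizeof(*f));
	char *reply = malloc(256);
	int err;

	if (!f || !reply) {
		err = -1;
		goto out_free;
	}
	f->key = key;

	pthread_mutex_lock(&table_lock);
	err = table_insert(f);		/* the only work under the lock */
	pthread_mutex_unlock(&table_lock);

	if (err)
		goto out_free;
	free(reply);			/* a real notifier would consume it */
	return 0;

out_free:
	free(reply);
	free(f);
	return err;
}
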
+
+static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlattr **a = info->attrs;
+	struct ovs_header *ovs_header = info->userhdr;
+	struct sw_flow_key key, masked_key;
+	struct sw_flow *flow;
+	struct sw_flow_mask mask;
+	struct sk_buff *reply = NULL;
+	struct datapath *dp;
+	struct sw_flow_actions *old_acts = NULL, *acts = NULL;
+	struct sw_flow_match match;
+	int error;
+
+	/* Extract key. */
+	error = -EINVAL;
+	if (!a[OVS_FLOW_ATTR_KEY])
+		goto error;
+
+	ovs_match_init(&match, &key, &mask);
+	error = ovs_nla_get_match(&match,
+				  a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
+	if (error)
+		goto error;
+
+	/* Validate actions. */
+	if (a[OVS_FLOW_ATTR_ACTIONS]) {
+		acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
+		error = PTR_ERR(acts);
+		if (IS_ERR(acts))
+			goto error;
+
+		ovs_flow_mask_key(&masked_key, &key, &mask);
+		error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
+					     &masked_key, 0, &acts);
+		if (error) {
+			OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
+			goto err_kfree_acts;
+		}
+	}
+
+	/* Can allocate before locking if have acts. */
+	if (acts) {
+		reply = ovs_flow_cmd_alloc_info(acts, info, false);
+		if (IS_ERR(reply)) {
+			error = PTR_ERR(reply);
+			goto err_kfree_acts;
+		}
+	}
 
-	reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW);
+	ovs_lock();
+	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
+	if (unlikely(!dp)) {
+		error = -ENODEV;
+		goto err_unlock_ovs;
+	}
+	/* Check that the flow exists. */
+	flow = ovs_flow_tbl_lookup(&dp->table, &key);
+	if (unlikely(!flow)) {
+		error = -ENOENT;
+		goto err_unlock_ovs;
+	}
+	/* The unmasked key has to be the same for flow updates. */
+	if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
+		error = -EEXIST;
+		goto err_unlock_ovs;
+	}
+	/* Update actions, if present. */
+	if (likely(acts)) {
+		old_acts = ovsl_dereference(flow->sf_acts);
+		rcu_assign_pointer(flow->sf_acts, acts);
 
-	/* Clear stats. */
-	if (a[OVS_FLOW_ATTR_CLEAR])
-		ovs_flow_stats_clear(flow);
+		if (unlikely(reply)) {
+			error = ovs_flow_cmd_fill_info(flow,
+						       ovs_header->dp_ifindex,
+						       reply, info->snd_portid,
+						       info->snd_seq, 0,
+						       OVS_FLOW_CMD_NEW);
+			BUG_ON(error < 0);
+		}
+	} else {
+		/* Could not alloc without acts before locking. */
+		reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
+						info, OVS_FLOW_CMD_NEW, false);
+		if (unlikely(IS_ERR(reply))) {
+			error = PTR_ERR(reply);
+			goto err_unlock_ovs;
+		}
 	}
+
+	/* Clear stats. */
+	if (a[OVS_FLOW_ATTR_CLEAR])
+		ovs_flow_stats_clear(flow);
 	ovs_unlock();
 
-	if (!IS_ERR(reply))
+	if (reply)
 		ovs_notify(&dp_flow_genl_family, reply, info);
-	else
-		genl_set_err(&dp_flow_genl_family, sock_net(skb->sk), 0,
-			     0, PTR_ERR(reply));
+	if (old_acts)
+		ovs_nla_free_flow_actions(old_acts);
+
 	return 0;
 
-err_flow_free:
-	ovs_flow_free(flow, false);
 err_unlock_ovs:
 	ovs_unlock();
-err_kfree:
+	kfree_skb(reply);
+err_kfree_acts:
 	kfree(acts);
 error:
 	return error;
@@ -914,7 +1052,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	ovs_match_init(&match, &key, NULL);
-	err = ovs_nla_get_match(&match, NULL, a[OVS_FLOW_ATTR_KEY], NULL);
+	err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
 	if (err)
 		return err;
 
@@ -931,7 +1069,8 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
 		goto unlock;
 	}
 
-	reply = ovs_flow_cmd_build_info(flow, dp, info, OVS_FLOW_CMD_NEW);
+	reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
+					OVS_FLOW_CMD_NEW, true);
 	if (IS_ERR(reply)) {
 		err = PTR_ERR(reply);
 		goto unlock;
@@ -955,45 +1094,53 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	struct sw_flow_match match;
 	int err;
 
+	if (likely(a[OVS_FLOW_ATTR_KEY])) {
+		ovs_match_init(&match, &key, NULL);
+		err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
+		if (unlikely(err))
+			return err;
+	}
+
 	ovs_lock();
 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
-	if (!dp) {
+	if (unlikely(!dp)) {
 		err = -ENODEV;
 		goto unlock;
 	}
 
-	if (!a[OVS_FLOW_ATTR_KEY]) {
+	if (unlikely(!a[OVS_FLOW_ATTR_KEY])) {
 		err = ovs_flow_tbl_flush(&dp->table);
 		goto unlock;
 	}
 
-	ovs_match_init(&match, &key, NULL);
-	err = ovs_nla_get_match(&match, NULL, a[OVS_FLOW_ATTR_KEY], NULL);
-	if (err)
-		goto unlock;
-
 	flow = ovs_flow_tbl_lookup(&dp->table, &key);
-	if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
+	if (unlikely(!flow || !ovs_flow_cmp_unmasked_key(flow, &match))) {
 		err = -ENOENT;
 		goto unlock;
 	}
 
-	reply = ovs_flow_cmd_alloc_info(flow, info);
-	if (!reply) {
-		err = -ENOMEM;
-		goto unlock;
-	}
-
 	ovs_flow_tbl_remove(&dp->table, flow);
+	ovs_unlock();
 
-	err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
-				     info->snd_seq, 0, OVS_FLOW_CMD_DEL);
-	BUG_ON(err < 0);
+	reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *) flow->sf_acts,
+					info, false);
+	if (likely(reply)) {
+		if (likely(!IS_ERR(reply))) {
+			rcu_read_lock();	/*To keep RCU checker happy. */
+			err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
+						     reply, info->snd_portid,
+						     info->snd_seq, 0,
+						     OVS_FLOW_CMD_DEL);
+			rcu_read_unlock();
+			BUG_ON(err < 0);
+
+			ovs_notify(&dp_flow_genl_family, reply, info);
+		} else {
+			netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0, PTR_ERR(reply));
+		}
+	}
 
 	ovs_flow_free(flow, true);
-	ovs_unlock();
-
-	ovs_notify(&dp_flow_genl_family, reply, info);
 	return 0;
 unlock:
 	ovs_unlock();
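
[Editorial sketch] Throughout these handlers the reply skb is now optional: ovs_flow_cmd_alloc_info() returns NULL when the caller passed always == false and nobody would see the message. The ovs_must_notify() helper it relies on is not part of this hunk; a plausible shape for such a check, built only from netlink primitives that do exist, would be:

/* Hypothetical sketch -- the real ovs_must_notify() body is not shown
 * in this diff.  NLM_F_ECHO forces a unicast copy to the requester;
 * otherwise building the message is only worthwhile if the multicast
 * group has listeners.
 */
static bool must_notify(struct genl_info *info, unsigned int mcgrp)
{
	if (info->nlhdr->nlmsg_flags & NLM_F_ECHO)
		return true;
	return netlink_has_listeners(genl_info_net(info)->genl_sock, mcgrp);
}
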
@@ -1024,7 +1171,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 		if (!flow)
 			break;
 
-		if (ovs_flow_cmd_fill_info(flow, dp, skb,
+		if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
 					   NETLINK_CB(cb->skb).portid,
 					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
 					   OVS_FLOW_CMD_NEW) < 0)
@@ -1037,11 +1184,17 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	return skb->len;
 }
 
-static const struct genl_ops dp_flow_genl_ops[] = {
+static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
+	[OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
+	[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
+	[OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
+};
+
+static struct genl_ops dp_flow_genl_ops[] = {
 	{ .cmd = OVS_FLOW_CMD_NEW,
 	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
 	  .policy = flow_policy,
-	  .doit = ovs_flow_cmd_new_or_set
+	  .doit = ovs_flow_cmd_new
 	},
 	{ .cmd = OVS_FLOW_CMD_DEL,
 	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
@@ -1057,28 +1210,22 @@ static const struct genl_ops dp_flow_genl_ops[] = {
 	{ .cmd = OVS_FLOW_CMD_SET,
 	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
 	  .policy = flow_policy,
-	  .doit = ovs_flow_cmd_new_or_set,
+	  .doit = ovs_flow_cmd_set,
 	},
 };
 
-static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
-	[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
-	[OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
-	[OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
-};
-
-static struct genl_family dp_datapath_genl_family = {
+static struct genl_family dp_flow_genl_family = {
 	.id = GENL_ID_GENERATE,
 	.hdrsize = sizeof(struct ovs_header),
-	.name = OVS_DATAPATH_FAMILY,
-	.version = OVS_DATAPATH_VERSION,
-	.maxattr = OVS_DP_ATTR_MAX,
+	.name = OVS_FLOW_FAMILY,
+	.version = OVS_FLOW_VERSION,
+	.maxattr = OVS_FLOW_ATTR_MAX,
 	.netnsok = true,
 	.parallel_ops = true,
-};
-
-static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
-	.name = OVS_DATAPATH_MCGROUP
+	.ops = dp_flow_genl_ops,
+	.n_ops = ARRAY_SIZE(dp_flow_genl_ops),
+	.mcgrps = &ovs_dp_flow_multicast_group,
+	.n_mcgrps = 1,
 };
 
 static size_t ovs_dp_cmd_msg_size(void)
@@ -1093,6 +1240,7 @@ static size_t ovs_dp_cmd_msg_size(void)
 	return msgsize;
 }
 
+/* Called with ovs_mutex or RCU read lock. */
 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
 				u32 portid, u32 seq, u32 flags, u8 cmd)
 {
@@ -1108,9 +1256,7 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
 
 	ovs_header->dp_ifindex = get_dpifindex(dp);
 
-	rcu_read_lock();
 	err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
-	rcu_read_unlock();
 	if (err)
 		goto nla_put_failure;
 
@@ -1135,25 +1281,12 @@ error:
 	return -EMSGSIZE;
 }
 
-static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp,
-					     struct genl_info *info, u8 cmd)
+static struct sk_buff *ovs_dp_cmd_alloc_info(struct genl_info *info)
 {
-	struct sk_buff *skb;
-	int retval;
-
-	skb = genlmsg_new_unicast(ovs_dp_cmd_msg_size(), info, GFP_KERNEL);
-	if (!skb)
-		return ERR_PTR(-ENOMEM);
-
-	retval = ovs_dp_cmd_fill_info(dp, skb, info->snd_portid, info->snd_seq, 0, cmd);
-	if (retval < 0) {
-		kfree_skb(skb);
-		return ERR_PTR(retval);
-	}
-	return skb;
+	return genlmsg_new_unicast(ovs_dp_cmd_msg_size(), info, GFP_KERNEL);
 }
 
-/* Called with ovs_mutex. */
+/* Called with rcu_read_lock or ovs_mutex. */
 static struct datapath *lookup_datapath(struct net *net,
 					struct ovs_header *ovs_header,
 					struct nlattr *a[OVS_DP_ATTR_MAX + 1])
@@ -1165,10 +1298,8 @@ static struct datapath *lookup_datapath(struct net *net,
 	else {
 		struct vport *vport;
 
-		rcu_read_lock();
 		vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
 		dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
-		rcu_read_unlock();
 	}
 	return dp ? dp : ERR_PTR(-ENODEV);
 }
@@ -1205,12 +1336,14 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 	if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
 		goto err;
 
-	ovs_lock();
+	reply = ovs_dp_cmd_alloc_info(info);
+	if (!reply)
+		return -ENOMEM;
 
 	err = -ENOMEM;
 	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
 	if (dp == NULL)
-		goto err_unlock_ovs;
+		goto err_free_reply;
 
 	ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
 
@@ -1245,6 +1378,9 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 
 	ovs_dp_change(dp, a);
 
+	/* So far only local changes have been made, now need the lock. */
+	ovs_lock();
+
 	vport = new_vport(&parms);
 	if (IS_ERR(vport)) {
 		err = PTR_ERR(vport);
@@ -1263,10 +1399,9 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 		goto err_destroy_ports_array;
 	}
 
-	reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_NEW);
-	err = PTR_ERR(reply);
-	if (IS_ERR(reply))
-		goto err_destroy_local_port;
+	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
+				   info->snd_seq, 0, OVS_DP_CMD_NEW);
+	BUG_ON(err < 0);
 
 	ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
 	list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
@@ -1276,9 +1411,8 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 	ovs_notify(&dp_datapath_genl_family, reply, info);
 	return 0;
 
-err_destroy_local_port:
-	ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
 err_destroy_ports_array:
+	ovs_unlock();
 	kfree(dp->ports);
 err_destroy_percpu:
 	free_percpu(dp->stats_percpu);
@@ -1287,8 +1421,8 @@ err_destroy_table:
 err_free_dp:
 	release_net(ovs_dp_get_net(dp));
 	kfree(dp);
-err_unlock_ovs:
-	ovs_unlock();
+err_free_reply:
+	kfree_skb(reply);
 err:
 	return err;
 }
@@ -1326,16 +1460,19 @@ static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	struct datapath *dp;
 	int err;
 
+	reply = ovs_dp_cmd_alloc_info(info);
+	if (!reply)
+		return -ENOMEM;
+
 	ovs_lock();
 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
 	err = PTR_ERR(dp);
 	if (IS_ERR(dp))
-		goto unlock;
+		goto err_unlock_free;
 
-	reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_DEL);
-	err = PTR_ERR(reply);
-	if (IS_ERR(reply))
-		goto unlock;
+	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
+				   info->snd_seq, 0, OVS_DP_CMD_DEL);
+	BUG_ON(err < 0);
 
 	__dp_destroy(dp);
 	ovs_unlock();
@@ -1343,8 +1480,10 @@ static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	ovs_notify(&dp_datapath_genl_family, reply, info);
 
 	return 0;
-unlock:
+
+err_unlock_free:
 	ovs_unlock();
+	kfree_skb(reply);
 	return err;
 }
 
@@ -1354,29 +1493,30 @@ static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
 	struct datapath *dp;
 	int err;
 
+	reply = ovs_dp_cmd_alloc_info(info);
+	if (!reply)
+		return -ENOMEM;
+
 	ovs_lock();
 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
 	err = PTR_ERR(dp);
 	if (IS_ERR(dp))
-		goto unlock;
+		goto err_unlock_free;
 
 	ovs_dp_change(dp, info->attrs);
 
-	reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_NEW);
-	if (IS_ERR(reply)) {
-		err = PTR_ERR(reply);
-		genl_set_err(&dp_datapath_genl_family, sock_net(skb->sk), 0,
-			     0, err);
-		err = 0;
-		goto unlock;
-	}
+	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
+				   info->snd_seq, 0, OVS_DP_CMD_NEW);
+	BUG_ON(err < 0);
 
 	ovs_unlock();
 	ovs_notify(&dp_datapath_genl_family, reply, info);
 
 	return 0;
-unlock:
+
+err_unlock_free:
 	ovs_unlock();
+	kfree_skb(reply);
 	return err;
 }
 
@@ -1386,24 +1526,26 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
 	struct datapath *dp;
 	int err;
 
-	ovs_lock();
+	reply = ovs_dp_cmd_alloc_info(info);
+	if (!reply)
+		return -ENOMEM;
+
+	rcu_read_lock();
 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
 	if (IS_ERR(dp)) {
 		err = PTR_ERR(dp);
-		goto unlock;
-	}
-
-	reply = ovs_dp_cmd_build_info(dp, info, OVS_DP_CMD_NEW);
-	if (IS_ERR(reply)) {
-		err = PTR_ERR(reply);
-		goto unlock;
+		goto err_unlock_free;
 	}
+	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
+				   info->snd_seq, 0, OVS_DP_CMD_NEW);
+	BUG_ON(err < 0);
+	rcu_read_unlock();
 
-	ovs_unlock();
 	return genlmsg_reply(reply, info);
 
-unlock:
-	ovs_unlock();
+err_unlock_free:
+	rcu_read_unlock();
+	kfree_skb(reply);
 	return err;
 }
 
@@ -1430,7 +1572,13 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	return skb->len;
 }
 
-static const struct genl_ops dp_datapath_genl_ops[] = {
+static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
+	[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
+	[OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
+	[OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
+};
+
+static struct genl_ops dp_datapath_genl_ops[] = {
 	{ .cmd = OVS_DP_CMD_NEW,
 	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
 	  .policy = datapath_policy,
@@ -1454,27 +1602,18 @@ static const struct genl_ops dp_datapath_genl_ops[] = {
 	},
 };
 
-static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
-	[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
-	[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
-	[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
-	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
-	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
-	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
-};
-
-struct genl_family dp_vport_genl_family = {
+static struct genl_family dp_datapath_genl_family = {
 	.id = GENL_ID_GENERATE,
 	.hdrsize = sizeof(struct ovs_header),
-	.name = OVS_VPORT_FAMILY,
-	.version = OVS_VPORT_VERSION,
-	.maxattr = OVS_VPORT_ATTR_MAX,
+	.name = OVS_DATAPATH_FAMILY,
+	.version = OVS_DATAPATH_VERSION,
+	.maxattr = OVS_DP_ATTR_MAX,
 	.netnsok = true,
 	.parallel_ops = true,
-};
-
-static struct genl_multicast_group ovs_dp_vport_multicast_group = {
-	.name = OVS_VPORT_MCGROUP
+	.ops = dp_datapath_genl_ops,
+	.n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
+	.mcgrps = &ovs_dp_datapath_multicast_group,
+	.n_mcgrps = 1,
 };
 
 /* Called with ovs_mutex or RCU read lock. */
@@ -1516,7 +1655,12 @@ error:
 	return err;
 }
 
-/* Called with ovs_mutex or RCU read lock. */
+static struct sk_buff *ovs_vport_cmd_alloc_info(void)
+{
+	return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+}
+
+/* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
 					 u32 seq, u8 cmd)
 {
@@ -1578,33 +1722,35 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
 	u32 port_no;
 	int err;
 
-	err = -EINVAL;
 	if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
 	    !a[OVS_VPORT_ATTR_UPCALL_PID])
-		goto exit;
+		return -EINVAL;
+
+	port_no = a[OVS_VPORT_ATTR_PORT_NO]
+		  ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
+	if (port_no >= DP_MAX_PORTS)
+		return -EFBIG;
+
+	reply = ovs_vport_cmd_alloc_info();
+	if (!reply)
+		return -ENOMEM;
 
 	ovs_lock();
 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
 	err = -ENODEV;
 	if (!dp)
-		goto exit_unlock;
-
-	if (a[OVS_VPORT_ATTR_PORT_NO]) {
-		port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
-
-		err = -EFBIG;
-		if (port_no >= DP_MAX_PORTS)
-			goto exit_unlock;
+		goto exit_unlock_free;
 
+	if (port_no) {
 		vport = ovs_vport_ovsl(dp, port_no);
 		err = -EBUSY;
 		if (vport)
-			goto exit_unlock;
+			goto exit_unlock_free;
 	} else {
 		for (port_no = 1; ; port_no++) {
 			if (port_no >= DP_MAX_PORTS) {
 				err = -EFBIG;
-				goto exit_unlock;
+				goto exit_unlock_free;
 			}
 			vport = ovs_vport_ovsl(dp, port_no);
 			if (!vport)
@@ -1622,22 +1768,19 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
 	vport = new_vport(&parms);
 	err = PTR_ERR(vport);
 	if (IS_ERR(vport))
-		goto exit_unlock;
+		goto exit_unlock_free;
 
-	err = 0;
-	reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
-					 OVS_VPORT_CMD_NEW);
-	if (IS_ERR(reply)) {
-		err = PTR_ERR(reply);
-		ovs_dp_detach_port(vport);
-		goto exit_unlock;
-	}
+	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
+				      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
+	BUG_ON(err < 0);
+	ovs_unlock();
 
 	ovs_notify(&dp_vport_genl_family, reply, info);
+	return 0;
 
-exit_unlock:
+exit_unlock_free:
 	ovs_unlock();
-exit:
+	kfree_skb(reply);
 	return err;
 }
 
@@ -1648,28 +1791,26 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
 	struct vport *vport;
 	int err;
 
+	reply = ovs_vport_cmd_alloc_info();
+	if (!reply)
+		return -ENOMEM;
+
 	ovs_lock();
 	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
 	err = PTR_ERR(vport);
 	if (IS_ERR(vport))
-		goto exit_unlock;
+		goto exit_unlock_free;
 
 	if (a[OVS_VPORT_ATTR_TYPE] &&
 	    nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
 		err = -EINVAL;
-		goto exit_unlock;
-	}
-
-	reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-	if (!reply) {
-		err = -ENOMEM;
-		goto exit_unlock;
+		goto exit_unlock_free;
 	}
 
 	if (a[OVS_VPORT_ATTR_OPTIONS]) {
 		err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
 		if (err)
-			goto exit_free;
+			goto exit_unlock_free;
 	}
 
 	if (a[OVS_VPORT_ATTR_UPCALL_PID])
@@ -1683,10 +1824,9 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
 	ovs_notify(&dp_vport_genl_family, reply, info);
 	return 0;
 
-exit_free:
-	kfree_skb(reply);
-exit_unlock:
+exit_unlock_free:
 	ovs_unlock();
+	kfree_skb(reply);
 	return err;
 }
 
@@ -1697,30 +1837,33 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	struct vport *vport;
 	int err;
 
+	reply = ovs_vport_cmd_alloc_info();
+	if (!reply)
+		return -ENOMEM;
+
 	ovs_lock();
 	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
 	err = PTR_ERR(vport);
 	if (IS_ERR(vport))
-		goto exit_unlock;
+		goto exit_unlock_free;
 
 	if (vport->port_no == OVSP_LOCAL) {
 		err = -EINVAL;
-		goto exit_unlock;
+		goto exit_unlock_free;
 	}
 
-	reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
-					 info->snd_seq, OVS_VPORT_CMD_DEL);
-	err = PTR_ERR(reply);
-	if (IS_ERR(reply))
-		goto exit_unlock;
-
-	err = 0;
+	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
+				      info->snd_seq, 0, OVS_VPORT_CMD_DEL);
+	BUG_ON(err < 0);
 	ovs_dp_detach_port(vport);
+	ovs_unlock();
 
 	ovs_notify(&dp_vport_genl_family, reply, info);
+	return 0;
 
-exit_unlock:
+exit_unlock_free:
 	ovs_unlock();
+	kfree_skb(reply);
 	return err;
 }
 
@@ -1732,24 +1875,25 @@ static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
 	struct vport *vport;
 	int err;
 
+	reply = ovs_vport_cmd_alloc_info();
+	if (!reply)
+		return -ENOMEM;
+
 	rcu_read_lock();
 	vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
 	err = PTR_ERR(vport);
 	if (IS_ERR(vport))
-		goto exit_unlock;
-
-	reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
-					 info->snd_seq, OVS_VPORT_CMD_NEW);
-	err = PTR_ERR(reply);
-	if (IS_ERR(reply))
-		goto exit_unlock;
-
+		goto exit_unlock_free;
+	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
+				      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
+	BUG_ON(err < 0);
 	rcu_read_unlock();
 
 	return genlmsg_reply(reply, info);
 
-exit_unlock:
+exit_unlock_free:
 	rcu_read_unlock();
+	kfree_skb(reply);
 	return err;
 }
 
@@ -1792,7 +1936,16 @@ out:
 	return skb->len;
 }
 
-static const struct genl_ops dp_vport_genl_ops[] = {
+static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
+	[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
+	[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
+	[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
+	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
+	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
+	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
+};
+
+static struct genl_ops dp_vport_genl_ops[] = {
 	{ .cmd = OVS_VPORT_CMD_NEW,
 	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
 	  .policy = vport_policy,
@@ -1816,26 +1969,25 @@ static const struct genl_ops dp_vport_genl_ops[] = {
 	},
 };
 
-struct genl_family_and_ops {
-	struct genl_family *family;
-	const struct genl_ops *ops;
-	int n_ops;
-	const struct genl_multicast_group *group;
+struct genl_family dp_vport_genl_family = {
+	.id = GENL_ID_GENERATE,
+	.hdrsize = sizeof(struct ovs_header),
+	.name = OVS_VPORT_FAMILY,
+	.version = OVS_VPORT_VERSION,
+	.maxattr = OVS_VPORT_ATTR_MAX,
+	.netnsok = true,
+	.parallel_ops = true,
+	.ops = dp_vport_genl_ops,
+	.n_ops = ARRAY_SIZE(dp_vport_genl_ops),
+	.mcgrps = &ovs_dp_vport_multicast_group,
+	.n_mcgrps = 1,
 };
 
-static const struct genl_family_and_ops dp_genl_families[] = {
-	{ &dp_datapath_genl_family,
-	  dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
-	  &ovs_dp_datapath_multicast_group },
-	{ &dp_vport_genl_family,
-	  dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
-	  &ovs_dp_vport_multicast_group },
-	{ &dp_flow_genl_family,
-	  dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
-	  &ovs_dp_flow_multicast_group },
-	{ &dp_packet_genl_family,
-	  dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
-	  NULL },
+static struct genl_family * const dp_genl_families[] = {
+	&dp_datapath_genl_family,
+	&dp_vport_genl_family,
+	&dp_flow_genl_family,
+	&dp_packet_genl_family,
 };
 
 static void dp_unregister_genl(int n_families)
@@ -1843,33 +1995,25 @@ static void dp_unregister_genl(int n_families)
 	int i;
 
 	for (i = 0; i < n_families; i++)
-		genl_unregister_family(dp_genl_families[i].family);
+		genl_unregister_family(dp_genl_families[i]);
 }
 
 static int dp_register_genl(void)
 {
-	int n_registered;
 	int err;
 	int i;
 
-	n_registered = 0;
 	for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
-		const struct genl_family_and_ops *f = &dp_genl_families[i];
 
-		f->family->ops = f->ops;
-		f->family->n_ops = f->n_ops;
-		f->family->mcgrps = f->group;
-		f->family->n_mcgrps = f->group ? 1 : 0;
-		err = genl_register_family(f->family);
+		err = genl_register_family(dp_genl_families[i]);
 		if (err)
 			goto error;
-		n_registered++;
 	}
 
 	return 0;
 
 error:
-	dp_unregister_genl(n_registered);
+	dp_unregister_genl(i);
 	return err;
 }
 
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 05317380fc03..7ede507500d7 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -194,7 +194,9 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq,
 int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb);
 void ovs_dp_notify_wq(struct work_struct *work);
 
 #define OVS_NLERR(fmt, ...) \
-	pr_info_once("netlink: " fmt, ##__VA_ARGS__)
-
+do { \
+	if (net_ratelimit()) \
+		pr_info("netlink: " fmt, ##__VA_ARGS__); \
+} while (0)
 #endif /* datapath.h */
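
[Editorial note] The OVS_NLERR rework trades pr_info_once() for a rate-limited pr_info(), so repeated malformed requests keep showing up in the log without flooding it. A call such as the one in ovs_flow_cmd_new() now expands roughly to:

/* Expansion of OVS_NLERR("Flow actions may not be safe on all matching packets.\n") */
do {
	if (net_ratelimit())
		pr_info("netlink: Flow actions may not be safe on all matching packets.\n");
} while (0);
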
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 2998989e76db..334751cb1528 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -64,88 +64,110 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
 void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
 {
 	struct flow_stats *stats;
-	__be16 tcp_flags = 0;
-
-	if (!flow->stats.is_percpu)
-		stats = flow->stats.stat;
-	else
-		stats = this_cpu_ptr(flow->stats.cpu_stats);
-
-	if ((flow->key.eth.type == htons(ETH_P_IP) ||
-	     flow->key.eth.type == htons(ETH_P_IPV6)) &&
-	    flow->key.ip.frag != OVS_FRAG_TYPE_LATER &&
-	    flow->key.ip.proto == IPPROTO_TCP &&
-	    likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
-		tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb));
+	__be16 tcp_flags = flow->key.tp.flags;
+	int node = numa_node_id();
+
+	stats = rcu_dereference(flow->stats[node]);
+
+	/* Check if already have node-specific stats. */
+	if (likely(stats)) {
+		spin_lock(&stats->lock);
+		/* Mark if we write on the pre-allocated stats. */
+		if (node == 0 && unlikely(flow->stats_last_writer != node))
+			flow->stats_last_writer = node;
+	} else {
+		stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
+		spin_lock(&stats->lock);
+
+		/* If the current NUMA-node is the only writer on the
+		 * pre-allocated stats keep using them.
+		 */
+		if (unlikely(flow->stats_last_writer != node)) {
+			/* A previous locker may have already allocated the
+			 * stats, so we need to check again. If node-specific
+			 * stats were already allocated, we update the pre-
+			 * allocated stats as we have already locked them.
+			 */
+			if (likely(flow->stats_last_writer != NUMA_NO_NODE)
+			    && likely(!rcu_dereference(flow->stats[node]))) {
+				/* Try to allocate node-specific stats. */
+				struct flow_stats *new_stats;
+
+				new_stats =
+					kmem_cache_alloc_node(flow_stats_cache,
+							      GFP_THISNODE |
+							      __GFP_NOMEMALLOC,
+							      node);
+				if (likely(new_stats)) {
+					new_stats->used = jiffies;
+					new_stats->packet_count = 1;
+					new_stats->byte_count = skb->len;
+					new_stats->tcp_flags = tcp_flags;
+					spin_lock_init(&new_stats->lock);
+
+					rcu_assign_pointer(flow->stats[node],
+							   new_stats);
+					goto unlock;
+				}
+			}
+			flow->stats_last_writer = node;
+		}
 	}
 
-	spin_lock(&stats->lock);
 	stats->used = jiffies;
 	stats->packet_count++;
 	stats->byte_count += skb->len;
 	stats->tcp_flags |= tcp_flags;
+unlock:
 	spin_unlock(&stats->lock);
 }
 
-static void stats_read(struct flow_stats *stats,
-		       struct ovs_flow_stats *ovs_stats,
-		       unsigned long *used, __be16 *tcp_flags)
-{
-	spin_lock(&stats->lock);
-	if (!*used || time_after(stats->used, *used))
-		*used = stats->used;
-	*tcp_flags |= stats->tcp_flags;
-	ovs_stats->n_packets += stats->packet_count;
-	ovs_stats->n_bytes += stats->byte_count;
-	spin_unlock(&stats->lock);
-}
-
-void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats,
+/* Must be called with rcu_read_lock or ovs_mutex. */
+void ovs_flow_stats_get(const struct sw_flow *flow,
+			struct ovs_flow_stats *ovs_stats,
 			unsigned long *used, __be16 *tcp_flags)
 {
-	int cpu;
+	int node;
 
 	*used = 0;
 	*tcp_flags = 0;
 	memset(ovs_stats, 0, sizeof(*ovs_stats));
 
-	local_bh_disable();
-	if (!flow->stats.is_percpu) {
-		stats_read(flow->stats.stat, ovs_stats, used, tcp_flags);
-	} else {
-		for_each_possible_cpu(cpu) {
-			struct flow_stats *stats;
+	for_each_node(node) {
+		struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[node]);
 
-			stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
-			stats_read(stats, ovs_stats, used, tcp_flags);
+		if (stats) {
+			/* Local CPU may write on non-local stats, so we must
+			 * block bottom-halves here.
+			 */
+			spin_lock_bh(&stats->lock);
+			if (!*used || time_after(stats->used, *used))
+				*used = stats->used;
+			*tcp_flags |= stats->tcp_flags;
+			ovs_stats->n_packets += stats->packet_count;
+			ovs_stats->n_bytes += stats->byte_count;
+			spin_unlock_bh(&stats->lock);
 		}
 	}
-	local_bh_enable();
-}
-
-static void stats_reset(struct flow_stats *stats)
-{
-	spin_lock(&stats->lock);
-	stats->used = 0;
-	stats->packet_count = 0;
-	stats->byte_count = 0;
-	stats->tcp_flags = 0;
-	spin_unlock(&stats->lock);
 }
 
+/* Called with ovs_mutex. */
 void ovs_flow_stats_clear(struct sw_flow *flow)
 {
-	int cpu;
+	int node;
 
-	local_bh_disable();
-	if (!flow->stats.is_percpu) {
-		stats_reset(flow->stats.stat);
-	} else {
-		for_each_possible_cpu(cpu) {
-			stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu));
+	for_each_node(node) {
+		struct flow_stats *stats = ovsl_dereference(flow->stats[node]);
+
+		if (stats) {
+			spin_lock_bh(&stats->lock);
+			stats->used = 0;
+			stats->packet_count = 0;
+			stats->byte_count = 0;
+			stats->tcp_flags = 0;
+			spin_unlock_bh(&stats->lock);
 		}
 	}
-	local_bh_enable();
 }
 
 static int check_header(struct sk_buff *skb, int len)
@@ -332,8 +354,8 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
 	/* The ICMPv6 type and code fields use the 16-bit transport port
 	 * fields, so we need to store them in 16-bit network byte order.
 	 */
-	key->ipv6.tp.src = htons(icmp->icmp6_type);
-	key->ipv6.tp.dst = htons(icmp->icmp6_code);
+	key->tp.src = htons(icmp->icmp6_type);
+	key->tp.dst = htons(icmp->icmp6_code);
 
 	if (icmp->icmp6_code == 0 &&
 	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
@@ -372,14 +394,14 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
 		    && opt_len == 8) {
 			if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
 				goto invalid;
-			memcpy(key->ipv6.nd.sll,
-			       &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
+			ether_addr_copy(key->ipv6.nd.sll,
+					&nd->opt[offset+sizeof(*nd_opt)]);
 		} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
 			   && opt_len == 8) {
 			if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
 				goto invalid;
-			memcpy(key->ipv6.nd.tll,
-			       &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
+			ether_addr_copy(key->ipv6.nd.tll,
+					&nd->opt[offset+sizeof(*nd_opt)]);
 		}
 
 		icmp_len -= opt_len;
@@ -439,8 +461,8 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
 	 * header in the linear data area.
 	 */
 	eth = eth_hdr(skb);
-	memcpy(key->eth.src, eth->h_source, ETH_ALEN);
-	memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);
+	ether_addr_copy(key->eth.src, eth->h_source);
+	ether_addr_copy(key->eth.dst, eth->h_dest);
 
 	__skb_pull(skb, 2 * ETH_ALEN);
 	/* We are going to push all headers that we pull, so no need to
@@ -495,21 +517,21 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
 		if (key->ip.proto == IPPROTO_TCP) {
 			if (tcphdr_ok(skb)) {
 				struct tcphdr *tcp = tcp_hdr(skb);
-				key->ipv4.tp.src = tcp->source;
-				key->ipv4.tp.dst = tcp->dest;
-				key->ipv4.tp.flags = TCP_FLAGS_BE16(tcp);
+				key->tp.src = tcp->source;
+				key->tp.dst = tcp->dest;
+				key->tp.flags = TCP_FLAGS_BE16(tcp);
 			}
 		} else if (key->ip.proto == IPPROTO_UDP) {
 			if (udphdr_ok(skb)) {
 				struct udphdr *udp = udp_hdr(skb);
-				key->ipv4.tp.src = udp->source;
-				key->ipv4.tp.dst = udp->dest;
+				key->tp.src = udp->source;
+				key->tp.dst = udp->dest;
 			}
 		} else if (key->ip.proto == IPPROTO_SCTP) {
 			if (sctphdr_ok(skb)) {
 				struct sctphdr *sctp = sctp_hdr(skb);
-				key->ipv4.tp.src = sctp->source;
-				key->ipv4.tp.dst = sctp->dest;
+				key->tp.src = sctp->source;
+				key->tp.dst = sctp->dest;
 			}
 		} else if (key->ip.proto == IPPROTO_ICMP) {
 			if (icmphdr_ok(skb)) {
@@ -517,8 +539,8 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
 				/* The ICMP type and code fields use the 16-bit
 				 * transport port fields, so we need to store
 				 * them in 16-bit network byte order. */
-				key->ipv4.tp.src = htons(icmp->type);
-				key->ipv4.tp.dst = htons(icmp->code);
+				key->tp.src = htons(icmp->type);
+				key->tp.dst = htons(icmp->code);
 			}
 		}
 
@@ -538,8 +560,8 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
 		key->ip.proto = ntohs(arp->ar_op);
 		memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
 		memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
-		memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
-		memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
+		ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha);
+		ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha);
 		}
 	} else if (key->eth.type == htons(ETH_P_IPV6)) {
 		int nh_len;	/* IPv6 Header + Extensions */
@@ -564,21 +586,21 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
 		if (key->ip.proto == NEXTHDR_TCP) {
 			if (tcphdr_ok(skb)) {
 				struct tcphdr *tcp = tcp_hdr(skb);
-				key->ipv6.tp.src = tcp->source;
-				key->ipv6.tp.dst = tcp->dest;
-				key->ipv6.tp.flags = TCP_FLAGS_BE16(tcp);
+				key->tp.src = tcp->source;
+				key->tp.dst = tcp->dest;
+				key->tp.flags = TCP_FLAGS_BE16(tcp);
 			}
 		} else if (key->ip.proto == NEXTHDR_UDP) {
 			if (udphdr_ok(skb)) {
 				struct udphdr *udp = udp_hdr(skb);
-				key->ipv6.tp.src = udp->source;
-				key->ipv6.tp.dst = udp->dest;
+				key->tp.src = udp->source;
+				key->tp.dst = udp->dest;
 			}
 		} else if (key->ip.proto == NEXTHDR_SCTP) {
 			if (sctphdr_ok(skb)) {
 				struct sctphdr *sctp = sctp_hdr(skb);
-				key->ipv6.tp.src = sctp->source;
-				key->ipv6.tp.dst = sctp->dest;
+				key->tp.src = sctp->source;
+				key->tp.dst = sctp->dest;
 			}
 		} else if (key->ip.proto == NEXTHDR_ICMP) {
 			if (icmp6hdr_ok(skb)) {
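
[Editorial sketch] The stats rework above replaces the per-CPU/shared split with one flow_stats slot per NUMA node, allocated lazily on the first write from that node and walked in full on read. A self-contained user-space analog of the read side (pthread spinlocks stand in for the kernel's BH-disabling spinlocks, and plain pointer reads stand in for rcu_dereference(); MAX_NODES and all names are illustrative):

#include <pthread.h>
#include <stdint.h>

#define MAX_NODES 4	/* stand-in for the kernel's possible-node count */

struct node_stats {
	pthread_spinlock_t lock;
	uint64_t packets;
	uint64_t bytes;
};

struct flow_like {
	struct node_stats *stats[MAX_NODES];	/* NULL until a node writes */
};

static void stats_get(struct flow_like *f, uint64_t *packets, uint64_t *bytes)
{
	*packets = 0;
	*bytes = 0;
	for (int node = 0; node < MAX_NODES; node++) {
		struct node_stats *s = f->stats[node];

		if (!s)		/* this node never touched the flow */
			continue;
		pthread_spin_lock(&s->lock);
		*packets += s->packets;
		*bytes += s->bytes;
		pthread_spin_unlock(&s->lock);
	}
}
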
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 2d770e28a3a3..ac395d2cd821 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -47,7 +47,7 @@ struct ovs_key_ipv4_tunnel {
 	__be16 tun_flags;
 	u8 ipv4_tos;
 	u8 ipv4_ttl;
-};
+} __packed __aligned(4); /* Minimize padding. */
 
 static inline void ovs_flow_tun_key_init(struct ovs_key_ipv4_tunnel *tun_key,
 					 const struct iphdr *iph, __be64 tun_id,
@@ -71,7 +71,7 @@ struct sw_flow_key {
 		u32	priority;	/* Packet QoS priority. */
 		u32	skb_mark;	/* SKB mark. */
 		u16	in_port;	/* Input switch port (or DP_MAX_PORTS). */
-	} phy;
+	} __packed phy; /* Safe when right after 'tun_key'. */
 	struct {
 		u8     src[ETH_ALEN];	/* Ethernet source address. */
 		u8     dst[ETH_ALEN];	/* Ethernet destination address. */
@@ -84,23 +84,21 @@ struct sw_flow_key {
 		u8     ttl;	/* IP TTL/hop limit. */
 		u8     frag;	/* One of OVS_FRAG_TYPE_*. */
 	} ip;
+	struct {
+		__be16 src;	/* TCP/UDP/SCTP source port. */
+		__be16 dst;	/* TCP/UDP/SCTP destination port. */
+		__be16 flags;	/* TCP flags. */
+	} tp;
 	union {
 		struct {
 			struct {
 				__be32 src;	/* IP source address. */
 				__be32 dst;	/* IP destination address. */
 			} addr;
-			union {
-				struct {
-					__be16 src;	/* TCP/UDP/SCTP source port. */
-					__be16 dst;	/* TCP/UDP/SCTP destination port. */
-					__be16 flags;	/* TCP flags. */
-				} tp;
-				struct {
-					u8 sha[ETH_ALEN];	/* ARP source hardware address. */
-					u8 tha[ETH_ALEN];	/* ARP target hardware address. */
-				} arp;
-			};
+			struct {
+				u8 sha[ETH_ALEN];	/* ARP source hardware address. */
+				u8 tha[ETH_ALEN];	/* ARP target hardware address. */
+			} arp;
 		} ipv4;
 		struct {
 			struct {
@@ -109,11 +107,6 @@ struct sw_flow_key {
 			} addr;
 			__be32 label;	/* IPv6 flow label. */
 			struct {
-				__be16 src;	/* TCP/UDP/SCTP source port. */
-				__be16 dst;	/* TCP/UDP/SCTP destination port. */
-				__be16 flags;	/* TCP flags. */
-			} tp;
-			struct {
 				struct in6_addr target;	/* ND target address. */
 				u8 sll[ETH_ALEN];	/* ND source link layer address. */
 				u8 tll[ETH_ALEN];	/* ND target link layer address. */
@@ -155,24 +148,22 @@ struct flow_stats {
 	__be16 tcp_flags;		/* Union of seen TCP flags. */
 };
 
-struct sw_flow_stats {
-	bool is_percpu;
-	union {
-		struct flow_stats *stat;
-		struct flow_stats __percpu *cpu_stats;
-	};
-};
-
 struct sw_flow {
 	struct rcu_head rcu;
 	struct hlist_node hash_node[2];
 	u32 hash;
-
+	int stats_last_writer;		/* NUMA-node id of the last writer on
					 * 'stats[0]'.
					 */
 	struct sw_flow_key key;
 	struct sw_flow_key unmasked_key;
 	struct sw_flow_mask *mask;
 	struct sw_flow_actions __rcu *sf_acts;
-	struct sw_flow_stats stats;
+	struct flow_stats __rcu *stats[]; /* One for each NUMA node. First one
					   * is allocated at flow creation time,
					   * the rest are allocated on demand
					   * while holding the 'stats[0].lock'.
					   */
 };
 
 struct arp_eth_header {
@@ -189,10 +180,10 @@ struct arp_eth_header {
 	unsigned char ar_tip[4];	/* target IP address */
 } __packed;
 
-void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb);
-void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *stats,
+void ovs_flow_stats_update(struct sw_flow *, struct sk_buff *);
+void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *,
 			unsigned long *used, __be16 *tcp_flags);
-void ovs_flow_stats_clear(struct sw_flow *flow);
+void ovs_flow_stats_clear(struct sw_flow *);
 u64 ovs_flow_used_time(unsigned long flow_jiffies);
 
 int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *);
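
[Editorial sketch] Because 'stats' is now a C99 flexible array member, a sw_flow can no longer be allocated with a bare sizeof; the allocator has to add one pointer slot per possible NUMA node. The updated ovs_flow_alloc() is not part of this hunk, but the sizing arithmetic it must perform looks like this (flow_like, nr_nodes, and flow_alloc are illustrative names):

#include <stdlib.h>
#include <string.h>

struct stats;			/* opaque for the sketch */

struct flow_like {
	int stats_last_writer;
	struct stats *stats[];	/* one slot per possible node */
};

static struct flow_like *flow_alloc(int nr_nodes)
{
	struct flow_like *f;

	/* Base struct plus nr_nodes trailing pointer slots. */
	f = malloc(sizeof(*f) + nr_nodes * sizeof(f->stats[0]));
	if (!f)
		return NULL;
	f->stats_last_writer = -1;	/* analog of NUMA_NO_NODE */
	memset(f->stats, 0, nr_nodes * sizeof(f->stats[0]));
	return f;
}
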
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 4d000acaed0d..d757848da89c 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -16,6 +16,8 @@
  * 02110-1301, USA
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "flow.h"
 #include "datapath.h"
 #include <linux/uaccess.h>
@@ -202,11 +204,11 @@ static bool match_validate(const struct sw_flow_match *match,
 		if (match->mask && (match->mask->key.ip.proto == 0xff))
 			mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6;
 
-		if (match->key->ipv6.tp.src ==
+		if (match->key->tp.src ==
 				htons(NDISC_NEIGHBOUR_SOLICITATION) ||
-		    match->key->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
+		    match->key->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
 			key_expected |= 1 << OVS_KEY_ATTR_ND;
-			if (match->mask && (match->mask->key.ipv6.tp.src == htons(0xffff)))
+			if (match->mask && (match->mask->key.tp.src == htons(0xffff)))
 				mask_allowed |= 1 << OVS_KEY_ATTR_ND;
 		}
 	}
@@ -216,14 +218,14 @@ static bool match_validate(const struct sw_flow_match *match,
 	if ((key_attrs & key_expected) != key_expected) {
 		/* Key attributes check failed. */
 		OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n",
-				key_attrs, key_expected);
+			  (unsigned long long)key_attrs, (unsigned long long)key_expected);
 		return false;
 	}
 
 	if ((mask_attrs & mask_allowed) != mask_attrs) {
 		/* Mask attributes check failed. */
 		OVS_NLERR("Contain more than allowed mask fields (mask_attrs=%llx, mask_allowed=%llx).\n",
-				mask_attrs, mask_allowed);
+			  (unsigned long long)mask_attrs, (unsigned long long)mask_allowed);
 		return false;
 	}
 
@@ -266,20 +268,6 @@ static bool is_all_zero(const u8 *fp, size_t size)
 	return true;
 }
 
-static bool is_all_set(const u8 *fp, size_t size)
-{
-	int i;
-
-	if (!fp)
-		return false;
-
-	for (i = 0; i < size; i++)
-		if (fp[i] != 0xff)
-			return false;
-
-	return true;
-}
-
 static int __parse_flow_nlattrs(const struct nlattr *attr,
 				const struct nlattr *a[],
 				u64 *attrsp, bool nz)
@@ -501,9 +489,8 @@ static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
 	return 0;
 }
 
-static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple,
-				u64 attrs, const struct nlattr **a,
-				bool is_mask)
+static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
+				const struct nlattr **a, bool is_mask)
 {
 	int err;
 	u64 orig_attrs = attrs;
@@ -560,11 +547,6 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple
 		SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
 	}
 
-	if (is_mask && exact_5tuple) {
-		if (match->mask->key.eth.type != htons(0xffff))
-			*exact_5tuple = false;
-	}
-
 	if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
 		const struct ovs_key_ipv4 *ipv4_key;
 
@@ -587,13 +569,6 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple
587 SW_FLOW_KEY_PUT(match, ipv4.addr.dst, 569 SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
588 ipv4_key->ipv4_dst, is_mask); 570 ipv4_key->ipv4_dst, is_mask);
589 attrs &= ~(1 << OVS_KEY_ATTR_IPV4); 571 attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
590
591 if (is_mask && exact_5tuple && *exact_5tuple) {
592 if (ipv4_key->ipv4_proto != 0xff ||
593 ipv4_key->ipv4_src != htonl(0xffffffff) ||
594 ipv4_key->ipv4_dst != htonl(0xffffffff))
595 *exact_5tuple = false;
596 }
597 } 572 }
598 573
599 if (attrs & (1 << OVS_KEY_ATTR_IPV6)) { 574 if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
@@ -625,13 +600,6 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple
625 is_mask); 600 is_mask);
626 601
627 attrs &= ~(1 << OVS_KEY_ATTR_IPV6); 602 attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
628
629 if (is_mask && exact_5tuple && *exact_5tuple) {
630 if (ipv6_key->ipv6_proto != 0xff ||
631 !is_all_set((u8 *)ipv6_key->ipv6_src, sizeof(match->key->ipv6.addr.src)) ||
632 !is_all_set((u8 *)ipv6_key->ipv6_dst, sizeof(match->key->ipv6.addr.dst)))
633 *exact_5tuple = false;
634 }
635 } 603 }
636 604
637 if (attrs & (1 << OVS_KEY_ATTR_ARP)) { 605 if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
@@ -662,32 +630,18 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple
662 const struct ovs_key_tcp *tcp_key; 630 const struct ovs_key_tcp *tcp_key;
663 631
664 tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]); 632 tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
665 if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) { 633 SW_FLOW_KEY_PUT(match, tp.src, tcp_key->tcp_src, is_mask);
666 SW_FLOW_KEY_PUT(match, ipv4.tp.src, 634 SW_FLOW_KEY_PUT(match, tp.dst, tcp_key->tcp_dst, is_mask);
667 tcp_key->tcp_src, is_mask);
668 SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
669 tcp_key->tcp_dst, is_mask);
670 } else {
671 SW_FLOW_KEY_PUT(match, ipv6.tp.src,
672 tcp_key->tcp_src, is_mask);
673 SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
674 tcp_key->tcp_dst, is_mask);
675 }
676 attrs &= ~(1 << OVS_KEY_ATTR_TCP); 635 attrs &= ~(1 << OVS_KEY_ATTR_TCP);
677
678 if (is_mask && exact_5tuple && *exact_5tuple &&
679 (tcp_key->tcp_src != htons(0xffff) ||
680 tcp_key->tcp_dst != htons(0xffff)))
681 *exact_5tuple = false;
682 } 636 }
683 637
684 if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) { 638 if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) {
685 if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) { 639 if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
686 SW_FLOW_KEY_PUT(match, ipv4.tp.flags, 640 SW_FLOW_KEY_PUT(match, tp.flags,
687 nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]), 641 nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
688 is_mask); 642 is_mask);
689 } else { 643 } else {
690 SW_FLOW_KEY_PUT(match, ipv6.tp.flags, 644 SW_FLOW_KEY_PUT(match, tp.flags,
691 nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]), 645 nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
692 is_mask); 646 is_mask);
693 } 647 }
@@ -698,40 +652,17 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple
698 const struct ovs_key_udp *udp_key; 652 const struct ovs_key_udp *udp_key;
699 653
700 udp_key = nla_data(a[OVS_KEY_ATTR_UDP]); 654 udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
701 if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) { 655 SW_FLOW_KEY_PUT(match, tp.src, udp_key->udp_src, is_mask);
702 SW_FLOW_KEY_PUT(match, ipv4.tp.src, 656 SW_FLOW_KEY_PUT(match, tp.dst, udp_key->udp_dst, is_mask);
703 udp_key->udp_src, is_mask);
704 SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
705 udp_key->udp_dst, is_mask);
706 } else {
707 SW_FLOW_KEY_PUT(match, ipv6.tp.src,
708 udp_key->udp_src, is_mask);
709 SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
710 udp_key->udp_dst, is_mask);
711 }
712 attrs &= ~(1 << OVS_KEY_ATTR_UDP); 657 attrs &= ~(1 << OVS_KEY_ATTR_UDP);
713
714 if (is_mask && exact_5tuple && *exact_5tuple &&
715 (udp_key->udp_src != htons(0xffff) ||
716 udp_key->udp_dst != htons(0xffff)))
717 *exact_5tuple = false;
718 } 658 }
719 659
720 if (attrs & (1 << OVS_KEY_ATTR_SCTP)) { 660 if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
721 const struct ovs_key_sctp *sctp_key; 661 const struct ovs_key_sctp *sctp_key;
722 662
723 sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]); 663 sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
724 if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) { 664 SW_FLOW_KEY_PUT(match, tp.src, sctp_key->sctp_src, is_mask);
725 SW_FLOW_KEY_PUT(match, ipv4.tp.src, 665 SW_FLOW_KEY_PUT(match, tp.dst, sctp_key->sctp_dst, is_mask);
726 sctp_key->sctp_src, is_mask);
727 SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
728 sctp_key->sctp_dst, is_mask);
729 } else {
730 SW_FLOW_KEY_PUT(match, ipv6.tp.src,
731 sctp_key->sctp_src, is_mask);
732 SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
733 sctp_key->sctp_dst, is_mask);
734 }
735 attrs &= ~(1 << OVS_KEY_ATTR_SCTP); 666 attrs &= ~(1 << OVS_KEY_ATTR_SCTP);
736 } 667 }
737 668
@@ -739,9 +670,9 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple
739 const struct ovs_key_icmp *icmp_key; 670 const struct ovs_key_icmp *icmp_key;
740 671
741 icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]); 672 icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
742 SW_FLOW_KEY_PUT(match, ipv4.tp.src, 673 SW_FLOW_KEY_PUT(match, tp.src,
743 htons(icmp_key->icmp_type), is_mask); 674 htons(icmp_key->icmp_type), is_mask);
744 SW_FLOW_KEY_PUT(match, ipv4.tp.dst, 675 SW_FLOW_KEY_PUT(match, tp.dst,
745 htons(icmp_key->icmp_code), is_mask); 676 htons(icmp_key->icmp_code), is_mask);
746 attrs &= ~(1 << OVS_KEY_ATTR_ICMP); 677 attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
747 } 678 }
@@ -750,9 +681,9 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple
750 const struct ovs_key_icmpv6 *icmpv6_key; 681 const struct ovs_key_icmpv6 *icmpv6_key;
751 682
752 icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]); 683 icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
753 SW_FLOW_KEY_PUT(match, ipv6.tp.src, 684 SW_FLOW_KEY_PUT(match, tp.src,
754 htons(icmpv6_key->icmpv6_type), is_mask); 685 htons(icmpv6_key->icmpv6_type), is_mask);
755 SW_FLOW_KEY_PUT(match, ipv6.tp.dst, 686 SW_FLOW_KEY_PUT(match, tp.dst,
756 htons(icmpv6_key->icmpv6_code), is_mask); 687 htons(icmpv6_key->icmpv6_code), is_mask);
757 attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6); 688 attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
758 } 689 }
@@ -800,7 +731,6 @@ static void sw_flow_mask_set(struct sw_flow_mask *mask,
800 * attribute specifies the mask field of the wildcarded flow. 731 * attribute specifies the mask field of the wildcarded flow.
801 */ 732 */
802int ovs_nla_get_match(struct sw_flow_match *match, 733int ovs_nla_get_match(struct sw_flow_match *match,
803 bool *exact_5tuple,
804 const struct nlattr *key, 734 const struct nlattr *key,
805 const struct nlattr *mask) 735 const struct nlattr *mask)
806{ 736{
@@ -848,13 +778,10 @@ int ovs_nla_get_match(struct sw_flow_match *match,
848 } 778 }
849 } 779 }
850 780
851 err = ovs_key_from_nlattrs(match, NULL, key_attrs, a, false); 781 err = ovs_key_from_nlattrs(match, key_attrs, a, false);
852 if (err) 782 if (err)
853 return err; 783 return err;
854 784
855 if (exact_5tuple)
856 *exact_5tuple = true;
857
858 if (mask) { 785 if (mask) {
859 err = parse_flow_mask_nlattrs(mask, a, &mask_attrs); 786 err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
860 if (err) 787 if (err)
@@ -892,7 +819,7 @@ int ovs_nla_get_match(struct sw_flow_match *match,
892 } 819 }
893 } 820 }
894 821
895 err = ovs_key_from_nlattrs(match, exact_5tuple, mask_attrs, a, true); 822 err = ovs_key_from_nlattrs(match, mask_attrs, a, true);
896 if (err) 823 if (err)
897 return err; 824 return err;
898 } else { 825 } else {
@@ -982,8 +909,8 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
982 goto nla_put_failure; 909 goto nla_put_failure;
983 910
984 eth_key = nla_data(nla); 911 eth_key = nla_data(nla);
985 memcpy(eth_key->eth_src, output->eth.src, ETH_ALEN); 912 ether_addr_copy(eth_key->eth_src, output->eth.src);
986 memcpy(eth_key->eth_dst, output->eth.dst, ETH_ALEN); 913 ether_addr_copy(eth_key->eth_dst, output->eth.dst);
987 914
988 if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) { 915 if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
989 __be16 eth_type; 916 __be16 eth_type;
@@ -1055,8 +982,8 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
1055 arp_key->arp_sip = output->ipv4.addr.src; 982 arp_key->arp_sip = output->ipv4.addr.src;
1056 arp_key->arp_tip = output->ipv4.addr.dst; 983 arp_key->arp_tip = output->ipv4.addr.dst;
1057 arp_key->arp_op = htons(output->ip.proto); 984 arp_key->arp_op = htons(output->ip.proto);
1058 memcpy(arp_key->arp_sha, output->ipv4.arp.sha, ETH_ALEN); 985 ether_addr_copy(arp_key->arp_sha, output->ipv4.arp.sha);
1059 memcpy(arp_key->arp_tha, output->ipv4.arp.tha, ETH_ALEN); 986 ether_addr_copy(arp_key->arp_tha, output->ipv4.arp.tha);
1060 } 987 }
1061 988
1062 if ((swkey->eth.type == htons(ETH_P_IP) || 989 if ((swkey->eth.type == htons(ETH_P_IP) ||
@@ -1070,19 +997,11 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
1070 if (!nla) 997 if (!nla)
1071 goto nla_put_failure; 998 goto nla_put_failure;
1072 tcp_key = nla_data(nla); 999 tcp_key = nla_data(nla);
1073 if (swkey->eth.type == htons(ETH_P_IP)) { 1000 tcp_key->tcp_src = output->tp.src;
1074 tcp_key->tcp_src = output->ipv4.tp.src; 1001 tcp_key->tcp_dst = output->tp.dst;
1075 tcp_key->tcp_dst = output->ipv4.tp.dst; 1002 if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS,
1076 if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS, 1003 output->tp.flags))
1077 output->ipv4.tp.flags)) 1004 goto nla_put_failure;
1078 goto nla_put_failure;
1079 } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
1080 tcp_key->tcp_src = output->ipv6.tp.src;
1081 tcp_key->tcp_dst = output->ipv6.tp.dst;
1082 if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS,
1083 output->ipv6.tp.flags))
1084 goto nla_put_failure;
1085 }
1086 } else if (swkey->ip.proto == IPPROTO_UDP) { 1005 } else if (swkey->ip.proto == IPPROTO_UDP) {
1087 struct ovs_key_udp *udp_key; 1006 struct ovs_key_udp *udp_key;
1088 1007
@@ -1090,13 +1009,8 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
1090 if (!nla) 1009 if (!nla)
1091 goto nla_put_failure; 1010 goto nla_put_failure;
1092 udp_key = nla_data(nla); 1011 udp_key = nla_data(nla);
1093 if (swkey->eth.type == htons(ETH_P_IP)) { 1012 udp_key->udp_src = output->tp.src;
1094 udp_key->udp_src = output->ipv4.tp.src; 1013 udp_key->udp_dst = output->tp.dst;
1095 udp_key->udp_dst = output->ipv4.tp.dst;
1096 } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
1097 udp_key->udp_src = output->ipv6.tp.src;
1098 udp_key->udp_dst = output->ipv6.tp.dst;
1099 }
1100 } else if (swkey->ip.proto == IPPROTO_SCTP) { 1014 } else if (swkey->ip.proto == IPPROTO_SCTP) {
1101 struct ovs_key_sctp *sctp_key; 1015 struct ovs_key_sctp *sctp_key;
1102 1016
@@ -1104,13 +1018,8 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
1104 if (!nla) 1018 if (!nla)
1105 goto nla_put_failure; 1019 goto nla_put_failure;
1106 sctp_key = nla_data(nla); 1020 sctp_key = nla_data(nla);
1107 if (swkey->eth.type == htons(ETH_P_IP)) { 1021 sctp_key->sctp_src = output->tp.src;
1108 sctp_key->sctp_src = swkey->ipv4.tp.src; 1022 sctp_key->sctp_dst = output->tp.dst;
1109 sctp_key->sctp_dst = swkey->ipv4.tp.dst;
1110 } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
1111 sctp_key->sctp_src = swkey->ipv6.tp.src;
1112 sctp_key->sctp_dst = swkey->ipv6.tp.dst;
1113 }
1114 } else if (swkey->eth.type == htons(ETH_P_IP) && 1023 } else if (swkey->eth.type == htons(ETH_P_IP) &&
1115 swkey->ip.proto == IPPROTO_ICMP) { 1024 swkey->ip.proto == IPPROTO_ICMP) {
1116 struct ovs_key_icmp *icmp_key; 1025 struct ovs_key_icmp *icmp_key;
@@ -1119,8 +1028,8 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
1119 if (!nla) 1028 if (!nla)
1120 goto nla_put_failure; 1029 goto nla_put_failure;
1121 icmp_key = nla_data(nla); 1030 icmp_key = nla_data(nla);
1122 icmp_key->icmp_type = ntohs(output->ipv4.tp.src); 1031 icmp_key->icmp_type = ntohs(output->tp.src);
1123 icmp_key->icmp_code = ntohs(output->ipv4.tp.dst); 1032 icmp_key->icmp_code = ntohs(output->tp.dst);
1124 } else if (swkey->eth.type == htons(ETH_P_IPV6) && 1033 } else if (swkey->eth.type == htons(ETH_P_IPV6) &&
1125 swkey->ip.proto == IPPROTO_ICMPV6) { 1034 swkey->ip.proto == IPPROTO_ICMPV6) {
1126 struct ovs_key_icmpv6 *icmpv6_key; 1035 struct ovs_key_icmpv6 *icmpv6_key;
@@ -1130,8 +1039,8 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
1130 if (!nla) 1039 if (!nla)
1131 goto nla_put_failure; 1040 goto nla_put_failure;
1132 icmpv6_key = nla_data(nla); 1041 icmpv6_key = nla_data(nla);
1133 icmpv6_key->icmpv6_type = ntohs(output->ipv6.tp.src); 1042 icmpv6_key->icmpv6_type = ntohs(output->tp.src);
1134 icmpv6_key->icmpv6_code = ntohs(output->ipv6.tp.dst); 1043 icmpv6_key->icmpv6_code = ntohs(output->tp.dst);
1135 1044
1136 if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION || 1045 if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
1137 icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) { 1046 icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
@@ -1143,8 +1052,8 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
1143 nd_key = nla_data(nla); 1052 nd_key = nla_data(nla);
1144 memcpy(nd_key->nd_target, &output->ipv6.nd.target, 1053 memcpy(nd_key->nd_target, &output->ipv6.nd.target,
1145 sizeof(nd_key->nd_target)); 1054 sizeof(nd_key->nd_target));
1146 memcpy(nd_key->nd_sll, output->ipv6.nd.sll, ETH_ALEN); 1055 ether_addr_copy(nd_key->nd_sll, output->ipv6.nd.sll);
1147 memcpy(nd_key->nd_tll, output->ipv6.nd.tll, ETH_ALEN); 1056 ether_addr_copy(nd_key->nd_tll, output->ipv6.nd.tll);
1148 } 1057 }
1149 } 1058 }
1150 } 1059 }
@@ -1309,13 +1218,10 @@ static int validate_and_copy_sample(const struct nlattr *attr,
1309 1218
1310static int validate_tp_port(const struct sw_flow_key *flow_key) 1219static int validate_tp_port(const struct sw_flow_key *flow_key)
1311{ 1220{
1312 if (flow_key->eth.type == htons(ETH_P_IP)) { 1221 if ((flow_key->eth.type == htons(ETH_P_IP) ||
1313 if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst) 1222 flow_key->eth.type == htons(ETH_P_IPV6)) &&
1314 return 0; 1223 (flow_key->tp.src || flow_key->tp.dst))
1315 } else if (flow_key->eth.type == htons(ETH_P_IPV6)) { 1224 return 0;
1316 if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst)
1317 return 0;
1318 }
1319 1225
1320 return -EINVAL; 1226 return -EINVAL;
1321} 1227}
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h
index b31fbe28bc7a..440151045d39 100644
--- a/net/openvswitch/flow_netlink.h
+++ b/net/openvswitch/flow_netlink.h
@@ -45,7 +45,6 @@ int ovs_nla_put_flow(const struct sw_flow_key *,
 int ovs_nla_get_flow_metadata(struct sw_flow *flow,
 			      const struct nlattr *attr);
 int ovs_nla_get_match(struct sw_flow_match *match,
-		      bool *exact_5tuple,
 		      const struct nlattr *,
 		      const struct nlattr *);
 
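Alongside removing the exact-5tuple plumbing from ovs_nla_get_match(), the flow_netlink.c hunks above cast the u64 attribute masks before printing them. A self-contained sketch of why: u64 is 'unsigned long' on some 64-bit targets, so %llx strictly requires an unsigned long long argument.

#include <stdio.h>
#include <stdint.h>

static void print_attrs(uint64_t key_attrs, uint64_t key_expected)
{
	/* the explicit casts keep the format portable across 32/64-bit */
	printf("key_attrs=%llx, expected=%llx\n",
	       (unsigned long long)key_attrs,
	       (unsigned long long)key_expected);
}

int main(void)
{
	print_attrs(1ULL << 40, 0xffULL);
	return 0;
}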
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 3c268b3d71c3..574c3abc9b30 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -48,6 +48,7 @@
 #define REHASH_INTERVAL		(10 * 60 * HZ)
 
 static struct kmem_cache *flow_cache;
+struct kmem_cache *flow_stats_cache __read_mostly;
 
 static u16 range_n_bytes(const struct sw_flow_key_range *range)
 {
@@ -57,8 +58,10 @@ static u16 range_n_bytes(const struct sw_flow_key_range *range)
 void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
 		       const struct sw_flow_mask *mask)
 {
-	const long *m = (long *)((u8 *)&mask->key + mask->range.start);
-	const long *s = (long *)((u8 *)src + mask->range.start);
+	const long *m = (const long *)((const u8 *)&mask->key +
+				       mask->range.start);
+	const long *s = (const long *)((const u8 *)src +
+				       mask->range.start);
 	long *d = (long *)((u8 *)dst + mask->range.start);
 	int i;
 
@@ -70,10 +73,11 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
 		*d++ = *s++ & *m++;
 }
 
-struct sw_flow *ovs_flow_alloc(bool percpu_stats)
+struct sw_flow *ovs_flow_alloc(void)
 {
 	struct sw_flow *flow;
-	int cpu;
+	struct flow_stats *stats;
+	int node;
 
 	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
 	if (!flow)
@@ -81,27 +85,22 @@ struct sw_flow *ovs_flow_alloc(bool percpu_stats)
 
 	flow->sf_acts = NULL;
 	flow->mask = NULL;
+	flow->stats_last_writer = NUMA_NO_NODE;
 
-	flow->stats.is_percpu = percpu_stats;
+	/* Initialize the default stat node. */
+	stats = kmem_cache_alloc_node(flow_stats_cache,
+				      GFP_KERNEL | __GFP_ZERO, 0);
+	if (!stats)
+		goto err;
 
-	if (!percpu_stats) {
-		flow->stats.stat = kzalloc(sizeof(*flow->stats.stat), GFP_KERNEL);
-		if (!flow->stats.stat)
-			goto err;
+	spin_lock_init(&stats->lock);
 
-		spin_lock_init(&flow->stats.stat->lock);
-	} else {
-		flow->stats.cpu_stats = alloc_percpu(struct flow_stats);
-		if (!flow->stats.cpu_stats)
-			goto err;
+	RCU_INIT_POINTER(flow->stats[0], stats);
 
-		for_each_possible_cpu(cpu) {
-			struct flow_stats *cpu_stats;
+	for_each_node(node)
+		if (node != 0)
+			RCU_INIT_POINTER(flow->stats[node], NULL);
 
-			cpu_stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
-			spin_lock_init(&cpu_stats->lock);
-		}
-	}
 	return flow;
 err:
 	kmem_cache_free(flow_cache, flow);
@@ -138,11 +137,13 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
 
 static void flow_free(struct sw_flow *flow)
 {
-	kfree((struct sf_flow_acts __force *)flow->sf_acts);
-	if (flow->stats.is_percpu)
-		free_percpu(flow->stats.cpu_stats);
-	else
-		kfree(flow->stats.stat);
+	int node;
+
+	kfree((struct sw_flow_actions __force *)flow->sf_acts);
+	for_each_node(node)
+		if (flow->stats[node])
+			kmem_cache_free(flow_stats_cache,
+					(struct flow_stats __force *)flow->stats[node]);
 	kmem_cache_free(flow_cache, flow);
 }
 
@@ -158,25 +159,6 @@ void ovs_flow_free(struct sw_flow *flow, bool deferred)
 	if (!flow)
 		return;
 
-	if (flow->mask) {
-		struct sw_flow_mask *mask = flow->mask;
-
-		/* ovs-lock is required to protect mask-refcount and
-		 * mask list.
-		 */
-		ASSERT_OVSL();
-		BUG_ON(!mask->ref_count);
-		mask->ref_count--;
-
-		if (!mask->ref_count) {
-			list_del_rcu(&mask->list);
-			if (deferred)
-				kfree_rcu(mask, rcu);
-			else
-				kfree(mask);
-		}
-	}
-
 	if (deferred)
 		call_rcu(&flow->rcu, rcu_free_flow_callback);
 	else
@@ -375,7 +357,7 @@ int ovs_flow_tbl_flush(struct flow_table *flow_table)
 static u32 flow_hash(const struct sw_flow_key *key, int key_start,
 		     int key_end)
 {
-	u32 *hash_key = (u32 *)((u8 *)key + key_start);
+	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
 	int hash_u32s = (key_end - key_start) >> 2;
 
 	/* Make sure number of hash bytes are multiple of u32. */
@@ -397,8 +379,8 @@ static bool cmp_key(const struct sw_flow_key *key1,
 		    const struct sw_flow_key *key2,
 		    int key_start, int key_end)
 {
-	const long *cp1 = (long *)((u8 *)key1 + key_start);
-	const long *cp2 = (long *)((u8 *)key2 + key_start);
+	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
+	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
 	long diffs = 0;
 	int i;
 
@@ -490,6 +472,25 @@ static struct table_instance *table_instance_expand(struct table_instance *ti)
 	return table_instance_rehash(ti, ti->n_buckets * 2);
 }
 
+/* Remove 'mask' from the mask list, if it is not needed any more. */
+static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
+{
+	if (mask) {
+		/* ovs-lock is required to protect mask-refcount and
+		 * mask list.
+		 */
+		ASSERT_OVSL();
+		BUG_ON(!mask->ref_count);
+		mask->ref_count--;
+
+		if (!mask->ref_count) {
+			list_del_rcu(&mask->list);
+			kfree_rcu(mask, rcu);
+		}
+	}
+}
+
+/* Must be called with OVS mutex held. */
 void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
 {
 	struct table_instance *ti = ovsl_dereference(table->ti);
@@ -497,6 +498,11 @@ void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
 	BUG_ON(table->count == 0);
 	hlist_del_rcu(&flow->hash_node[ti->node_ver]);
 	table->count--;
+
+	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
+	 * accessible as long as the RCU read lock is held.
+	 */
+	flow_mask_remove(table, flow->mask);
 }
 
 static struct sw_flow_mask *mask_alloc(void)
@@ -513,8 +519,8 @@ static struct sw_flow_mask *mask_alloc(void)
 static bool mask_equal(const struct sw_flow_mask *a,
 		       const struct sw_flow_mask *b)
 {
-	u8 *a_ = (u8 *)&a->key + a->range.start;
-	u8 *b_ = (u8 *)&b->key + b->range.start;
+	const u8 *a_ = (const u8 *)&a->key + a->range.start;
+	const u8 *b_ = (const u8 *)&b->key + b->range.start;
 
 	return  (a->range.end == b->range.end)
 		&& (a->range.start == b->range.start)
@@ -559,6 +565,7 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
 	return 0;
 }
 
+/* Must be called with OVS mutex held. */
 int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
 			struct sw_flow_mask *mask)
 {
@@ -597,16 +604,28 @@ int ovs_flow_init(void)
 	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
 	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
 
-	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
-					0, NULL);
+	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
+				       + (num_possible_nodes()
+					  * sizeof(struct flow_stats *)),
+				       0, 0, NULL);
 	if (flow_cache == NULL)
 		return -ENOMEM;
 
+	flow_stats_cache
+		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
+				    0, SLAB_HWCACHE_ALIGN, NULL);
+	if (flow_stats_cache == NULL) {
+		kmem_cache_destroy(flow_cache);
+		flow_cache = NULL;
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
 /* Uninitializes the flow module. */
 void ovs_flow_exit(void)
 {
+	kmem_cache_destroy(flow_stats_cache);
 	kmem_cache_destroy(flow_cache);
 }
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index baaeb101924d..ca8a5820f615 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -52,10 +52,12 @@ struct flow_table {
 	unsigned int count;
 };
 
+extern struct kmem_cache *flow_stats_cache;
+
 int ovs_flow_init(void);
 void ovs_flow_exit(void);
 
-struct sw_flow *ovs_flow_alloc(bool percpu_stats);
+struct sw_flow *ovs_flow_alloc(void);
 void ovs_flow_free(struct sw_flow *, bool deferred);
 
 int ovs_flow_tbl_init(struct flow_table *);
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index ebb6e2442554..35ec4fed09e2 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -172,7 +172,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
 	df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
 		htons(IP_DF) : 0;
 
-	skb->local_df = 1;
+	skb->ignore_df = 1;
 
 	return iptunnel_xmit(skb->sk, rt, skb, fl.saddr,
 			     OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE,
@@ -256,7 +256,7 @@ static void gre_tnl_destroy(struct vport *vport)
 
 	ovs_net = net_generic(net, ovs_net_id);
 
-	rcu_assign_pointer(ovs_net->vport_net.gre_vport, NULL);
+	RCU_INIT_POINTER(ovs_net->vport_net.gre_vport, NULL);
 	ovs_vport_deferred_free(vport);
 	gre_exit();
 }
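gre_tnl_destroy() above now clears the vport pointer with RCU_INIT_POINTER() instead of rcu_assign_pointer(): storing NULL publishes no new data that readers could dereference, so the store needs no write barrier. A userspace analogue of the ordering distinction, using C11 atomics as stand-ins for the RCU macros:

#include <stdatomic.h>
#include <stddef.h>

static _Atomic(void *) gre_vport;

/* Publishing real data needs release ordering so readers see the
 * pointed-to object fully initialized (rcu_assign_pointer's job). */
static void publish(void *vport)
{
	atomic_store_explicit(&gre_vport, vport, memory_order_release);
}

/* Clearing to NULL has nothing to order against, so a relaxed store
 * suffices (the assumption RCU_INIT_POINTER relies on). */
static void clear(void)
{
	atomic_store_explicit(&gre_vport, NULL, memory_order_relaxed);
}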
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 729c68763fe7..789af9280e77 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -130,7 +130,7 @@ static void do_setup(struct net_device *netdev)
 	netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
 	netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 	netdev->destructor = internal_dev_destructor;
-	SET_ETHTOOL_OPS(netdev, &internal_dev_ethtool_ops);
+	netdev->ethtool_ops = &internal_dev_ethtool_ops;
 	netdev->tx_queue_len = 0;
 
 	netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index e797a50ac2be..0edbd95c60e7 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -122,7 +122,7 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
 	vxlan_port = vxlan_vport(vport);
 	strncpy(vxlan_port->name, parms->name, IFNAMSIZ);
 
-	vs = vxlan_sock_add(net, htons(dst_port), vxlan_rcv, vport, true, false);
+	vs = vxlan_sock_add(net, htons(dst_port), vxlan_rcv, vport, true, 0);
 	if (IS_ERR(vs)) {
 		ovs_vport_free(vport);
 		return (void *)vs;
@@ -170,7 +170,7 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
 	df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
 		htons(IP_DF) : 0;
 
-	skb->local_df = 1;
+	skb->ignore_df = 1;
 
 	inet_get_local_port_range(net, &port_min, &port_max);
 	src_port = vxlan_src_port(port_min, port_max, skb);
@@ -180,7 +180,8 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
 			     OVS_CB(skb)->tun_key->ipv4_tos,
 			     OVS_CB(skb)->tun_key->ipv4_ttl, df,
 			     src_port, dst_port,
-			     htonl(be64_to_cpu(OVS_CB(skb)->tun_key->tun_id) << 8));
+			     htonl(be64_to_cpu(OVS_CB(skb)->tun_key->tun_id) << 8),
+			     false);
 	if (err < 0)
 		ip_rt_put(rt);
 error:
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index d7e50a17396c..8d721e62f388 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -172,7 +172,7 @@ void ovs_vport_deferred_free(struct vport *vport);
 */
 static inline void *vport_priv(const struct vport *vport)
 {
-	return (u8 *)vport + ALIGN(sizeof(struct vport), VPORT_ALIGN);
+	return (u8 *)(uintptr_t)vport + ALIGN(sizeof(struct vport), VPORT_ALIGN);
 }
 
 /**
@@ -185,9 +185,9 @@ static inline void *vport_priv(const struct vport *vport)
 * the result of a hash table lookup. @priv must point to the start of the
 * private data area.
 */
-static inline struct vport *vport_from_priv(const void *priv)
+static inline struct vport *vport_from_priv(void *priv)
 {
-	return (struct vport *)(priv - ALIGN(sizeof(struct vport), VPORT_ALIGN));
+	return (struct vport *)((u8 *)priv - ALIGN(sizeof(struct vport), VPORT_ALIGN));
 }
 
 void ovs_vport_receive(struct vport *, struct sk_buff *,
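Both vport.h helpers above adjust their pointer arithmetic: vport_priv() drops const explicitly by bouncing through uintptr_t (which keeps -Wcast-qual quiet about the deliberate qualifier loss), and vport_from_priv() computes on u8 * rather than void *, since void-pointer arithmetic is a GNU extension. A compilable sketch with stand-in types:

#include <stdint.h>

struct vport { int dummy; };
#define PRIV_OFFSET sizeof(struct vport)	/* stand-in for ALIGN(...) */

static void *vport_priv(const struct vport *vport)
{
	/* deliberate const drop, made explicit via uintptr_t */
	return (uint8_t *)(uintptr_t)vport + PRIV_OFFSET;
}

static struct vport *vport_from_priv(void *priv)
{
	/* byte-pointer arithmetic instead of arithmetic on void * */
	return (struct vport *)((uint8_t *)priv - PRIV_OFFSET);
}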
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 37be6e226d1b..1dde91e3dc70 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -298,7 +298,7 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
 		rds_ib_stats_inc(s_ib_tx_cq_event);
 
 		if (wc.wr_id == RDS_IB_ACK_WR_ID) {
-			if (ic->i_ack_queued + HZ/2 < jiffies)
+			if (time_after(jiffies, ic->i_ack_queued + HZ/2))
 				rds_ib_stats_inc(s_ib_tx_stalled);
 			rds_ib_ack_send_complete(ic);
 			continue;
@@ -315,7 +315,7 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
 
 		rm = rds_ib_send_unmap_op(ic, send, wc.status);
 
-		if (send->s_queued + HZ/2 < jiffies)
+		if (time_after(jiffies, send->s_queued + HZ/2))
 			rds_ib_stats_inc(s_ib_tx_stalled);
 
 		if (send->s_op) {
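The ib_send.c hunks above (and the iw_send.c ones below) replace the open-coded 'stamp + HZ/2 < jiffies' with time_after(), whose signed-difference comparison stays correct across counter wrap. A runnable demonstration; the macro body mirrors the include/linux/jiffies.h idiom and the values are illustrative:

#include <stdio.h>

#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long now = 10UL;	/* counter just wrapped past zero */
	unsigned long stamp = -20UL;	/* queued 30 ticks ago, pre-wrap */

	/* naive test: stamp + 5 is a huge value, so the stall is missed */
	printf("naive:      %d\n", stamp + 5 < now);		/* 0 */
	/* the signed difference handles the wrap */
	printf("time_after: %d\n", time_after(now, stamp + 5));	/* 1 */
	return 0;
}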
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index e40c3c5db2c4..9105ea03aec5 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -232,7 +232,7 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
 		}
 
 		if (wc.wr_id == RDS_IW_ACK_WR_ID) {
-			if (ic->i_ack_queued + HZ/2 < jiffies)
+			if (time_after(jiffies, ic->i_ack_queued + HZ/2))
 				rds_iw_stats_inc(s_iw_tx_stalled);
 			rds_iw_ack_send_complete(ic);
 			continue;
@@ -267,7 +267,7 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
 
 		send->s_wr.opcode = 0xdead;
 		send->s_wr.num_sge = 1;
-		if (send->s_queued + HZ/2 < jiffies)
+		if (time_after(jiffies, send->s_queued + HZ/2))
 			rds_iw_stats_inc(s_iw_tx_stalled);
 
 		/* If a RDMA operation produced an error, signal this right
diff --git a/net/rds/iw_sysctl.c b/net/rds/iw_sysctl.c
index 89c91515ed0c..139239d2cb22 100644
--- a/net/rds/iw_sysctl.c
+++ b/net/rds/iw_sysctl.c
@@ -111,8 +111,7 @@ static struct ctl_table rds_iw_sysctl_table[] = {
 
 void rds_iw_sysctl_exit(void)
 {
-	if (rds_iw_sysctl_hdr)
-		unregister_net_sysctl_table(rds_iw_sysctl_hdr);
+	unregister_net_sysctl_table(rds_iw_sysctl_hdr);
 }
 
 int rds_iw_sysctl_init(void)
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index c2be901d19ee..6cd9d1deafc3 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -168,7 +168,7 @@ static int rds_rdma_listen_init(void)
 		return ret;
 	}
 
-	sin.sin_family = AF_INET,
+	sin.sin_family = AF_INET;
 	sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY);
 	sin.sin_port = (__force u16)htons(RDS_PORT);
 
diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c
index b5cb2aa08f33..c3b0cd43eb56 100644
--- a/net/rds/sysctl.c
+++ b/net/rds/sysctl.c
@@ -94,8 +94,7 @@ static struct ctl_table rds_sysctl_rds_table[] = {
 
 void rds_sysctl_exit(void)
 {
-	if (rds_sysctl_reg_table)
-		unregister_net_sysctl_table(rds_sysctl_reg_table);
+	unregister_net_sysctl_table(rds_sysctl_reg_table);
 }
 
 int rds_sysctl_init(void)
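The iw_sysctl.c and sysctl.c hunks above drop the caller-side NULL tests on the assumption that unregister_net_sysctl_table(), like kfree(), is NULL-safe and simply returns when handed a NULL header. A stand-in sketch of that contract (not the kernel implementation):

struct ctl_table_header;

static void unregister_table(struct ctl_table_header *hdr)
{
	if (!hdr)	/* NULL-safe by contract: bail out early */
		return;
	/* ... actual teardown elided ... */
}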
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 4e638f851185..23ab4dcd1d9f 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -153,7 +153,7 @@ int rds_tcp_listen_init(void)
 	sock->sk->sk_data_ready = rds_tcp_listen_data_ready;
 	write_unlock_bh(&sock->sk->sk_callback_lock);
 
-	sin.sin_family = PF_INET,
+	sin.sin_family = PF_INET;
 	sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY);
 	sin.sin_port = (__force u16)htons(RDS_TCP_PORT);
 
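The sin_family fixes in rdma_transport.c and tcp_listen.c above are one-character repairs of a bug the compiler accepts silently: with a trailing ',' the three assignments fuse into a single comma expression, which only behaves correctly because the comma operator still sequences its operands. A sketch of the intended form:

#include <netinet/in.h>
#include <string.h>

static void init_sin(struct sockaddr_in *sin, unsigned short port)
{
	memset(sin, 0, sizeof(*sin));
	sin->sin_family = AF_INET;	/* ';' ends the statement; a ','
					 * here would splice the next two
					 * lines into this expression */
	sin->sin_addr.s_addr = htonl(INADDR_ANY);
	sin->sin_port = htons(port);
}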
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index bd2a5b90400c..14c98e48f261 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -36,8 +36,6 @@ struct rfkill_gpio_data {
 	struct gpio_desc	*shutdown_gpio;
 
 	struct rfkill		*rfkill_dev;
-	char			*reset_name;
-	char			*shutdown_name;
 	struct clk		*clk;
 
 	bool			clk_enabled;
@@ -47,17 +45,14 @@ static int rfkill_gpio_set_power(void *data, bool blocked)
 {
 	struct rfkill_gpio_data *rfkill = data;
 
-	if (blocked) {
-		gpiod_set_value(rfkill->shutdown_gpio, 0);
-		gpiod_set_value(rfkill->reset_gpio, 0);
-		if (!IS_ERR(rfkill->clk) && rfkill->clk_enabled)
-			clk_disable(rfkill->clk);
-	} else {
-		if (!IS_ERR(rfkill->clk) && !rfkill->clk_enabled)
-			clk_enable(rfkill->clk);
-		gpiod_set_value(rfkill->reset_gpio, 1);
-		gpiod_set_value(rfkill->shutdown_gpio, 1);
-	}
+	if (!blocked && !IS_ERR(rfkill->clk) && !rfkill->clk_enabled)
+		clk_enable(rfkill->clk);
+
+	gpiod_set_value_cansleep(rfkill->shutdown_gpio, !blocked);
+	gpiod_set_value_cansleep(rfkill->reset_gpio, !blocked);
+
+	if (blocked && !IS_ERR(rfkill->clk) && rfkill->clk_enabled)
+		clk_disable(rfkill->clk);
 
 	rfkill->clk_enabled = blocked;
 
@@ -87,10 +82,8 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
 {
 	struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data;
 	struct rfkill_gpio_data *rfkill;
-	const char *clk_name = NULL;
 	struct gpio_desc *gpio;
 	int ret;
-	int len;
 
 	rfkill = devm_kzalloc(&pdev->dev, sizeof(*rfkill), GFP_KERNEL);
 	if (!rfkill)
@@ -101,28 +94,15 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
 		if (ret)
 			return ret;
 	} else if (pdata) {
-		clk_name = pdata->power_clk_name;
 		rfkill->name = pdata->name;
 		rfkill->type = pdata->type;
 	} else {
 		return -ENODEV;
 	}
 
-	len = strlen(rfkill->name);
-	rfkill->reset_name = devm_kzalloc(&pdev->dev, len + 7, GFP_KERNEL);
-	if (!rfkill->reset_name)
-		return -ENOMEM;
-
-	rfkill->shutdown_name = devm_kzalloc(&pdev->dev, len + 10, GFP_KERNEL);
-	if (!rfkill->shutdown_name)
-		return -ENOMEM;
-
-	snprintf(rfkill->reset_name, len + 6 , "%s_reset", rfkill->name);
-	snprintf(rfkill->shutdown_name, len + 9, "%s_shutdown", rfkill->name);
-
-	rfkill->clk = devm_clk_get(&pdev->dev, clk_name);
+	rfkill->clk = devm_clk_get(&pdev->dev, NULL);
 
-	gpio = devm_gpiod_get_index(&pdev->dev, rfkill->reset_name, 0);
+	gpio = devm_gpiod_get_index(&pdev->dev, "reset", 0);
 	if (!IS_ERR(gpio)) {
 		ret = gpiod_direction_output(gpio, 0);
 		if (ret)
@@ -130,7 +110,7 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
 		rfkill->reset_gpio = gpio;
 	}
 
-	gpio = devm_gpiod_get_index(&pdev->dev, rfkill->shutdown_name, 1);
+	gpio = devm_gpiod_get_index(&pdev->dev, "shutdown", 1);
 	if (!IS_ERR(gpio)) {
 		ret = gpiod_direction_output(gpio, 0);
 		if (ret)
@@ -146,14 +126,6 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
-	if (pdata && pdata->gpio_runtime_setup) {
-		ret = pdata->gpio_runtime_setup(pdev);
-		if (ret) {
-			dev_err(&pdev->dev, "can't set up gpio\n");
-			return ret;
-		}
-	}
-
 	rfkill->rfkill_dev = rfkill_alloc(rfkill->name, &pdev->dev,
 					  rfkill->type, &rfkill_gpio_ops,
 					  rfkill);
@@ -174,20 +146,23 @@ static int rfkill_gpio_remove(struct platform_device *pdev)
 static int rfkill_gpio_remove(struct platform_device *pdev)
 {
 	struct rfkill_gpio_data *rfkill = platform_get_drvdata(pdev);
-	struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data;
 
-	if (pdata && pdata->gpio_runtime_close)
-		pdata->gpio_runtime_close(pdev);
 	rfkill_unregister(rfkill->rfkill_dev);
 	rfkill_destroy(rfkill->rfkill_dev);
 
 	return 0;
 }
 
+#ifdef CONFIG_ACPI
 static const struct acpi_device_id rfkill_acpi_match[] = {
+	{ "BCM2E1A", RFKILL_TYPE_BLUETOOTH },
+	{ "BCM2E39", RFKILL_TYPE_BLUETOOTH },
+	{ "BCM2E3D", RFKILL_TYPE_BLUETOOTH },
 	{ "BCM4752", RFKILL_TYPE_GPS },
+	{ "LNV4752", RFKILL_TYPE_GPS },
 	{ },
 };
+#endif
 
 static struct platform_driver rfkill_gpio_driver = {
 	.probe = rfkill_gpio_probe,
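The rewritten set_power path above toggles both descriptors with the *_cansleep GPIO setters, which is assumed safe because rfkill's set_block callback runs in process context; that in turn allows GPIOs behind sleeping buses such as I2C expanders. A condensed, annotated restatement of the new flow (kernel context, not standalone-compilable):

	/* unblocking: ungate the clock before raising the lines */
	if (!blocked && !IS_ERR(rfkill->clk) && !rfkill->clk_enabled)
		clk_enable(rfkill->clk);

	/* one unconditional write per line replaces the if/else arms */
	gpiod_set_value_cansleep(rfkill->shutdown_gpio, !blocked);
	gpiod_set_value_cansleep(rfkill->reset_gpio, !blocked);

	/* blocking: gate the clock only after the lines are down */
	if (blocked && !IS_ERR(rfkill->clk) && rfkill->clk_enabled)
		clk_disable(rfkill->clk);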
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index bdbdb1a7920a..45527e6b52db 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -134,7 +134,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
 	int err;
 	int tp_created = 0;
 
-	if ((n->nlmsg_type != RTM_GETTFILTER) && !netlink_capable(skb, CAP_NET_ADMIN))
+	if ((n->nlmsg_type != RTM_GETTFILTER) &&
+	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
 replay:
@@ -317,7 +318,8 @@ replay:
 		}
 	}
 
-	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh);
+	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
+			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE);
 	if (err == 0) {
 		if (tp_created) {
 			spin_lock_bh(root_lock);
@@ -504,7 +506,7 @@ void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts)
 EXPORT_SYMBOL(tcf_exts_destroy);
 
 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
-		      struct nlattr *rate_tlv, struct tcf_exts *exts)
+		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr)
 {
 #ifdef CONFIG_NET_CLS_ACT
 	{
@@ -513,7 +515,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
 		INIT_LIST_HEAD(&exts->actions);
 		if (exts->police && tb[exts->police]) {
 			act = tcf_action_init_1(net, tb[exts->police], rate_tlv,
-						"police", TCA_ACT_NOREPLACE,
+						"police", ovr,
 						TCA_ACT_BIND);
 			if (IS_ERR(act))
 				return PTR_ERR(act);
@@ -523,7 +525,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
 		} else if (exts->action && tb[exts->action]) {
 			int err;
 			err = tcf_action_init(net, tb[exts->action], rate_tlv,
-					      NULL, TCA_ACT_NOREPLACE,
+					      NULL, ovr,
 					      TCA_ACT_BIND, &exts->actions);
 			if (err)
 				return err;
@@ -543,14 +545,12 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
 		     struct tcf_exts *src)
 {
 #ifdef CONFIG_NET_CLS_ACT
-	if (!list_empty(&src->actions)) {
-		LIST_HEAD(tmp);
-		tcf_tree_lock(tp);
-		list_splice_init(&dst->actions, &tmp);
-		list_splice(&src->actions, &dst->actions);
-		tcf_tree_unlock(tp);
-		tcf_action_destroy(&tmp, TCA_ACT_UNBIND);
-	}
+	LIST_HEAD(tmp);
+	tcf_tree_lock(tp);
+	list_splice_init(&dst->actions, &tmp);
+	list_splice(&src->actions, &dst->actions);
+	tcf_tree_unlock(tp);
+	tcf_action_destroy(&tmp, TCA_ACT_UNBIND);
 #endif
 }
 EXPORT_SYMBOL(tcf_exts_change);
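tc_ctl_tfilter() above now derives a single replace-vs-create policy from the netlink header and threads it as 'ovr' through every classifier's ->change() into tcf_exts_validate(). A minimal userspace sketch of the mapping; the enum values are stand-ins, only NLM_F_CREATE (0x400) is the real uapi constant:

#define NLM_F_CREATE 0x400	/* from <linux/netlink.h> */

enum replace_policy { ACT_NOREPLACE, ACT_REPLACE };

static enum replace_policy policy_from_flags(unsigned int nlmsg_flags)
{
	/* an explicit create request must not silently replace an
	 * existing action; anything else may */
	return (nlmsg_flags & NLM_F_CREATE) ? ACT_NOREPLACE : ACT_REPLACE;
}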
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index e98ca99c202b..0ae1813e3e90 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -130,14 +130,14 @@ static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = {
 static int basic_set_parms(struct net *net, struct tcf_proto *tp,
 			   struct basic_filter *f, unsigned long base,
 			   struct nlattr **tb,
-			   struct nlattr *est)
+			   struct nlattr *est, bool ovr)
 {
 	int err;
 	struct tcf_exts e;
 	struct tcf_ematch_tree t;
 
 	tcf_exts_init(&e, TCA_BASIC_ACT, TCA_BASIC_POLICE);
-	err = tcf_exts_validate(net, tp, tb, est, &e);
+	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
 	if (err < 0)
 		return err;
 
@@ -161,7 +161,7 @@ errout:
 
 static int basic_change(struct net *net, struct sk_buff *in_skb,
 			struct tcf_proto *tp, unsigned long base, u32 handle,
-			struct nlattr **tca, unsigned long *arg)
+			struct nlattr **tca, unsigned long *arg, bool ovr)
 {
 	int err;
 	struct basic_head *head = tp->root;
@@ -179,7 +179,7 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
 	if (f != NULL) {
 		if (handle && f->handle != handle)
 			return -EINVAL;
-		return basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE]);
+		return basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
 	}
 
 	err = -ENOBUFS;
@@ -206,7 +206,7 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
 		f->handle = head->hgenerator;
 	}
 
-	err = basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE]);
+	err = basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
 	if (err < 0)
 		goto errout;
 
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 8e3cf49118e3..13f64df2c710 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -156,11 +156,11 @@ static void cls_bpf_put(struct tcf_proto *tp, unsigned long f)
 static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 				   struct cls_bpf_prog *prog,
 				   unsigned long base, struct nlattr **tb,
-				   struct nlattr *est)
+				   struct nlattr *est, bool ovr)
 {
 	struct sock_filter *bpf_ops, *bpf_old;
 	struct tcf_exts exts;
-	struct sock_fprog tmp;
+	struct sock_fprog_kern tmp;
 	struct sk_filter *fp, *fp_old;
 	u16 bpf_size, bpf_len;
 	u32 classid;
@@ -170,7 +170,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 		return -EINVAL;
 
 	tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
-	ret = tcf_exts_validate(net, tp, tb, est, &exts);
+	ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
 	if (ret < 0)
 		return ret;
 
@@ -191,7 +191,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);
 
 	tmp.len = bpf_len;
-	tmp.filter = (struct sock_filter __user *) bpf_ops;
+	tmp.filter = bpf_ops;
 
 	ret = sk_unattached_filter_create(&fp, &tmp);
 	if (ret)
@@ -242,7 +242,7 @@ static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
 static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
 			  struct tcf_proto *tp, unsigned long base,
 			  u32 handle, struct nlattr **tca,
-			  unsigned long *arg)
+			  unsigned long *arg, bool ovr)
 {
 	struct cls_bpf_head *head = tp->root;
 	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) *arg;
@@ -260,7 +260,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
 		if (handle && prog->handle != handle)
 			return -EINVAL;
 		return cls_bpf_modify_existing(net, tp, prog, base, tb,
-					       tca[TCA_RATE]);
+					       tca[TCA_RATE], ovr);
 	}
 
 	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
@@ -277,7 +277,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
 		goto errout;
 	}
 
-	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE]);
+	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
 	if (ret < 0)
 		goto errout;
 
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 8e2158ab551c..cacf01bd04f0 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -83,7 +83,7 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
 static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
 			     struct tcf_proto *tp, unsigned long base,
 			     u32 handle, struct nlattr **tca,
-			     unsigned long *arg)
+			     unsigned long *arg, bool ovr)
 {
 	struct nlattr *tb[TCA_CGROUP_MAX + 1];
 	struct cls_cgroup_head *head = tp->root;
@@ -119,7 +119,7 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
 		return err;
 
 	tcf_exts_init(&e, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
-	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e);
+	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
 	if (err < 0)
 		return err;
 
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 257029c54332..35be16f7c192 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -349,7 +349,7 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
 static int flow_change(struct net *net, struct sk_buff *in_skb,
 		       struct tcf_proto *tp, unsigned long base,
 		       u32 handle, struct nlattr **tca,
-		       unsigned long *arg)
+		       unsigned long *arg, bool ovr)
 {
 	struct flow_head *head = tp->root;
 	struct flow_filter *f;
@@ -393,7 +393,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 	}
 
 	tcf_exts_init(&e, TCA_FLOW_ACT, TCA_FLOW_POLICE);
-	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e);
+	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
 	if (err < 0)
 		return err;
 
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 63a3ce75c02e..861b03ccfed0 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -169,7 +169,7 @@ static const struct nla_policy fw_policy[TCA_FW_MAX + 1] = {
 
 static int
 fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,
-		struct nlattr **tb, struct nlattr **tca, unsigned long base)
+		struct nlattr **tb, struct nlattr **tca, unsigned long base, bool ovr)
 {
 	struct fw_head *head = tp->root;
 	struct tcf_exts e;
@@ -177,7 +177,7 @@ fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,
 	int err;
 
 	tcf_exts_init(&e, TCA_FW_ACT, TCA_FW_POLICE);
-	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e);
+	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
 	if (err < 0)
 		return err;
 
@@ -218,7 +218,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
 		     struct tcf_proto *tp, unsigned long base,
 		     u32 handle,
 		     struct nlattr **tca,
-		     unsigned long *arg)
+		     unsigned long *arg, bool ovr)
 {
 	struct fw_head *head = tp->root;
 	struct fw_filter *f = (struct fw_filter *) *arg;
@@ -236,7 +236,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
 	if (f != NULL) {
 		if (f->id != handle && handle)
 			return -EINVAL;
-		return fw_change_attrs(net, tp, f, tb, tca, base);
+		return fw_change_attrs(net, tp, f, tb, tca, base, ovr);
 	}
 
 	if (!handle)
@@ -264,7 +264,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
 	tcf_exts_init(&f->exts, TCA_FW_ACT, TCA_FW_POLICE);
 	f->id = handle;
 
-	err = fw_change_attrs(net, tp, f, tb, tca, base);
+	err = fw_change_attrs(net, tp, f, tb, tca, base, ovr);
 	if (err < 0)
 		goto errout;
 
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 1ad3068f2ce1..dd9fc2523c76 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -333,7 +333,8 @@ static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
333static int route4_set_parms(struct net *net, struct tcf_proto *tp, 333static int route4_set_parms(struct net *net, struct tcf_proto *tp,
334 unsigned long base, struct route4_filter *f, 334 unsigned long base, struct route4_filter *f,
335 u32 handle, struct route4_head *head, 335 u32 handle, struct route4_head *head,
336 struct nlattr **tb, struct nlattr *est, int new) 336 struct nlattr **tb, struct nlattr *est, int new,
337 bool ovr)
337{ 338{
338 int err; 339 int err;
339 u32 id = 0, to = 0, nhandle = 0x8000; 340 u32 id = 0, to = 0, nhandle = 0x8000;
@@ -343,7 +344,7 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
343 struct tcf_exts e; 344 struct tcf_exts e;
344 345
345 tcf_exts_init(&e, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE); 346 tcf_exts_init(&e, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
346 err = tcf_exts_validate(net, tp, tb, est, &e); 347 err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
347 if (err < 0) 348 if (err < 0)
348 return err; 349 return err;
349 350
@@ -428,7 +429,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
428 struct tcf_proto *tp, unsigned long base, 429 struct tcf_proto *tp, unsigned long base,
429 u32 handle, 430 u32 handle,
430 struct nlattr **tca, 431 struct nlattr **tca,
431 unsigned long *arg) 432 unsigned long *arg, bool ovr)
432{ 433{
433 struct route4_head *head = tp->root; 434 struct route4_head *head = tp->root;
434 struct route4_filter *f, *f1, **fp; 435 struct route4_filter *f, *f1, **fp;
@@ -455,7 +456,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
455 old_handle = f->handle; 456 old_handle = f->handle;
456 457
457 err = route4_set_parms(net, tp, base, f, handle, head, tb, 458 err = route4_set_parms(net, tp, base, f, handle, head, tb,
458 tca[TCA_RATE], 0); 459 tca[TCA_RATE], 0, ovr);
459 if (err < 0) 460 if (err < 0)
460 return err; 461 return err;
461 462
@@ -479,7 +480,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
479 480
480 tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE); 481 tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
481 err = route4_set_parms(net, tp, base, f, handle, head, tb, 482 err = route4_set_parms(net, tp, base, f, handle, head, tb,
482 tca[TCA_RATE], 1); 483 tca[TCA_RATE], 1, ovr);
483 if (err < 0) 484 if (err < 0)
484 goto errout; 485 goto errout;
485 486
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 19f8e5dfa8bd..1020e233a5d6 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -415,7 +415,7 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
415 struct tcf_proto *tp, unsigned long base, 415 struct tcf_proto *tp, unsigned long base,
416 u32 handle, 416 u32 handle,
417 struct nlattr **tca, 417 struct nlattr **tca,
418 unsigned long *arg) 418 unsigned long *arg, bool ovr)
419{ 419{
420 struct rsvp_head *data = tp->root; 420 struct rsvp_head *data = tp->root;
421 struct rsvp_filter *f, **fp; 421 struct rsvp_filter *f, **fp;
@@ -436,7 +436,7 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
436 return err; 436 return err;
437 437
438 tcf_exts_init(&e, TCA_RSVP_ACT, TCA_RSVP_POLICE); 438 tcf_exts_init(&e, TCA_RSVP_ACT, TCA_RSVP_POLICE);
439 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e); 439 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
440 if (err < 0) 440 if (err < 0)
441 return err; 441 return err;
442 442
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index f435a88d899a..c721cd4a469f 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -198,7 +198,7 @@ static int
198tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, 198tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
199 u32 handle, struct tcindex_data *p, 199 u32 handle, struct tcindex_data *p,
200 struct tcindex_filter_result *r, struct nlattr **tb, 200 struct tcindex_filter_result *r, struct nlattr **tb,
201 struct nlattr *est) 201 struct nlattr *est, bool ovr)
202{ 202{
203 int err, balloc = 0; 203 int err, balloc = 0;
204 struct tcindex_filter_result new_filter_result, *old_r = r; 204 struct tcindex_filter_result new_filter_result, *old_r = r;
@@ -208,7 +208,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
208 struct tcf_exts e; 208 struct tcf_exts e;
209 209
210 tcf_exts_init(&e, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); 210 tcf_exts_init(&e, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
211 err = tcf_exts_validate(net, tp, tb, est, &e); 211 err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
212 if (err < 0) 212 if (err < 0)
213 return err; 213 return err;
214 214
@@ -341,7 +341,7 @@ errout:
341static int 341static int
342tcindex_change(struct net *net, struct sk_buff *in_skb, 342tcindex_change(struct net *net, struct sk_buff *in_skb,
343 struct tcf_proto *tp, unsigned long base, u32 handle, 343 struct tcf_proto *tp, unsigned long base, u32 handle,
344 struct nlattr **tca, unsigned long *arg) 344 struct nlattr **tca, unsigned long *arg, bool ovr)
345{ 345{
346 struct nlattr *opt = tca[TCA_OPTIONS]; 346 struct nlattr *opt = tca[TCA_OPTIONS];
347 struct nlattr *tb[TCA_TCINDEX_MAX + 1]; 347 struct nlattr *tb[TCA_TCINDEX_MAX + 1];
@@ -361,7 +361,7 @@ tcindex_change(struct net *net, struct sk_buff *in_skb,
361 return err; 361 return err;
362 362
363 return tcindex_set_parms(net, tp, base, handle, p, r, tb, 363 return tcindex_set_parms(net, tp, base, handle, p, r, tb,
364 tca[TCA_RATE]); 364 tca[TCA_RATE], ovr);
365} 365}
366 366
367 367
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 84c28daff848..c39b583ace32 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -486,13 +486,13 @@ static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
486static int u32_set_parms(struct net *net, struct tcf_proto *tp, 486static int u32_set_parms(struct net *net, struct tcf_proto *tp,
487 unsigned long base, struct tc_u_hnode *ht, 487 unsigned long base, struct tc_u_hnode *ht,
488 struct tc_u_knode *n, struct nlattr **tb, 488 struct tc_u_knode *n, struct nlattr **tb,
489 struct nlattr *est) 489 struct nlattr *est, bool ovr)
490{ 490{
491 int err; 491 int err;
492 struct tcf_exts e; 492 struct tcf_exts e;
493 493
494 tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE); 494 tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE);
495 err = tcf_exts_validate(net, tp, tb, est, &e); 495 err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
496 if (err < 0) 496 if (err < 0)
497 return err; 497 return err;
498 498
@@ -545,7 +545,7 @@ errout:
545static int u32_change(struct net *net, struct sk_buff *in_skb, 545static int u32_change(struct net *net, struct sk_buff *in_skb,
546 struct tcf_proto *tp, unsigned long base, u32 handle, 546 struct tcf_proto *tp, unsigned long base, u32 handle,
547 struct nlattr **tca, 547 struct nlattr **tca,
548 unsigned long *arg) 548 unsigned long *arg, bool ovr)
549{ 549{
550 struct tc_u_common *tp_c = tp->data; 550 struct tc_u_common *tp_c = tp->data;
551 struct tc_u_hnode *ht; 551 struct tc_u_hnode *ht;
@@ -569,7 +569,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
569 return -EINVAL; 569 return -EINVAL;
570 570
571 return u32_set_parms(net, tp, base, n->ht_up, n, tb, 571 return u32_set_parms(net, tp, base, n->ht_up, n, tb,
572 tca[TCA_RATE]); 572 tca[TCA_RATE], ovr);
573 } 573 }
574 574
575 if (tb[TCA_U32_DIVISOR]) { 575 if (tb[TCA_U32_DIVISOR]) {
@@ -656,7 +656,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
656 } 656 }
657#endif 657#endif
658 658
659 err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE]); 659 err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr);
660 if (err == 0) { 660 if (err == 0) {
661 struct tc_u_knode **ins; 661 struct tc_u_knode **ins;
662 for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next) 662 for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 400769014bbd..58bed7599db7 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -563,7 +563,7 @@ out:
563} 563}
564EXPORT_SYMBOL(__qdisc_calculate_pkt_len); 564EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
565 565
566void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc) 566void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
567{ 567{
568 if (!(qdisc->flags & TCQ_F_WARN_NONWC)) { 568 if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
569 pr_warn("%s: %s qdisc %X: is non-work-conserving?\n", 569 pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
@@ -1084,7 +1084,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
1084 struct Qdisc *p = NULL; 1084 struct Qdisc *p = NULL;
1085 int err; 1085 int err;
1086 1086
1087 if ((n->nlmsg_type != RTM_GETQDISC) && !netlink_capable(skb, CAP_NET_ADMIN)) 1087 if ((n->nlmsg_type != RTM_GETQDISC) &&
1088 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1088 return -EPERM; 1089 return -EPERM;
1089 1090
1090 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); 1091 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1151,7 +1152,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
1151 struct Qdisc *q, *p; 1152 struct Qdisc *q, *p;
1152 int err; 1153 int err;
1153 1154
1154 if (!netlink_capable(skb, CAP_NET_ADMIN)) 1155 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1155 return -EPERM; 1156 return -EPERM;
1156 1157
1157replay: 1158replay:
@@ -1490,7 +1491,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
1490 u32 qid; 1491 u32 qid;
1491 int err; 1492 int err;
1492 1493
1493 if ((n->nlmsg_type != RTM_GETTCLASS) && !netlink_capable(skb, CAP_NET_ADMIN)) 1494 if ((n->nlmsg_type != RTM_GETTCLASS) &&
1495 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1494 return -EPERM; 1496 return -EPERM;
1495 1497
1496 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); 1498 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
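
netlink_capable() tests CAP_NET_ADMIN against the initial user namespace only, while netlink_ns_capable() tests the capability the sending socket holds against an arbitrary user namespace, here the owner of the qdisc's network namespace. The practical effect is that a process privileged over a child network namespace can manage qdiscs and classes there without being root on the host. The resulting permission pattern, mirroring the hunks above:

    static int example_doit(struct sk_buff *skb, struct nlmsghdr *n)
    {
        struct net *net = sock_net(skb->sk);

        /* GET requests stay unprivileged; anything that modifies
         * state needs CAP_NET_ADMIN relative to the netns owner.
         */
        if (n->nlmsg_type != RTM_GETQDISC &&
            !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
            return -EPERM;
        /* ... */
        return 0;
    }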
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 2aee02802c27..ed30e436128b 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -391,12 +391,7 @@ static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
391 391
392static void choke_free(void *addr) 392static void choke_free(void *addr)
393{ 393{
394 if (addr) { 394 kvfree(addr);
395 if (is_vmalloc_addr(addr))
396 vfree(addr);
397 else
398 kfree(addr);
399 }
400} 395}
401 396
402static int choke_change(struct Qdisc *sch, struct nlattr *opt) 397static int choke_change(struct Qdisc *sch, struct nlattr *opt)
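
This hunk and the matching ones in sch_fq, sch_fq_codel, sch_hhf, sch_netem and sch_sfq below all collapse the same open-coded dispatch: these qdiscs allocate large tables via kmalloc with a vmalloc fallback, so their free paths had to branch on is_vmalloc_addr(). kvfree() performs exactly that branch and is a no-op on NULL, which is why the surrounding NULL checks disappear as well. A sketch of the alloc/free pairing these qdiscs use:

    static void *table_alloc(size_t sz)
    {
        /* Try contiguous memory first; __GFP_NOWARN silences the
         * expected failure before falling back to vmalloc.
         */
        void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

        if (!ptr)
            ptr = vzalloc(sz);
        return ptr;
    }

    static void table_free(void *addr)
    {
        kvfree(addr); /* picks vfree() or kfree(); accepts NULL */
    }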
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 8302717ea303..7bbbfe112192 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -391,8 +391,10 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
391 while (1) { 391 while (1) {
392 cl = list_first_entry(&q->active, struct drr_class, alist); 392 cl = list_first_entry(&q->active, struct drr_class, alist);
393 skb = cl->qdisc->ops->peek(cl->qdisc); 393 skb = cl->qdisc->ops->peek(cl->qdisc);
394 if (skb == NULL) 394 if (skb == NULL) {
395 qdisc_warn_nonwc(__func__, cl->qdisc);
395 goto out; 396 goto out;
397 }
396 398
397 len = qdisc_pkt_len(skb); 399 len = qdisc_pkt_len(skb);
398 if (len <= cl->deficit) { 400 if (len <= cl->deficit) {
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 23c682b42f99..ba32c2b005d0 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -591,10 +591,7 @@ static void *fq_alloc_node(size_t sz, int node)
591 591
592static void fq_free(void *addr) 592static void fq_free(void *addr)
593{ 593{
594 if (addr && is_vmalloc_addr(addr)) 594 kvfree(addr);
595 vfree(addr);
596 else
597 kfree(addr);
598} 595}
599 596
600static int fq_resize(struct Qdisc *sch, u32 log) 597static int fq_resize(struct Qdisc *sch, u32 log)
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 0bf432c782c1..063b726bf1f8 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -365,12 +365,7 @@ static void *fq_codel_zalloc(size_t sz)
365 365
366static void fq_codel_free(void *addr) 366static void fq_codel_free(void *addr)
367{ 367{
368 if (addr) { 368 kvfree(addr);
369 if (is_vmalloc_addr(addr))
370 vfree(addr);
371 else
372 kfree(addr);
373 }
374} 369}
375 370
376static void fq_codel_destroy(struct Qdisc *sch) 371static void fq_codel_destroy(struct Qdisc *sch)
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 6e957c3b9854..d85b6812a7d4 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -414,7 +414,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
414 } 414 }
415 bucket->deficit = weight * q->quantum; 415 bucket->deficit = weight * q->quantum;
416 } 416 }
417 if (++sch->q.qlen < sch->limit) 417 if (++sch->q.qlen <= sch->limit)
418 return NET_XMIT_SUCCESS; 418 return NET_XMIT_SUCCESS;
419 419
420 q->drop_overlimit++; 420 q->drop_overlimit++;
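
Because q.qlen is pre-incremented, the old test "++sch->q.qlen < sch->limit" treated the enqueue that brings the queue to exactly sch->limit as an overflow, capping the effective capacity at limit - 1 packets; "<=" lets the queue actually reach the configured limit. Illustration:

    /* With sch->limit == 3:
     *   old: ++qlen < 3   -> overlimit once qlen reaches 3,
     *                        effective capacity 2 packets
     *   new: ++qlen <= 3  -> overlimit only past 3,
     *                        effective capacity 3 packets
     */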
@@ -494,12 +494,7 @@ static void *hhf_zalloc(size_t sz)
494 494
495static void hhf_free(void *addr) 495static void hhf_free(void *addr)
496{ 496{
497 if (addr) { 497 kvfree(addr);
498 if (is_vmalloc_addr(addr))
499 vfree(addr);
500 else
501 kfree(addr);
502 }
503} 498}
504 499
505static void hhf_destroy(struct Qdisc *sch) 500static void hhf_destroy(struct Qdisc *sch)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index f1669a00f571..111d70fddaea 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -648,12 +648,7 @@ static void netem_reset(struct Qdisc *sch)
648 648
649static void dist_free(struct disttable *d) 649static void dist_free(struct disttable *d)
650{ 650{
651 if (d) { 651 kvfree(d);
652 if (is_vmalloc_addr(d))
653 vfree(d);
654 else
655 kfree(d);
656 }
657} 652}
658 653
659/* 654/*
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 87317ff0b4ec..1af2f73906d0 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -716,12 +716,7 @@ static void *sfq_alloc(size_t sz)
716 716
717static void sfq_free(void *addr) 717static void sfq_free(void *addr)
718{ 718{
719 if (addr) { 719 kvfree(addr);
720 if (is_vmalloc_addr(addr))
721 vfree(addr);
722 else
723 kfree(addr);
724 }
725} 720}
726 721
727static void sfq_destroy(struct Qdisc *sch) 722static void sfq_destroy(struct Qdisc *sch)
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 39579c3e0d14..9e0509ce2f84 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -55,6 +55,7 @@
55#include <net/sctp/sm.h> 55#include <net/sctp/sm.h>
56 56
57/* Forward declarations for internal functions. */ 57/* Forward declarations for internal functions. */
58static void sctp_select_active_and_retran_path(struct sctp_association *asoc);
58static void sctp_assoc_bh_rcv(struct work_struct *work); 59static void sctp_assoc_bh_rcv(struct work_struct *work);
59static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc); 60static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
60static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc); 61static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
@@ -774,9 +775,6 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
774 sctp_transport_cmd_t command, 775 sctp_transport_cmd_t command,
775 sctp_sn_error_t error) 776 sctp_sn_error_t error)
776{ 777{
777 struct sctp_transport *t = NULL;
778 struct sctp_transport *first;
779 struct sctp_transport *second;
780 struct sctp_ulpevent *event; 778 struct sctp_ulpevent *event;
781 struct sockaddr_storage addr; 779 struct sockaddr_storage addr;
782 int spc_state = 0; 780 int spc_state = 0;
@@ -829,13 +827,14 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
829 return; 827 return;
830 } 828 }
831 829
832 /* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the 830 /* Generate and send a SCTP_PEER_ADDR_CHANGE notification
833 * user. 831 * to the user.
834 */ 832 */
835 if (ulp_notify) { 833 if (ulp_notify) {
836 memset(&addr, 0, sizeof(struct sockaddr_storage)); 834 memset(&addr, 0, sizeof(struct sockaddr_storage));
837 memcpy(&addr, &transport->ipaddr, 835 memcpy(&addr, &transport->ipaddr,
838 transport->af_specific->sockaddr_len); 836 transport->af_specific->sockaddr_len);
837
839 event = sctp_ulpevent_make_peer_addr_change(asoc, &addr, 838 event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
840 0, spc_state, error, GFP_ATOMIC); 839 0, spc_state, error, GFP_ATOMIC);
841 if (event) 840 if (event)
@@ -843,60 +842,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
843 } 842 }
844 843
845 /* Select new active and retran paths. */ 844 /* Select new active and retran paths. */
846 845 sctp_select_active_and_retran_path(asoc);
847 /* Look for the two most recently used active transports.
848 *
849 * This code produces the wrong ordering whenever jiffies
850 * rolls over, but we still get usable transports, so we don't
851 * worry about it.
852 */
853 first = NULL; second = NULL;
854
855 list_for_each_entry(t, &asoc->peer.transport_addr_list,
856 transports) {
857
858 if ((t->state == SCTP_INACTIVE) ||
859 (t->state == SCTP_UNCONFIRMED) ||
860 (t->state == SCTP_PF))
861 continue;
862 if (!first || t->last_time_heard > first->last_time_heard) {
863 second = first;
864 first = t;
865 } else if (!second ||
866 t->last_time_heard > second->last_time_heard)
867 second = t;
868 }
869
870 /* RFC 2960 6.4 Multi-Homed SCTP Endpoints
871 *
872 * By default, an endpoint should always transmit to the
873 * primary path, unless the SCTP user explicitly specifies the
874 * destination transport address (and possibly source
875 * transport address) to use.
876 *
877 * [If the primary is active but not most recent, bump the most
878 * recently used transport.]
879 */
880 if (((asoc->peer.primary_path->state == SCTP_ACTIVE) ||
881 (asoc->peer.primary_path->state == SCTP_UNKNOWN)) &&
882 first != asoc->peer.primary_path) {
883 second = first;
884 first = asoc->peer.primary_path;
885 }
886
887 if (!second)
888 second = first;
889 /* If we failed to find a usable transport, just camp on the
890 * primary, even if it is inactive.
891 */
892 if (!first) {
893 first = asoc->peer.primary_path;
894 second = asoc->peer.primary_path;
895 }
896
897 /* Set the active and retran transports. */
898 asoc->peer.active_path = first;
899 asoc->peer.retran_path = second;
900} 846}
901 847
902/* Hold a reference to an association. */ 848/* Hold a reference to an association. */
@@ -1090,7 +1036,7 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
1090 } 1036 }
1091 1037
1092 if (chunk->transport) 1038 if (chunk->transport)
1093 chunk->transport->last_time_heard = jiffies; 1039 chunk->transport->last_time_heard = ktime_get();
1094 1040
1095 /* Run through the state machine. */ 1041 /* Run through the state machine. */
1096 error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype, 1042 error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
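
last_time_heard switches from jiffies to ktime_get() here and in endpointola.c and transport.c below. jiffies wraps (after roughly 49 days at HZ=1000 on 32-bit), which is exactly the rollover-ordering bug the deleted comment in sctp_assoc_control_transport admitted to; ktime_t is a monotonic 64-bit value, so recency comparisons stay valid indefinitely. A sketch of the comparison this enables:

    static bool heard_more_recently(const struct sctp_transport *a,
                                    const struct sctp_transport *b)
    {
        /* Two ktime_get() samples never wrap in practice, so this
         * ordering is always meaningful, unlike with jiffies.
         */
        return ktime_after(a->last_time_heard, b->last_time_heard);
    }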
@@ -1278,13 +1224,41 @@ static u8 sctp_trans_score(const struct sctp_transport *trans)
1278 return sctp_trans_state_to_prio_map[trans->state]; 1224 return sctp_trans_state_to_prio_map[trans->state];
1279} 1225}
1280 1226
1227static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
1228 struct sctp_transport *trans2)
1229{
1230 if (trans1->error_count > trans2->error_count) {
1231 return trans2;
1232 } else if (trans1->error_count == trans2->error_count &&
1233 ktime_after(trans2->last_time_heard,
1234 trans1->last_time_heard)) {
1235 return trans2;
1236 } else {
1237 return trans1;
1238 }
1239}
1240
1281static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr, 1241static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
1282 struct sctp_transport *best) 1242 struct sctp_transport *best)
1283{ 1243{
1244 u8 score_curr, score_best;
1245
1284 if (best == NULL) 1246 if (best == NULL)
1285 return curr; 1247 return curr;
1286 1248
1287 return sctp_trans_score(curr) > sctp_trans_score(best) ? curr : best; 1249 score_curr = sctp_trans_score(curr);
1250 score_best = sctp_trans_score(best);
1251
1252 /* First, try a score-based selection if both transport states
1253 * differ. If we're in a tie, lets try to make a more clever
1254 * decision here based on error counts and last time heard.
1255 */
1256 if (score_curr > score_best)
1257 return curr;
1258 else if (score_curr == score_best)
1259 return sctp_trans_elect_tie(curr, best);
1260 else
1261 return best;
1288} 1262}
1289 1263
1290void sctp_assoc_update_retran_path(struct sctp_association *asoc) 1264void sctp_assoc_update_retran_path(struct sctp_association *asoc)
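
sctp_trans_elect_best() now decides on state score first and falls back to sctp_trans_elect_tie() only when the scores are equal; the tie-break prefers the transport with fewer errors, and among equal error counts the one heard from most recently. A worked example (values illustrative):

    /*   curr: state ACTIVE, error_count 0, last heard t=100
     *   best: state ACTIVE, error_count 0, last heard t=250
     *
     *   score(curr) == score(best)        -> go to the tie-break
     *   error counts are equal            -> compare timestamps
     *   ktime_after(t=250, t=100) is true -> 'best' is returned
     */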
@@ -1325,6 +1299,76 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
1325 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa); 1299 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
1326} 1300}
1327 1301
1302static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
1303{
1304 struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL;
1305 struct sctp_transport *trans_pf = NULL;
1306
1307 /* Look for the two most recently used active transports. */
1308 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
1309 transports) {
1310 /* Skip uninteresting transports. */
1311 if (trans->state == SCTP_INACTIVE ||
1312 trans->state == SCTP_UNCONFIRMED)
1313 continue;
1314 /* Keep track of the best PF transport from our
1315 * list in case we don't find an active one.
1316 */
1317 if (trans->state == SCTP_PF) {
1318 trans_pf = sctp_trans_elect_best(trans, trans_pf);
1319 continue;
1320 }
1321 /* For active transports, pick the most recent ones. */
1322 if (trans_pri == NULL ||
1323 ktime_after(trans->last_time_heard,
1324 trans_pri->last_time_heard)) {
1325 trans_sec = trans_pri;
1326 trans_pri = trans;
1327 } else if (trans_sec == NULL ||
1328 ktime_after(trans->last_time_heard,
1329 trans_sec->last_time_heard)) {
1330 trans_sec = trans;
1331 }
1332 }
1333
1334 /* RFC 2960 6.4 Multi-Homed SCTP Endpoints
1335 *
1336 * By default, an endpoint should always transmit to the primary
1337 * path, unless the SCTP user explicitly specifies the
1338 * destination transport address (and possibly source transport
1339 * address) to use. [If the primary is active but not most recent,
1340 * bump the most recently used transport.]
1341 */
1342 if ((asoc->peer.primary_path->state == SCTP_ACTIVE ||
1343 asoc->peer.primary_path->state == SCTP_UNKNOWN) &&
1344 asoc->peer.primary_path != trans_pri) {
1345 trans_sec = trans_pri;
1346 trans_pri = asoc->peer.primary_path;
1347 }
1348
1349 /* We did not find anything useful for a possible retransmission
1350	 * path; either the primary path that we found is the same as
1351	 * the current one, or we did not find an active one at all.
1352 */
1353 if (trans_sec == NULL)
1354 trans_sec = trans_pri;
1355
1356 /* If we failed to find a usable transport, just camp on the
1357 * primary or retran, even if they are inactive, if possible
1358 * pick a PF iff it's the better choice.
1359 */
1360 if (trans_pri == NULL) {
1361 trans_pri = sctp_trans_elect_best(asoc->peer.primary_path,
1362 asoc->peer.retran_path);
1363 trans_pri = sctp_trans_elect_best(trans_pri, trans_pf);
1364 trans_sec = asoc->peer.primary_path;
1365 }
1366
1367 /* Set the active and retran transports. */
1368 asoc->peer.active_path = trans_pri;
1369 asoc->peer.retran_path = trans_sec;
1370}
1371
1328struct sctp_transport * 1372struct sctp_transport *
1329sctp_assoc_choose_alter_transport(struct sctp_association *asoc, 1373sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
1330 struct sctp_transport *last_sent_to) 1374 struct sctp_transport *last_sent_to)
@@ -1547,7 +1591,7 @@ int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
1547/* Set an association id for a given association */ 1591/* Set an association id for a given association */
1548int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp) 1592int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
1549{ 1593{
1550 bool preload = gfp & __GFP_WAIT; 1594 bool preload = !!(gfp & __GFP_WAIT);
1551 int ret; 1595 int ret;
1552 1596
1553 /* If the id is already assigned, keep it. */ 1597 /* If the id is already assigned, keep it. */
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 3d9f429858dc..9da76ba4d10f 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -481,7 +481,7 @@ normal:
481 } 481 }
482 482
483 if (chunk->transport) 483 if (chunk->transport)
484 chunk->transport->last_time_heard = jiffies; 484 chunk->transport->last_time_heard = ktime_get();
485 485
486 error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype, state, 486 error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype, state,
487 ep, asoc, chunk, GFP_ATOMIC); 487 ep, asoc, chunk, GFP_ATOMIC);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 2b1738ef9394..1999592ba88c 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -216,7 +216,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
216 IP6_ECN_flow_xmit(sk, fl6->flowlabel); 216 IP6_ECN_flow_xmit(sk, fl6->flowlabel);
217 217
218 if (!(transport->param_flags & SPP_PMTUD_ENABLE)) 218 if (!(transport->param_flags & SPP_PMTUD_ENABLE))
219 skb->local_df = 1; 219 skb->ignore_df = 1;
220 220
221 SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS); 221 SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
222 222
@@ -943,7 +943,6 @@ static struct inet_protosw sctpv6_seqpacket_protosw = {
943 .protocol = IPPROTO_SCTP, 943 .protocol = IPPROTO_SCTP,
944 .prot = &sctpv6_prot, 944 .prot = &sctpv6_prot,
945 .ops = &inet6_seqpacket_ops, 945 .ops = &inet6_seqpacket_ops,
946 .no_check = 0,
947 .flags = SCTP_PROTOSW_FLAG 946 .flags = SCTP_PROTOSW_FLAG
948}; 947};
949static struct inet_protosw sctpv6_stream_protosw = { 948static struct inet_protosw sctpv6_stream_protosw = {
@@ -951,7 +950,6 @@ static struct inet_protosw sctpv6_stream_protosw = {
951 .protocol = IPPROTO_SCTP, 950 .protocol = IPPROTO_SCTP,
952 .prot = &sctpv6_prot, 951 .prot = &sctpv6_prot,
953 .ops = &inet6_seqpacket_ops, 952 .ops = &inet6_seqpacket_ops,
954 .no_check = 0,
955 .flags = SCTP_PROTOSW_FLAG, 953 .flags = SCTP_PROTOSW_FLAG,
956}; 954};
957 955
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 0f4d15fc2627..01ab8e0723f0 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -591,7 +591,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
591 591
592 pr_debug("***sctp_transmit_packet*** skb->len:%d\n", nskb->len); 592 pr_debug("***sctp_transmit_packet*** skb->len:%d\n", nskb->len);
593 593
594 nskb->local_df = packet->ipfragok; 594 nskb->ignore_df = packet->ipfragok;
595 tp->af_specific->sctp_xmit(nskb, tp); 595 tp->af_specific->sctp_xmit(nskb, tp);
596 596
597out: 597out:
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 0947f1e15eb8..34229ee7f379 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -78,7 +78,7 @@ static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
78 78
79 for (i = 0; sctp_snmp_list[i].name != NULL; i++) 79 for (i = 0; sctp_snmp_list[i].name != NULL; i++)
80 seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name, 80 seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
81 snmp_fold_field((void __percpu **)net->sctp.sctp_statistics, 81 snmp_fold_field(net->sctp.sctp_statistics,
82 sctp_snmp_list[i].entry)); 82 sctp_snmp_list[i].entry));
83 83
84 return 0; 84 return 0;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 44cbb54c8574..6789d785e698 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1017,7 +1017,6 @@ static struct inet_protosw sctp_seqpacket_protosw = {
1017 .protocol = IPPROTO_SCTP, 1017 .protocol = IPPROTO_SCTP,
1018 .prot = &sctp_prot, 1018 .prot = &sctp_prot,
1019 .ops = &inet_seqpacket_ops, 1019 .ops = &inet_seqpacket_ops,
1020 .no_check = 0,
1021 .flags = SCTP_PROTOSW_FLAG 1020 .flags = SCTP_PROTOSW_FLAG
1022}; 1021};
1023static struct inet_protosw sctp_stream_protosw = { 1022static struct inet_protosw sctp_stream_protosw = {
@@ -1025,7 +1024,6 @@ static struct inet_protosw sctp_stream_protosw = {
1025 .protocol = IPPROTO_SCTP, 1024 .protocol = IPPROTO_SCTP,
1026 .prot = &sctp_prot, 1025 .prot = &sctp_prot,
1027 .ops = &inet_seqpacket_ops, 1026 .ops = &inet_seqpacket_ops,
1028 .no_check = 0,
1029 .flags = SCTP_PROTOSW_FLAG 1027 .flags = SCTP_PROTOSW_FLAG
1030}; 1028};
1031 1029
@@ -1105,14 +1103,15 @@ int sctp_register_pf(struct sctp_pf *pf, sa_family_t family)
1105 1103
1106static inline int init_sctp_mibs(struct net *net) 1104static inline int init_sctp_mibs(struct net *net)
1107{ 1105{
1108 return snmp_mib_init((void __percpu **)net->sctp.sctp_statistics, 1106 net->sctp.sctp_statistics = alloc_percpu(struct sctp_mib);
1109 sizeof(struct sctp_mib), 1107 if (!net->sctp.sctp_statistics)
1110 __alignof__(struct sctp_mib)); 1108 return -ENOMEM;
1109 return 0;
1111} 1110}
1112 1111
1113static inline void cleanup_sctp_mibs(struct net *net) 1112static inline void cleanup_sctp_mibs(struct net *net)
1114{ 1113{
1115 snmp_mib_free((void __percpu **)net->sctp.sctp_statistics); 1114 free_percpu(net->sctp.sctp_statistics);
1116} 1115}
1117 1116
1118static void sctp_v4_pf_init(void) 1117static void sctp_v4_pf_init(void)
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index fee5552ddf92..ae0e616a7ca5 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1782,7 +1782,7 @@ no_hmac:
1782 else 1782 else
1783 kt = ktime_get(); 1783 kt = ktime_get();
1784 1784
1785 if (!asoc && ktime_compare(bear_cookie->expiration, kt) < 0) { 1785 if (!asoc && ktime_before(bear_cookie->expiration, kt)) {
1786 /* 1786 /*
1787 * Section 3.3.10.3 Stale Cookie Error (3) 1787 * Section 3.3.10.3 Stale Cookie Error (3)
1788 * 1788 *
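
ktime_before(a, b) is defined as ktime_compare(a, b) < 0, so this is purely a readability change:

    /* Equivalent forms; the helper states the intent directly:
     *
     *   ktime_compare(bear_cookie->expiration, kt) < 0
     *   ktime_before(bear_cookie->expiration, kt)
     *
     * both read "the cookie expired before the reference time kt",
     * i.e. the cookie is stale.
     */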
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index fee06b99a4da..429899689408 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -71,6 +71,7 @@
71#include <net/route.h> 71#include <net/route.h>
72#include <net/ipv6.h> 72#include <net/ipv6.h>
73#include <net/inet_common.h> 73#include <net/inet_common.h>
74#include <net/busy_poll.h>
74 75
75#include <linux/socket.h> /* for sa_family_t */ 76#include <linux/socket.h> /* for sa_family_t */
76#include <linux/export.h> 77#include <linux/export.h>
@@ -5945,8 +5946,9 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
5945 /* Search for an available port. */ 5946 /* Search for an available port. */
5946 int low, high, remaining, index; 5947 int low, high, remaining, index;
5947 unsigned int rover; 5948 unsigned int rover;
5949 struct net *net = sock_net(sk);
5948 5950
5949 inet_get_local_port_range(sock_net(sk), &low, &high); 5951 inet_get_local_port_range(net, &low, &high);
5950 remaining = (high - low) + 1; 5952 remaining = (high - low) + 1;
5951 rover = prandom_u32() % remaining + low; 5953 rover = prandom_u32() % remaining + low;
5952 5954
@@ -5954,7 +5956,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
5954 rover++; 5956 rover++;
5955 if ((rover < low) || (rover > high)) 5957 if ((rover < low) || (rover > high))
5956 rover = low; 5958 rover = low;
5957 if (inet_is_reserved_local_port(rover)) 5959 if (inet_is_local_reserved_port(net, rover))
5958 continue; 5960 continue;
5959 index = sctp_phashfn(sock_net(sk), rover); 5961 index = sctp_phashfn(sock_net(sk), rover);
5960 head = &sctp_port_hashtable[index]; 5962 head = &sctp_port_hashtable[index];
@@ -6557,6 +6559,10 @@ static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
6557 if (sk->sk_shutdown & RCV_SHUTDOWN) 6559 if (sk->sk_shutdown & RCV_SHUTDOWN)
6558 break; 6560 break;
6559 6561
6562 if (sk_can_busy_loop(sk) &&
6563 sk_busy_loop(sk, noblock))
6564 continue;
6565
6560 /* User doesn't want to wait. */ 6566 /* User doesn't want to wait. */
6561 error = -EAGAIN; 6567 error = -EAGAIN;
6562 if (!timeo) 6568 if (!timeo)
@@ -6940,7 +6946,8 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
6940 newsk->sk_type = sk->sk_type; 6946 newsk->sk_type = sk->sk_type;
6941 newsk->sk_bound_dev_if = sk->sk_bound_dev_if; 6947 newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
6942 newsk->sk_flags = sk->sk_flags; 6948 newsk->sk_flags = sk->sk_flags;
6943 newsk->sk_no_check = sk->sk_no_check; 6949 newsk->sk_no_check_tx = sk->sk_no_check_tx;
6950 newsk->sk_no_check_rx = sk->sk_no_check_rx;
6944 newsk->sk_reuse = sk->sk_reuse; 6951 newsk->sk_reuse = sk->sk_reuse;
6945 6952
6946 newsk->sk_shutdown = sk->sk_shutdown; 6953 newsk->sk_shutdown = sk->sk_shutdown;
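
sk_no_check was split into two directional flags, sk_no_check_tx (do not generate a checksum on transmit) and sk_no_check_rx (tolerate a missing checksum on receive), so copying socket state must now carry both, and the protosw tables above drop the obsolete .no_check field. A userspace-facing sketch, under the assumption that the SO_NO_CHECK socket option maps onto the TX flag only after the split:

    #include <sys/socket.h>

    #ifndef SO_NO_CHECK
    #define SO_NO_CHECK 11 /* from asm-generic/socket.h */
    #endif

    static int disable_tx_checksum(int fd)
    {
        int one = 1;

        /* Assumed to set only sk_no_check_tx; RX tolerance is a
         * separate flag set by the protocol, not by userspace.
         */
        return setsockopt(fd, SOL_SOCKET, SO_NO_CHECK,
                          &one, sizeof(one));
    }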
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index c82fdc1eab7c..7e5eb7554990 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -436,20 +436,21 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
436 436
437int sctp_sysctl_net_register(struct net *net) 437int sctp_sysctl_net_register(struct net *net)
438{ 438{
439 struct ctl_table *table = sctp_net_table; 439 struct ctl_table *table;
440 440 int i;
441 if (!net_eq(net, &init_net)) {
442 int i;
443 441
444 table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL); 442 table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
445 if (!table) 443 if (!table)
446 return -ENOMEM; 444 return -ENOMEM;
447 445
448 for (i = 0; table[i].data; i++) 446 for (i = 0; table[i].data; i++)
449 table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp; 447 table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
450 }
451 448
452 net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table); 449 net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table);
450 if (net->sctp.sysctl_header == NULL) {
451 kfree(table);
452 return -ENOMEM;
453 }
453 return 0; 454 return 0;
454} 455}
455 456
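
Two fixes land in this hunk: the sysctl table is now kmemdup()'ed for every network namespace, including init_net (which previously registered the shared static table directly), and a failed register_net_sysctl() no longer leaks the duplicated table. The result is the common per-netns sysctl idiom, sketched here with hypothetical "foo" names:

    tbl = kmemdup(foo_table, sizeof(foo_table), GFP_KERNEL);
    if (!tbl)
        return -ENOMEM;
    /* Rebase each ->data pointer from init_net into this netns. */
    for (i = 0; tbl[i].data; i++)
        tbl[i].data += (char *)&net->foo - (char *)&init_net.foo;
    net->foo.hdr = register_net_sysctl(net, "net/foo", tbl);
    if (!net->foo.hdr) {
        kfree(tbl); /* the fix: don't leak the copy on failure */
        return -ENOMEM;
    }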
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 1d348d15b33d..7dd672fa651f 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -72,7 +72,7 @@ static struct sctp_transport *sctp_transport_init(struct net *net,
72 */ 72 */
73 peer->rto = msecs_to_jiffies(net->sctp.rto_initial); 73 peer->rto = msecs_to_jiffies(net->sctp.rto_initial);
74 74
75 peer->last_time_heard = jiffies; 75 peer->last_time_heard = ktime_get();
76 peer->last_time_ecne_reduced = jiffies; 76 peer->last_time_ecne_reduced = jiffies;
77 77
78 peer->param_flags = SPP_HB_DISABLE | 78 peer->param_flags = SPP_HB_DISABLE |
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 7144eb6a1b95..d49dc2ed30ad 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -38,6 +38,7 @@
38#include <linux/types.h> 38#include <linux/types.h>
39#include <linux/skbuff.h> 39#include <linux/skbuff.h>
40#include <net/sock.h> 40#include <net/sock.h>
41#include <net/busy_poll.h>
41#include <net/sctp/structs.h> 42#include <net/sctp/structs.h>
42#include <net/sctp/sctp.h> 43#include <net/sctp/sctp.h>
43#include <net/sctp/sm.h> 44#include <net/sctp/sm.h>
@@ -204,6 +205,9 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
204 if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN)) 205 if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
205 goto out_free; 206 goto out_free;
206 207
208 if (!sctp_ulpevent_is_notification(event))
209 sk_mark_napi_id(sk, skb);
210
207 /* Check if the user wishes to receive this event. */ 211 /* Check if the user wishes to receive this event. */
208 if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe)) 212 if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
209 goto out_free; 213 goto out_free;
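
The two busy-poll hunks pair up: on the softirq receive path sk_mark_napi_id() records which NAPI context (device queue) last delivered data for this socket, and on the blocking-read path sk_busy_loop() spins briefly in that context before the task sleeps, trading CPU time for wakeup latency. Notifications are excluded from the marking because they are generated locally, not by a device. The pairing in sketch form:

    /* softirq RX side (ulpqueue.c): */
    if (!sctp_ulpevent_is_notification(event))
        sk_mark_napi_id(sk, skb);

    /* syscall side (socket.c): poll the recorded NAPI context,
     * then loop back and re-check the receive queue before sleeping.
     */
    if (sk_can_busy_loop(sk) && sk_busy_loop(sk, noblock))
        continue;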
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index 0a648c502fc3..2df87f78e518 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -173,7 +173,8 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
173 return -1; 173 return -1;
174 if (csum_fold(desc.csum)) 174 if (csum_fold(desc.csum))
175 return -1; 175 return -1;
176 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) 176 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
177 !skb->csum_complete_sw)
177 netdev_rx_csum_fault(skb->dev); 178 netdev_rx_csum_fault(skb->dev);
178 return 0; 179 return 0;
179no_checksum: 180no_checksum:
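
__skb_checksum_complete() sets skb->csum_complete_sw when the CHECKSUM_COMPLETE value was computed in software rather than supplied by the NIC. The extra condition keeps netdev_rx_csum_fault(), which logs a hardware-checksum failure against the device, from firing when no hardware checksum was ever involved:

    /* Only blame the NIC if the checksum actually came from it. */
    if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
        !skb->csum_complete_sw)
        netdev_rx_csum_fault(skb->dev);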
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 25a3dcf15cae..1dec6043e4de 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -866,8 +866,6 @@ static void xs_reset_transport(struct sock_xprt *transport)
866 xs_restore_old_callbacks(transport, sk); 866 xs_restore_old_callbacks(transport, sk);
867 write_unlock_bh(&sk->sk_callback_lock); 867 write_unlock_bh(&sk->sk_callback_lock);
868 868
869 sk->sk_no_check = 0;
870
871 trace_rpc_socket_close(&transport->xprt, sock); 869 trace_rpc_socket_close(&transport->xprt, sock);
872 sock_release(sock); 870 sock_release(sock);
873} 871}
@@ -2046,7 +2044,6 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2046 sk->sk_user_data = xprt; 2044 sk->sk_user_data = xprt;
2047 sk->sk_data_ready = xs_udp_data_ready; 2045 sk->sk_data_ready = xs_udp_data_ready;
2048 sk->sk_write_space = xs_udp_write_space; 2046 sk->sk_write_space = xs_udp_write_space;
2049 sk->sk_no_check = UDP_CSUM_NORCV;
2050 sk->sk_allocation = GFP_ATOMIC; 2047 sk->sk_allocation = GFP_ATOMIC;
2051 2048
2052 xprt_set_connected(xprt); 2049 xprt_set_connected(xprt);
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index b282f7130d2b..a080c66d819a 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -5,7 +5,7 @@
5obj-$(CONFIG_TIPC) := tipc.o 5obj-$(CONFIG_TIPC) := tipc.o
6 6
7tipc-y += addr.o bcast.o bearer.o config.o \ 7tipc-y += addr.o bcast.o bearer.o config.o \
8 core.o handler.o link.o discover.o msg.o \ 8 core.o link.o discover.o msg.o \
9 name_distr.o subscr.o name_table.o net.o \ 9 name_distr.o subscr.o name_table.o net.o \
10 netlink.o node.o node_subscr.o port.o ref.o \ 10 netlink.o node.o node_subscr.o port.o ref.o \
11 socket.o log.o eth_media.o server.o 11 socket.o log.o eth_media.o server.o
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 95ab5ef92920..26631679a1fa 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -71,7 +71,7 @@ struct tipc_bcbearer_pair {
71 * Note: The fields labelled "temporary" are incorporated into the bearer 71 * Note: The fields labelled "temporary" are incorporated into the bearer
72 * to avoid consuming potentially limited stack space through the use of 72 * to avoid consuming potentially limited stack space through the use of
73 * large local variables within multicast routines. Concurrent access is 73 * large local variables within multicast routines. Concurrent access is
74 * prevented through use of the spinlock "bc_lock". 74 * prevented through use of the spinlock "bclink_lock".
75 */ 75 */
76struct tipc_bcbearer { 76struct tipc_bcbearer {
77 struct tipc_bearer bearer; 77 struct tipc_bearer bearer;
@@ -84,34 +84,64 @@ struct tipc_bcbearer {
84 84
85/** 85/**
86 * struct tipc_bclink - link used for broadcast messages 86 * struct tipc_bclink - link used for broadcast messages
87 * @lock: spinlock governing access to structure
87 * @link: (non-standard) broadcast link structure 88 * @link: (non-standard) broadcast link structure
88 * @node: (non-standard) node structure representing b'cast link's peer node 89 * @node: (non-standard) node structure representing b'cast link's peer node
90 * @flags: represent bclink states
89 * @bcast_nodes: map of broadcast-capable nodes 91 * @bcast_nodes: map of broadcast-capable nodes
90 * @retransmit_to: node that most recently requested a retransmit 92 * @retransmit_to: node that most recently requested a retransmit
91 * 93 *
92 * Handles sequence numbering, fragmentation, bundling, etc. 94 * Handles sequence numbering, fragmentation, bundling, etc.
93 */ 95 */
94struct tipc_bclink { 96struct tipc_bclink {
97 spinlock_t lock;
95 struct tipc_link link; 98 struct tipc_link link;
96 struct tipc_node node; 99 struct tipc_node node;
100 unsigned int flags;
97 struct tipc_node_map bcast_nodes; 101 struct tipc_node_map bcast_nodes;
98 struct tipc_node *retransmit_to; 102 struct tipc_node *retransmit_to;
99}; 103};
100 104
101static struct tipc_bcbearer bcast_bearer; 105static struct tipc_bcbearer *bcbearer;
102static struct tipc_bclink bcast_link; 106static struct tipc_bclink *bclink;
103 107static struct tipc_link *bcl;
104static struct tipc_bcbearer *bcbearer = &bcast_bearer;
105static struct tipc_bclink *bclink = &bcast_link;
106static struct tipc_link *bcl = &bcast_link.link;
107
108static DEFINE_SPINLOCK(bc_lock);
109 108
110const char tipc_bclink_name[] = "broadcast-link"; 109const char tipc_bclink_name[] = "broadcast-link";
111 110
112static void tipc_nmap_diff(struct tipc_node_map *nm_a, 111static void tipc_nmap_diff(struct tipc_node_map *nm_a,
113 struct tipc_node_map *nm_b, 112 struct tipc_node_map *nm_b,
114 struct tipc_node_map *nm_diff); 113 struct tipc_node_map *nm_diff);
114static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
115static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
116
117static void tipc_bclink_lock(void)
118{
119 spin_lock_bh(&bclink->lock);
120}
121
122static void tipc_bclink_unlock(void)
123{
124 struct tipc_node *node = NULL;
125
126 if (likely(!bclink->flags)) {
127 spin_unlock_bh(&bclink->lock);
128 return;
129 }
130
131 if (bclink->flags & TIPC_BCLINK_RESET) {
132 bclink->flags &= ~TIPC_BCLINK_RESET;
133 node = tipc_bclink_retransmit_to();
134 }
135 spin_unlock_bh(&bclink->lock);
136
137 if (node)
138 tipc_link_reset_all(node);
139}
140
141void tipc_bclink_set_flags(unsigned int flags)
142{
143 bclink->flags |= flags;
144}
115 145
116static u32 bcbuf_acks(struct sk_buff *buf) 146static u32 bcbuf_acks(struct sk_buff *buf)
117{ 147{
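
tipc_bclink_unlock() is more than a spin_unlock wrapper: code running under the lock can request a broadcast-link reset by setting TIPC_BCLINK_RESET in bclink->flags, and the actual tipc_link_reset_all() call, which takes node locks of its own, is made only after the spinlock is dropped. That keeps the reset from ever nesting inside the bclink spinlock. The deferred-work-on-unlock pattern in the abstract (hypothetical names):

    spin_lock_bh(&obj->lock);
    /* ... critical section may set bits in obj->flags ... */
    pending = obj->flags; /* snapshot under the lock */
    obj->flags = 0;
    spin_unlock_bh(&obj->lock);

    if (pending & WORK_RESET)
        do_reset(obj); /* safe: no spinlock held here */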
@@ -130,16 +160,16 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
130 160
131void tipc_bclink_add_node(u32 addr) 161void tipc_bclink_add_node(u32 addr)
132{ 162{
133 spin_lock_bh(&bc_lock); 163 tipc_bclink_lock();
134 tipc_nmap_add(&bclink->bcast_nodes, addr); 164 tipc_nmap_add(&bclink->bcast_nodes, addr);
135 spin_unlock_bh(&bc_lock); 165 tipc_bclink_unlock();
136} 166}
137 167
138void tipc_bclink_remove_node(u32 addr) 168void tipc_bclink_remove_node(u32 addr)
139{ 169{
140 spin_lock_bh(&bc_lock); 170 tipc_bclink_lock();
141 tipc_nmap_remove(&bclink->bcast_nodes, addr); 171 tipc_nmap_remove(&bclink->bcast_nodes, addr);
142 spin_unlock_bh(&bc_lock); 172 tipc_bclink_unlock();
143} 173}
144 174
145static void bclink_set_last_sent(void) 175static void bclink_set_last_sent(void)
@@ -165,7 +195,7 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
165/** 195/**
166 * tipc_bclink_retransmit_to - get most recent node to request retransmission 196 * tipc_bclink_retransmit_to - get most recent node to request retransmission
167 * 197 *
168 * Called with bc_lock locked 198 * Called with bclink_lock locked
169 */ 199 */
170struct tipc_node *tipc_bclink_retransmit_to(void) 200struct tipc_node *tipc_bclink_retransmit_to(void)
171{ 201{
@@ -177,7 +207,7 @@ struct tipc_node *tipc_bclink_retransmit_to(void)
177 * @after: sequence number of last packet to *not* retransmit 207 * @after: sequence number of last packet to *not* retransmit
178 * @to: sequence number of last packet to retransmit 208 * @to: sequence number of last packet to retransmit
179 * 209 *
180 * Called with bc_lock locked 210 * Called with bclink_lock locked
181 */ 211 */
182static void bclink_retransmit_pkt(u32 after, u32 to) 212static void bclink_retransmit_pkt(u32 after, u32 to)
183{ 213{
@@ -194,7 +224,7 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
194 * @n_ptr: node that sent acknowledgement info 224 * @n_ptr: node that sent acknowledgement info
195 * @acked: broadcast sequence # that has been acknowledged 225 * @acked: broadcast sequence # that has been acknowledged
196 * 226 *
197 * Node is locked, bc_lock unlocked. 227 * Node is locked, bclink_lock unlocked.
198 */ 228 */
199void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) 229void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
200{ 230{
@@ -202,8 +232,7 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
202 struct sk_buff *next; 232 struct sk_buff *next;
203 unsigned int released = 0; 233 unsigned int released = 0;
204 234
205 spin_lock_bh(&bc_lock); 235 tipc_bclink_lock();
206
207 /* Bail out if tx queue is empty (no clean up is required) */ 236 /* Bail out if tx queue is empty (no clean up is required) */
208 crs = bcl->first_out; 237 crs = bcl->first_out;
209 if (!crs) 238 if (!crs)
@@ -267,13 +296,13 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
267 if (unlikely(released && !list_empty(&bcl->waiting_ports))) 296 if (unlikely(released && !list_empty(&bcl->waiting_ports)))
268 tipc_link_wakeup_ports(bcl, 0); 297 tipc_link_wakeup_ports(bcl, 0);
269exit: 298exit:
270 spin_unlock_bh(&bc_lock); 299 tipc_bclink_unlock();
271} 300}
272 301
273/** 302/**
274 * tipc_bclink_update_link_state - update broadcast link state 303 * tipc_bclink_update_link_state - update broadcast link state
275 * 304 *
276 * tipc_net_lock and node lock set 305 * RCU and node lock set
277 */ 306 */
278void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent) 307void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
279{ 308{
@@ -320,10 +349,10 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
320 ? buf_seqno(n_ptr->bclink.deferred_head) - 1 349 ? buf_seqno(n_ptr->bclink.deferred_head) - 1
321 : n_ptr->bclink.last_sent); 350 : n_ptr->bclink.last_sent);
322 351
323 spin_lock_bh(&bc_lock); 352 tipc_bclink_lock();
324 tipc_bearer_send(&bcbearer->bearer, buf, NULL); 353 tipc_bearer_send(MAX_BEARERS, buf, NULL);
325 bcl->stats.sent_nacks++; 354 bcl->stats.sent_nacks++;
326 spin_unlock_bh(&bc_lock); 355 tipc_bclink_unlock();
327 kfree_skb(buf); 356 kfree_skb(buf);
328 357
329 n_ptr->bclink.oos_state++; 358 n_ptr->bclink.oos_state++;
@@ -335,8 +364,6 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
335 * 364 *
336 * Delay any upcoming NACK by this node if another node has already 365 * Delay any upcoming NACK by this node if another node has already
337 * requested the first message this node is going to ask for. 366 * requested the first message this node is going to ask for.
338 *
339 * Only tipc_net_lock set.
340 */ 367 */
341static void bclink_peek_nack(struct tipc_msg *msg) 368static void bclink_peek_nack(struct tipc_msg *msg)
342{ 369{
@@ -362,7 +389,7 @@ int tipc_bclink_xmit(struct sk_buff *buf)
362{ 389{
363 int res; 390 int res;
364 391
365 spin_lock_bh(&bc_lock); 392 tipc_bclink_lock();
366 393
367 if (!bclink->bcast_nodes.count) { 394 if (!bclink->bcast_nodes.count) {
368 res = msg_data_sz(buf_msg(buf)); 395 res = msg_data_sz(buf_msg(buf));
@@ -377,14 +404,14 @@ int tipc_bclink_xmit(struct sk_buff *buf)
377 bcl->stats.accu_queue_sz += bcl->out_queue_size; 404 bcl->stats.accu_queue_sz += bcl->out_queue_size;
378 } 405 }
379exit: 406exit:
380 spin_unlock_bh(&bc_lock); 407 tipc_bclink_unlock();
381 return res; 408 return res;
382} 409}
383 410
384/** 411/**
385 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet 412 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
386 * 413 *
387 * Called with both sending node's lock and bc_lock taken. 414 * Called with both sending node's lock and bclink_lock taken.
388 */ 415 */
389static void bclink_accept_pkt(struct tipc_node *node, u32 seqno) 416static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
390{ 417{
@@ -408,7 +435,7 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
408/** 435/**
409 * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards 436 * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
410 * 437 *
411 * tipc_net_lock is read_locked, no other locks set 438 * RCU is locked, no other locks set
412 */ 439 */
413void tipc_bclink_rcv(struct sk_buff *buf) 440void tipc_bclink_rcv(struct sk_buff *buf)
414{ 441{
@@ -439,12 +466,12 @@ void tipc_bclink_rcv(struct sk_buff *buf)
439 if (msg_destnode(msg) == tipc_own_addr) { 466 if (msg_destnode(msg) == tipc_own_addr) {
440 tipc_bclink_acknowledge(node, msg_bcast_ack(msg)); 467 tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
441 tipc_node_unlock(node); 468 tipc_node_unlock(node);
442 spin_lock_bh(&bc_lock); 469 tipc_bclink_lock();
443 bcl->stats.recv_nacks++; 470 bcl->stats.recv_nacks++;
444 bclink->retransmit_to = node; 471 bclink->retransmit_to = node;
445 bclink_retransmit_pkt(msg_bcgap_after(msg), 472 bclink_retransmit_pkt(msg_bcgap_after(msg),
446 msg_bcgap_to(msg)); 473 msg_bcgap_to(msg));
447 spin_unlock_bh(&bc_lock); 474 tipc_bclink_unlock();
448 } else { 475 } else {
449 tipc_node_unlock(node); 476 tipc_node_unlock(node);
450 bclink_peek_nack(msg); 477 bclink_peek_nack(msg);
@@ -462,51 +489,47 @@ receive:
462 /* Deliver message to destination */ 489 /* Deliver message to destination */
463 490
464 if (likely(msg_isdata(msg))) { 491 if (likely(msg_isdata(msg))) {
465 spin_lock_bh(&bc_lock); 492 tipc_bclink_lock();
466 bclink_accept_pkt(node, seqno); 493 bclink_accept_pkt(node, seqno);
467 spin_unlock_bh(&bc_lock); 494 tipc_bclink_unlock();
468 tipc_node_unlock(node); 495 tipc_node_unlock(node);
469 if (likely(msg_mcast(msg))) 496 if (likely(msg_mcast(msg)))
470 tipc_port_mcast_rcv(buf, NULL); 497 tipc_port_mcast_rcv(buf, NULL);
471 else 498 else
472 kfree_skb(buf); 499 kfree_skb(buf);
473 } else if (msg_user(msg) == MSG_BUNDLER) { 500 } else if (msg_user(msg) == MSG_BUNDLER) {
474 spin_lock_bh(&bc_lock); 501 tipc_bclink_lock();
475 bclink_accept_pkt(node, seqno); 502 bclink_accept_pkt(node, seqno);
476 bcl->stats.recv_bundles++; 503 bcl->stats.recv_bundles++;
477 bcl->stats.recv_bundled += msg_msgcnt(msg); 504 bcl->stats.recv_bundled += msg_msgcnt(msg);
478 spin_unlock_bh(&bc_lock); 505 tipc_bclink_unlock();
479 tipc_node_unlock(node); 506 tipc_node_unlock(node);
480 tipc_link_bundle_rcv(buf); 507 tipc_link_bundle_rcv(buf);
481 } else if (msg_user(msg) == MSG_FRAGMENTER) { 508 } else if (msg_user(msg) == MSG_FRAGMENTER) {
482 int ret; 509 tipc_buf_append(&node->bclink.reasm_buf, &buf);
483 ret = tipc_link_frag_rcv(&node->bclink.reasm_head, 510 if (unlikely(!buf && !node->bclink.reasm_buf))
484 &node->bclink.reasm_tail,
485 &buf);
486 if (ret == LINK_REASM_ERROR)
487 goto unlock; 511 goto unlock;
488 spin_lock_bh(&bc_lock); 512 tipc_bclink_lock();
489 bclink_accept_pkt(node, seqno); 513 bclink_accept_pkt(node, seqno);
490 bcl->stats.recv_fragments++; 514 bcl->stats.recv_fragments++;
491 if (ret == LINK_REASM_COMPLETE) { 515 if (buf) {
492 bcl->stats.recv_fragmented++; 516 bcl->stats.recv_fragmented++;
493 /* Point msg to inner header */
494 msg = buf_msg(buf); 517 msg = buf_msg(buf);
495 spin_unlock_bh(&bc_lock); 518 tipc_bclink_unlock();
496 goto receive; 519 goto receive;
497 } 520 }
498 spin_unlock_bh(&bc_lock); 521 tipc_bclink_unlock();
499 tipc_node_unlock(node); 522 tipc_node_unlock(node);
500 } else if (msg_user(msg) == NAME_DISTRIBUTOR) { 523 } else if (msg_user(msg) == NAME_DISTRIBUTOR) {
501 spin_lock_bh(&bc_lock); 524 tipc_bclink_lock();
502 bclink_accept_pkt(node, seqno); 525 bclink_accept_pkt(node, seqno);
503 spin_unlock_bh(&bc_lock); 526 tipc_bclink_unlock();
504 tipc_node_unlock(node); 527 tipc_node_unlock(node);
505 tipc_named_rcv(buf); 528 tipc_named_rcv(buf);
506 } else { 529 } else {
507 spin_lock_bh(&bc_lock); 530 tipc_bclink_lock();
508 bclink_accept_pkt(node, seqno); 531 bclink_accept_pkt(node, seqno);
509 spin_unlock_bh(&bc_lock); 532 tipc_bclink_unlock();
510 tipc_node_unlock(node); 533 tipc_node_unlock(node);
511 kfree_skb(buf); 534 kfree_skb(buf);
512 } 535 }
@@ -552,14 +575,14 @@ receive:
552 } else 575 } else
553 deferred = 0; 576 deferred = 0;
554 577
555 spin_lock_bh(&bc_lock); 578 tipc_bclink_lock();
556 579
557 if (deferred) 580 if (deferred)
558 bcl->stats.deferred_recv++; 581 bcl->stats.deferred_recv++;
559 else 582 else
560 bcl->stats.duplicates++; 583 bcl->stats.duplicates++;
561 584
562 spin_unlock_bh(&bc_lock); 585 tipc_bclink_unlock();
563 586
564unlock: 587unlock:
565 tipc_node_unlock(node); 588 tipc_node_unlock(node);
@@ -627,13 +650,13 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
627 650
628 if (bp_index == 0) { 651 if (bp_index == 0) {
629 /* Use original buffer for first bearer */ 652 /* Use original buffer for first bearer */
630 tipc_bearer_send(b, buf, &b->bcast_addr); 653 tipc_bearer_send(b->identity, buf, &b->bcast_addr);
631 } else { 654 } else {
632 /* Avoid concurrent buffer access */ 655 /* Avoid concurrent buffer access */
633 tbuf = pskb_copy(buf, GFP_ATOMIC); 656 tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
634 if (!tbuf) 657 if (!tbuf)
635 break; 658 break;
636 tipc_bearer_send(b, tbuf, &b->bcast_addr); 659 tipc_bearer_send(b->identity, tbuf, &b->bcast_addr);
637 kfree_skb(tbuf); /* Bearer keeps a clone */ 660 kfree_skb(tbuf); /* Bearer keeps a clone */
638 } 661 }
639 662
@@ -655,20 +678,27 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
655/** 678/**
656 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer 679 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
657 */ 680 */
658void tipc_bcbearer_sort(void) 681void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
659{ 682{
660 struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp; 683 struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
661 struct tipc_bcbearer_pair *bp_curr; 684 struct tipc_bcbearer_pair *bp_curr;
685 struct tipc_bearer *b;
662 int b_index; 686 int b_index;
663 int pri; 687 int pri;
664 688
665 spin_lock_bh(&bc_lock); 689 tipc_bclink_lock();
690
691 if (action)
692 tipc_nmap_add(nm_ptr, node);
693 else
694 tipc_nmap_remove(nm_ptr, node);
666 695
667 /* Group bearers by priority (can assume max of two per priority) */ 696 /* Group bearers by priority (can assume max of two per priority) */
668 memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp)); 697 memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
669 698
699 rcu_read_lock();
670 for (b_index = 0; b_index < MAX_BEARERS; b_index++) { 700 for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
671 struct tipc_bearer *b = bearer_list[b_index]; 701 b = rcu_dereference_rtnl(bearer_list[b_index]);
672 if (!b || !b->nodes.count) 702 if (!b || !b->nodes.count)
673 continue; 703 continue;
674 704
@@ -677,6 +707,7 @@ void tipc_bcbearer_sort(void)
677 else 707 else
678 bp_temp[b->priority].secondary = b; 708 bp_temp[b->priority].secondary = b;
679 } 709 }
710 rcu_read_unlock();
680 711
681 /* Create array of bearer pairs for broadcasting */ 712 /* Create array of bearer pairs for broadcasting */
682 bp_curr = bcbearer->bpairs; 713 bp_curr = bcbearer->bpairs;
@@ -702,7 +733,7 @@ void tipc_bcbearer_sort(void)
702 bp_curr++; 733 bp_curr++;
703 } 734 }
704 735
705 spin_unlock_bh(&bc_lock); 736 tipc_bclink_unlock();
706} 737}
707 738
708 739
@@ -714,7 +745,7 @@ int tipc_bclink_stats(char *buf, const u32 buf_size)
714 if (!bcl) 745 if (!bcl)
715 return 0; 746 return 0;
716 747
717 spin_lock_bh(&bc_lock); 748 tipc_bclink_lock();
718 749
719 s = &bcl->stats; 750 s = &bcl->stats;
720 751
@@ -743,7 +774,7 @@ int tipc_bclink_stats(char *buf, const u32 buf_size)
743 s->queue_sz_counts ? 774 s->queue_sz_counts ?
744 (s->accu_queue_sz / s->queue_sz_counts) : 0); 775 (s->accu_queue_sz / s->queue_sz_counts) : 0);
745 776
746 spin_unlock_bh(&bc_lock); 777 tipc_bclink_unlock();
747 return ret; 778 return ret;
748} 779}
749 780
@@ -752,9 +783,9 @@ int tipc_bclink_reset_stats(void)
752 if (!bcl) 783 if (!bcl)
753 return -ENOPROTOOPT; 784 return -ENOPROTOOPT;
754 785
755 spin_lock_bh(&bc_lock); 786 tipc_bclink_lock();
756 memset(&bcl->stats, 0, sizeof(bcl->stats)); 787 memset(&bcl->stats, 0, sizeof(bcl->stats));
757 spin_unlock_bh(&bc_lock); 788 tipc_bclink_unlock();
758 return 0; 789 return 0;
759} 790}
760 791
@@ -765,46 +796,59 @@ int tipc_bclink_set_queue_limits(u32 limit)
765 if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN)) 796 if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
766 return -EINVAL; 797 return -EINVAL;
767 798
768 spin_lock_bh(&bc_lock); 799 tipc_bclink_lock();
769 tipc_link_set_queue_limits(bcl, limit); 800 tipc_link_set_queue_limits(bcl, limit);
770 spin_unlock_bh(&bc_lock); 801 tipc_bclink_unlock();
771 return 0; 802 return 0;
772} 803}
773 804
774void tipc_bclink_init(void) 805int tipc_bclink_init(void)
775{ 806{
807 bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
808 if (!bcbearer)
809 return -ENOMEM;
810
811 bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
812 if (!bclink) {
813 kfree(bcbearer);
814 return -ENOMEM;
815 }
816
817 bcl = &bclink->link;
776 bcbearer->bearer.media = &bcbearer->media; 818 bcbearer->bearer.media = &bcbearer->media;
777 bcbearer->media.send_msg = tipc_bcbearer_send; 819 bcbearer->media.send_msg = tipc_bcbearer_send;
778 sprintf(bcbearer->media.name, "tipc-broadcast"); 820 sprintf(bcbearer->media.name, "tipc-broadcast");
779 821
822 spin_lock_init(&bclink->lock);
780 INIT_LIST_HEAD(&bcl->waiting_ports); 823 INIT_LIST_HEAD(&bcl->waiting_ports);
781 bcl->next_out_no = 1; 824 bcl->next_out_no = 1;
782 spin_lock_init(&bclink->node.lock); 825 spin_lock_init(&bclink->node.lock);
783 bcl->owner = &bclink->node; 826 bcl->owner = &bclink->node;
784 bcl->max_pkt = MAX_PKT_DEFAULT_MCAST; 827 bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
785 tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT); 828 tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
786 bcl->b_ptr = &bcbearer->bearer; 829 bcl->bearer_id = MAX_BEARERS;
787 bearer_list[BCBEARER] = &bcbearer->bearer; 830 rcu_assign_pointer(bearer_list[MAX_BEARERS], &bcbearer->bearer);
788 bcl->state = WORKING_WORKING; 831 bcl->state = WORKING_WORKING;
789 strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME); 832 strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
833 return 0;
790} 834}
791 835
792void tipc_bclink_stop(void) 836void tipc_bclink_stop(void)
793{ 837{
794 spin_lock_bh(&bc_lock); 838 tipc_bclink_lock();
795 tipc_link_purge_queues(bcl); 839 tipc_link_purge_queues(bcl);
796 spin_unlock_bh(&bc_lock); 840 tipc_bclink_unlock();
797 841
798 bearer_list[BCBEARER] = NULL; 842 RCU_INIT_POINTER(bearer_list[BCBEARER], NULL);
799 memset(bclink, 0, sizeof(*bclink)); 843 synchronize_net();
800 memset(bcbearer, 0, sizeof(*bcbearer)); 844 kfree(bcbearer);
845 kfree(bclink);
801} 846}
802 847
803
804/** 848/**
805 * tipc_nmap_add - add a node to a node map 849 * tipc_nmap_add - add a node to a node map
806 */ 850 */
807void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node) 851static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
808{ 852{
809 int n = tipc_node(node); 853 int n = tipc_node(node);
810 int w = n / WSIZE; 854 int w = n / WSIZE;
@@ -819,7 +863,7 @@ void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
819/** 863/**
820 * tipc_nmap_remove - remove a node from a node map 864 * tipc_nmap_remove - remove a node from a node map
821 */ 865 */
822void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node) 866static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
823{ 867{
824 int n = tipc_node(node); 868 int n = tipc_node(node);
825 int w = n / WSIZE; 869 int w = n / WSIZE;
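
tipc_nmap_add() and tipc_nmap_remove() become static above because tipc_bcbearer_sort() now applies the node-map update itself while holding the broadcast link lock. The map they manipulate is a plain bitmap: one bit per node, packed into WSIZE-bit words, with a running population count. A minimal user-space sketch of that bookkeeping (treating the node part of a TIPC 2.0 address as its low 12 bits is our assumption for illustration; the rest mirrors the logic visible in the hunk above, not code taken from the patch):

#include <stdio.h>

#define MAX_NODES 4096
#define WSIZE 32

struct tipc_node_map {
	unsigned int count;
	unsigned int map[MAX_NODES / WSIZE];
};

/* Assumption: the node part of a TIPC 2.0 address is its low 12 bits */
static unsigned int tipc_node(unsigned int addr)
{
	return addr & 0xfff;
}

static void nmap_add(struct tipc_node_map *nm, unsigned int node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;			/* which word of the bitmap */
	unsigned int mask = 1u << (n % WSIZE);	/* which bit inside that word */

	if (!(nm->map[w] & mask)) {
		nm->map[w] |= mask;
		nm->count++;
	}
}

static void nmap_remove(struct tipc_node_map *nm, unsigned int node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	unsigned int mask = 1u << (n % WSIZE);

	if (nm->map[w] & mask) {
		nm->map[w] &= ~mask;
		nm->count--;
	}
}

int main(void)
{
	struct tipc_node_map nm = { 0 };

	nmap_add(&nm, 0x1001001);	/* <1.1.1> */
	nmap_add(&nm, 0x1001002);	/* <1.1.2> */
	nmap_remove(&nm, 0x1001001);
	printf("nodes mapped: %u\n", nm.count);	/* prints 1 */
	return 0;
}
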
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index a80ef54b818e..00330c45df3e 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -39,6 +39,7 @@
39 39
40#define MAX_NODES 4096 40#define MAX_NODES 4096
41#define WSIZE 32 41#define WSIZE 32
42#define TIPC_BCLINK_RESET 1
42 43
43/** 44/**
44 * struct tipc_node_map - set of node identifiers 45 * struct tipc_node_map - set of node identifiers
@@ -69,9 +70,6 @@ struct tipc_node;
69 70
70extern const char tipc_bclink_name[]; 71extern const char tipc_bclink_name[];
71 72
72void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
73void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
74
75/** 73/**
76 * tipc_nmap_equal - test for equality of node maps 74 * tipc_nmap_equal - test for equality of node maps
77 */ 75 */
@@ -84,8 +82,9 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a,
84void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port); 82void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port);
85void tipc_port_list_free(struct tipc_port_list *pl_ptr); 83void tipc_port_list_free(struct tipc_port_list *pl_ptr);
86 84
87void tipc_bclink_init(void); 85int tipc_bclink_init(void);
88void tipc_bclink_stop(void); 86void tipc_bclink_stop(void);
87void tipc_bclink_set_flags(unsigned int flags);
89void tipc_bclink_add_node(u32 addr); 88void tipc_bclink_add_node(u32 addr);
90void tipc_bclink_remove_node(u32 addr); 89void tipc_bclink_remove_node(u32 addr);
91struct tipc_node *tipc_bclink_retransmit_to(void); 90struct tipc_node *tipc_bclink_retransmit_to(void);
@@ -98,6 +97,6 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent);
98int tipc_bclink_stats(char *stats_buf, const u32 buf_size); 97int tipc_bclink_stats(char *stats_buf, const u32 buf_size);
99int tipc_bclink_reset_stats(void); 98int tipc_bclink_reset_stats(void);
100int tipc_bclink_set_queue_limits(u32 limit); 99int tipc_bclink_set_queue_limits(u32 limit);
101void tipc_bcbearer_sort(void); 100void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action);
102 101
103#endif 102#endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 3fef7eb776dc..264474394f9f 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -49,7 +49,7 @@ static struct tipc_media * const media_info_array[] = {
49 NULL 49 NULL
50}; 50};
51 51
52struct tipc_bearer *bearer_list[MAX_BEARERS + 1]; 52struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1];
53 53
54static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down); 54static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down);
55 55
@@ -178,7 +178,7 @@ struct tipc_bearer *tipc_bearer_find(const char *name)
178 u32 i; 178 u32 i;
179 179
180 for (i = 0; i < MAX_BEARERS; i++) { 180 for (i = 0; i < MAX_BEARERS; i++) {
181 b_ptr = bearer_list[i]; 181 b_ptr = rtnl_dereference(bearer_list[i]);
182 if (b_ptr && (!strcmp(b_ptr->name, name))) 182 if (b_ptr && (!strcmp(b_ptr->name, name)))
183 return b_ptr; 183 return b_ptr;
184 } 184 }
@@ -198,10 +198,9 @@ struct sk_buff *tipc_bearer_get_names(void)
198 if (!buf) 198 if (!buf)
199 return NULL; 199 return NULL;
200 200
201 read_lock_bh(&tipc_net_lock);
202 for (i = 0; media_info_array[i] != NULL; i++) { 201 for (i = 0; media_info_array[i] != NULL; i++) {
203 for (j = 0; j < MAX_BEARERS; j++) { 202 for (j = 0; j < MAX_BEARERS; j++) {
204 b = bearer_list[j]; 203 b = rtnl_dereference(bearer_list[j]);
205 if (!b) 204 if (!b)
206 continue; 205 continue;
207 if (b->media == media_info_array[i]) { 206 if (b->media == media_info_array[i]) {
@@ -211,22 +210,33 @@ struct sk_buff *tipc_bearer_get_names(void)
211 } 210 }
212 } 211 }
213 } 212 }
214 read_unlock_bh(&tipc_net_lock);
215 return buf; 213 return buf;
216} 214}
217 215
218void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest) 216void tipc_bearer_add_dest(u32 bearer_id, u32 dest)
219{ 217{
220 tipc_nmap_add(&b_ptr->nodes, dest); 218 struct tipc_bearer *b_ptr;
221 tipc_bcbearer_sort(); 219
222 tipc_disc_add_dest(b_ptr->link_req); 220 rcu_read_lock();
221 b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
222 if (b_ptr) {
223 tipc_bcbearer_sort(&b_ptr->nodes, dest, true);
224 tipc_disc_add_dest(b_ptr->link_req);
225 }
226 rcu_read_unlock();
223} 227}
224 228
225void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest) 229void tipc_bearer_remove_dest(u32 bearer_id, u32 dest)
226{ 230{
227 tipc_nmap_remove(&b_ptr->nodes, dest); 231 struct tipc_bearer *b_ptr;
228 tipc_bcbearer_sort(); 232
229 tipc_disc_remove_dest(b_ptr->link_req); 233 rcu_read_lock();
234 b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
235 if (b_ptr) {
236 tipc_bcbearer_sort(&b_ptr->nodes, dest, false);
237 tipc_disc_remove_dest(b_ptr->link_req);
238 }
239 rcu_read_unlock();
230} 240}
231 241
232/** 242/**
@@ -271,13 +281,11 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
271 return -EINVAL; 281 return -EINVAL;
272 } 282 }
273 283
274 write_lock_bh(&tipc_net_lock);
275
276 m_ptr = tipc_media_find(b_names.media_name); 284 m_ptr = tipc_media_find(b_names.media_name);
277 if (!m_ptr) { 285 if (!m_ptr) {
278 pr_warn("Bearer <%s> rejected, media <%s> not registered\n", 286 pr_warn("Bearer <%s> rejected, media <%s> not registered\n",
279 name, b_names.media_name); 287 name, b_names.media_name);
280 goto exit; 288 return -EINVAL;
281 } 289 }
282 290
283 if (priority == TIPC_MEDIA_LINK_PRI) 291 if (priority == TIPC_MEDIA_LINK_PRI)
@@ -287,7 +295,7 @@ restart:
287 bearer_id = MAX_BEARERS; 295 bearer_id = MAX_BEARERS;
288 with_this_prio = 1; 296 with_this_prio = 1;
289 for (i = MAX_BEARERS; i-- != 0; ) { 297 for (i = MAX_BEARERS; i-- != 0; ) {
290 b_ptr = bearer_list[i]; 298 b_ptr = rtnl_dereference(bearer_list[i]);
291 if (!b_ptr) { 299 if (!b_ptr) {
292 bearer_id = i; 300 bearer_id = i;
293 continue; 301 continue;
@@ -295,14 +303,14 @@ restart:
295 if (!strcmp(name, b_ptr->name)) { 303 if (!strcmp(name, b_ptr->name)) {
296 pr_warn("Bearer <%s> rejected, already enabled\n", 304 pr_warn("Bearer <%s> rejected, already enabled\n",
297 name); 305 name);
298 goto exit; 306 return -EINVAL;
299 } 307 }
300 if ((b_ptr->priority == priority) && 308 if ((b_ptr->priority == priority) &&
301 (++with_this_prio > 2)) { 309 (++with_this_prio > 2)) {
302 if (priority-- == 0) { 310 if (priority-- == 0) {
303 pr_warn("Bearer <%s> rejected, duplicate priority\n", 311 pr_warn("Bearer <%s> rejected, duplicate priority\n",
304 name); 312 name);
305 goto exit; 313 return -EINVAL;
306 } 314 }
307 pr_warn("Bearer <%s> priority adjustment required %u->%u\n", 315 pr_warn("Bearer <%s> priority adjustment required %u->%u\n",
308 name, priority + 1, priority); 316 name, priority + 1, priority);
@@ -312,21 +320,20 @@ restart:
312 if (bearer_id >= MAX_BEARERS) { 320 if (bearer_id >= MAX_BEARERS) {
313 pr_warn("Bearer <%s> rejected, bearer limit reached (%u)\n", 321 pr_warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
314 name, MAX_BEARERS); 322 name, MAX_BEARERS);
315 goto exit; 323 return -EINVAL;
316 } 324 }
317 325
318 b_ptr = kzalloc(sizeof(*b_ptr), GFP_ATOMIC); 326 b_ptr = kzalloc(sizeof(*b_ptr), GFP_ATOMIC);
319 if (!b_ptr) { 327 if (!b_ptr)
320 res = -ENOMEM; 328 return -ENOMEM;
321 goto exit; 329
322 }
323 strcpy(b_ptr->name, name); 330 strcpy(b_ptr->name, name);
324 b_ptr->media = m_ptr; 331 b_ptr->media = m_ptr;
325 res = m_ptr->enable_media(b_ptr); 332 res = m_ptr->enable_media(b_ptr);
326 if (res) { 333 if (res) {
327 pr_warn("Bearer <%s> rejected, enable failure (%d)\n", 334 pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
328 name, -res); 335 name, -res);
329 goto exit; 336 return -EINVAL;
330 } 337 }
331 338
332 b_ptr->identity = bearer_id; 339 b_ptr->identity = bearer_id;
@@ -341,16 +348,14 @@ restart:
341 bearer_disable(b_ptr, false); 348 bearer_disable(b_ptr, false);
342 pr_warn("Bearer <%s> rejected, discovery object creation failed\n", 349 pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
343 name); 350 name);
344 goto exit; 351 return -EINVAL;
345 } 352 }
346 353
347 bearer_list[bearer_id] = b_ptr; 354 rcu_assign_pointer(bearer_list[bearer_id], b_ptr);
348 355
349 pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n", 356 pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
350 name, 357 name,
351 tipc_addr_string_fill(addr_string, disc_domain), priority); 358 tipc_addr_string_fill(addr_string, disc_domain), priority);
352exit:
353 write_unlock_bh(&tipc_net_lock);
354 return res; 359 return res;
355} 360}
356 361
@@ -359,19 +364,16 @@ exit:
359 */ 364 */
360static int tipc_reset_bearer(struct tipc_bearer *b_ptr) 365static int tipc_reset_bearer(struct tipc_bearer *b_ptr)
361{ 366{
362 read_lock_bh(&tipc_net_lock);
363 pr_info("Resetting bearer <%s>\n", b_ptr->name); 367 pr_info("Resetting bearer <%s>\n", b_ptr->name);
364 tipc_disc_delete(b_ptr->link_req);
365 tipc_link_reset_list(b_ptr->identity); 368 tipc_link_reset_list(b_ptr->identity);
366 tipc_disc_create(b_ptr, &b_ptr->bcast_addr); 369 tipc_disc_reset(b_ptr);
367 read_unlock_bh(&tipc_net_lock);
368 return 0; 370 return 0;
369} 371}
370 372
371/** 373/**
372 * bearer_disable 374 * bearer_disable
373 * 375 *
374 * Note: This routine assumes caller holds tipc_net_lock. 376 * Note: This routine assumes caller holds RTNL lock.
375 */ 377 */
376static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down) 378static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down)
377{ 379{
@@ -385,12 +387,12 @@ static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down)
385 tipc_disc_delete(b_ptr->link_req); 387 tipc_disc_delete(b_ptr->link_req);
386 388
387 for (i = 0; i < MAX_BEARERS; i++) { 389 for (i = 0; i < MAX_BEARERS; i++) {
388 if (b_ptr == bearer_list[i]) { 390 if (b_ptr == rtnl_dereference(bearer_list[i])) {
389 bearer_list[i] = NULL; 391 RCU_INIT_POINTER(bearer_list[i], NULL);
390 break; 392 break;
391 } 393 }
392 } 394 }
393 kfree(b_ptr); 395 kfree_rcu(b_ptr, rcu);
394} 396}
395 397
396int tipc_disable_bearer(const char *name) 398int tipc_disable_bearer(const char *name)
@@ -398,7 +400,6 @@ int tipc_disable_bearer(const char *name)
398 struct tipc_bearer *b_ptr; 400 struct tipc_bearer *b_ptr;
399 int res; 401 int res;
400 402
401 write_lock_bh(&tipc_net_lock);
402 b_ptr = tipc_bearer_find(name); 403 b_ptr = tipc_bearer_find(name);
403 if (b_ptr == NULL) { 404 if (b_ptr == NULL) {
404 pr_warn("Attempt to disable unknown bearer <%s>\n", name); 405 pr_warn("Attempt to disable unknown bearer <%s>\n", name);
@@ -407,32 +408,9 @@ int tipc_disable_bearer(const char *name)
407 bearer_disable(b_ptr, false); 408 bearer_disable(b_ptr, false);
408 res = 0; 409 res = 0;
409 } 410 }
410 write_unlock_bh(&tipc_net_lock);
411 return res; 411 return res;
412} 412}
413 413
414
415/* tipc_l2_media_addr_set - initialize Ethernet media address structure
416 *
417 * Media-dependent "value" field stores MAC address in first 6 bytes
418 * and zeroes out the remaining bytes.
419 */
420void tipc_l2_media_addr_set(const struct tipc_bearer *b,
421 struct tipc_media_addr *a, char *mac)
422{
423 int len = b->media->hwaddr_len;
424
425 if (unlikely(sizeof(a->value) < len)) {
426 WARN_ONCE(1, "Media length invalid\n");
427 return;
428 }
429
430 memcpy(a->value, mac, len);
431 memset(a->value + len, 0, sizeof(a->value) - len);
432 a->media_id = b->media->type_id;
433 a->broadcast = !memcmp(mac, b->bcast_addr.value, len);
434}
435
436int tipc_enable_l2_media(struct tipc_bearer *b) 414int tipc_enable_l2_media(struct tipc_bearer *b)
437{ 415{
438 struct net_device *dev; 416 struct net_device *dev;
@@ -443,33 +421,37 @@ int tipc_enable_l2_media(struct tipc_bearer *b)
443 if (!dev) 421 if (!dev)
444 return -ENODEV; 422 return -ENODEV;
445 423
446 /* Associate TIPC bearer with Ethernet bearer */ 424 /* Associate TIPC bearer with L2 bearer */
447 b->media_ptr = dev; 425 rcu_assign_pointer(b->media_ptr, dev);
448 memset(b->bcast_addr.value, 0, sizeof(b->bcast_addr.value)); 426 memset(&b->bcast_addr, 0, sizeof(b->bcast_addr));
449 memcpy(b->bcast_addr.value, dev->broadcast, b->media->hwaddr_len); 427 memcpy(b->bcast_addr.value, dev->broadcast, b->media->hwaddr_len);
450 b->bcast_addr.media_id = b->media->type_id; 428 b->bcast_addr.media_id = b->media->type_id;
451 b->bcast_addr.broadcast = 1; 429 b->bcast_addr.broadcast = 1;
452 b->mtu = dev->mtu; 430 b->mtu = dev->mtu;
453 tipc_l2_media_addr_set(b, &b->addr, (char *)dev->dev_addr); 431 b->media->raw2addr(b, &b->addr, (char *)dev->dev_addr);
454 rcu_assign_pointer(dev->tipc_ptr, b); 432 rcu_assign_pointer(dev->tipc_ptr, b);
455 return 0; 433 return 0;
456} 434}
457 435
458/* tipc_disable_l2_media - detach TIPC bearer from an Ethernet interface 436/* tipc_disable_l2_media - detach TIPC bearer from an L2 interface
459 * 437 *
460 * Mark Ethernet bearer as inactive so that incoming buffers are thrown away, 438 * Mark L2 bearer as inactive so that incoming buffers are thrown away,
461 * then get worker thread to complete bearer cleanup. (Can't do cleanup 439 * then get worker thread to complete bearer cleanup. (Can't do cleanup
462 * here because cleanup code needs to sleep and caller holds spinlocks.) 440 * here because cleanup code needs to sleep and caller holds spinlocks.)
463 */ 441 */
464void tipc_disable_l2_media(struct tipc_bearer *b) 442void tipc_disable_l2_media(struct tipc_bearer *b)
465{ 443{
466 struct net_device *dev = (struct net_device *)b->media_ptr; 444 struct net_device *dev;
445
446 dev = (struct net_device *)rtnl_dereference(b->media_ptr);
447 RCU_INIT_POINTER(b->media_ptr, NULL);
467 RCU_INIT_POINTER(dev->tipc_ptr, NULL); 448 RCU_INIT_POINTER(dev->tipc_ptr, NULL);
449 synchronize_net();
468 dev_put(dev); 450 dev_put(dev);
469} 451}
470 452
471/** 453/**
472 * tipc_l2_send_msg - send a TIPC packet out over an Ethernet interface 454 * tipc_l2_send_msg - send a TIPC packet out over an L2 interface
473 * @buf: the packet to be sent 455 * @buf: the packet to be sent
474 * @b_ptr: the bearer through which the packet is to be sent 456 * @b_ptr: the bearer through which the packet is to be sent
475 * @dest: peer destination address 457 * @dest: peer destination address
@@ -478,8 +460,12 @@ int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
478 struct tipc_media_addr *dest) 460 struct tipc_media_addr *dest)
479{ 461{
480 struct sk_buff *clone; 462 struct sk_buff *clone;
463 struct net_device *dev;
481 int delta; 464 int delta;
482 struct net_device *dev = (struct net_device *)b->media_ptr; 465
466 dev = (struct net_device *)rcu_dereference_rtnl(b->media_ptr);
467 if (!dev)
468 return 0;
483 469
484 clone = skb_clone(buf, GFP_ATOMIC); 470 clone = skb_clone(buf, GFP_ATOMIC);
485 if (!clone) 471 if (!clone)
@@ -507,10 +493,16 @@ int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
507 * The media send routine must not alter the buffer being passed in 493 * The media send routine must not alter the buffer being passed in
508 * as it may be needed for later retransmission! 494 * as it may be needed for later retransmission!
509 */ 495 */
510void tipc_bearer_send(struct tipc_bearer *b, struct sk_buff *buf, 496void tipc_bearer_send(u32 bearer_id, struct sk_buff *buf,
511 struct tipc_media_addr *dest) 497 struct tipc_media_addr *dest)
512{ 498{
513 b->media->send_msg(buf, b, dest); 499 struct tipc_bearer *b_ptr;
500
501 rcu_read_lock();
502 b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
503 if (likely(b_ptr))
504 b_ptr->media->send_msg(buf, b_ptr, dest);
505 rcu_read_unlock();
514} 506}
515 507
516/** 508/**
@@ -535,7 +527,7 @@ static int tipc_l2_rcv_msg(struct sk_buff *buf, struct net_device *dev,
535 } 527 }
536 528
537 rcu_read_lock(); 529 rcu_read_lock();
538 b_ptr = rcu_dereference(dev->tipc_ptr); 530 b_ptr = rcu_dereference_rtnl(dev->tipc_ptr);
539 if (likely(b_ptr)) { 531 if (likely(b_ptr)) {
540 if (likely(buf->pkt_type <= PACKET_BROADCAST)) { 532 if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
541 buf->next = NULL; 533 buf->next = NULL;
@@ -568,12 +560,9 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
568 if (!net_eq(dev_net(dev), &init_net)) 560 if (!net_eq(dev_net(dev), &init_net))
569 return NOTIFY_DONE; 561 return NOTIFY_DONE;
570 562
571 rcu_read_lock(); 563 b_ptr = rtnl_dereference(dev->tipc_ptr);
572 b_ptr = rcu_dereference(dev->tipc_ptr); 564 if (!b_ptr)
573 if (!b_ptr) {
574 rcu_read_unlock();
575 return NOTIFY_DONE; 565 return NOTIFY_DONE;
576 }
577 566
578 b_ptr->mtu = dev->mtu; 567 b_ptr->mtu = dev->mtu;
579 568
@@ -586,17 +575,15 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
586 tipc_reset_bearer(b_ptr); 575 tipc_reset_bearer(b_ptr);
587 break; 576 break;
588 case NETDEV_CHANGEADDR: 577 case NETDEV_CHANGEADDR:
589 tipc_l2_media_addr_set(b_ptr, &b_ptr->addr, 578 b_ptr->media->raw2addr(b_ptr, &b_ptr->addr,
590 (char *)dev->dev_addr); 579 (char *)dev->dev_addr);
591 tipc_reset_bearer(b_ptr); 580 tipc_reset_bearer(b_ptr);
592 break; 581 break;
593 case NETDEV_UNREGISTER: 582 case NETDEV_UNREGISTER:
594 case NETDEV_CHANGENAME: 583 case NETDEV_CHANGENAME:
595 tipc_disable_bearer(b_ptr->name); 584 bearer_disable(b_ptr, false);
596 break; 585 break;
597 } 586 }
598 rcu_read_unlock();
599
600 return NOTIFY_OK; 587 return NOTIFY_OK;
601} 588}
602 589
@@ -633,7 +620,7 @@ void tipc_bearer_stop(void)
633 u32 i; 620 u32 i;
634 621
635 for (i = 0; i < MAX_BEARERS; i++) { 622 for (i = 0; i < MAX_BEARERS; i++) {
636 b_ptr = bearer_list[i]; 623 b_ptr = rtnl_dereference(bearer_list[i]);
637 if (b_ptr) { 624 if (b_ptr) {
638 bearer_disable(b_ptr, true); 625 bearer_disable(b_ptr, true);
639 bearer_list[i] = NULL; 626 bearer_list[i] = NULL;
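
The net effect of the bearer.c changes is a lock-model swap: the old tipc_net_lock reader/writer lock disappears, configuration paths are serialized by RTNL, and data-path readers of bearer_list[] and b->media_ptr are protected by RCU. A hedged, generic sketch of that publish/read/retire pattern, using a made-up struct item rather than a tipc_bearer (pattern illustration only, not code from the patch):

/* Writers run under RTNL; readers may run in softirq context. */
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

struct item {
	int id;
	struct rcu_head rcu;		/* required by kfree_rcu() */
};

static struct item __rcu *slots[4];

/* Writer, RTNL held: publish a fully initialized object */
static void publish(int i, struct item *it)
{
	rcu_assign_pointer(slots[i], it);	/* pairs with rcu_dereference() */
}

/* Writer, RTNL held: unpublish, then free after a grace period */
static void retire(int i)
{
	struct item *it = rtnl_dereference(slots[i]);

	if (!it)
		return;
	RCU_INIT_POINTER(slots[i], NULL);
	kfree_rcu(it, rcu);	/* readers inside rcu_read_lock() stay safe */
}

/* Reader, data path: cheap, never blocks the writer */
static int read_id(int i)
{
	struct item *it;
	int id = -1;

	rcu_read_lock();
	it = rcu_dereference(slots[i]);
	if (it)
		id = it->id;
	rcu_read_unlock();
	return id;
}

The rcu_dereference_rtnl() calls in the patch combine the two reader forms above: they are legal either inside rcu_read_lock() or with RTNL held, which is why the same accessor can serve both the data path and configuration code.
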
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index ba48145e871d..78fccc49de23 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -42,14 +42,12 @@
42#define MAX_BEARERS 2 42#define MAX_BEARERS 2
43#define MAX_MEDIA 2 43#define MAX_MEDIA 2
44 44
45/* 45/* Identifiers associated with TIPC message header media address info
46 * Identifiers associated with TIPC message header media address info 46 * - address info field is 32 bytes long
47 * 47 * - the field's actual content and length is defined per media
48 * - address info field is 20 bytes long 48 * - remaining unused bytes in the field are set to zero
49 * - media type identifier located at offset 3
50 * - remaining bytes vary according to media type
51 */ 49 */
52#define TIPC_MEDIA_ADDR_SIZE 20 50#define TIPC_MEDIA_ADDR_SIZE 32
53#define TIPC_MEDIA_TYPE_OFFSET 3 51#define TIPC_MEDIA_TYPE_OFFSET 3
54 52
55/* 53/*
@@ -77,9 +75,10 @@ struct tipc_bearer;
77 * @send_msg: routine which handles buffer transmission 75 * @send_msg: routine which handles buffer transmission
78 * @enable_media: routine which enables a media 76 * @enable_media: routine which enables a media
79 * @disable_media: routine which disables a media 77 * @disable_media: routine which disables a media
80 * @addr2str: routine which converts media address to string 78 * @addr2str: convert media address format to string
81 * @addr2msg: routine which converts media address to protocol message area 79 * @addr2msg: convert from media addr format to discovery msg addr format
82 * @msg2addr: routine which converts media address from protocol message area 80 * @msg2addr: convert from discovery msg addr format to media addr format
81 * @raw2addr: convert from raw addr format to media addr format
83 * @priority: default link (and bearer) priority 82 * @priority: default link (and bearer) priority
84 * @tolerance: default time (in ms) before declaring link failure 83 * @tolerance: default time (in ms) before declaring link failure
85 * @window: default window (in packets) before declaring link congestion 84 * @window: default window (in packets) before declaring link congestion
@@ -93,10 +92,16 @@ struct tipc_media {
93 struct tipc_media_addr *dest); 92 struct tipc_media_addr *dest);
94 int (*enable_media)(struct tipc_bearer *b_ptr); 93 int (*enable_media)(struct tipc_bearer *b_ptr);
95 void (*disable_media)(struct tipc_bearer *b_ptr); 94 void (*disable_media)(struct tipc_bearer *b_ptr);
96 int (*addr2str)(struct tipc_media_addr *a, char *str_buf, int str_size); 95 int (*addr2str)(struct tipc_media_addr *addr,
97 int (*addr2msg)(struct tipc_media_addr *a, char *msg_area); 96 char *strbuf,
98 int (*msg2addr)(const struct tipc_bearer *b_ptr, 97 int bufsz);
99 struct tipc_media_addr *a, char *msg_area); 98 int (*addr2msg)(char *msg, struct tipc_media_addr *addr);
99 int (*msg2addr)(struct tipc_bearer *b,
100 struct tipc_media_addr *addr,
101 char *msg);
102 int (*raw2addr)(struct tipc_bearer *b,
103 struct tipc_media_addr *addr,
104 char *raw);
100 u32 priority; 105 u32 priority;
101 u32 tolerance; 106 u32 tolerance;
102 u32 window; 107 u32 window;
@@ -113,6 +118,7 @@ struct tipc_media {
113 * @name: bearer name (format = media:interface) 118 * @name: bearer name (format = media:interface)
114 * @media: ptr to media structure associated with bearer 119 * @media: ptr to media structure associated with bearer
115 * @bcast_addr: media address used in broadcasting 120 * @bcast_addr: media address used in broadcasting
121 * @rcu: rcu struct for tipc_bearer
116 * @priority: default link priority for bearer 122 * @priority: default link priority for bearer
117 * @window: default window size for bearer 123 * @window: default window size for bearer
118 * @tolerance: default link tolerance for bearer 124 * @tolerance: default link tolerance for bearer
@@ -127,12 +133,13 @@ struct tipc_media {
127 * care of initializing all other fields. 133 * care of initializing all other fields.
128 */ 134 */
129struct tipc_bearer { 135struct tipc_bearer {
130 void *media_ptr; /* initialized by media */ 136 void __rcu *media_ptr; /* initialized by media */
131 u32 mtu; /* initialized by media */ 137 u32 mtu; /* initialized by media */
132 struct tipc_media_addr addr; /* initialized by media */ 138 struct tipc_media_addr addr; /* initialized by media */
133 char name[TIPC_MAX_BEARER_NAME]; 139 char name[TIPC_MAX_BEARER_NAME];
134 struct tipc_media *media; 140 struct tipc_media *media;
135 struct tipc_media_addr bcast_addr; 141 struct tipc_media_addr bcast_addr;
142 struct rcu_head rcu;
136 u32 priority; 143 u32 priority;
137 u32 window; 144 u32 window;
138 u32 tolerance; 145 u32 tolerance;
@@ -150,7 +157,7 @@ struct tipc_bearer_names {
150 157
151struct tipc_link; 158struct tipc_link;
152 159
153extern struct tipc_bearer *bearer_list[]; 160extern struct tipc_bearer __rcu *bearer_list[];
154 161
155/* 162/*
156 * TIPC routines available to supported media types 163 * TIPC routines available to supported media types
@@ -173,22 +180,20 @@ int tipc_media_set_priority(const char *name, u32 new_value);
173int tipc_media_set_window(const char *name, u32 new_value); 180int tipc_media_set_window(const char *name, u32 new_value);
174void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a); 181void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
175struct sk_buff *tipc_media_get_names(void); 182struct sk_buff *tipc_media_get_names(void);
176void tipc_l2_media_addr_set(const struct tipc_bearer *b,
177 struct tipc_media_addr *a, char *mac);
178int tipc_enable_l2_media(struct tipc_bearer *b); 183int tipc_enable_l2_media(struct tipc_bearer *b);
179void tipc_disable_l2_media(struct tipc_bearer *b); 184void tipc_disable_l2_media(struct tipc_bearer *b);
180int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b, 185int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
181 struct tipc_media_addr *dest); 186 struct tipc_media_addr *dest);
182 187
183struct sk_buff *tipc_bearer_get_names(void); 188struct sk_buff *tipc_bearer_get_names(void);
184void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest); 189void tipc_bearer_add_dest(u32 bearer_id, u32 dest);
185void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest); 190void tipc_bearer_remove_dest(u32 bearer_id, u32 dest);
186struct tipc_bearer *tipc_bearer_find(const char *name); 191struct tipc_bearer *tipc_bearer_find(const char *name);
187struct tipc_media *tipc_media_find(const char *name); 192struct tipc_media *tipc_media_find(const char *name);
188int tipc_bearer_setup(void); 193int tipc_bearer_setup(void);
189void tipc_bearer_cleanup(void); 194void tipc_bearer_cleanup(void);
190void tipc_bearer_stop(void); 195void tipc_bearer_stop(void);
191void tipc_bearer_send(struct tipc_bearer *b, struct sk_buff *buf, 196void tipc_bearer_send(u32 bearer_id, struct sk_buff *buf,
192 struct tipc_media_addr *dest); 197 struct tipc_media_addr *dest);
193 198
194#endif /* _TIPC_BEARER_H */ 199#endif /* _TIPC_BEARER_H */
diff --git a/net/tipc/config.c b/net/tipc/config.c
index 4b981c053823..2b42403ad33a 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -42,8 +42,6 @@
42 42
43#define REPLY_TRUNCATED "<truncated>\n" 43#define REPLY_TRUNCATED "<truncated>\n"
44 44
45static DEFINE_MUTEX(config_mutex);
46
47static const void *req_tlv_area; /* request message TLV area */ 45static const void *req_tlv_area; /* request message TLV area */
48static int req_tlv_space; /* request message TLV area size */ 46static int req_tlv_space; /* request message TLV area size */
49static int rep_headroom; /* reply message headroom to use */ 47static int rep_headroom; /* reply message headroom to use */
@@ -179,8 +177,10 @@ static struct sk_buff *cfg_set_own_addr(void)
179 if (tipc_own_addr) 177 if (tipc_own_addr)
180 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 178 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
181 " (cannot change node address once assigned)"); 179 " (cannot change node address once assigned)");
182 tipc_net_start(addr); 180 if (!tipc_net_start(addr))
183 return tipc_cfg_reply_none(); 181 return tipc_cfg_reply_none();
182
183 return tipc_cfg_reply_error_string("cannot change to network mode");
184} 184}
185 185
186static struct sk_buff *cfg_set_max_ports(void) 186static struct sk_buff *cfg_set_max_ports(void)
@@ -223,7 +223,7 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
223{ 223{
224 struct sk_buff *rep_tlv_buf; 224 struct sk_buff *rep_tlv_buf;
225 225
226 mutex_lock(&config_mutex); 226 rtnl_lock();
227 227
228 /* Save request and reply details in a well-known location */ 228 /* Save request and reply details in a well-known location */
229 req_tlv_area = request_area; 229 req_tlv_area = request_area;
@@ -337,6 +337,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
337 337
338 /* Return reply buffer */ 338 /* Return reply buffer */
339exit: 339exit:
340 mutex_unlock(&config_mutex); 340 rtnl_unlock();
341 return rep_tlv_buf; 341 return rep_tlv_buf;
342} 342}
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 50d57429ebca..676d18015dd8 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -80,7 +80,6 @@ struct sk_buff *tipc_buf_acquire(u32 size)
80 */ 80 */
81static void tipc_core_stop(void) 81static void tipc_core_stop(void)
82{ 82{
83 tipc_handler_stop();
84 tipc_net_stop(); 83 tipc_net_stop();
85 tipc_bearer_cleanup(); 84 tipc_bearer_cleanup();
86 tipc_netlink_stop(); 85 tipc_netlink_stop();
@@ -100,10 +99,6 @@ static int tipc_core_start(void)
100 99
101 get_random_bytes(&tipc_random, sizeof(tipc_random)); 100 get_random_bytes(&tipc_random, sizeof(tipc_random));
102 101
103 err = tipc_handler_start();
104 if (err)
105 goto out_handler;
106
107 err = tipc_ref_table_init(tipc_max_ports, tipc_random); 102 err = tipc_ref_table_init(tipc_max_ports, tipc_random);
108 if (err) 103 if (err)
109 goto out_reftbl; 104 goto out_reftbl;
@@ -146,8 +141,6 @@ out_netlink:
146out_nametbl: 141out_nametbl:
147 tipc_ref_table_stop(); 142 tipc_ref_table_stop();
148out_reftbl: 143out_reftbl:
149 tipc_handler_stop();
150out_handler:
151 return err; 144 return err;
152} 145}
153 146
@@ -161,10 +154,11 @@ static int __init tipc_init(void)
161 tipc_max_ports = CONFIG_TIPC_PORTS; 154 tipc_max_ports = CONFIG_TIPC_PORTS;
162 tipc_net_id = 4711; 155 tipc_net_id = 4711;
163 156
164 sysctl_tipc_rmem[0] = CONN_OVERLOAD_LIMIT >> 4 << TIPC_LOW_IMPORTANCE; 157 sysctl_tipc_rmem[0] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
165 sysctl_tipc_rmem[1] = CONN_OVERLOAD_LIMIT >> 4 << 158 TIPC_LOW_IMPORTANCE;
159 sysctl_tipc_rmem[1] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
166 TIPC_CRITICAL_IMPORTANCE; 160 TIPC_CRITICAL_IMPORTANCE;
167 sysctl_tipc_rmem[2] = CONN_OVERLOAD_LIMIT; 161 sysctl_tipc_rmem[2] = TIPC_CONN_OVERLOAD_LIMIT;
168 162
169 res = tipc_core_start(); 163 res = tipc_core_start();
170 if (res) 164 if (res)
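
The renamed TIPC_CONN_OVERLOAD_LIMIT feeds the three sysctl_tipc_rmem defaults above via shifts: with TIPC_LOW_IMPORTANCE = 0 and TIPC_CRITICAL_IMPORTANCE = 3 (values from the TIPC uapi header), the min/default/max receive-buffer limits work out to limit/16, limit/2 and limit. A small worked sketch with a placeholder limit, since the constant's real definition lies outside this hunk:

#include <stdio.h>

#define TIPC_LOW_IMPORTANCE		0
#define TIPC_CRITICAL_IMPORTANCE	3
#define TIPC_CONN_OVERLOAD_LIMIT	(1 << 20)	/* placeholder, not the real value */

int main(void)
{
	unsigned int rmem[3];

	rmem[0] = TIPC_CONN_OVERLOAD_LIMIT >> 4 << TIPC_LOW_IMPORTANCE;
	rmem[1] = TIPC_CONN_OVERLOAD_LIMIT >> 4 << TIPC_CRITICAL_IMPORTANCE;
	rmem[2] = TIPC_CONN_OVERLOAD_LIMIT;

	/* With the 1 MiB placeholder: min 65536, default 524288, max 1048576 */
	printf("min %u, default %u, max %u\n", rmem[0], rmem[1], rmem[2]);
	return 0;
}
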
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 8985bbcb942b..bb26ed1ee966 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -56,7 +56,8 @@
56#include <linux/list.h> 56#include <linux/list.h>
57#include <linux/slab.h> 57#include <linux/slab.h>
58#include <linux/vmalloc.h> 58#include <linux/vmalloc.h>
59 59#include <linux/rtnetlink.h>
60#include <linux/etherdevice.h>
60 61
61#define TIPC_MOD_VER "2.0.0" 62#define TIPC_MOD_VER "2.0.0"
62 63
@@ -89,8 +90,6 @@ extern int tipc_random __read_mostly;
89/* 90/*
90 * Routines available to privileged subsystems 91 * Routines available to privileged subsystems
91 */ 92 */
92int tipc_handler_start(void);
93void tipc_handler_stop(void);
94int tipc_netlink_start(void); 93int tipc_netlink_start(void);
95void tipc_netlink_stop(void); 94void tipc_netlink_stop(void);
96int tipc_socket_init(void); 95int tipc_socket_init(void);
@@ -109,12 +108,10 @@ void tipc_unregister_sysctl(void);
109#endif 108#endif
110 109
111/* 110/*
112 * TIPC timer and signal code 111 * TIPC timer code
113 */ 112 */
114typedef void (*Handler) (unsigned long); 113typedef void (*Handler) (unsigned long);
115 114
116u32 tipc_k_signal(Handler routine, unsigned long argument);
117
118/** 115/**
119 * k_init_timer - initialize a timer 116 * k_init_timer - initialize a timer
120 * @timer: pointer to timer structure 117 * @timer: pointer to timer structure
@@ -191,6 +188,7 @@ static inline void k_term_timer(struct timer_list *timer)
191struct tipc_skb_cb { 188struct tipc_skb_cb {
192 void *handle; 189 void *handle;
193 bool deferred; 190 bool deferred;
191 struct sk_buff *tail;
194}; 192};
195 193
196#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0])) 194#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 542fe3413dc4..aa722a42ef8b 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/discover.c 2 * net/tipc/discover.c
3 * 3 *
4 * Copyright (c) 2003-2006, Ericsson AB 4 * Copyright (c) 2003-2006, 2014, Ericsson AB
5 * Copyright (c) 2005-2006, 2010-2011, Wind River Systems 5 * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -46,8 +46,9 @@
46 46
47/** 47/**
48 * struct tipc_link_req - information about an ongoing link setup request 48 * struct tipc_link_req - information about an ongoing link setup request
49 * @bearer: bearer issuing requests 49 * @bearer_id: identity of bearer issuing requests
50 * @dest: destination address for request messages 50 * @dest: destination address for request messages
51 * @domain: network domain to which links can be established
51 * @num_nodes: number of nodes currently discovered (i.e. with an active link) 52 * @num_nodes: number of nodes currently discovered (i.e. with an active link)
52 * @lock: spinlock for controlling access to requests 53 * @lock: spinlock for controlling access to requests
53 * @buf: request message to be (repeatedly) sent 54 * @buf: request message to be (repeatedly) sent
@@ -55,8 +56,9 @@
55 * @timer_intv: current interval between requests (in ms) 56 * @timer_intv: current interval between requests (in ms)
56 */ 57 */
57struct tipc_link_req { 58struct tipc_link_req {
58 struct tipc_bearer *bearer; 59 u32 bearer_id;
59 struct tipc_media_addr dest; 60 struct tipc_media_addr dest;
61 u32 domain;
60 int num_nodes; 62 int num_nodes;
61 spinlock_t lock; 63 spinlock_t lock;
62 struct sk_buff *buf; 64 struct sk_buff *buf;
@@ -69,22 +71,19 @@ struct tipc_link_req {
69 * @type: message type (request or response) 71 * @type: message type (request or response)
70 * @b_ptr: ptr to bearer issuing message 72 * @b_ptr: ptr to bearer issuing message
71 */ 73 */
72static struct sk_buff *tipc_disc_init_msg(u32 type, struct tipc_bearer *b_ptr) 74static void tipc_disc_init_msg(struct sk_buff *buf, u32 type,
75 struct tipc_bearer *b_ptr)
73{ 76{
74 struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE);
75 struct tipc_msg *msg; 77 struct tipc_msg *msg;
76 u32 dest_domain = b_ptr->domain; 78 u32 dest_domain = b_ptr->domain;
77 79
78 if (buf) { 80 msg = buf_msg(buf);
79 msg = buf_msg(buf); 81 tipc_msg_init(msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain);
80 tipc_msg_init(msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain); 82 msg_set_non_seq(msg, 1);
81 msg_set_non_seq(msg, 1); 83 msg_set_node_sig(msg, tipc_random);
82 msg_set_node_sig(msg, tipc_random); 84 msg_set_dest_domain(msg, dest_domain);
83 msg_set_dest_domain(msg, dest_domain); 85 msg_set_bc_netid(msg, tipc_net_id);
84 msg_set_bc_netid(msg, tipc_net_id); 86 b_ptr->media->addr2msg(msg_media_addr(msg), &b_ptr->addr);
85 b_ptr->media->addr2msg(&b_ptr->addr, msg_media_addr(msg));
86 }
87 return buf;
88} 87}
89 88
90/** 89/**
@@ -107,146 +106,150 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
107} 106}
108 107
109/** 108/**
110 * tipc_disc_rcv - handle incoming link setup message (request or response) 109 * tipc_disc_rcv - handle incoming discovery message (request or response)
111 * @buf: buffer containing message 110 * @buf: buffer containing message
112 * @b_ptr: bearer that message arrived on 111 * @bearer: bearer that message arrived on
113 */ 112 */
114void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr) 113void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer)
115{ 114{
116 struct tipc_node *n_ptr; 115 struct tipc_node *node;
117 struct tipc_link *link; 116 struct tipc_link *link;
118 struct tipc_media_addr media_addr; 117 struct tipc_media_addr maddr;
119 struct sk_buff *rbuf; 118 struct sk_buff *rbuf;
120 struct tipc_msg *msg = buf_msg(buf); 119 struct tipc_msg *msg = buf_msg(buf);
121 u32 dest = msg_dest_domain(msg); 120 u32 ddom = msg_dest_domain(msg);
122 u32 orig = msg_prevnode(msg); 121 u32 onode = msg_prevnode(msg);
123 u32 net_id = msg_bc_netid(msg); 122 u32 net_id = msg_bc_netid(msg);
124 u32 type = msg_type(msg); 123 u32 mtyp = msg_type(msg);
125 u32 signature = msg_node_sig(msg); 124 u32 signature = msg_node_sig(msg);
126 int addr_mismatch; 125 bool addr_match = false;
127 int link_fully_up; 126 bool sign_match = false;
128 127 bool link_up = false;
129 media_addr.broadcast = 1; 128 bool accept_addr = false;
130 b_ptr->media->msg2addr(b_ptr, &media_addr, msg_media_addr(msg)); 129 bool accept_sign = false;
130 bool respond = false;
131
132 bearer->media->msg2addr(bearer, &maddr, msg_media_addr(msg));
131 kfree_skb(buf); 133 kfree_skb(buf);
132 134
133 /* Ensure message from node is valid and communication is permitted */ 135 /* Ensure message from node is valid and communication is permitted */
134 if (net_id != tipc_net_id) 136 if (net_id != tipc_net_id)
135 return; 137 return;
136 if (media_addr.broadcast) 138 if (maddr.broadcast)
137 return; 139 return;
138 if (!tipc_addr_domain_valid(dest)) 140 if (!tipc_addr_domain_valid(ddom))
139 return; 141 return;
140 if (!tipc_addr_node_valid(orig)) 142 if (!tipc_addr_node_valid(onode))
141 return; 143 return;
142 if (orig == tipc_own_addr) { 144
143 if (memcmp(&media_addr, &b_ptr->addr, sizeof(media_addr))) 145 if (in_own_node(onode)) {
144 disc_dupl_alert(b_ptr, tipc_own_addr, &media_addr); 146 if (memcmp(&maddr, &bearer->addr, sizeof(maddr)))
147 disc_dupl_alert(bearer, tipc_own_addr, &maddr);
145 return; 148 return;
146 } 149 }
147 if (!tipc_in_scope(dest, tipc_own_addr)) 150 if (!tipc_in_scope(ddom, tipc_own_addr))
148 return; 151 return;
149 if (!tipc_in_scope(b_ptr->domain, orig)) 152 if (!tipc_in_scope(bearer->domain, onode))
150 return; 153 return;
151 154
152 /* Locate structure corresponding to requesting node */ 155 /* Locate, or if necessary, create, node: */
153 n_ptr = tipc_node_find(orig); 156 node = tipc_node_find(onode);
154 if (!n_ptr) { 157 if (!node)
155 n_ptr = tipc_node_create(orig); 158 node = tipc_node_create(onode);
156 if (!n_ptr) 159 if (!node)
157 return; 160 return;
158 }
159 tipc_node_lock(n_ptr);
160 161
161 /* Prepare to validate requesting node's signature and media address */ 162 tipc_node_lock(node);
162 link = n_ptr->links[b_ptr->identity]; 163 link = node->links[bearer->identity];
163 addr_mismatch = (link != NULL) &&
164 memcmp(&link->media_addr, &media_addr, sizeof(media_addr));
165 164
166 /* 165 /* Prepare to validate requesting node's signature and media address */
167 * Ensure discovery message's signature is correct 166 sign_match = (signature == node->signature);
168 * 167 addr_match = link && !memcmp(&link->media_addr, &maddr, sizeof(maddr));
169 * If signature is incorrect and there is no working link to the node, 168 link_up = link && tipc_link_is_up(link);
170 * accept the new signature but invalidate all existing links to the 169
171 * node so they won't re-activate without a new discovery message. 170
172 * 171 /* These three flags give us eight permutations: */
173 * If signature is incorrect and the requested link to the node is 172
174 * working, accept the new signature. (This is an instance of delayed 173 if (sign_match && addr_match && link_up) {
175 * rediscovery, where a link endpoint was able to re-establish contact 174 /* All is fine. Do nothing. */
176 * with its peer endpoint on a node that rebooted before receiving a 175 } else if (sign_match && addr_match && !link_up) {
177 * discovery message from that node.) 176 /* Respond. The link will come up in due time */
178 * 177 respond = true;
179 * If signature is incorrect and there is a working link to the node 178 } else if (sign_match && !addr_match && link_up) {
180 * that is not the requested link, reject the request (must be from 179 /* Peer has changed i/f address without rebooting.
181 * a duplicate node). 182 * It may also be a cloned or malicious peer having
182 */ 181 * discovery will be accepted. So we can ignore it.
183 if (signature != n_ptr->signature) { 182 * It may also be an cloned or malicious peer having
184 if (n_ptr->working_links == 0) { 183 * chosen the same node address and signature as an
185 struct tipc_link *curr_link; 184 * existing one.
186 int i; 185 * Ignore requests until the link goes down, if ever.
187 186 */
188 for (i = 0; i < MAX_BEARERS; i++) { 187 disc_dupl_alert(bearer, onode, &maddr);
189 curr_link = n_ptr->links[i]; 188 } else if (sign_match && !addr_match && !link_up) {
190 if (curr_link) { 189 /* Peer link has changed i/f address without rebooting.
191 memset(&curr_link->media_addr, 0, 190 * It may also be a cloned or malicious peer; we can't
192 sizeof(media_addr)); 191 * distinguish between the two.
193 tipc_link_reset(curr_link); 192 * The signature is correct, so we must accept.
194 } 193 */
195 } 194 accept_addr = true;
196 addr_mismatch = (link != NULL); 195 respond = true;
197 } else if (tipc_link_is_up(link) && !addr_mismatch) { 196 } else if (!sign_match && addr_match && link_up) {
198 /* delayed rediscovery */ 197 /* Peer node rebooted. Two possibilities:
199 } else { 198 * - Delayed re-discovery; this link endpoint has already
200 disc_dupl_alert(b_ptr, orig, &media_addr); 199 * reset and re-established contact with the peer, before
201 tipc_node_unlock(n_ptr); 200 * receiving a discovery message from that node.
202 return; 201 * (The peer happened to receive one from this node first).
203 } 202 * - The peer came back so fast that our side has not
204 n_ptr->signature = signature; 203 * discovered it yet. Probing from this side will soon
204 * reset the link, since there can be no working link
205 * endpoint at the peer end, and the link will re-establish.
206 * Accept the signature, since it comes from a known peer.
207 */
208 accept_sign = true;
209 } else if (!sign_match && addr_match && !link_up) {
210 /* The peer node has rebooted.
211 * Accept signature, since it is a known peer.
212 */
213 accept_sign = true;
214 respond = true;
215 } else if (!sign_match && !addr_match && link_up) {
216 /* Peer rebooted with new address, or a new/duplicate peer.
217 * Ignore until the link goes down, if ever.
218 */
219 disc_dupl_alert(bearer, onode, &maddr);
220 } else if (!sign_match && !addr_match && !link_up) {
221 /* Peer rebooted with new address, or it is a new peer.
222 * Accept signature and address.
223 */
224 accept_sign = true;
225 accept_addr = true;
226 respond = true;
205 } 227 }
206 228
207 /* 229 if (accept_sign)
208 * Ensure requesting node's media address is correct 230 node->signature = signature;
209 *
210 * If media address doesn't match and the link is working, reject the
211 * request (must be from a duplicate node).
212 *
213 * If media address doesn't match and the link is not working, accept
214 * the new media address and reset the link to ensure it starts up
215 * cleanly.
216 */
217 if (addr_mismatch) {
218 if (tipc_link_is_up(link)) {
219 disc_dupl_alert(b_ptr, orig, &media_addr);
220 tipc_node_unlock(n_ptr);
221 return;
222 } else {
223 memcpy(&link->media_addr, &media_addr,
224 sizeof(media_addr));
225 tipc_link_reset(link);
226 }
227 }
228 231
229 /* Create a link endpoint for this bearer, if necessary */ 232 if (accept_addr) {
230 if (!link) { 233 if (!link)
231 link = tipc_link_create(n_ptr, b_ptr, &media_addr); 234 link = tipc_link_create(node, bearer, &maddr);
232 if (!link) { 235 if (link) {
233 tipc_node_unlock(n_ptr); 236 memcpy(&link->media_addr, &maddr, sizeof(maddr));
234 return; 237 tipc_link_reset(link);
238 } else {
239 respond = false;
235 } 240 }
236 } 241 }
237 242
238 /* Accept discovery message & send response, if necessary */ 243 /* Send response, if necessary */
239 link_fully_up = link_working_working(link); 244 if (respond && (mtyp == DSC_REQ_MSG)) {
240 245 rbuf = tipc_buf_acquire(INT_H_SIZE);
241 if ((type == DSC_REQ_MSG) && !link_fully_up) {
242 rbuf = tipc_disc_init_msg(DSC_RESP_MSG, b_ptr);
243 if (rbuf) { 246 if (rbuf) {
244 tipc_bearer_send(b_ptr, rbuf, &media_addr); 247 tipc_disc_init_msg(rbuf, DSC_RESP_MSG, bearer);
248 tipc_bearer_send(bearer->identity, rbuf, &maddr);
245 kfree_skb(rbuf); 249 kfree_skb(rbuf);
246 } 250 }
247 } 251 }
248 252 tipc_node_unlock(node);
249 tipc_node_unlock(n_ptr);
250} 253}
251 254
252/** 255/**
@@ -303,7 +306,7 @@ static void disc_timeout(struct tipc_link_req *req)
303 spin_lock_bh(&req->lock); 306 spin_lock_bh(&req->lock);
304 307
305 /* Stop searching if only desired node has been found */ 308 /* Stop searching if only desired node has been found */
306 if (tipc_node(req->bearer->domain) && req->num_nodes) { 309 if (tipc_node(req->domain) && req->num_nodes) {
307 req->timer_intv = TIPC_LINK_REQ_INACTIVE; 310 req->timer_intv = TIPC_LINK_REQ_INACTIVE;
308 goto exit; 311 goto exit;
309 } 312 }
@@ -315,7 +318,7 @@ static void disc_timeout(struct tipc_link_req *req)
315 * hold at fast polling rate if don't have any associated nodes, 318 * hold at fast polling rate if don't have any associated nodes,
316 * otherwise hold at slow polling rate 319 * otherwise hold at slow polling rate
317 */ 320 */
318 tipc_bearer_send(req->bearer, req->buf, &req->dest); 321 tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
319 322
320 323
321 req->timer_intv *= 2; 324 req->timer_intv *= 2;
@@ -347,21 +350,23 @@ int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest)
347 if (!req) 350 if (!req)
348 return -ENOMEM; 351 return -ENOMEM;
349 352
350 req->buf = tipc_disc_init_msg(DSC_REQ_MSG, b_ptr); 353 req->buf = tipc_buf_acquire(INT_H_SIZE);
351 if (!req->buf) { 354 if (!req->buf) {
352 kfree(req); 355 kfree(req);
353 return -ENOMSG; 356 return -ENOMEM;
354 } 357 }
355 358
359 tipc_disc_init_msg(req->buf, DSC_REQ_MSG, b_ptr);
356 memcpy(&req->dest, dest, sizeof(*dest)); 360 memcpy(&req->dest, dest, sizeof(*dest));
357 req->bearer = b_ptr; 361 req->bearer_id = b_ptr->identity;
362 req->domain = b_ptr->domain;
358 req->num_nodes = 0; 363 req->num_nodes = 0;
359 req->timer_intv = TIPC_LINK_REQ_INIT; 364 req->timer_intv = TIPC_LINK_REQ_INIT;
360 spin_lock_init(&req->lock); 365 spin_lock_init(&req->lock);
361 k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req); 366 k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req);
362 k_start_timer(&req->timer, req->timer_intv); 367 k_start_timer(&req->timer, req->timer_intv);
363 b_ptr->link_req = req; 368 b_ptr->link_req = req;
364 tipc_bearer_send(req->bearer, req->buf, &req->dest); 369 tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
365 return 0; 370 return 0;
366} 371}
367 372
@@ -376,3 +381,23 @@ void tipc_disc_delete(struct tipc_link_req *req)
376 kfree_skb(req->buf); 381 kfree_skb(req->buf);
377 kfree(req); 382 kfree(req);
378} 383}
384
385/**
386 * tipc_disc_reset - reset object to send periodic link setup requests
387 * @b_ptr: ptr to bearer issuing requests
389 */
390void tipc_disc_reset(struct tipc_bearer *b_ptr)
391{
392 struct tipc_link_req *req = b_ptr->link_req;
393
394 spin_lock_bh(&req->lock);
395 tipc_disc_init_msg(req->buf, DSC_REQ_MSG, b_ptr);
396 req->bearer_id = b_ptr->identity;
397 req->domain = b_ptr->domain;
398 req->num_nodes = 0;
399 req->timer_intv = TIPC_LINK_REQ_INIT;
400 k_start_timer(&req->timer, req->timer_intv);
401 tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
402 spin_unlock_bh(&req->lock);
403}
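
The rewritten tipc_disc_rcv() flattens the old nested validation into an explicit eight-way truth table over sign_match, addr_match and link_up. Restated as a standalone sketch so all outcomes can be eyeballed in one place (the flag names follow the patch; the verdict wrapper and the test loop are ours):

#include <stdbool.h>
#include <stdio.h>

struct verdict {
	bool accept_sign, accept_addr, respond, dupl_alert;
};

static struct verdict classify(bool sign_match, bool addr_match, bool link_up)
{
	struct verdict v = { false, false, false, false };

	if (sign_match && addr_match && link_up) {
		/* all consistent: nothing to do */
	} else if (sign_match && addr_match && !link_up) {
		v.respond = true;			/* link comes up in due time */
	} else if (sign_match && !addr_match && link_up) {
		v.dupl_alert = true;			/* possible duplicate; wait for link reset */
	} else if (sign_match && !addr_match && !link_up) {
		v.accept_addr = v.respond = true;	/* peer changed i/f address */
	} else if (!sign_match && addr_match && link_up) {
		v.accept_sign = true;			/* peer rebooted; delayed re-discovery */
	} else if (!sign_match && addr_match && !link_up) {
		v.accept_sign = v.respond = true;	/* peer rebooted, known address */
	} else if (!sign_match && !addr_match && link_up) {
		v.dupl_alert = true;			/* new address while link still up */
	} else {
		v.accept_sign = v.accept_addr = v.respond = true; /* new peer */
	}
	return v;
}

int main(void)
{
	for (int m = 7; m >= 0; m--) {
		struct verdict v = classify(m & 4, m & 2, m & 1);

		printf("sign=%d addr=%d up=%d -> sign:%d addr:%d resp:%d alert:%d\n",
		       !!(m & 4), !!(m & 2), !!(m & 1),
		       v.accept_sign, v.accept_addr, v.respond, v.dupl_alert);
	}
	return 0;
}
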
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index 07f34729459d..515b57392f4d 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -41,6 +41,7 @@ struct tipc_link_req;
41 41
42int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest); 42int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest);
43void tipc_disc_delete(struct tipc_link_req *req); 43void tipc_disc_delete(struct tipc_link_req *req);
44void tipc_disc_reset(struct tipc_bearer *b_ptr);
44void tipc_disc_add_dest(struct tipc_link_req *req); 45void tipc_disc_add_dest(struct tipc_link_req *req);
45void tipc_disc_remove_dest(struct tipc_link_req *req); 46void tipc_disc_remove_dest(struct tipc_link_req *req);
46void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr); 47void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr);
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 67cf3f935dba..5e1426f1751f 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/eth_media.c: Ethernet bearer support for TIPC 2 * net/tipc/eth_media.c: Ethernet bearer support for TIPC
3 * 3 *
4 * Copyright (c) 2001-2007, 2013, Ericsson AB 4 * Copyright (c) 2001-2007, 2013-2014, Ericsson AB
5 * Copyright (c) 2005-2008, 2011-2013, Wind River Systems 5 * Copyright (c) 2005-2008, 2011-2013, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -37,39 +37,52 @@
37#include "core.h" 37#include "core.h"
38#include "bearer.h" 38#include "bearer.h"
39 39
40#define ETH_ADDR_OFFSET 4 /* message header offset of MAC address */ 40#define ETH_ADDR_OFFSET 4 /* MAC addr position inside address field */
41 41
42/* convert Ethernet address to string */ 42/* Convert Ethernet address (media address format) to string */
43static int tipc_eth_addr2str(struct tipc_media_addr *a, char *str_buf, 43static int tipc_eth_addr2str(struct tipc_media_addr *addr,
44 int str_size) 44 char *strbuf, int bufsz)
45{ 45{
46 if (str_size < 18) /* 18 = strlen("aa:bb:cc:dd:ee:ff\0") */ 46 if (bufsz < 18) /* 18 = strlen("aa:bb:cc:dd:ee:ff\0") */
47 return 1; 47 return 1;
48 48
49 sprintf(str_buf, "%pM", a->value); 49 sprintf(strbuf, "%pM", addr->value);
50 return 0; 50 return 0;
51} 51}
52 52
53/* convert Ethernet address format to message header format */ 53/* Convert from media address format to discovery message addr format */
54static int tipc_eth_addr2msg(struct tipc_media_addr *a, char *msg_area) 54static int tipc_eth_addr2msg(char *msg, struct tipc_media_addr *addr)
55{ 55{
56 memset(msg_area, 0, TIPC_MEDIA_ADDR_SIZE); 56 memset(msg, 0, TIPC_MEDIA_ADDR_SIZE);
57 msg_area[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH; 57 msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH;
58 memcpy(msg_area + ETH_ADDR_OFFSET, a->value, ETH_ALEN); 58 memcpy(msg + ETH_ADDR_OFFSET, addr->value, ETH_ALEN);
59 return 0; 59 return 0;
60} 60}
61 61
62/* convert message header address format to Ethernet format */ 62/* Convert raw mac address format to media addr format */
63static int tipc_eth_msg2addr(const struct tipc_bearer *tb_ptr, 63static int tipc_eth_raw2addr(struct tipc_bearer *b,
64 struct tipc_media_addr *a, char *msg_area) 64 struct tipc_media_addr *addr,
65 char *msg)
65{ 66{
66 if (msg_area[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_ETH) 67 char bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
67 return 1;
68 68
69 tipc_l2_media_addr_set(tb_ptr, a, msg_area + ETH_ADDR_OFFSET); 69 memset(addr, 0, sizeof(*addr));
70 ether_addr_copy(addr->value, msg);
71 addr->media_id = TIPC_MEDIA_TYPE_ETH;
72 addr->broadcast = !memcmp(addr->value, bcast_mac, ETH_ALEN);
70 return 0; 73 return 0;
71} 74}
72 75
76/* Convert discovery msg addr format to Ethernet media addr format */
77static int tipc_eth_msg2addr(struct tipc_bearer *b,
78 struct tipc_media_addr *addr,
79 char *msg)
80{
81 /* Skip past preamble: */
82 msg += ETH_ADDR_OFFSET;
83 return tipc_eth_raw2addr(b, addr, msg);
84}
85
73/* Ethernet media registration info */ 86/* Ethernet media registration info */
74struct tipc_media eth_media_info = { 87struct tipc_media eth_media_info = {
75 .send_msg = tipc_l2_send_msg, 88 .send_msg = tipc_l2_send_msg,
@@ -78,6 +91,7 @@ struct tipc_media eth_media_info = {
78 .addr2str = tipc_eth_addr2str, 91 .addr2str = tipc_eth_addr2str,
79 .addr2msg = tipc_eth_addr2msg, 92 .addr2msg = tipc_eth_addr2msg,
80 .msg2addr = tipc_eth_msg2addr, 93 .msg2addr = tipc_eth_msg2addr,
94 .raw2addr = tipc_eth_raw2addr,
81 .priority = TIPC_DEF_LINK_PRI, 95 .priority = TIPC_DEF_LINK_PRI,
82 .tolerance = TIPC_DEF_LINK_TOL, 96 .tolerance = TIPC_DEF_LINK_TOL,
83 .window = TIPC_DEF_LINK_WIN, 97 .window = TIPC_DEF_LINK_WIN,
@@ -85,4 +99,3 @@ struct tipc_media eth_media_info = {
85 .hwaddr_len = ETH_ALEN, 99 .hwaddr_len = ETH_ALEN,
86 .name = "eth" 100 .name = "eth"
87}; 101};
88
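
Taken together, the three Ethernet converters define a lossless round trip: a raw MAC becomes a media address (raw2addr), is flattened into the 32-byte discovery field with the media type at offset 3 and the MAC at offset 4 (addr2msg), and is recovered on the receiving side (msg2addr). A user-space sketch of that round trip; the offsets and sizes mirror the patch, TIPC_MEDIA_TYPE_ETH's value is an assumption, and everything else is reimplemented here for illustration:

#include <stdio.h>
#include <string.h>

#define TIPC_MEDIA_ADDR_SIZE	32
#define TIPC_MEDIA_TYPE_OFFSET	3
#define TIPC_MEDIA_TYPE_ETH	1	/* assumed value, for illustration */
#define ETH_ADDR_OFFSET		4	/* MAC position inside the field */
#define ETH_ALEN		6

struct media_addr {
	unsigned char value[TIPC_MEDIA_ADDR_SIZE];
	int media_id;
	int broadcast;
};

static void raw2addr(struct media_addr *a, const unsigned char *mac)
{
	static const unsigned char bcast[ETH_ALEN] =
		{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	memset(a, 0, sizeof(*a));
	memcpy(a->value, mac, ETH_ALEN);
	a->media_id = TIPC_MEDIA_TYPE_ETH;
	a->broadcast = !memcmp(a->value, bcast, ETH_ALEN);
}

static void addr2msg(unsigned char *msg, const struct media_addr *a)
{
	memset(msg, 0, TIPC_MEDIA_ADDR_SIZE);
	msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH;
	memcpy(msg + ETH_ADDR_OFFSET, a->value, ETH_ALEN);
}

static void msg2addr(struct media_addr *a, const unsigned char *msg)
{
	raw2addr(a, msg + ETH_ADDR_OFFSET);	/* skip past the preamble */
}

int main(void)
{
	unsigned char mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
	unsigned char field[TIPC_MEDIA_ADDR_SIZE];
	struct media_addr in, out;

	raw2addr(&in, mac);
	addr2msg(field, &in);
	msg2addr(&out, field);
	printf("round trip %s\n",
	       memcmp(in.value, out.value, ETH_ALEN) ? "failed" : "ok");
	return 0;
}
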
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
deleted file mode 100644
index 1fabf160501f..000000000000
--- a/net/tipc/handler.c
+++ /dev/null
@@ -1,134 +0,0 @@
1/*
2 * net/tipc/handler.c: TIPC signal handling
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38
39struct queue_item {
40 struct list_head next_signal;
41 void (*handler) (unsigned long);
42 unsigned long data;
43};
44
45static struct kmem_cache *tipc_queue_item_cache;
46static struct list_head signal_queue_head;
47static DEFINE_SPINLOCK(qitem_lock);
48static int handler_enabled __read_mostly;
49
50static void process_signal_queue(unsigned long dummy);
51
52static DECLARE_TASKLET_DISABLED(tipc_tasklet, process_signal_queue, 0);
53
54
55unsigned int tipc_k_signal(Handler routine, unsigned long argument)
56{
57 struct queue_item *item;
58
59 spin_lock_bh(&qitem_lock);
60 if (!handler_enabled) {
61 spin_unlock_bh(&qitem_lock);
62 return -ENOPROTOOPT;
63 }
64
65 item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC);
66 if (!item) {
67 pr_err("Signal queue out of memory\n");
68 spin_unlock_bh(&qitem_lock);
69 return -ENOMEM;
70 }
71 item->handler = routine;
72 item->data = argument;
73 list_add_tail(&item->next_signal, &signal_queue_head);
74 spin_unlock_bh(&qitem_lock);
75 tasklet_schedule(&tipc_tasklet);
76 return 0;
77}
78
79static void process_signal_queue(unsigned long dummy)
80{
81 struct queue_item *__volatile__ item;
82 struct list_head *l, *n;
83
84 spin_lock_bh(&qitem_lock);
85 list_for_each_safe(l, n, &signal_queue_head) {
86 item = list_entry(l, struct queue_item, next_signal);
87 list_del(&item->next_signal);
88 spin_unlock_bh(&qitem_lock);
89 item->handler(item->data);
90 spin_lock_bh(&qitem_lock);
91 kmem_cache_free(tipc_queue_item_cache, item);
92 }
93 spin_unlock_bh(&qitem_lock);
94}
95
96int tipc_handler_start(void)
97{
98 tipc_queue_item_cache =
99 kmem_cache_create("tipc_queue_items", sizeof(struct queue_item),
100 0, SLAB_HWCACHE_ALIGN, NULL);
101 if (!tipc_queue_item_cache)
102 return -ENOMEM;
103
104 INIT_LIST_HEAD(&signal_queue_head);
105 tasklet_enable(&tipc_tasklet);
106 handler_enabled = 1;
107 return 0;
108}
109
110void tipc_handler_stop(void)
111{
112 struct list_head *l, *n;
113 struct queue_item *item;
114
115 spin_lock_bh(&qitem_lock);
116 if (!handler_enabled) {
117 spin_unlock_bh(&qitem_lock);
118 return;
119 }
120 handler_enabled = 0;
121 spin_unlock_bh(&qitem_lock);
122
123 tasklet_kill(&tipc_tasklet);
124
125 spin_lock_bh(&qitem_lock);
126 list_for_each_safe(l, n, &signal_queue_head) {
127 item = list_entry(l, struct queue_item, next_signal);
128 list_del(&item->next_signal);
129 kmem_cache_free(tipc_queue_item_cache, item);
130 }
131 spin_unlock_bh(&qitem_lock);
132
133 kmem_cache_destroy(tipc_queue_item_cache);
134}
diff --git a/net/tipc/ib_media.c b/net/tipc/ib_media.c
index 844a77e25828..8522eef9c136 100644
--- a/net/tipc/ib_media.c
+++ b/net/tipc/ib_media.c
@@ -42,7 +42,7 @@
42#include "core.h" 42#include "core.h"
43#include "bearer.h" 43#include "bearer.h"
44 44
45/* convert InfiniBand address to string */ 45/* Convert InfiniBand address (media address format) to string */
46static int tipc_ib_addr2str(struct tipc_media_addr *a, char *str_buf, 46static int tipc_ib_addr2str(struct tipc_media_addr *a, char *str_buf,
47 int str_size) 47 int str_size)
48{ 48{
@@ -54,23 +54,35 @@ static int tipc_ib_addr2str(struct tipc_media_addr *a, char *str_buf,
54 return 0; 54 return 0;
55} 55}
56 56
57/* convert InfiniBand address format to message header format */ 57/* Convert from media address format to discovery message addr format */
58static int tipc_ib_addr2msg(struct tipc_media_addr *a, char *msg_area) 58static int tipc_ib_addr2msg(char *msg, struct tipc_media_addr *addr)
59{ 59{
60 memset(msg_area, 0, TIPC_MEDIA_ADDR_SIZE); 60 memset(msg, 0, TIPC_MEDIA_ADDR_SIZE);
61 msg_area[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_IB; 61 memcpy(msg, addr->value, INFINIBAND_ALEN);
62 memcpy(msg_area, a->value, INFINIBAND_ALEN);
63 return 0; 62 return 0;
64} 63}
65 64
66/* convert message header address format to InfiniBand format */ 65/* Convert raw InfiniBand address format to media addr format */
67static int tipc_ib_msg2addr(const struct tipc_bearer *tb_ptr, 66static int tipc_ib_raw2addr(struct tipc_bearer *b,
68 struct tipc_media_addr *a, char *msg_area) 67 struct tipc_media_addr *addr,
68 char *msg)
69{ 69{
70 tipc_l2_media_addr_set(tb_ptr, a, msg_area); 70 memset(addr, 0, sizeof(*addr));
71 memcpy(addr->value, msg, INFINIBAND_ALEN);
72 addr->media_id = TIPC_MEDIA_TYPE_IB;
73 addr->broadcast = !memcmp(msg, b->bcast_addr.value,
74 INFINIBAND_ALEN);
71 return 0; 75 return 0;
72} 76}
73 77
78/* Convert discovery msg addr format to InfiniBand media addr format */
79static int tipc_ib_msg2addr(struct tipc_bearer *b,
80 struct tipc_media_addr *addr,
81 char *msg)
82{
83 return tipc_ib_raw2addr(b, addr, msg);
84}
85
74/* InfiniBand media registration info */ 86/* InfiniBand media registration info */
75struct tipc_media ib_media_info = { 87struct tipc_media ib_media_info = {
76 .send_msg = tipc_l2_send_msg, 88 .send_msg = tipc_l2_send_msg,
@@ -79,6 +91,7 @@ struct tipc_media ib_media_info = {
79 .addr2str = tipc_ib_addr2str, 91 .addr2str = tipc_ib_addr2str,
80 .addr2msg = tipc_ib_addr2msg, 92 .addr2msg = tipc_ib_addr2msg,
81 .msg2addr = tipc_ib_msg2addr, 93 .msg2addr = tipc_ib_msg2addr,
94 .raw2addr = tipc_ib_raw2addr,
82 .priority = TIPC_DEF_LINK_PRI, 95 .priority = TIPC_DEF_LINK_PRI,
83 .tolerance = TIPC_DEF_LINK_TOL, 96 .tolerance = TIPC_DEF_LINK_TOL,
84 .window = TIPC_DEF_LINK_WIN, 97 .window = TIPC_DEF_LINK_WIN,
@@ -86,4 +99,3 @@ struct tipc_media ib_media_info = {
86 .hwaddr_len = INFINIBAND_ALEN, 99 .hwaddr_len = INFINIBAND_ALEN,
87 .name = "ib" 100 .name = "ib"
88}; 101};
89
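For context, a hedged sketch of how the discovery receive path is assumed to consume the msg2addr hook above; the call site itself lives in disc.c, outside this diff, and the surrounding names are assumptions:

	/* Assumed caller in tipc_disc_rcv(): convert the on-wire address
	 * from the discovery message into media address format before
	 * creating or updating a link towards the peer.
	 */
	struct tipc_media_addr maddr;

	b_ptr->media->msg2addr(b_ptr, &maddr, msg_media_addr(msg));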
diff --git a/net/tipc/link.c b/net/tipc/link.c
index c5190ab75290..ad2c57f5868d 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -37,6 +37,7 @@
37#include "core.h" 37#include "core.h"
38#include "link.h" 38#include "link.h"
39#include "port.h" 39#include "port.h"
40#include "socket.h"
40#include "name_distr.h" 41#include "name_distr.h"
41#include "discover.h" 42#include "discover.h"
42#include "config.h" 43#include "config.h"
@@ -101,9 +102,18 @@ static unsigned int align(unsigned int i)
101 102
102static void link_init_max_pkt(struct tipc_link *l_ptr) 103static void link_init_max_pkt(struct tipc_link *l_ptr)
103{ 104{
105 struct tipc_bearer *b_ptr;
104 u32 max_pkt; 106 u32 max_pkt;
105 107
106 max_pkt = (l_ptr->b_ptr->mtu & ~3); 108 rcu_read_lock();
109 b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
110 if (!b_ptr) {
111 rcu_read_unlock();
112 return;
113 }
114 max_pkt = (b_ptr->mtu & ~3);
115 rcu_read_unlock();
116
107 if (max_pkt > MAX_MSG_SIZE) 117 if (max_pkt > MAX_MSG_SIZE)
108 max_pkt = MAX_MSG_SIZE; 118 max_pkt = MAX_MSG_SIZE;
109 119
@@ -248,7 +258,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
248 l_ptr->owner = n_ptr; 258 l_ptr->owner = n_ptr;
249 l_ptr->checkpoint = 1; 259 l_ptr->checkpoint = 1;
250 l_ptr->peer_session = INVALID_SESSION; 260 l_ptr->peer_session = INVALID_SESSION;
251 l_ptr->b_ptr = b_ptr; 261 l_ptr->bearer_id = b_ptr->identity;
252 link_set_supervision_props(l_ptr, b_ptr->tolerance); 262 link_set_supervision_props(l_ptr, b_ptr->tolerance);
253 l_ptr->state = RESET_UNKNOWN; 263 l_ptr->state = RESET_UNKNOWN;
254 264
@@ -263,6 +273,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
263 l_ptr->priority = b_ptr->priority; 273 l_ptr->priority = b_ptr->priority;
264 tipc_link_set_queue_limits(l_ptr, b_ptr->window); 274 tipc_link_set_queue_limits(l_ptr, b_ptr->window);
265 275
276 l_ptr->net_plane = b_ptr->net_plane;
266 link_init_max_pkt(l_ptr); 277 link_init_max_pkt(l_ptr);
267 278
268 l_ptr->next_out_no = 1; 279 l_ptr->next_out_no = 1;
@@ -287,14 +298,14 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
287 298
288 rcu_read_lock(); 299 rcu_read_lock();
289 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { 300 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
290 spin_lock_bh(&n_ptr->lock); 301 tipc_node_lock(n_ptr);
291 l_ptr = n_ptr->links[bearer_id]; 302 l_ptr = n_ptr->links[bearer_id];
292 if (l_ptr) { 303 if (l_ptr) {
293 tipc_link_reset(l_ptr); 304 tipc_link_reset(l_ptr);
294 if (shutting_down || !tipc_node_is_up(n_ptr)) { 305 if (shutting_down || !tipc_node_is_up(n_ptr)) {
295 tipc_node_detach_link(l_ptr->owner, l_ptr); 306 tipc_node_detach_link(l_ptr->owner, l_ptr);
296 tipc_link_reset_fragments(l_ptr); 307 tipc_link_reset_fragments(l_ptr);
297 spin_unlock_bh(&n_ptr->lock); 308 tipc_node_unlock(n_ptr);
298 309
299 /* Nobody else can access this link now: */ 310 /* Nobody else can access this link now: */
300 del_timer_sync(&l_ptr->timer); 311 del_timer_sync(&l_ptr->timer);
@@ -302,12 +313,12 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
302 } else { 313 } else {
303 /* Detach/delete when failover is finished: */ 314 /* Detach/delete when failover is finished: */
304 l_ptr->flags |= LINK_STOPPED; 315 l_ptr->flags |= LINK_STOPPED;
305 spin_unlock_bh(&n_ptr->lock); 316 tipc_node_unlock(n_ptr);
306 del_timer_sync(&l_ptr->timer); 317 del_timer_sync(&l_ptr->timer);
307 } 318 }
308 continue; 319 continue;
309 } 320 }
310 spin_unlock_bh(&n_ptr->lock); 321 tipc_node_unlock(n_ptr);
311 } 322 }
312 rcu_read_unlock(); 323 rcu_read_unlock();
313} 324}
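The raw spin_lock_bh(&n_ptr->lock) calls are replaced by tipc_node_lock()/tipc_node_unlock() throughout this patch. A minimal sketch of the wrapper pair as assumed here; the unlock side is what later gains the ability to run work flagged in node->action_flags once the lock is dropped:

	/* Assumed node lock wrappers (node.h / node.c): */
	static inline void tipc_node_lock(struct tipc_node *node)
	{
		spin_lock_bh(&node->lock);
	}

	void tipc_node_unlock(struct tipc_node *node)
	{
		/* Deferred actions recorded in node->action_flags while
		 * the lock was held can be executed here, outside the
		 * critical section.
		 */
		spin_unlock_bh(&node->lock);
	}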
@@ -388,9 +399,8 @@ static void link_release_outqueue(struct tipc_link *l_ptr)
388 */ 399 */
389void tipc_link_reset_fragments(struct tipc_link *l_ptr) 400void tipc_link_reset_fragments(struct tipc_link *l_ptr)
390{ 401{
391 kfree_skb(l_ptr->reasm_head); 402 kfree_skb(l_ptr->reasm_buf);
392 l_ptr->reasm_head = NULL; 403 l_ptr->reasm_buf = NULL;
393 l_ptr->reasm_tail = NULL;
394} 404}
395 405
396/** 406/**
@@ -426,7 +436,7 @@ void tipc_link_reset(struct tipc_link *l_ptr)
426 return; 436 return;
427 437
428 tipc_node_link_down(l_ptr->owner, l_ptr); 438 tipc_node_link_down(l_ptr->owner, l_ptr);
429 tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr); 439 tipc_bearer_remove_dest(l_ptr->bearer_id, l_ptr->addr);
430 440
431 if (was_active_link && tipc_node_active_links(l_ptr->owner)) { 441 if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
432 l_ptr->reset_checkpoint = checkpoint; 442 l_ptr->reset_checkpoint = checkpoint;
@@ -464,11 +474,11 @@ void tipc_link_reset_list(unsigned int bearer_id)
464 474
465 rcu_read_lock(); 475 rcu_read_lock();
466 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { 476 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
467 spin_lock_bh(&n_ptr->lock); 477 tipc_node_lock(n_ptr);
468 l_ptr = n_ptr->links[bearer_id]; 478 l_ptr = n_ptr->links[bearer_id];
469 if (l_ptr) 479 if (l_ptr)
470 tipc_link_reset(l_ptr); 480 tipc_link_reset(l_ptr);
471 spin_unlock_bh(&n_ptr->lock); 481 tipc_node_unlock(n_ptr);
472 } 482 }
473 rcu_read_unlock(); 483 rcu_read_unlock();
474} 484}
@@ -477,7 +487,7 @@ static void link_activate(struct tipc_link *l_ptr)
477{ 487{
478 l_ptr->next_in_no = l_ptr->stats.recv_info = 1; 488 l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
479 tipc_node_link_up(l_ptr->owner, l_ptr); 489 tipc_node_link_up(l_ptr->owner, l_ptr);
480 tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr); 490 tipc_bearer_add_dest(l_ptr->bearer_id, l_ptr->addr);
481} 491}
482 492
483/** 493/**
@@ -777,7 +787,7 @@ int __tipc_link_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
777 if (likely(!link_congested(l_ptr))) { 787 if (likely(!link_congested(l_ptr))) {
778 link_add_to_outqueue(l_ptr, buf, msg); 788 link_add_to_outqueue(l_ptr, buf, msg);
779 789
780 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 790 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
781 l_ptr->unacked_window = 0; 791 l_ptr->unacked_window = 0;
782 return dsz; 792 return dsz;
783 } 793 }
@@ -825,7 +835,6 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector)
825 struct tipc_node *n_ptr; 835 struct tipc_node *n_ptr;
826 int res = -ELINKCONG; 836 int res = -ELINKCONG;
827 837
828 read_lock_bh(&tipc_net_lock);
829 n_ptr = tipc_node_find(dest); 838 n_ptr = tipc_node_find(dest);
830 if (n_ptr) { 839 if (n_ptr) {
831 tipc_node_lock(n_ptr); 840 tipc_node_lock(n_ptr);
@@ -838,7 +847,6 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector)
838 } else { 847 } else {
839 kfree_skb(buf); 848 kfree_skb(buf);
840 } 849 }
841 read_unlock_bh(&tipc_net_lock);
842 return res; 850 return res;
843} 851}
844 852
@@ -902,7 +910,6 @@ void tipc_link_names_xmit(struct list_head *message_list, u32 dest)
902 if (list_empty(message_list)) 910 if (list_empty(message_list))
903 return; 911 return;
904 912
905 read_lock_bh(&tipc_net_lock);
906 n_ptr = tipc_node_find(dest); 913 n_ptr = tipc_node_find(dest);
907 if (n_ptr) { 914 if (n_ptr) {
908 tipc_node_lock(n_ptr); 915 tipc_node_lock(n_ptr);
@@ -917,7 +924,6 @@ void tipc_link_names_xmit(struct list_head *message_list, u32 dest)
917 } 924 }
918 tipc_node_unlock(n_ptr); 925 tipc_node_unlock(n_ptr);
919 } 926 }
920 read_unlock_bh(&tipc_net_lock);
921 927
922 /* discard the messages if they couldn't be sent */ 928 /* discard the messages if they couldn't be sent */
923 list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) { 929 list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
@@ -941,7 +947,7 @@ static int tipc_link_xmit_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
941 if (likely(!link_congested(l_ptr))) { 947 if (likely(!link_congested(l_ptr))) {
942 if (likely(msg_size(msg) <= l_ptr->max_pkt)) { 948 if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
943 link_add_to_outqueue(l_ptr, buf, msg); 949 link_add_to_outqueue(l_ptr, buf, msg);
944 tipc_bearer_send(l_ptr->b_ptr, buf, 950 tipc_bearer_send(l_ptr->bearer_id, buf,
945 &l_ptr->media_addr); 951 &l_ptr->media_addr);
946 l_ptr->unacked_window = 0; 952 l_ptr->unacked_window = 0;
947 return res; 953 return res;
@@ -979,7 +985,6 @@ again:
979 if (unlikely(res < 0)) 985 if (unlikely(res < 0))
980 return res; 986 return res;
981 987
982 read_lock_bh(&tipc_net_lock);
983 node = tipc_node_find(destaddr); 988 node = tipc_node_find(destaddr);
984 if (likely(node)) { 989 if (likely(node)) {
985 tipc_node_lock(node); 990 tipc_node_lock(node);
@@ -990,7 +995,6 @@ again:
990 &sender->max_pkt); 995 &sender->max_pkt);
991exit: 996exit:
992 tipc_node_unlock(node); 997 tipc_node_unlock(node);
993 read_unlock_bh(&tipc_net_lock);
994 return res; 998 return res;
995 } 999 }
996 1000
@@ -1007,7 +1011,6 @@ exit:
1007 */ 1011 */
1008 sender->max_pkt = l_ptr->max_pkt; 1012 sender->max_pkt = l_ptr->max_pkt;
1009 tipc_node_unlock(node); 1013 tipc_node_unlock(node);
1010 read_unlock_bh(&tipc_net_lock);
1011 1014
1012 1015
1013 if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt) 1016 if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
@@ -1018,7 +1021,6 @@ exit:
1018 } 1021 }
1019 tipc_node_unlock(node); 1022 tipc_node_unlock(node);
1020 } 1023 }
1021 read_unlock_bh(&tipc_net_lock);
1022 1024
1023 /* Couldn't find a link to the destination node */ 1025 /* Couldn't find a link to the destination node */
1024 kfree_skb(buf); 1026 kfree_skb(buf);
@@ -1204,7 +1206,7 @@ static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1204 if (r_q_size && buf) { 1206 if (r_q_size && buf) {
1205 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); 1207 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1206 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 1208 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1207 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1209 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
1208 l_ptr->retransm_queue_head = mod(++r_q_head); 1210 l_ptr->retransm_queue_head = mod(++r_q_head);
1209 l_ptr->retransm_queue_size = --r_q_size; 1211 l_ptr->retransm_queue_size = --r_q_size;
1210 l_ptr->stats.retransmitted++; 1212 l_ptr->stats.retransmitted++;
@@ -1216,7 +1218,7 @@ static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1216 if (buf) { 1218 if (buf) {
1217 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); 1219 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1218 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 1220 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1219 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1221 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
1220 l_ptr->unacked_window = 0; 1222 l_ptr->unacked_window = 0;
1221 kfree_skb(buf); 1223 kfree_skb(buf);
1222 l_ptr->proto_msg_queue = NULL; 1224 l_ptr->proto_msg_queue = NULL;
@@ -1233,7 +1235,8 @@ static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1233 if (mod(next - first) < l_ptr->queue_limit[0]) { 1235 if (mod(next - first) < l_ptr->queue_limit[0]) {
1234 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1236 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1235 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1237 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1236 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1238 tipc_bearer_send(l_ptr->bearer_id, buf,
1239 &l_ptr->media_addr);
1237 if (msg_user(msg) == MSG_BUNDLER) 1240 if (msg_user(msg) == MSG_BUNDLER)
1238 msg_set_type(msg, CLOSED_MSG); 1241 msg_set_type(msg, CLOSED_MSG);
1239 l_ptr->next_out = buf->next; 1242 l_ptr->next_out = buf->next;
@@ -1256,33 +1259,24 @@ void tipc_link_push_queue(struct tipc_link *l_ptr)
1256 } while (!res); 1259 } while (!res);
1257} 1260}
1258 1261
1259static void link_reset_all(unsigned long addr) 1262void tipc_link_reset_all(struct tipc_node *node)
1260{ 1263{
1261 struct tipc_node *n_ptr;
1262 char addr_string[16]; 1264 char addr_string[16];
1263 u32 i; 1265 u32 i;
1264 1266
1265 read_lock_bh(&tipc_net_lock); 1267 tipc_node_lock(node);
1266 n_ptr = tipc_node_find((u32)addr);
1267 if (!n_ptr) {
1268 read_unlock_bh(&tipc_net_lock);
1269 return; /* node no longer exists */
1270 }
1271
1272 tipc_node_lock(n_ptr);
1273 1268
1274 pr_warn("Resetting all links to %s\n", 1269 pr_warn("Resetting all links to %s\n",
1275 tipc_addr_string_fill(addr_string, n_ptr->addr)); 1270 tipc_addr_string_fill(addr_string, node->addr));
1276 1271
1277 for (i = 0; i < MAX_BEARERS; i++) { 1272 for (i = 0; i < MAX_BEARERS; i++) {
1278 if (n_ptr->links[i]) { 1273 if (node->links[i]) {
1279 link_print(n_ptr->links[i], "Resetting link\n"); 1274 link_print(node->links[i], "Resetting link\n");
1280 tipc_link_reset(n_ptr->links[i]); 1275 tipc_link_reset(node->links[i]);
1281 } 1276 }
1282 } 1277 }
1283 1278
1284 tipc_node_unlock(n_ptr); 1279 tipc_node_unlock(node);
1285 read_unlock_bh(&tipc_net_lock);
1286} 1280}
1287 1281
1288static void link_retransmit_failure(struct tipc_link *l_ptr, 1282static void link_retransmit_failure(struct tipc_link *l_ptr,
@@ -1319,10 +1313,9 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
1319 n_ptr->bclink.oos_state, 1313 n_ptr->bclink.oos_state,
1320 n_ptr->bclink.last_sent); 1314 n_ptr->bclink.last_sent);
1321 1315
1322 tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
1323
1324 tipc_node_unlock(n_ptr); 1316 tipc_node_unlock(n_ptr);
1325 1317
1318 tipc_bclink_set_flags(TIPC_BCLINK_RESET);
1326 l_ptr->stale_count = 0; 1319 l_ptr->stale_count = 0;
1327 } 1320 }
1328} 1321}
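With net/tipc/handler.c deleted, link_retransmit_failure() no longer defers the reset through tipc_k_signal(); it merely marks the broadcast link with TIPC_BCLINK_RESET, and the reset is carried out synchronously by the new tipc_link_reset_all() from a context that may take the node lock. A hedged sketch of the assumed consumer of that flag (the exact check lives in bcast.c, outside this diff):

	/* Assumed flag consumer: run the reset once it is safe to lock
	 * the node, replacing the old tasklet-based deferral.
	 */
	if (bclink->flags & TIPC_BCLINK_RESET) {
		bclink->flags &= ~TIPC_BCLINK_RESET;
		tipc_link_reset_all(node);
	}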
@@ -1352,7 +1345,7 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
1352 msg = buf_msg(buf); 1345 msg = buf_msg(buf);
1353 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1346 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1354 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1347 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1355 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1348 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
1356 buf = buf->next; 1349 buf = buf->next;
1357 retransmits--; 1350 retransmits--;
1358 l_ptr->stats.retransmitted++; 1351 l_ptr->stats.retransmitted++;
@@ -1440,14 +1433,13 @@ static int link_recv_buf_validate(struct sk_buff *buf)
1440/** 1433/**
1441 * tipc_rcv - process TIPC packets/messages arriving from off-node 1434 * tipc_rcv - process TIPC packets/messages arriving from off-node
1442 * @head: pointer to message buffer chain 1435 * @head: pointer to message buffer chain
1443 * @tb_ptr: pointer to bearer message arrived on 1436 * @b_ptr: pointer to bearer message arrived on
1444 * 1437 *
1445 * Invoked with no locks held. Bearer pointer must point to a valid bearer 1438 * Invoked with no locks held. Bearer pointer must point to a valid bearer
1446 * structure (i.e. cannot be NULL), but bearer can be inactive. 1439 * structure (i.e. cannot be NULL), but bearer can be inactive.
1447 */ 1440 */
1448void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr) 1441void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
1449{ 1442{
1450 read_lock_bh(&tipc_net_lock);
1451 while (head) { 1443 while (head) {
1452 struct tipc_node *n_ptr; 1444 struct tipc_node *n_ptr;
1453 struct tipc_link *l_ptr; 1445 struct tipc_link *l_ptr;
@@ -1497,14 +1489,14 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
1497 goto unlock_discard; 1489 goto unlock_discard;
1498 1490
1499 /* Verify that communication with node is currently allowed */ 1491 /* Verify that communication with node is currently allowed */
1500 if ((n_ptr->block_setup & WAIT_PEER_DOWN) && 1492 if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
1501 msg_user(msg) == LINK_PROTOCOL && 1493 msg_user(msg) == LINK_PROTOCOL &&
1502 (msg_type(msg) == RESET_MSG || 1494 (msg_type(msg) == RESET_MSG ||
1503 msg_type(msg) == ACTIVATE_MSG) && 1495 msg_type(msg) == ACTIVATE_MSG) &&
1504 !msg_redundant_link(msg)) 1496 !msg_redundant_link(msg))
1505 n_ptr->block_setup &= ~WAIT_PEER_DOWN; 1497 n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;
1506 1498
1507 if (n_ptr->block_setup) 1499 if (tipc_node_blocked(n_ptr))
1508 goto unlock_discard; 1500 goto unlock_discard;
1509 1501
1510 /* Validate message sequence number info */ 1502 /* Validate message sequence number info */
@@ -1581,17 +1573,12 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
1581 } 1573 }
1582 msg = buf_msg(buf); 1574 msg = buf_msg(buf);
1583 } else if (msg_user(msg) == MSG_FRAGMENTER) { 1575 } else if (msg_user(msg) == MSG_FRAGMENTER) {
1584 int rc;
1585
1586 l_ptr->stats.recv_fragments++; 1576 l_ptr->stats.recv_fragments++;
1587 rc = tipc_link_frag_rcv(&l_ptr->reasm_head, 1577 if (tipc_buf_append(&l_ptr->reasm_buf, &buf)) {
1588 &l_ptr->reasm_tail,
1589 &buf);
1590 if (rc == LINK_REASM_COMPLETE) {
1591 l_ptr->stats.recv_fragmented++; 1578 l_ptr->stats.recv_fragmented++;
1592 msg = buf_msg(buf); 1579 msg = buf_msg(buf);
1593 } else { 1580 } else {
1594 if (rc == LINK_REASM_ERROR) 1581 if (!l_ptr->reasm_buf)
1595 tipc_link_reset(l_ptr); 1582 tipc_link_reset(l_ptr);
1596 tipc_node_unlock(n_ptr); 1583 tipc_node_unlock(n_ptr);
1597 continue; 1584 continue;
@@ -1604,7 +1591,7 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
1604 case TIPC_HIGH_IMPORTANCE: 1591 case TIPC_HIGH_IMPORTANCE:
1605 case TIPC_CRITICAL_IMPORTANCE: 1592 case TIPC_CRITICAL_IMPORTANCE:
1606 tipc_node_unlock(n_ptr); 1593 tipc_node_unlock(n_ptr);
1607 tipc_port_rcv(buf); 1594 tipc_sk_rcv(buf);
1608 continue; 1595 continue;
1609 case MSG_BUNDLER: 1596 case MSG_BUNDLER:
1610 l_ptr->stats.recv_bundles++; 1597 l_ptr->stats.recv_bundles++;
@@ -1635,7 +1622,6 @@ unlock_discard:
1635discard: 1622discard:
1636 kfree_skb(buf); 1623 kfree_skb(buf);
1637 } 1624 }
1638 read_unlock_bh(&tipc_net_lock);
1639} 1625}
1640 1626
1641/** 1627/**
@@ -1747,12 +1733,12 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
1747 return; 1733 return;
1748 1734
1749 /* Abort non-RESET send if communication with node is prohibited */ 1735 /* Abort non-RESET send if communication with node is prohibited */
1750 if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG)) 1736 if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
1751 return; 1737 return;
1752 1738
1753 /* Create protocol message with "out-of-sequence" sequence number */ 1739 /* Create protocol message with "out-of-sequence" sequence number */
1754 msg_set_type(msg, msg_typ); 1740 msg_set_type(msg, msg_typ);
1755 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane); 1741 msg_set_net_plane(msg, l_ptr->net_plane);
1756 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1742 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1757 msg_set_last_bcast(msg, tipc_bclink_get_last_sent()); 1743 msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
1758 1744
@@ -1818,7 +1804,7 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
1818 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg)); 1804 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
1819 buf->priority = TC_PRIO_CONTROL; 1805 buf->priority = TC_PRIO_CONTROL;
1820 1806
1821 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1807 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
1822 l_ptr->unacked_window = 0; 1808 l_ptr->unacked_window = 0;
1823 kfree_skb(buf); 1809 kfree_skb(buf);
1824} 1810}
@@ -1840,12 +1826,9 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
1840 if (l_ptr->exp_msg_count) 1826 if (l_ptr->exp_msg_count)
1841 goto exit; 1827 goto exit;
1842 1828
1843 /* record unnumbered packet arrival (force mismatch on next timeout) */ 1829 if (l_ptr->net_plane != msg_net_plane(msg))
1844 l_ptr->checkpoint--;
1845
1846 if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
1847 if (tipc_own_addr > msg_prevnode(msg)) 1830 if (tipc_own_addr > msg_prevnode(msg))
1848 l_ptr->b_ptr->net_plane = msg_net_plane(msg); 1831 l_ptr->net_plane = msg_net_plane(msg);
1849 1832
1850 switch (msg_type(msg)) { 1833 switch (msg_type(msg)) {
1851 1834
@@ -1862,7 +1845,7 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
1862 * peer has lost contact -- don't allow peer's links 1845 * peer has lost contact -- don't allow peer's links
1863 * to reactivate before we recognize loss & clean up 1846 * to reactivate before we recognize loss & clean up
1864 */ 1847 */
1865 l_ptr->owner->block_setup = WAIT_NODE_DOWN; 1848 l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
1866 } 1849 }
1867 1850
1868 link_state_event(l_ptr, RESET_MSG); 1851 link_state_event(l_ptr, RESET_MSG);
@@ -1918,6 +1901,10 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
1918 tipc_link_reset(l_ptr); /* Enforce change to take effect */ 1901 tipc_link_reset(l_ptr); /* Enforce change to take effect */
1919 break; 1902 break;
1920 } 1903 }
1904
1905 /* Record reception; force mismatch at next timeout: */
1906 l_ptr->checkpoint--;
1907
1921 link_state_event(l_ptr, TRAFFIC_MSG_EVT); 1908 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1922 l_ptr->stats.recv_states++; 1909 l_ptr->stats.recv_states++;
1923 if (link_reset_unknown(l_ptr)) 1910 if (link_reset_unknown(l_ptr))
@@ -2177,9 +2164,7 @@ static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
2177 } 2164 }
2178 if (msg_user(msg) == MSG_FRAGMENTER) { 2165 if (msg_user(msg) == MSG_FRAGMENTER) {
2179 l_ptr->stats.recv_fragments++; 2166 l_ptr->stats.recv_fragments++;
2180 tipc_link_frag_rcv(&l_ptr->reasm_head, 2167 tipc_buf_append(&l_ptr->reasm_buf, &buf);
2181 &l_ptr->reasm_tail,
2182 &buf);
2183 } 2168 }
2184 } 2169 }
2185exit: 2170exit:
@@ -2317,53 +2302,6 @@ static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
2317 return dsz; 2302 return dsz;
2318} 2303}
2319 2304
2320/* tipc_link_frag_rcv(): Called with node lock on. Returns
2321 * the reassembled buffer if message is complete.
2322 */
2323int tipc_link_frag_rcv(struct sk_buff **head, struct sk_buff **tail,
2324 struct sk_buff **fbuf)
2325{
2326 struct sk_buff *frag = *fbuf;
2327 struct tipc_msg *msg = buf_msg(frag);
2328 u32 fragid = msg_type(msg);
2329 bool headstolen;
2330 int delta;
2331
2332 skb_pull(frag, msg_hdr_sz(msg));
2333 if (fragid == FIRST_FRAGMENT) {
2334 if (*head || skb_unclone(frag, GFP_ATOMIC))
2335 goto out_free;
2336 *head = frag;
2337 skb_frag_list_init(*head);
2338 *fbuf = NULL;
2339 return 0;
2340 } else if (*head &&
2341 skb_try_coalesce(*head, frag, &headstolen, &delta)) {
2342 kfree_skb_partial(frag, headstolen);
2343 } else {
2344 if (!*head)
2345 goto out_free;
2346 if (!skb_has_frag_list(*head))
2347 skb_shinfo(*head)->frag_list = frag;
2348 else
2349 (*tail)->next = frag;
2350 *tail = frag;
2351 (*head)->truesize += frag->truesize;
2352 }
2353 if (fragid == LAST_FRAGMENT) {
2354 *fbuf = *head;
2355 *tail = *head = NULL;
2356 return LINK_REASM_COMPLETE;
2357 }
2358 *fbuf = NULL;
2359 return 0;
2360out_free:
2361 pr_warn_ratelimited("Link unable to reassemble fragmented message\n");
2362 kfree_skb(*fbuf);
2363 *fbuf = NULL;
2364 return LINK_REASM_ERROR;
2365}
2366
2367static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance) 2305static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
2368{ 2306{
2369 if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL)) 2307 if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
@@ -2397,8 +2335,6 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
2397/* tipc_link_find_owner - locate owner node of link by link's name 2335/* tipc_link_find_owner - locate owner node of link by link's name
2398 * @name: pointer to link name string 2336 * @name: pointer to link name string
2399 * @bearer_id: pointer to index in 'node->links' array where the link was found. 2337 * @bearer_id: pointer to index in 'node->links' array where the link was found.
2400 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
2401 * this also prevents link deletion.
2402 * 2338 *
2403 * Returns pointer to node owning the link, or 0 if no matching link is found. 2339 * Returns pointer to node owning the link, or 0 if no matching link is found.
2404 */ 2340 */
@@ -2460,7 +2396,7 @@ static int link_value_is_valid(u16 cmd, u32 new_value)
2460 * @new_value: new value of link, bearer, or media setting 2396 * @new_value: new value of link, bearer, or media setting
2461 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*) 2397 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
2462 * 2398 *
2463 * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted. 2399 * Caller must hold RTNL lock to ensure link/bearer/media is not deleted.
2464 * 2400 *
2465 * Returns 0 if value updated and negative value on error. 2401 * Returns 0 if value updated and negative value on error.
2466 */ 2402 */
@@ -2566,9 +2502,7 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
2566 " (cannot change setting on broadcast link)"); 2502 " (cannot change setting on broadcast link)");
2567 } 2503 }
2568 2504
2569 read_lock_bh(&tipc_net_lock);
2570 res = link_cmd_set_value(args->name, new_value, cmd); 2505 res = link_cmd_set_value(args->name, new_value, cmd);
2571 read_unlock_bh(&tipc_net_lock);
2572 if (res) 2506 if (res)
2573 return tipc_cfg_reply_error_string("cannot change link setting"); 2507 return tipc_cfg_reply_error_string("cannot change link setting");
2574 2508
@@ -2602,22 +2536,18 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_
2602 return tipc_cfg_reply_error_string("link not found"); 2536 return tipc_cfg_reply_error_string("link not found");
2603 return tipc_cfg_reply_none(); 2537 return tipc_cfg_reply_none();
2604 } 2538 }
2605 read_lock_bh(&tipc_net_lock);
2606 node = tipc_link_find_owner(link_name, &bearer_id); 2539 node = tipc_link_find_owner(link_name, &bearer_id);
2607 if (!node) { 2540 if (!node)
2608 read_unlock_bh(&tipc_net_lock);
2609 return tipc_cfg_reply_error_string("link not found"); 2541 return tipc_cfg_reply_error_string("link not found");
2610 } 2542
2611 tipc_node_lock(node); 2543 tipc_node_lock(node);
2612 l_ptr = node->links[bearer_id]; 2544 l_ptr = node->links[bearer_id];
2613 if (!l_ptr) { 2545 if (!l_ptr) {
2614 tipc_node_unlock(node); 2546 tipc_node_unlock(node);
2615 read_unlock_bh(&tipc_net_lock);
2616 return tipc_cfg_reply_error_string("link not found"); 2547 return tipc_cfg_reply_error_string("link not found");
2617 } 2548 }
2618 link_reset_statistics(l_ptr); 2549 link_reset_statistics(l_ptr);
2619 tipc_node_unlock(node); 2550 tipc_node_unlock(node);
2620 read_unlock_bh(&tipc_net_lock);
2621 return tipc_cfg_reply_none(); 2551 return tipc_cfg_reply_none();
2622} 2552}
2623 2553
@@ -2650,18 +2580,15 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
2650 if (!strcmp(name, tipc_bclink_name)) 2580 if (!strcmp(name, tipc_bclink_name))
2651 return tipc_bclink_stats(buf, buf_size); 2581 return tipc_bclink_stats(buf, buf_size);
2652 2582
2653 read_lock_bh(&tipc_net_lock);
2654 node = tipc_link_find_owner(name, &bearer_id); 2583 node = tipc_link_find_owner(name, &bearer_id);
2655 if (!node) { 2584 if (!node)
2656 read_unlock_bh(&tipc_net_lock);
2657 return 0; 2585 return 0;
2658 } 2586
2659 tipc_node_lock(node); 2587 tipc_node_lock(node);
2660 2588
2661 l = node->links[bearer_id]; 2589 l = node->links[bearer_id];
2662 if (!l) { 2590 if (!l) {
2663 tipc_node_unlock(node); 2591 tipc_node_unlock(node);
2664 read_unlock_bh(&tipc_net_lock);
2665 return 0; 2592 return 0;
2666 } 2593 }
2667 2594
@@ -2727,7 +2654,6 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
2727 (s->accu_queue_sz / s->queue_sz_counts) : 0); 2654 (s->accu_queue_sz / s->queue_sz_counts) : 0);
2728 2655
2729 tipc_node_unlock(node); 2656 tipc_node_unlock(node);
2730 read_unlock_bh(&tipc_net_lock);
2731 return ret; 2657 return ret;
2732} 2658}
2733 2659
@@ -2778,7 +2704,6 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
2778 if (dest == tipc_own_addr) 2704 if (dest == tipc_own_addr)
2779 return MAX_MSG_SIZE; 2705 return MAX_MSG_SIZE;
2780 2706
2781 read_lock_bh(&tipc_net_lock);
2782 n_ptr = tipc_node_find(dest); 2707 n_ptr = tipc_node_find(dest);
2783 if (n_ptr) { 2708 if (n_ptr) {
2784 tipc_node_lock(n_ptr); 2709 tipc_node_lock(n_ptr);
@@ -2787,13 +2712,18 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
2787 res = l_ptr->max_pkt; 2712 res = l_ptr->max_pkt;
2788 tipc_node_unlock(n_ptr); 2713 tipc_node_unlock(n_ptr);
2789 } 2714 }
2790 read_unlock_bh(&tipc_net_lock);
2791 return res; 2715 return res;
2792} 2716}
2793 2717
2794static void link_print(struct tipc_link *l_ptr, const char *str) 2718static void link_print(struct tipc_link *l_ptr, const char *str)
2795{ 2719{
2796 pr_info("%s Link %x<%s>:", str, l_ptr->addr, l_ptr->b_ptr->name); 2720 struct tipc_bearer *b_ptr;
2721
2722 rcu_read_lock();
2723 b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
2724 if (b_ptr)
2725 pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
2726 rcu_read_unlock();
2797 2727
2798 if (link_working_unknown(l_ptr)) 2728 if (link_working_unknown(l_ptr))
2799 pr_cont(":WU\n"); 2729 pr_cont(":WU\n");
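Every tipc_bearer_send() call in this file now passes l_ptr->bearer_id instead of a cached struct tipc_bearer pointer. A sketch of the assumed bearer.c counterpart, which resolves the id through bearer_list[] under RCU so that a bearer can be torn down while links still hold its id:

	/* Assumed resolution in bearer.c: */
	void tipc_bearer_send(u32 bearer_id, struct sk_buff *buf,
			      struct tipc_media_addr *dest)
	{
		struct tipc_bearer *b_ptr;

		rcu_read_lock();
		b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
		if (likely(b_ptr))
			b_ptr->media->send_msg(buf, b_ptr, dest);
		rcu_read_unlock();
	}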
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 8c0b49b5b2ee..200d518b218e 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -40,11 +40,6 @@
40#include "msg.h" 40#include "msg.h"
41#include "node.h" 41#include "node.h"
42 42
43/* Link reassembly status codes
44 */
45#define LINK_REASM_ERROR -1
46#define LINK_REASM_COMPLETE 1
47
48/* Out-of-range value for link sequence numbers 43/* Out-of-range value for link sequence numbers
49 */ 44 */
50#define INVALID_LINK_SEQ 0x10000 45#define INVALID_LINK_SEQ 0x10000
@@ -107,7 +102,7 @@ struct tipc_stats {
107 * @checkpoint: reference point for triggering link continuity checking 102 * @checkpoint: reference point for triggering link continuity checking
108 * @peer_session: link session # being used by peer end of link 103 * @peer_session: link session # being used by peer end of link
109 * @peer_bearer_id: bearer id used by link's peer endpoint 104 * @peer_bearer_id: bearer id used by link's peer endpoint
110 * @b_ptr: pointer to bearer used by link 105 * @bearer_id: local bearer id used by link
111 * @tolerance: minimum link continuity loss needed to reset link [in ms] 106 * @tolerance: minimum link continuity loss needed to reset link [in ms]
112 * @continuity_interval: link continuity testing interval [in ms] 107 * @continuity_interval: link continuity testing interval [in ms]
113 * @abort_limit: # of unacknowledged continuity probes needed to reset link 108 * @abort_limit: # of unacknowledged continuity probes needed to reset link
@@ -116,6 +111,7 @@ struct tipc_stats {
116 * @proto_msg: template for control messages generated by link 111 * @proto_msg: template for control messages generated by link
117 * @pmsg: convenience pointer to "proto_msg" field 112 * @pmsg: convenience pointer to "proto_msg" field
118 * @priority: current link priority 113 * @priority: current link priority
114 * @net_plane: current link network plane ('A' through 'H')
119 * @queue_limit: outbound message queue congestion thresholds (indexed by user) 115 * @queue_limit: outbound message queue congestion thresholds (indexed by user)
120 * @exp_msg_count: # of tunnelled messages expected during link changeover 116 * @exp_msg_count: # of tunnelled messages expected during link changeover
121 * @reset_checkpoint: seq # of last acknowledged message at time of link reset 117 * @reset_checkpoint: seq # of last acknowledged message at time of link reset
@@ -139,8 +135,7 @@ struct tipc_stats {
139 * @next_out: ptr to first unsent outbound message in queue 135 * @next_out: ptr to first unsent outbound message in queue
140 * @waiting_ports: linked list of ports waiting for link congestion to abate 136 * @waiting_ports: linked list of ports waiting for link congestion to abate
141 * @long_msg_seq_no: next identifier to use for outbound fragmented messages 137 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
142 * @reasm_head: list head of partially reassembled inbound message fragments 138 * @reasm_buf: head of partially reassembled inbound message fragments
143 * @reasm_tail: last fragment received
144 * @stats: collects statistics regarding link activity 139 * @stats: collects statistics regarding link activity
145 */ 140 */
146struct tipc_link { 141struct tipc_link {
@@ -155,7 +150,7 @@ struct tipc_link {
155 u32 checkpoint; 150 u32 checkpoint;
156 u32 peer_session; 151 u32 peer_session;
157 u32 peer_bearer_id; 152 u32 peer_bearer_id;
158 struct tipc_bearer *b_ptr; 153 u32 bearer_id;
159 u32 tolerance; 154 u32 tolerance;
160 u32 continuity_interval; 155 u32 continuity_interval;
161 u32 abort_limit; 156 u32 abort_limit;
@@ -167,6 +162,7 @@ struct tipc_link {
167 } proto_msg; 162 } proto_msg;
168 struct tipc_msg *pmsg; 163 struct tipc_msg *pmsg;
169 u32 priority; 164 u32 priority;
165 char net_plane;
170 u32 queue_limit[15]; /* queue_limit[0]==window limit */ 166 u32 queue_limit[15]; /* queue_limit[0]==window limit */
171 167
172 /* Changeover */ 168 /* Changeover */
@@ -202,8 +198,7 @@ struct tipc_link {
202 198
203 /* Fragmentation/reassembly */ 199 /* Fragmentation/reassembly */
204 u32 long_msg_seq_no; 200 u32 long_msg_seq_no;
205 struct sk_buff *reasm_head; 201 struct sk_buff *reasm_buf;
206 struct sk_buff *reasm_tail;
207 202
208 /* Statistics */ 203 /* Statistics */
209 struct tipc_stats stats; 204 struct tipc_stats stats;
@@ -228,6 +223,7 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area,
228 int req_tlv_space); 223 int req_tlv_space);
229struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, 224struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area,
230 int req_tlv_space); 225 int req_tlv_space);
226void tipc_link_reset_all(struct tipc_node *node);
231void tipc_link_reset(struct tipc_link *l_ptr); 227void tipc_link_reset(struct tipc_link *l_ptr);
232void tipc_link_reset_list(unsigned int bearer_id); 228void tipc_link_reset_list(unsigned int bearer_id);
233int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector); 229int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector);
@@ -239,9 +235,6 @@ int tipc_link_iovec_xmit_fast(struct tipc_port *sender,
239 struct iovec const *msg_sect, 235 struct iovec const *msg_sect,
240 unsigned int len, u32 destnode); 236 unsigned int len, u32 destnode);
241void tipc_link_bundle_rcv(struct sk_buff *buf); 237void tipc_link_bundle_rcv(struct sk_buff *buf);
242int tipc_link_frag_rcv(struct sk_buff **reasm_head,
243 struct sk_buff **reasm_tail,
244 struct sk_buff **fbuf);
245void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob, 238void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
246 u32 gap, u32 tolerance, u32 priority, u32 acked_mtu); 239 u32 gap, u32 tolerance, u32 priority, u32 acked_mtu);
247void tipc_link_push_queue(struct tipc_link *l_ptr); 240void tipc_link_push_queue(struct tipc_link *l_ptr);
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index e525f8ce1dee..8be6e94a1ca9 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/msg.c: TIPC message header routines 2 * net/tipc/msg.c: TIPC message header routines
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, 2014, Ericsson AB
5 * Copyright (c) 2005, 2010-2011, Wind River Systems 5 * Copyright (c) 2005, 2010-2011, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -99,3 +99,56 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
99 } 99 }
100 return dsz; 100 return dsz;
101} 101}
102
103/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
104 * Lets the first buffer become the head buffer
105 * Returns 1 and sets *buf to headbuf if the chain is complete, otherwise 0
106 * Leaves the headbuf pointer at NULL on failure
107 */
108int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
109{
110 struct sk_buff *head = *headbuf;
111 struct sk_buff *frag = *buf;
112 struct sk_buff *tail;
113 struct tipc_msg *msg = buf_msg(frag);
114 u32 fragid = msg_type(msg);
115 bool headstolen;
116 int delta;
117
118 skb_pull(frag, msg_hdr_sz(msg));
119
120 if (fragid == FIRST_FRAGMENT) {
121 if (head || skb_unclone(frag, GFP_ATOMIC))
122 goto out_free;
123 head = *headbuf = frag;
124 skb_frag_list_init(head);
125 return 0;
126 }
127 if (!head)
128 goto out_free;
129 tail = TIPC_SKB_CB(head)->tail;
130 if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
131 kfree_skb_partial(frag, headstolen);
132 } else {
133 if (!skb_has_frag_list(head))
134 skb_shinfo(head)->frag_list = frag;
135 else
136 tail->next = frag;
137 head->truesize += frag->truesize;
138 head->data_len += frag->len;
139 head->len += frag->len;
140 TIPC_SKB_CB(head)->tail = frag;
141 }
142 if (fragid == LAST_FRAGMENT) {
143 *buf = head;
144 TIPC_SKB_CB(head)->tail = NULL;
145 *headbuf = NULL;
146 return 1;
147 }
148 *buf = NULL;
149 return 0;
150out_free:
151 pr_warn_ratelimited("Unable to build fragment list\n");
152 kfree_skb(*buf);
153 return 0;
154}
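A usage sketch of tipc_buf_append(), matching the receive path in link.c earlier in this patch; next_fragment() and deliver() are hypothetical stand-ins:

	/* Per-link reassembly of an inbound fragment chain: */
	struct sk_buff *reasm = NULL;	/* persists across fragments */
	struct sk_buff *frag;

	while ((frag = next_fragment()) != NULL) {
		if (tipc_buf_append(&reasm, &frag)) {
			deliver(frag);	/* frag is now the full message */
		} else if (!reasm) {
			/* Head buffer was dropped: reassembly failed,
			 * so the caller resets its receive state.
			 */
			break;
		}
	}

Note that on the out_free path the function frees *buf without clearing the caller's pointer, so callers are assumed never to touch *buf again after a 0 return with a NULL head.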
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 76d1269b9443..503511903d1d 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/msg.h: Include file for TIPC message header routines 2 * net/tipc/msg.h: Include file for TIPC message header routines
3 * 3 *
4 * Copyright (c) 2000-2007, Ericsson AB 4 * Copyright (c) 2000-2007, 2014, Ericsson AB
5 * Copyright (c) 2005-2008, 2010-2011, Wind River Systems 5 * Copyright (c) 2005-2008, 2010-2011, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -711,4 +711,7 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
711 u32 destnode); 711 u32 destnode);
712int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect, 712int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
713 unsigned int len, int max_size, struct sk_buff **buf); 713 unsigned int len, int max_size, struct sk_buff **buf);
714
715int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
716
714#endif 717#endif
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index aff8041dc157..8ce730984aa1 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -38,34 +38,6 @@
38#include "link.h" 38#include "link.h"
39#include "name_distr.h" 39#include "name_distr.h"
40 40
41#define ITEM_SIZE sizeof(struct distr_item)
42
43/**
44 * struct distr_item - publication info distributed to other nodes
45 * @type: name sequence type
46 * @lower: name sequence lower bound
47 * @upper: name sequence upper bound
48 * @ref: publishing port reference
49 * @key: publication key
50 *
51 * ===> All fields are stored in network byte order. <===
52 *
53 * First 3 fields identify (name or) name sequence being published.
54 * Reference field uniquely identifies port that published name sequence.
55 * Key field uniquely identifies publication, in the event a port has
56 * multiple publications of the same name sequence.
57 *
58 * Note: There is no field that identifies the publishing node because it is
59 * the same for all items contained within a publication message.
60 */
61struct distr_item {
62 __be32 type;
63 __be32 lower;
64 __be32 upper;
65 __be32 ref;
66 __be32 key;
67};
68
69/** 41/**
70 * struct publ_list - list of publications made by this node 42 * struct publ_list - list of publications made by this node
71 * @list: circular list of publications 43 * @list: circular list of publications
@@ -127,7 +99,7 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
127 return buf; 99 return buf;
128} 100}
129 101
130static void named_cluster_distribute(struct sk_buff *buf) 102void named_cluster_distribute(struct sk_buff *buf)
131{ 103{
132 struct sk_buff *buf_copy; 104 struct sk_buff *buf_copy;
133 struct tipc_node *n_ptr; 105 struct tipc_node *n_ptr;
@@ -135,18 +107,18 @@ static void named_cluster_distribute(struct sk_buff *buf)
135 107
136 rcu_read_lock(); 108 rcu_read_lock();
137 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { 109 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
138 spin_lock_bh(&n_ptr->lock); 110 tipc_node_lock(n_ptr);
139 l_ptr = n_ptr->active_links[n_ptr->addr & 1]; 111 l_ptr = n_ptr->active_links[n_ptr->addr & 1];
140 if (l_ptr) { 112 if (l_ptr) {
141 buf_copy = skb_copy(buf, GFP_ATOMIC); 113 buf_copy = skb_copy(buf, GFP_ATOMIC);
142 if (!buf_copy) { 114 if (!buf_copy) {
143 spin_unlock_bh(&n_ptr->lock); 115 tipc_node_unlock(n_ptr);
144 break; 116 break;
145 } 117 }
146 msg_set_destnode(buf_msg(buf_copy), n_ptr->addr); 118 msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
147 __tipc_link_xmit(l_ptr, buf_copy); 119 __tipc_link_xmit(l_ptr, buf_copy);
148 } 120 }
149 spin_unlock_bh(&n_ptr->lock); 121 tipc_node_unlock(n_ptr);
150 } 122 }
151 rcu_read_unlock(); 123 rcu_read_unlock();
152 124
@@ -156,7 +128,7 @@ static void named_cluster_distribute(struct sk_buff *buf)
156/** 128/**
157 * tipc_named_publish - tell other nodes about a new publication by this node 129 * tipc_named_publish - tell other nodes about a new publication by this node
158 */ 130 */
159void tipc_named_publish(struct publication *publ) 131struct sk_buff *tipc_named_publish(struct publication *publ)
160{ 132{
161 struct sk_buff *buf; 133 struct sk_buff *buf;
162 struct distr_item *item; 134 struct distr_item *item;
@@ -165,23 +137,23 @@ void tipc_named_publish(struct publication *publ)
165 publ_lists[publ->scope]->size++; 137 publ_lists[publ->scope]->size++;
166 138
167 if (publ->scope == TIPC_NODE_SCOPE) 139 if (publ->scope == TIPC_NODE_SCOPE)
168 return; 140 return NULL;
169 141
170 buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0); 142 buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
171 if (!buf) { 143 if (!buf) {
172 pr_warn("Publication distribution failure\n"); 144 pr_warn("Publication distribution failure\n");
173 return; 145 return NULL;
174 } 146 }
175 147
176 item = (struct distr_item *)msg_data(buf_msg(buf)); 148 item = (struct distr_item *)msg_data(buf_msg(buf));
177 publ_to_item(item, publ); 149 publ_to_item(item, publ);
178 named_cluster_distribute(buf); 150 return buf;
179} 151}
180 152
181/** 153/**
182 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node 154 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
183 */ 155 */
184void tipc_named_withdraw(struct publication *publ) 156struct sk_buff *tipc_named_withdraw(struct publication *publ)
185{ 157{
186 struct sk_buff *buf; 158 struct sk_buff *buf;
187 struct distr_item *item; 159 struct distr_item *item;
@@ -190,17 +162,17 @@ void tipc_named_withdraw(struct publication *publ)
190 publ_lists[publ->scope]->size--; 162 publ_lists[publ->scope]->size--;
191 163
192 if (publ->scope == TIPC_NODE_SCOPE) 164 if (publ->scope == TIPC_NODE_SCOPE)
193 return; 165 return NULL;
194 166
195 buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0); 167 buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
196 if (!buf) { 168 if (!buf) {
197 pr_warn("Withdrawal distribution failure\n"); 169 pr_warn("Withdrawal distribution failure\n");
198 return; 170 return NULL;
199 } 171 }
200 172
201 item = (struct distr_item *)msg_data(buf_msg(buf)); 173 item = (struct distr_item *)msg_data(buf_msg(buf));
202 publ_to_item(item, publ); 174 publ_to_item(item, publ);
203 named_cluster_distribute(buf); 175 return buf;
204} 176}
205 177
206/* 178/*
@@ -239,31 +211,9 @@ static void named_distribute(struct list_head *message_list, u32 node,
239/** 211/**
240 * tipc_named_node_up - tell specified node about all publications by this node 212 * tipc_named_node_up - tell specified node about all publications by this node
241 */ 213 */
242void tipc_named_node_up(unsigned long nodearg) 214void tipc_named_node_up(u32 max_item_buf, u32 node)
243{ 215{
244 struct tipc_node *n_ptr; 216 LIST_HEAD(message_list);
245 struct tipc_link *l_ptr;
246 struct list_head message_list;
247 u32 node = (u32)nodearg;
248 u32 max_item_buf = 0;
249
250 /* compute maximum amount of publication data to send per message */
251 read_lock_bh(&tipc_net_lock);
252 n_ptr = tipc_node_find(node);
253 if (n_ptr) {
254 tipc_node_lock(n_ptr);
255 l_ptr = n_ptr->active_links[0];
256 if (l_ptr)
257 max_item_buf = ((l_ptr->max_pkt - INT_H_SIZE) /
258 ITEM_SIZE) * ITEM_SIZE;
259 tipc_node_unlock(n_ptr);
260 }
261 read_unlock_bh(&tipc_net_lock);
262 if (!max_item_buf)
263 return;
264
265 /* create list of publication messages, then send them as a unit */
266 INIT_LIST_HEAD(&message_list);
267 217
268 read_lock_bh(&tipc_nametbl_lock); 218 read_lock_bh(&tipc_nametbl_lock);
269 named_distribute(&message_list, node, &publ_cluster, max_item_buf); 219 named_distribute(&message_list, node, &publ_cluster, max_item_buf);
diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h
index 9b312ccfd43e..b2eed4ec1526 100644
--- a/net/tipc/name_distr.h
+++ b/net/tipc/name_distr.h
@@ -39,9 +39,38 @@
39 39
40#include "name_table.h" 40#include "name_table.h"
41 41
42void tipc_named_publish(struct publication *publ); 42#define ITEM_SIZE sizeof(struct distr_item)
43void tipc_named_withdraw(struct publication *publ); 43
44void tipc_named_node_up(unsigned long node); 44/**
45 * struct distr_item - publication info distributed to other nodes
46 * @type: name sequence type
47 * @lower: name sequence lower bound
48 * @upper: name sequence upper bound
49 * @ref: publishing port reference
50 * @key: publication key
51 *
52 * ===> All fields are stored in network byte order. <===
53 *
54 * First 3 fields identify (name or) name sequence being published.
55 * Reference field uniquely identifies port that published name sequence.
56 * Key field uniquely identifies publication, in the event a port has
57 * multiple publications of the same name sequence.
58 *
59 * Note: There is no field that identifies the publishing node because it is
60 * the same for all items contained within a publication message.
61 */
62struct distr_item {
63 __be32 type;
64 __be32 lower;
65 __be32 upper;
66 __be32 ref;
67 __be32 key;
68};
69
70struct sk_buff *tipc_named_publish(struct publication *publ);
71struct sk_buff *tipc_named_withdraw(struct publication *publ);
72void named_cluster_distribute(struct sk_buff *buf);
73void tipc_named_node_up(u32 max_item_buf, u32 node);
45void tipc_named_rcv(struct sk_buff *buf); 74void tipc_named_rcv(struct sk_buff *buf);
46void tipc_named_reinit(void); 75void tipc_named_reinit(void);
47 76
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 042e8e3cabc0..9d7d37d95187 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -664,6 +664,7 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
664 u32 scope, u32 port_ref, u32 key) 664 u32 scope, u32 port_ref, u32 key)
665{ 665{
666 struct publication *publ; 666 struct publication *publ;
667 struct sk_buff *buf = NULL;
667 668
668 if (table.local_publ_count >= TIPC_MAX_PUBLICATIONS) { 669 if (table.local_publ_count >= TIPC_MAX_PUBLICATIONS) {
669 pr_warn("Publication failed, local publication limit reached (%u)\n", 670 pr_warn("Publication failed, local publication limit reached (%u)\n",
@@ -676,9 +677,12 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
676 tipc_own_addr, port_ref, key); 677 tipc_own_addr, port_ref, key);
677 if (likely(publ)) { 678 if (likely(publ)) {
678 table.local_publ_count++; 679 table.local_publ_count++;
679 tipc_named_publish(publ); 680 buf = tipc_named_publish(publ);
680 } 681 }
681 write_unlock_bh(&tipc_nametbl_lock); 682 write_unlock_bh(&tipc_nametbl_lock);
683
684 if (buf)
685 named_cluster_distribute(buf);
682 return publ; 686 return publ;
683} 687}
684 688
@@ -688,15 +692,19 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
688int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) 692int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
689{ 693{
690 struct publication *publ; 694 struct publication *publ;
695 struct sk_buff *buf;
691 696
692 write_lock_bh(&tipc_nametbl_lock); 697 write_lock_bh(&tipc_nametbl_lock);
693 publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key); 698 publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
694 if (likely(publ)) { 699 if (likely(publ)) {
695 table.local_publ_count--; 700 table.local_publ_count--;
696 tipc_named_withdraw(publ); 701 buf = tipc_named_withdraw(publ);
697 write_unlock_bh(&tipc_nametbl_lock); 702 write_unlock_bh(&tipc_nametbl_lock);
698 list_del_init(&publ->pport_list); 703 list_del_init(&publ->pport_list);
699 kfree(publ); 704 kfree(publ);
705
706 if (buf)
707 named_cluster_distribute(buf);
700 return 1; 708 return 1;
701 } 709 }
702 write_unlock_bh(&tipc_nametbl_lock); 710 write_unlock_bh(&tipc_nametbl_lock);
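The reason tipc_named_publish()/tipc_named_withdraw() now return the buffer instead of sending it themselves is lock ordering: the message is built while tipc_nametbl_lock is held, but named_cluster_distribute() takes node locks, so it may only run after the name-table lock is released. The resulting caller pattern, as used in both hunks above:

	/* Build under the name-table lock, send after unlocking: */
	write_lock_bh(&tipc_nametbl_lock);
	buf = tipc_named_withdraw(publ);	/* only builds the message */
	write_unlock_bh(&tipc_nametbl_lock);

	if (buf)
		named_cluster_distribute(buf);	/* takes node locks safely */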
@@ -961,6 +969,7 @@ static void tipc_purge_publications(struct name_seq *seq)
961 list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) { 969 list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
962 tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node, 970 tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node,
963 publ->ref, publ->key); 971 publ->ref, publ->key);
972 kfree(publ);
964 } 973 }
965} 974}
966 975
@@ -982,7 +991,6 @@ void tipc_nametbl_stop(void)
982 hlist_for_each_entry_safe(seq, safe, seq_head, ns_list) { 991 hlist_for_each_entry_safe(seq, safe, seq_head, ns_list) {
983 tipc_purge_publications(seq); 992 tipc_purge_publications(seq);
984 } 993 }
985 continue;
986 } 994 }
987 kfree(table.types); 995 kfree(table.types);
988 table.types = NULL; 996 table.types = NULL;
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 4c564eb69e1a..f64375e7f99f 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -39,45 +39,41 @@
39#include "name_distr.h" 39#include "name_distr.h"
40#include "subscr.h" 40#include "subscr.h"
41#include "port.h" 41#include "port.h"
42#include "socket.h"
42#include "node.h" 43#include "node.h"
43#include "config.h" 44#include "config.h"
44 45
45/* 46/*
46 * The TIPC locking policy is designed to ensure a very fine locking 47 * The TIPC locking policy is designed to ensure a very fine locking
47 * granularity, permitting complete parallel access to individual 48 * granularity, permitting complete parallel access to individual
48 * port and node/link instances. The code consists of three major 49 * port and node/link instances. The code consists of four major
49 * locking domains, each protected with their own disjunct set of locks. 50 * locking domains, each protected with their own disjunct set of locks.
50 * 51 *
51 * 1: The routing hierarchy. 52 * 1: The bearer level.
52 * Comprises the structures 'zone', 'cluster', 'node', 'link' 53 * RTNL lock is used to serialize the process of configuring bearers
53 * and 'bearer'. The whole hierarchy is protected by a big 54 * on the update side, while RCU is applied on the read side to keep
54 * read/write lock, tipc_net_lock, to ensure that nothing is added 55 * a bearer instance valid on both the transmit and the receive
55 * or removed while code is accessing any of these structures. 56 * paths.
56 * This layer must not be called from the two others while they
57 * hold any of their own locks.
58 * Neither must it itself do any upcalls to the other two before
59 * it has released tipc_net_lock and other protective locks.
60 * 57 *
61 * Within the tipc_net_lock domain there are two sub-domains; 'node' and 58 * 2: The node and link level.
62 * 'bearer', where local write operations are permitted, 59 * All node instances are saved in the tipc_node_list and node_htable
63 * provided that those are protected by individual spin_locks 60 * lists. Both lists are protected by node_list_lock on the write side
64 * per instance. Code holding tipc_net_lock(read) and a node spin_lock 61 * and guarded by RCU on the read side. A node
65 * is permitted to poke around in both the node itself and its 62 * instance is destroyed only when the TIPC module is removed, so we
66 * subordinate links. I.e., it can update link counters and queues, 63 * can be sure that no user is still accessing it at that
67 * change link state, send protocol messages, and alter the 64 * moment. Therefore, except when iterating the two lists under RCU
68 * "active_links" array in the node; but it can _not_ remove a link 65 * protection, there is no need to hold the RCU read lock when
69 * or a node from the overall structure. 66 * accessing a node instance elsewhere.
70 * Correspondingly, individual bearers may change status within a
71 * tipc_net_lock(read), protected by an individual spin_lock per bearer
72 * instance, but it needs tipc_net_lock(write) to remove/add any bearers.
73 * 67 *
68 * In addition, all members of the node structure, including link
69 * instances, are protected by the node spin lock.
74 * 70 *
75 * 2: The transport level of the protocol. 71 * 3: The transport level of the protocol.
76 * This consists of the structures port, (and its user level 72 * This consists of the structures port, (and its user level
77 * representations, such as user_port and tipc_sock), reference and 73 * representations, such as user_port and tipc_sock), reference and
78 * tipc_user (port.c, reg.c, socket.c). 74 * tipc_user (port.c, reg.c, socket.c).
79 * 75 *
80 * This layer has four different locks: 76 * This layer has four different locks:
81 * - The tipc_port spin_lock. This is protecting each port instance 77 * - The tipc_port spin_lock. This is protecting each port instance
82 * from parallel data access and removal. Since we can not place 78 * from parallel data access and removal. Since we can not place
83 * this lock in the port itself, it has been placed in the 79 * this lock in the port itself, it has been placed in the
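
Point 1 of the rewritten locking policy above replaces the old tipc_net_lock with the standard RTNL-for-update / RCU-for-read pattern. A hedged sketch of how a bearer pointer is typically handled under that scheme (simplified, not the actual TIPC structures):

```c
#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

struct bearer {
	int id;
};

static struct bearer __rcu *bearer_slot;

/* update side: configuration changes are serialized by RTNL */
static void bearer_disable(void)
{
	struct bearer *b;

	rtnl_lock();
	b = rtnl_dereference(bearer_slot);
	RCU_INIT_POINTER(bearer_slot, NULL);
	rtnl_unlock();

	synchronize_rcu();	/* wait for readers before freeing */
	kfree(b);
}

/* read side: tx/rx paths only take the RCU read lock */
static int bearer_xmit(void)
{
	struct bearer *b;
	int ret = -ENODEV;

	rcu_read_lock();
	b = rcu_dereference(bearer_slot);
	if (b)
		ret = b->id;	/* safe to use until rcu_read_unlock() */
	rcu_read_unlock();
	return ret;
}
```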
@@ -96,7 +92,7 @@
96 * There are two such lists; 'port_list', which is used for management, 92 * There are two such lists; 'port_list', which is used for management,
97 * and 'wait_list', which is used to queue ports during congestion. 93 * and 'wait_list', which is used to queue ports during congestion.
98 * 94 *
99 * 3: The name table (name_table.c, name_distr.c, subscription.c) 95 * 4: The name table (name_table.c, name_distr.c, subscription.c)
100 * - There is one big read/write-lock (tipc_nametbl_lock) protecting the 96 * - There is one big read/write-lock (tipc_nametbl_lock) protecting the
101 * overall name table structure. Nothing must be added/removed to 97 * overall name table structure. Nothing must be added/removed to
102 * this structure without holding write access to it. 98 * this structure without holding write access to it.
@@ -108,8 +104,6 @@
108 * - A local spin_lock protecting the queue of subscriber events. 104 * - A local spin_lock protecting the queue of subscriber events.
109*/ 105*/
110 106
111DEFINE_RWLOCK(tipc_net_lock);
112
113static void net_route_named_msg(struct sk_buff *buf) 107static void net_route_named_msg(struct sk_buff *buf)
114{ 108{
115 struct tipc_msg *msg = buf_msg(buf); 109 struct tipc_msg *msg = buf_msg(buf);
@@ -148,7 +142,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
148 if (msg_mcast(msg)) 142 if (msg_mcast(msg))
149 tipc_port_mcast_rcv(buf, NULL); 143 tipc_port_mcast_rcv(buf, NULL);
150 else if (msg_destport(msg)) 144 else if (msg_destport(msg))
151 tipc_port_rcv(buf); 145 tipc_sk_rcv(buf);
152 else 146 else
153 net_route_named_msg(buf); 147 net_route_named_msg(buf);
154 return; 148 return;
@@ -171,22 +165,25 @@ void tipc_net_route_msg(struct sk_buff *buf)
171 tipc_link_xmit(buf, dnode, msg_link_selector(msg)); 165 tipc_link_xmit(buf, dnode, msg_link_selector(msg));
172} 166}
173 167
174void tipc_net_start(u32 addr) 168int tipc_net_start(u32 addr)
175{ 169{
176 char addr_string[16]; 170 char addr_string[16];
171 int res;
177 172
178 write_lock_bh(&tipc_net_lock);
179 tipc_own_addr = addr; 173 tipc_own_addr = addr;
180 tipc_named_reinit(); 174 tipc_named_reinit();
181 tipc_port_reinit(); 175 tipc_port_reinit();
182 tipc_bclink_init(); 176 res = tipc_bclink_init();
183 write_unlock_bh(&tipc_net_lock); 177 if (res)
178 return res;
184 179
185 tipc_nametbl_publish(TIPC_CFG_SRV, tipc_own_addr, tipc_own_addr, 180 tipc_nametbl_publish(TIPC_CFG_SRV, tipc_own_addr, tipc_own_addr,
186 TIPC_ZONE_SCOPE, 0, tipc_own_addr); 181 TIPC_ZONE_SCOPE, 0, tipc_own_addr);
182
187 pr_info("Started in network mode\n"); 183 pr_info("Started in network mode\n");
188 pr_info("Own node address %s, network identity %u\n", 184 pr_info("Own node address %s, network identity %u\n",
189 tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id); 185 tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
186 return 0;
190} 187}
191 188
192void tipc_net_stop(void) 189void tipc_net_stop(void)
@@ -195,11 +192,11 @@ void tipc_net_stop(void)
195 return; 192 return;
196 193
197 tipc_nametbl_withdraw(TIPC_CFG_SRV, tipc_own_addr, 0, tipc_own_addr); 194 tipc_nametbl_withdraw(TIPC_CFG_SRV, tipc_own_addr, 0, tipc_own_addr);
198 write_lock_bh(&tipc_net_lock); 195 rtnl_lock();
199 tipc_bearer_stop(); 196 tipc_bearer_stop();
200 tipc_bclink_stop(); 197 tipc_bclink_stop();
201 tipc_node_stop(); 198 tipc_node_stop();
202 write_unlock_bh(&tipc_net_lock); 199 rtnl_unlock();
203 200
204 pr_info("Left network mode\n"); 201 pr_info("Left network mode\n");
205} 202}
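
tipc_net_start() is converted from void to int so that a failure in tipc_bclink_init() (typically -ENOMEM) aborts startup instead of being ignored; the prototype change in net.h below matches. The conversion follows the usual shape (stand-in helper):

```c
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/types.h>

/* stand-in for tipc_bclink_init(); its allocation may fail */
static int bclink_init(void)
{
	return -ENOMEM;
}

static int net_start(u32 addr)
{
	int res;

	res = bclink_init();
	if (res)
		return res;	/* propagate the error to the caller */

	pr_info("Started in network mode\n");
	return 0;
}
```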
diff --git a/net/tipc/net.h b/net/tipc/net.h
index 079daadb3f72..c6c2b46f7c28 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -37,11 +37,9 @@
37#ifndef _TIPC_NET_H 37#ifndef _TIPC_NET_H
38#define _TIPC_NET_H 38#define _TIPC_NET_H
39 39
40extern rwlock_t tipc_net_lock;
41
42void tipc_net_route_msg(struct sk_buff *buf); 40void tipc_net_route_msg(struct sk_buff *buf);
43 41
44void tipc_net_start(u32 addr); 42int tipc_net_start(u32 addr);
45void tipc_net_stop(void); 43void tipc_net_stop(void);
46 44
47#endif 45#endif
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 1d3a4999a70f..5b44c3041be4 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -108,7 +108,7 @@ struct tipc_node *tipc_node_create(u32 addr)
108 break; 108 break;
109 } 109 }
110 list_add_tail_rcu(&n_ptr->list, &temp_node->list); 110 list_add_tail_rcu(&n_ptr->list, &temp_node->list);
111 n_ptr->block_setup = WAIT_PEER_DOWN; 111 n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN;
112 n_ptr->signature = INVALID_NODE_SIG; 112 n_ptr->signature = INVALID_NODE_SIG;
113 113
114 tipc_num_nodes++; 114 tipc_num_nodes++;
@@ -144,11 +144,13 @@ void tipc_node_stop(void)
144void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr) 144void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
145{ 145{
146 struct tipc_link **active = &n_ptr->active_links[0]; 146 struct tipc_link **active = &n_ptr->active_links[0];
147 u32 addr = n_ptr->addr;
147 148
148 n_ptr->working_links++; 149 n_ptr->working_links++;
149 150 tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr, TIPC_NODE_SCOPE,
151 l_ptr->bearer_id, addr);
150 pr_info("Established link <%s> on network plane %c\n", 152 pr_info("Established link <%s> on network plane %c\n",
151 l_ptr->name, l_ptr->b_ptr->net_plane); 153 l_ptr->name, l_ptr->net_plane);
152 154
153 if (!active[0]) { 155 if (!active[0]) {
154 active[0] = active[1] = l_ptr; 156 active[0] = active[1] = l_ptr;
@@ -203,16 +205,18 @@ static void node_select_active_links(struct tipc_node *n_ptr)
203void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr) 205void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
204{ 206{
205 struct tipc_link **active; 207 struct tipc_link **active;
208 u32 addr = n_ptr->addr;
206 209
207 n_ptr->working_links--; 210 n_ptr->working_links--;
211 tipc_nametbl_withdraw(TIPC_LINK_STATE, addr, l_ptr->bearer_id, addr);
208 212
209 if (!tipc_link_is_active(l_ptr)) { 213 if (!tipc_link_is_active(l_ptr)) {
210 pr_info("Lost standby link <%s> on network plane %c\n", 214 pr_info("Lost standby link <%s> on network plane %c\n",
211 l_ptr->name, l_ptr->b_ptr->net_plane); 215 l_ptr->name, l_ptr->net_plane);
212 return; 216 return;
213 } 217 }
214 pr_info("Lost link <%s> on network plane %c\n", 218 pr_info("Lost link <%s> on network plane %c\n",
215 l_ptr->name, l_ptr->b_ptr->net_plane); 219 l_ptr->name, l_ptr->net_plane);
216 220
217 active = &n_ptr->active_links[0]; 221 active = &n_ptr->active_links[0];
218 if (active[0] == l_ptr) 222 if (active[0] == l_ptr)
@@ -239,7 +243,7 @@ int tipc_node_is_up(struct tipc_node *n_ptr)
239 243
240void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr) 244void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
241{ 245{
242 n_ptr->links[l_ptr->b_ptr->identity] = l_ptr; 246 n_ptr->links[l_ptr->bearer_id] = l_ptr;
243 spin_lock_bh(&node_list_lock); 247 spin_lock_bh(&node_list_lock);
244 tipc_num_links++; 248 tipc_num_links++;
245 spin_unlock_bh(&node_list_lock); 249 spin_unlock_bh(&node_list_lock);
@@ -263,26 +267,12 @@ void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
263 267
264static void node_established_contact(struct tipc_node *n_ptr) 268static void node_established_contact(struct tipc_node *n_ptr)
265{ 269{
266 tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr); 270 n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
267 n_ptr->bclink.oos_state = 0; 271 n_ptr->bclink.oos_state = 0;
268 n_ptr->bclink.acked = tipc_bclink_get_last_sent(); 272 n_ptr->bclink.acked = tipc_bclink_get_last_sent();
269 tipc_bclink_add_node(n_ptr->addr); 273 tipc_bclink_add_node(n_ptr->addr);
270} 274}
271 275
272static void node_name_purge_complete(unsigned long node_addr)
273{
274 struct tipc_node *n_ptr;
275
276 read_lock_bh(&tipc_net_lock);
277 n_ptr = tipc_node_find(node_addr);
278 if (n_ptr) {
279 tipc_node_lock(n_ptr);
280 n_ptr->block_setup &= ~WAIT_NAMES_GONE;
281 tipc_node_unlock(n_ptr);
282 }
283 read_unlock_bh(&tipc_net_lock);
284}
285
286static void node_lost_contact(struct tipc_node *n_ptr) 276static void node_lost_contact(struct tipc_node *n_ptr)
287{ 277{
288 char addr_string[16]; 278 char addr_string[16];
@@ -296,10 +286,9 @@ static void node_lost_contact(struct tipc_node *n_ptr)
296 kfree_skb_list(n_ptr->bclink.deferred_head); 286 kfree_skb_list(n_ptr->bclink.deferred_head);
297 n_ptr->bclink.deferred_size = 0; 287 n_ptr->bclink.deferred_size = 0;
298 288
299 if (n_ptr->bclink.reasm_head) { 289 if (n_ptr->bclink.reasm_buf) {
300 kfree_skb(n_ptr->bclink.reasm_head); 290 kfree_skb(n_ptr->bclink.reasm_buf);
301 n_ptr->bclink.reasm_head = NULL; 291 n_ptr->bclink.reasm_buf = NULL;
302 n_ptr->bclink.reasm_tail = NULL;
303 } 292 }
304 293
305 tipc_bclink_remove_node(n_ptr->addr); 294 tipc_bclink_remove_node(n_ptr->addr);
@@ -318,12 +307,13 @@ static void node_lost_contact(struct tipc_node *n_ptr)
318 tipc_link_reset_fragments(l_ptr); 307 tipc_link_reset_fragments(l_ptr);
319 } 308 }
320 309
321 /* Notify subscribers */ 310 n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;
322 tipc_nodesub_notify(n_ptr);
323 311
324 /* Prevent re-contact with node until cleanup is done */ 312 /* Notify subscribers and prevent re-contact with node until
325 n_ptr->block_setup = WAIT_PEER_DOWN | WAIT_NAMES_GONE; 313 * cleanup is done.
326 tipc_k_signal((Handler)node_name_purge_complete, n_ptr->addr); 314 */
315 n_ptr->action_flags |= TIPC_WAIT_PEER_LINKS_DOWN |
316 TIPC_NOTIFY_NODE_DOWN;
327} 317}
328 318
329struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) 319struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
@@ -436,3 +426,63 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
436 rcu_read_unlock(); 426 rcu_read_unlock();
437 return buf; 427 return buf;
438} 428}
429
430/**
431 * tipc_node_get_linkname - get the name of a link
432 *
433 * @bearer_id: id of the bearer
434 * @addr: peer node address
435 * @linkname: link name output buffer of at least @len bytes
436 *
437 * Returns 0 on success
438 */
439int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len)
440{
441 struct tipc_link *link;
442 struct tipc_node *node = tipc_node_find(addr);
443
444 if ((bearer_id >= MAX_BEARERS) || !node)
445 return -EINVAL;
446 tipc_node_lock(node);
447 link = node->links[bearer_id];
448 if (link) {
449 strncpy(linkname, link->name, len);
450 tipc_node_unlock(node);
451 return 0;
452 }
453 tipc_node_unlock(node);
454 return -EINVAL;
455}
456
457void tipc_node_unlock(struct tipc_node *node)
458{
459 LIST_HEAD(nsub_list);
460 struct tipc_link *link;
461 int pkt_sz = 0;
462 u32 addr = 0;
463
464 if (likely(!node->action_flags)) {
465 spin_unlock_bh(&node->lock);
466 return;
467 }
468
469 if (node->action_flags & TIPC_NOTIFY_NODE_DOWN) {
470 list_replace_init(&node->nsub, &nsub_list);
471 node->action_flags &= ~TIPC_NOTIFY_NODE_DOWN;
472 }
473 if (node->action_flags & TIPC_NOTIFY_NODE_UP) {
474 link = node->active_links[0];
475 node->action_flags &= ~TIPC_NOTIFY_NODE_UP;
476 if (link) {
477 pkt_sz = ((link->max_pkt - INT_H_SIZE) / ITEM_SIZE) *
478 ITEM_SIZE;
479 addr = node->addr;
480 }
481 }
482 spin_unlock_bh(&node->lock);
483
484 if (!list_empty(&nsub_list))
485 tipc_nodesub_notify(&nsub_list);
486 if (pkt_sz)
487 tipc_named_node_up(pkt_sz, addr);
488}
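
The new tipc_node_unlock() snapshots the pending action bits and the data they need while node->lock is still held, then runs the callouts only after spin_unlock_bh(); this is what lets node_established_contact()/node_lost_contact() merely set flags. A reduced sketch of the record-then-act shape (field names hypothetical):

```c
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define NOTIFY_DOWN	(1 << 0)
#define NOTIFY_UP	(1 << 1)

struct node {
	spinlock_t lock;
	unsigned int action_flags;
	struct list_head subscribers;
};

static void notify_down(struct list_head *subs) { }
static void notify_up(void) { }

static void node_unlock(struct node *n)
{
	LIST_HEAD(subs);
	bool up = false;

	if (likely(!n->action_flags)) {
		spin_unlock_bh(&n->lock);
		return;
	}

	/* snapshot the work while still holding the lock */
	if (n->action_flags & NOTIFY_DOWN) {
		list_replace_init(&n->subscribers, &subs);
		n->action_flags &= ~NOTIFY_DOWN;
	}
	if (n->action_flags & NOTIFY_UP) {
		n->action_flags &= ~NOTIFY_UP;
		up = true;
	}
	spin_unlock_bh(&n->lock);

	/* callouts run unlocked and may take other locks */
	if (!list_empty(&subs))
		notify_down(&subs);
	if (up)
		notify_up();
}
```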
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 7cbb8cec1a93..9087063793f2 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -47,62 +47,73 @@
47 */ 47 */
48#define INVALID_NODE_SIG 0x10000 48#define INVALID_NODE_SIG 0x10000
49 49
50/* Flags used to block (re)establishment of contact with a neighboring node */ 50/* Flags used to determine which actions a node state change requires
51#define WAIT_PEER_DOWN 0x0001 /* wait to see that peer's links are down */ 51 * TIPC_WAIT_PEER_LINKS_DOWN: wait to see that peer's links are down
52#define WAIT_NAMES_GONE 0x0002 /* wait for peer's publications to be purged */ 52 * TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down
53#define WAIT_NODE_DOWN 0x0004 /* wait until peer node is declared down */ 53 * TIPC_NOTIFY_NODE_DOWN: notify node is down
54 * TIPC_NOTIFY_NODE_UP: notify node is up
55 */
56enum {
57 TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1),
58 TIPC_WAIT_OWN_LINKS_DOWN = (1 << 2),
59 TIPC_NOTIFY_NODE_DOWN = (1 << 3),
60 TIPC_NOTIFY_NODE_UP = (1 << 4)
61};
62
63/**
64 * struct tipc_node_bclink - TIPC node bclink structure
65 * @acked: sequence # of last outbound b'cast message acknowledged by node
66 * @last_in: sequence # of last in-sequence b'cast message received from node
67 * @last_sent: sequence # of last b'cast message sent by node
68 * @oos_state: state tracker for handling OOS b'cast messages
69 * @deferred_size: number of OOS b'cast messages in deferred queue
70 * @deferred_head: oldest OOS b'cast message received from node
71 * @deferred_tail: newest OOS b'cast message received from node
72 * @reasm_buf: broadcast reassembly queue head from node
73 * @recv_permitted: true if node is allowed to receive b'cast messages
74 */
75struct tipc_node_bclink {
76 u32 acked;
77 u32 last_in;
78 u32 last_sent;
79 u32 oos_state;
80 u32 deferred_size;
81 struct sk_buff *deferred_head;
82 struct sk_buff *deferred_tail;
83 struct sk_buff *reasm_buf;
84 bool recv_permitted;
85};
54 86
55/** 87/**
56 * struct tipc_node - TIPC node structure 88 * struct tipc_node - TIPC node structure
57 * @addr: network address of node 89 * @addr: network address of node
58 * @lock: spinlock governing access to structure 90 * @lock: spinlock governing access to structure
59 * @hash: links to adjacent nodes in unsorted hash chain 91 * @hash: links to adjacent nodes in unsorted hash chain
60 * @list: links to adjacent nodes in sorted list of cluster's nodes
61 * @nsub: list of "node down" subscriptions monitoring node
62 * @active_links: pointers to active links to node 92 * @active_links: pointers to active links to node
63 * @links: pointers to all links to node 93 * @links: pointers to all links to node
94 * @action_flags: bit mask of different types of node actions
95 * @bclink: broadcast-related info
96 * @list: links to adjacent nodes in sorted list of cluster's nodes
64 * @working_links: number of working links to node (both active and standby) 97 * @working_links: number of working links to node (both active and standby)
65 * @block_setup: bit mask of conditions preventing link establishment to node
66 * @link_cnt: number of links to node 98 * @link_cnt: number of links to node
67 * @signature: node instance identifier 99 * @signature: node instance identifier
68 * @bclink: broadcast-related info 100 * @nsub: list of "node down" subscriptions monitoring node
69 * @rcu: rcu struct for tipc_node 101 * @rcu: rcu struct for tipc_node
70 * @acked: sequence # of last outbound b'cast message acknowledged by node
71 * @last_in: sequence # of last in-sequence b'cast message received from node
72 * @last_sent: sequence # of last b'cast message sent by node
73 * @oos_state: state tracker for handling OOS b'cast messages
74 * @deferred_size: number of OOS b'cast messages in deferred queue
75 * @deferred_head: oldest OOS b'cast message received from node
76 * @deferred_tail: newest OOS b'cast message received from node
77 * @reasm_head: broadcast reassembly queue head from node
78 * @reasm_tail: last broadcast fragment received from node
79 * @recv_permitted: true if node is allowed to receive b'cast messages
80 */ 102 */
81struct tipc_node { 103struct tipc_node {
82 u32 addr; 104 u32 addr;
83 spinlock_t lock; 105 spinlock_t lock;
84 struct hlist_node hash; 106 struct hlist_node hash;
85 struct list_head list;
86 struct list_head nsub;
87 struct tipc_link *active_links[2]; 107 struct tipc_link *active_links[2];
88 struct tipc_link *links[MAX_BEARERS]; 108 struct tipc_link *links[MAX_BEARERS];
109 unsigned int action_flags;
110 struct tipc_node_bclink bclink;
111 struct list_head list;
89 int link_cnt; 112 int link_cnt;
90 int working_links; 113 int working_links;
91 int block_setup;
92 u32 signature; 114 u32 signature;
115 struct list_head nsub;
93 struct rcu_head rcu; 116 struct rcu_head rcu;
94 struct {
95 u32 acked;
96 u32 last_in;
97 u32 last_sent;
98 u32 oos_state;
99 u32 deferred_size;
100 struct sk_buff *deferred_head;
101 struct sk_buff *deferred_tail;
102 struct sk_buff *reasm_head;
103 struct sk_buff *reasm_tail;
104 bool recv_permitted;
105 } bclink;
106}; 117};
107 118
108extern struct list_head tipc_node_list; 119extern struct list_head tipc_node_list;
@@ -118,15 +129,18 @@ int tipc_node_active_links(struct tipc_node *n_ptr);
118int tipc_node_is_up(struct tipc_node *n_ptr); 129int tipc_node_is_up(struct tipc_node *n_ptr);
119struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space); 130struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space);
120struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space); 131struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space);
132int tipc_node_get_linkname(u32 bearer_id, u32 node, char *linkname, size_t len);
133void tipc_node_unlock(struct tipc_node *node);
121 134
122static inline void tipc_node_lock(struct tipc_node *n_ptr) 135static inline void tipc_node_lock(struct tipc_node *node)
123{ 136{
124 spin_lock_bh(&n_ptr->lock); 137 spin_lock_bh(&node->lock);
125} 138}
126 139
127static inline void tipc_node_unlock(struct tipc_node *n_ptr) 140static inline bool tipc_node_blocked(struct tipc_node *node)
128{ 141{
129 spin_unlock_bh(&n_ptr->lock); 142 return (node->action_flags & (TIPC_WAIT_PEER_LINKS_DOWN |
143 TIPC_NOTIFY_NODE_DOWN | TIPC_WAIT_OWN_LINKS_DOWN));
130} 144}
131 145
132#endif 146#endif
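
The action_flags bits plus tipc_node_blocked() form an ordinary bit-mask state test (note the enum deliberately starts at bit 1). A small userspace illustration of how the mask is meant to be set, tested, and cleared:

```c
#include <stdio.h>

enum {
	WAIT_PEER_LINKS_DOWN = (1 << 1),
	WAIT_OWN_LINKS_DOWN  = (1 << 2),
	NOTIFY_NODE_DOWN     = (1 << 3),
	NOTIFY_NODE_UP       = (1 << 4),
};

static int node_blocked(unsigned int flags)
{
	return !!(flags & (WAIT_PEER_LINKS_DOWN |
			   WAIT_OWN_LINKS_DOWN | NOTIFY_NODE_DOWN));
}

int main(void)
{
	unsigned int flags = 0;

	/* lost contact: block re-establishment until cleanup is done */
	flags |= WAIT_PEER_LINKS_DOWN | NOTIFY_NODE_DOWN;
	printf("blocked=%d\n", node_blocked(flags));	/* 1 */

	/* cleanup complete: clear the blocking bits */
	flags &= ~(WAIT_PEER_LINKS_DOWN | NOTIFY_NODE_DOWN);
	printf("blocked=%d\n", node_blocked(flags));	/* 0 */
	return 0;
}
```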
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
index 8a7384c04add..7c59ab1d6ecb 100644
--- a/net/tipc/node_subscr.c
+++ b/net/tipc/node_subscr.c
@@ -81,14 +81,13 @@ void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub)
81 * 81 *
82 * Note: node is locked by caller 82 * Note: node is locked by caller
83 */ 83 */
84void tipc_nodesub_notify(struct tipc_node *node) 84void tipc_nodesub_notify(struct list_head *nsub_list)
85{ 85{
86 struct tipc_node_subscr *ns; 86 struct tipc_node_subscr *ns, *safe;
87 87
88 list_for_each_entry(ns, &node->nsub, nodesub_list) { 88 list_for_each_entry_safe(ns, safe, nsub_list, nodesub_list) {
89 if (ns->handle_node_down) { 89 if (ns->handle_node_down) {
90 tipc_k_signal((Handler)ns->handle_node_down, 90 ns->handle_node_down(ns->usr_handle);
91 (unsigned long)ns->usr_handle);
92 ns->handle_node_down = NULL; 91 ns->handle_node_down = NULL;
93 } 92 }
94 } 93 }
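
The walk uses list_for_each_entry_safe() because a handle_node_down() callback may free the structure embedding its subscription; the _safe variant caches the next pointer before the callback runs. A hedged kernel-style sketch (reduced types):

```c
#include <linux/list.h>
#include <linux/slab.h>

struct subscr {
	struct list_head link;
	void (*down)(struct subscr *s);
};

/* a typical ->down callback: unlinks and frees its own subscription */
static void free_on_down(struct subscr *s)
{
	list_del(&s->link);
	kfree(s);
}

static void notify(struct list_head *subs)
{
	struct subscr *s, *safe;

	/* 'safe' is fetched before ->down() can free 's' */
	list_for_each_entry_safe(s, safe, subs, link) {
		if (s->down)
			s->down(s);
	}
}
```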
diff --git a/net/tipc/node_subscr.h b/net/tipc/node_subscr.h
index c95d20727ded..d91b8cc81e3d 100644
--- a/net/tipc/node_subscr.h
+++ b/net/tipc/node_subscr.h
@@ -58,6 +58,6 @@ struct tipc_node_subscr {
58void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr, 58void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
59 void *usr_handle, net_ev_handler handle_down); 59 void *usr_handle, net_ev_handler handle_down);
60void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub); 60void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub);
61void tipc_nodesub_notify(struct tipc_node *node); 61void tipc_nodesub_notify(struct list_head *nsub_list);
62 62
63#endif 63#endif
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 5c14c7801ee6..5fd7acce01ea 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -165,7 +165,7 @@ void tipc_port_mcast_rcv(struct sk_buff *buf, struct tipc_port_list *dp)
165 msg_set_destnode(msg, tipc_own_addr); 165 msg_set_destnode(msg, tipc_own_addr);
166 if (dp->count == 1) { 166 if (dp->count == 1) {
167 msg_set_destport(msg, dp->ports[0]); 167 msg_set_destport(msg, dp->ports[0]);
168 tipc_port_rcv(buf); 168 tipc_sk_rcv(buf);
169 tipc_port_list_free(dp); 169 tipc_port_list_free(dp);
170 return; 170 return;
171 } 171 }
@@ -180,7 +180,7 @@ void tipc_port_mcast_rcv(struct sk_buff *buf, struct tipc_port_list *dp)
180 if ((index == 0) && (cnt != 0)) 180 if ((index == 0) && (cnt != 0))
181 item = item->next; 181 item = item->next;
182 msg_set_destport(buf_msg(b), item->ports[index]); 182 msg_set_destport(buf_msg(b), item->ports[index]);
183 tipc_port_rcv(b); 183 tipc_sk_rcv(b);
184 } 184 }
185 } 185 }
186exit: 186exit:
@@ -343,7 +343,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
343 /* send returned message & dispose of rejected message */ 343 /* send returned message & dispose of rejected message */
344 src_node = msg_prevnode(msg); 344 src_node = msg_prevnode(msg);
345 if (in_own_node(src_node)) 345 if (in_own_node(src_node))
346 tipc_port_rcv(rbuf); 346 tipc_sk_rcv(rbuf);
347 else 347 else
348 tipc_link_xmit(rbuf, src_node, msg_link_selector(rmsg)); 348 tipc_link_xmit(rbuf, src_node, msg_link_selector(rmsg));
349exit: 349exit:
@@ -754,37 +754,6 @@ int tipc_port_shutdown(u32 ref)
754 return tipc_port_disconnect(ref); 754 return tipc_port_disconnect(ref);
755} 755}
756 756
757/**
758 * tipc_port_rcv - receive message from lower layer and deliver to port user
759 */
760int tipc_port_rcv(struct sk_buff *buf)
761{
762 struct tipc_port *p_ptr;
763 struct tipc_msg *msg = buf_msg(buf);
764 u32 destport = msg_destport(msg);
765 u32 dsz = msg_data_sz(msg);
766 u32 err;
767
768 /* forward unresolved named message */
769 if (unlikely(!destport)) {
770 tipc_net_route_msg(buf);
771 return dsz;
772 }
773
774 /* validate destination & pass to port, otherwise reject message */
775 p_ptr = tipc_port_lock(destport);
776 if (likely(p_ptr)) {
777 err = tipc_sk_rcv(&tipc_port_to_sock(p_ptr)->sk, buf);
778 tipc_port_unlock(p_ptr);
779 if (likely(!err))
780 return dsz;
781 } else {
782 err = TIPC_ERR_NO_PORT;
783 }
784
785 return tipc_reject_msg(buf, err);
786}
787
788/* 757/*
789 * tipc_port_iovec_rcv: Concatenate and deliver sectioned 758 * tipc_port_iovec_rcv: Concatenate and deliver sectioned
790 * message for this node. 759 * message for this node.
@@ -798,7 +767,7 @@ static int tipc_port_iovec_rcv(struct tipc_port *sender,
798 767
799 res = tipc_msg_build(&sender->phdr, msg_sect, len, MAX_MSG_SIZE, &buf); 768 res = tipc_msg_build(&sender->phdr, msg_sect, len, MAX_MSG_SIZE, &buf);
800 if (likely(buf)) 769 if (likely(buf))
801 tipc_port_rcv(buf); 770 tipc_sk_rcv(buf);
802 return res; 771 return res;
803} 772}
804 773
diff --git a/net/tipc/port.h b/net/tipc/port.h
index a00397393bd1..cf4ca5b1d9a4 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -42,9 +42,10 @@
42#include "msg.h" 42#include "msg.h"
43#include "node_subscr.h" 43#include "node_subscr.h"
44 44
45#define TIPC_FLOW_CONTROL_WIN 512 45#define TIPC_CONNACK_INTV 256
46#define CONN_OVERLOAD_LIMIT ((TIPC_FLOW_CONTROL_WIN * 2 + 1) * \ 46#define TIPC_FLOWCTRL_WIN (TIPC_CONNACK_INTV * 2)
47 SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE)) 47#define TIPC_CONN_OVERLOAD_LIMIT ((TIPC_FLOWCTRL_WIN * 2 + 1) * \
48 SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
48 49
49/** 50/**
50 * struct tipc_port - TIPC port structure 51 * struct tipc_port - TIPC port structure
@@ -134,7 +135,6 @@ int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg);
134/* 135/*
135 * TIPC messaging routines 136 * TIPC messaging routines
136 */ 137 */
137int tipc_port_rcv(struct sk_buff *buf);
138 138
139int tipc_send(struct tipc_port *port, 139int tipc_send(struct tipc_port *port,
140 struct iovec const *msg_sect, 140 struct iovec const *msg_sect,
@@ -187,7 +187,7 @@ static inline void tipc_port_unlock(struct tipc_port *p_ptr)
187 187
188static inline int tipc_port_congested(struct tipc_port *p_ptr) 188static inline int tipc_port_congested(struct tipc_port *p_ptr)
189{ 189{
190 return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2); 190 return ((p_ptr->sent - p_ptr->acked) >= TIPC_FLOWCTRL_WIN);
191} 191}
192 192
193 193
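
The renamed constants preserve the old numbers: TIPC_CONNACK_INTV (256) is how often the receiver acknowledges, TIPC_FLOWCTRL_WIN is 512, and the congestion test (sent - acked) >= TIPC_FLOWCTRL_WIN matches the previous TIPC_FLOW_CONTROL_WIN * 2. A quick check of the arithmetic:

```c
#include <stdio.h>

#define TIPC_CONNACK_INTV	256
#define TIPC_FLOWCTRL_WIN	(TIPC_CONNACK_INTV * 2)	/* 512 */

static int congested(unsigned int sent, unsigned int acked)
{
	return (sent - acked) >= TIPC_FLOWCTRL_WIN;
}

int main(void)
{
	printf("window=%d\n", TIPC_FLOWCTRL_WIN);	/* 512 */
	printf("%d\n", congested(556, 300));	/* 0: 256 msgs in flight */
	printf("%d\n", congested(812, 300));	/* 1: 512 msgs in flight */
	return 0;
}
```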
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 3c0256962f7d..ef0475568f9e 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -36,6 +36,7 @@
36 36
37#include "core.h" 37#include "core.h"
38#include "port.h" 38#include "port.h"
39#include "node.h"
39 40
40#include <linux/export.h> 41#include <linux/export.h>
41 42
@@ -44,7 +45,7 @@
44 45
45#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ 46#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
46 47
47static int backlog_rcv(struct sock *sk, struct sk_buff *skb); 48static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
48static void tipc_data_ready(struct sock *sk); 49static void tipc_data_ready(struct sock *sk);
49static void tipc_write_space(struct sock *sk); 50static void tipc_write_space(struct sock *sk);
50static int tipc_release(struct socket *sock); 51static int tipc_release(struct socket *sock);
@@ -195,11 +196,12 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
195 sock->state = state; 196 sock->state = state;
196 197
197 sock_init_data(sock, sk); 198 sock_init_data(sock, sk);
198 sk->sk_backlog_rcv = backlog_rcv; 199 sk->sk_backlog_rcv = tipc_backlog_rcv;
199 sk->sk_rcvbuf = sysctl_tipc_rmem[1]; 200 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
200 sk->sk_data_ready = tipc_data_ready; 201 sk->sk_data_ready = tipc_data_ready;
201 sk->sk_write_space = tipc_write_space; 202 sk->sk_write_space = tipc_write_space;
202 tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT; 203 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
204 atomic_set(&tsk->dupl_rcvcnt, 0);
203 tipc_port_unlock(port); 205 tipc_port_unlock(port);
204 206
205 if (sock->state == SS_READY) { 207 if (sock->state == SS_READY) {
@@ -983,10 +985,11 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
983 return 0; 985 return 0;
984} 986}
985 987
986static int tipc_wait_for_rcvmsg(struct socket *sock, long timeo) 988static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
987{ 989{
988 struct sock *sk = sock->sk; 990 struct sock *sk = sock->sk;
989 DEFINE_WAIT(wait); 991 DEFINE_WAIT(wait);
992 long timeo = *timeop;
990 int err; 993 int err;
991 994
992 for (;;) { 995 for (;;) {
@@ -1011,6 +1014,7 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long timeo)
1011 break; 1014 break;
1012 } 1015 }
1013 finish_wait(sk_sleep(sk), &wait); 1016 finish_wait(sk_sleep(sk), &wait);
1017 *timeop = timeo;
1014 return err; 1018 return err;
1015} 1019}
1016 1020
@@ -1054,7 +1058,7 @@ static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock,
1054restart: 1058restart:
1055 1059
1056 /* Look for a message in receive queue; wait if necessary */ 1060 /* Look for a message in receive queue; wait if necessary */
1057 res = tipc_wait_for_rcvmsg(sock, timeo); 1061 res = tipc_wait_for_rcvmsg(sock, &timeo);
1058 if (res) 1062 if (res)
1059 goto exit; 1063 goto exit;
1060 1064
@@ -1100,7 +1104,7 @@ restart:
1100 /* Consume received message (optional) */ 1104 /* Consume received message (optional) */
1101 if (likely(!(flags & MSG_PEEK))) { 1105 if (likely(!(flags & MSG_PEEK))) {
1102 if ((sock->state != SS_READY) && 1106 if ((sock->state != SS_READY) &&
1103 (++port->conn_unacked >= TIPC_FLOW_CONTROL_WIN)) 1107 (++port->conn_unacked >= TIPC_CONNACK_INTV))
1104 tipc_acknowledge(port->ref, port->conn_unacked); 1108 tipc_acknowledge(port->ref, port->conn_unacked);
1105 advance_rx_queue(sk); 1109 advance_rx_queue(sk);
1106 } 1110 }
@@ -1152,7 +1156,7 @@ static int tipc_recv_stream(struct kiocb *iocb, struct socket *sock,
1152 1156
1153restart: 1157restart:
1154 /* Look for a message in receive queue; wait if necessary */ 1158 /* Look for a message in receive queue; wait if necessary */
1155 res = tipc_wait_for_rcvmsg(sock, timeo); 1159 res = tipc_wait_for_rcvmsg(sock, &timeo);
1156 if (res) 1160 if (res)
1157 goto exit; 1161 goto exit;
1158 1162
@@ -1209,7 +1213,7 @@ restart:
1209 1213
1210 /* Consume received message (optional) */ 1214 /* Consume received message (optional) */
1211 if (likely(!(flags & MSG_PEEK))) { 1215 if (likely(!(flags & MSG_PEEK))) {
1212 if (unlikely(++port->conn_unacked >= TIPC_FLOW_CONTROL_WIN)) 1216 if (unlikely(++port->conn_unacked >= TIPC_CONNACK_INTV))
1213 tipc_acknowledge(port->ref, port->conn_unacked); 1217 tipc_acknowledge(port->ref, port->conn_unacked);
1214 advance_rx_queue(sk); 1218 advance_rx_queue(sk);
1215 } 1219 }
@@ -1415,7 +1419,7 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1415} 1419}
1416 1420
1417/** 1421/**
1418 * backlog_rcv - handle incoming message from backlog queue 1422 * tipc_backlog_rcv - handle incoming message from backlog queue
1419 * @sk: socket 1423 * @sk: socket
1420 * @buf: message 1424 * @buf: message
1421 * 1425 *
@@ -1423,47 +1427,74 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1423 * 1427 *
1424 * Returns 0 1428 * Returns 0
1425 */ 1429 */
1426static int backlog_rcv(struct sock *sk, struct sk_buff *buf) 1430static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf)
1427{ 1431{
1428 u32 res; 1432 u32 res;
1433 struct tipc_sock *tsk = tipc_sk(sk);
1434 uint truesize = buf->truesize;
1429 1435
1430 res = filter_rcv(sk, buf); 1436 res = filter_rcv(sk, buf);
1431 if (res) 1437 if (unlikely(res))
1432 tipc_reject_msg(buf, res); 1438 tipc_reject_msg(buf, res);
1439
1440 if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT)
1441 atomic_add(truesize, &tsk->dupl_rcvcnt);
1442
1433 return 0; 1443 return 0;
1434} 1444}
1435 1445
1436/** 1446/**
1437 * tipc_sk_rcv - handle incoming message 1447 * tipc_sk_rcv - handle incoming message
1438 * @sk: socket receiving message 1448 * @buf: buffer containing arriving message
1439 * @buf: message 1449 * Consumes buffer
1440 * 1450 * Returns 0 on success, or errno: -EHOSTUNREACH
1441 * Called with port lock already taken.
1442 *
1443 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
1444 */ 1451 */
1445u32 tipc_sk_rcv(struct sock *sk, struct sk_buff *buf) 1452int tipc_sk_rcv(struct sk_buff *buf)
1446{ 1453{
1447 u32 res; 1454 struct tipc_sock *tsk;
1455 struct tipc_port *port;
1456 struct sock *sk;
1457 u32 dport = msg_destport(buf_msg(buf));
1458 int err = TIPC_OK;
1459 uint limit;
1448 1460
1449 /* 1461 /* Forward unresolved named message */
1450 * Process message if socket is unlocked; otherwise add to backlog queue 1462 if (unlikely(!dport)) {
1451 * 1463 tipc_net_route_msg(buf);
1452 * This code is based on sk_receive_skb(), but must be distinct from it 1464 return 0;
1453 * since a TIPC-specific filter/reject mechanism is utilized 1465 }
1454 */ 1466
1467 /* Validate destination */
1468 port = tipc_port_lock(dport);
1469 if (unlikely(!port)) {
1470 err = TIPC_ERR_NO_PORT;
1471 goto exit;
1472 }
1473
1474 tsk = tipc_port_to_sock(port);
1475 sk = &tsk->sk;
1476
1477 /* Queue message */
1455 bh_lock_sock(sk); 1478 bh_lock_sock(sk);
1479
1456 if (!sock_owned_by_user(sk)) { 1480 if (!sock_owned_by_user(sk)) {
1457 res = filter_rcv(sk, buf); 1481 err = filter_rcv(sk, buf);
1458 } else { 1482 } else {
1459 if (sk_add_backlog(sk, buf, rcvbuf_limit(sk, buf))) 1483 if (sk->sk_backlog.len == 0)
1460 res = TIPC_ERR_OVERLOAD; 1484 atomic_set(&tsk->dupl_rcvcnt, 0);
1461 else 1485 limit = rcvbuf_limit(sk, buf) + atomic_read(&tsk->dupl_rcvcnt);
1462 res = TIPC_OK; 1486 if (sk_add_backlog(sk, buf, limit))
1487 err = TIPC_ERR_OVERLOAD;
1463 } 1488 }
1489
1464 bh_unlock_sock(sk); 1490 bh_unlock_sock(sk);
1491 tipc_port_unlock(port);
1465 1492
1466 return res; 1493 if (likely(!err))
1494 return 0;
1495exit:
1496 tipc_reject_msg(buf, err);
1497 return -EHOSTUNREACH;
1467} 1498}
1468 1499
1469static int tipc_wait_for_connect(struct socket *sock, long *timeo_p) 1500static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
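
The dupl_rcvcnt counter in the rewritten tipc_sk_rcv()/tipc_backlog_rcv() pair compensates for buffers whose truesize is charged twice while they sit on both the backlog and the receive queue: the backlog handler accumulates the duplicated bytes, the enqueue path widens the backlog limit by that amount, and the counter resets once the backlog drains. A reduced userspace model of the accounting (hypothetical fields and limits):

```c
#include <stdatomic.h>
#include <stdio.h>

#define OVERLOAD_LIMIT	(1 << 20)	/* stand-in for the TIPC limit */

struct sock_acct {
	atomic_int dupl_rcvcnt;	/* bytes charged to both queues */
	int backlog_len;
};

/* backlog processing: the buffer moves to the rcv queue, but its
 * truesize is still accounted against the backlog as well */
static void on_backlog_processed(struct sock_acct *s, int truesize)
{
	if (atomic_load(&s->dupl_rcvcnt) < OVERLOAD_LIMIT)
		atomic_fetch_add(&s->dupl_rcvcnt, truesize);
}

/* enqueue: widen the limit by the double-counted bytes and
 * reset the counter once the backlog has fully drained */
static int backlog_limit(struct sock_acct *s, int base)
{
	if (s->backlog_len == 0)
		atomic_store(&s->dupl_rcvcnt, 0);
	return base + atomic_load(&s->dupl_rcvcnt);
}

int main(void)
{
	struct sock_acct s = { .backlog_len = 0 };

	printf("limit=%d\n", backlog_limit(&s, 4096));	/* 4096 */
	on_backlog_processed(&s, 512);
	s.backlog_len = 1;
	printf("limit=%d\n", backlog_limit(&s, 4096));	/* 4608 */
	return 0;
}
```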
@@ -1905,6 +1936,28 @@ static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
1905 return put_user(sizeof(value), ol); 1936 return put_user(sizeof(value), ol);
1906} 1937}
1907 1938
1939int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg)
1940{
1941 struct tipc_sioc_ln_req lnr;
1942 void __user *argp = (void __user *)arg;
1943
1944 switch (cmd) {
1945 case SIOCGETLINKNAME:
1946 if (copy_from_user(&lnr, argp, sizeof(lnr)))
1947 return -EFAULT;
1948 if (!tipc_node_get_linkname(lnr.bearer_id, lnr.peer,
1949 lnr.linkname, TIPC_MAX_LINK_NAME)) {
1950 if (copy_to_user(argp, &lnr, sizeof(lnr)))
1951 return -EFAULT;
1952 return 0;
1953 }
1954 return -EADDRNOTAVAIL;
1956 default:
1957 return -ENOIOCTLCMD;
1958 }
1959}
1960
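
From userspace the new call is a plain ioctl round-trip on any TIPC socket. A hedged usage sketch, assuming the SIOCGETLINKNAME define and struct tipc_sioc_ln_req layout added to the uapi header by the same series (the peer address value is only an example):

```c
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/tipc.h>

int main(void)
{
	struct tipc_sioc_ln_req lnr;
	int sd = socket(AF_TIPC, SOCK_RDM, 0);

	if (sd < 0)
		return 1;

	memset(&lnr, 0, sizeof(lnr));
	lnr.peer = 0x1001001;	/* example peer node <1.1.1> */
	lnr.bearer_id = 0;

	if (ioctl(sd, SIOCGETLINKNAME, &lnr) == 0)
		printf("link: %s\n", lnr.linkname);
	else
		perror("SIOCGETLINKNAME");

	close(sd);
	return 0;
}
```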
1908/* Protocol switches for the various types of TIPC sockets */ 1961/* Protocol switches for the various types of TIPC sockets */
1909 1962
1910static const struct proto_ops msg_ops = { 1963static const struct proto_ops msg_ops = {
@@ -1917,7 +1970,7 @@ static const struct proto_ops msg_ops = {
1917 .accept = sock_no_accept, 1970 .accept = sock_no_accept,
1918 .getname = tipc_getname, 1971 .getname = tipc_getname,
1919 .poll = tipc_poll, 1972 .poll = tipc_poll,
1920 .ioctl = sock_no_ioctl, 1973 .ioctl = tipc_ioctl,
1921 .listen = sock_no_listen, 1974 .listen = sock_no_listen,
1922 .shutdown = tipc_shutdown, 1975 .shutdown = tipc_shutdown,
1923 .setsockopt = tipc_setsockopt, 1976 .setsockopt = tipc_setsockopt,
@@ -1938,7 +1991,7 @@ static const struct proto_ops packet_ops = {
1938 .accept = tipc_accept, 1991 .accept = tipc_accept,
1939 .getname = tipc_getname, 1992 .getname = tipc_getname,
1940 .poll = tipc_poll, 1993 .poll = tipc_poll,
1941 .ioctl = sock_no_ioctl, 1994 .ioctl = tipc_ioctl,
1942 .listen = tipc_listen, 1995 .listen = tipc_listen,
1943 .shutdown = tipc_shutdown, 1996 .shutdown = tipc_shutdown,
1944 .setsockopt = tipc_setsockopt, 1997 .setsockopt = tipc_setsockopt,
@@ -1959,7 +2012,7 @@ static const struct proto_ops stream_ops = {
1959 .accept = tipc_accept, 2012 .accept = tipc_accept,
1960 .getname = tipc_getname, 2013 .getname = tipc_getname,
1961 .poll = tipc_poll, 2014 .poll = tipc_poll,
1962 .ioctl = sock_no_ioctl, 2015 .ioctl = tipc_ioctl,
1963 .listen = tipc_listen, 2016 .listen = tipc_listen,
1964 .shutdown = tipc_shutdown, 2017 .shutdown = tipc_shutdown,
1965 .setsockopt = tipc_setsockopt, 2018 .setsockopt = tipc_setsockopt,
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
index 74e5c7f195a6..3afcd2a70b31 100644
--- a/net/tipc/socket.h
+++ b/net/tipc/socket.h
@@ -44,12 +44,14 @@
44 * @port: port - interacts with 'sk' and with the rest of the TIPC stack 44 * @port: port - interacts with 'sk' and with the rest of the TIPC stack
45 * @peer_name: the peer of the connection, if any 45 * @peer_name: the peer of the connection, if any
46 * @conn_timeout: the time we can wait for an unresponded setup request 46 * @conn_timeout: the time we can wait for an unresponded setup request
47 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
47 */ 48 */
48 49
49struct tipc_sock { 50struct tipc_sock {
50 struct sock sk; 51 struct sock sk;
51 struct tipc_port port; 52 struct tipc_port port;
52 unsigned int conn_timeout; 53 unsigned int conn_timeout;
54 atomic_t dupl_rcvcnt;
53}; 55};
54 56
55static inline struct tipc_sock *tipc_sk(const struct sock *sk) 57static inline struct tipc_sock *tipc_sk(const struct sock *sk)
@@ -67,6 +69,6 @@ static inline void tipc_sock_wakeup(struct tipc_sock *tsk)
67 tsk->sk.sk_write_space(&tsk->sk); 69 tsk->sk.sk_write_space(&tsk->sk);
68} 70}
69 71
70u32 tipc_sk_rcv(struct sock *sk, struct sk_buff *buf); 72int tipc_sk_rcv(struct sk_buff *buf);
71 73
72#endif 74#endif
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index bb7e8ba821f4..7b9114e0a5b1 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1492,10 +1492,14 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1492 if (len > sk->sk_sndbuf - 32) 1492 if (len > sk->sk_sndbuf - 32)
1493 goto out; 1493 goto out;
1494 1494
1495 if (len > SKB_MAX_ALLOC) 1495 if (len > SKB_MAX_ALLOC) {
1496 data_len = min_t(size_t, 1496 data_len = min_t(size_t,
1497 len - SKB_MAX_ALLOC, 1497 len - SKB_MAX_ALLOC,
1498 MAX_SKB_FRAGS * PAGE_SIZE); 1498 MAX_SKB_FRAGS * PAGE_SIZE);
1499 data_len = PAGE_ALIGN(data_len);
1500
1501 BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
1502 }
1499 1503
1500 skb = sock_alloc_send_pskb(sk, len - data_len, data_len, 1504 skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1501 msg->msg_flags & MSG_DONTWAIT, &err, 1505 msg->msg_flags & MSG_DONTWAIT, &err,
@@ -1670,6 +1674,8 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1670 1674
1671 data_len = max_t(int, 0, size - SKB_MAX_HEAD(0)); 1675 data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
1672 1676
1677 data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
1678
1673 skb = sock_alloc_send_pskb(sk, size - data_len, data_len, 1679 skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
1674 msg->msg_flags & MSG_DONTWAIT, &err, 1680 msg->msg_flags & MSG_DONTWAIT, &err,
1675 get_order(UNIX_SKB_FRAGS_SZ)); 1681 get_order(UNIX_SKB_FRAGS_SZ));
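
Both af_unix hunks page-align data_len so the paged part of the skb is built from whole pages and none of the MAX_SKB_FRAGS slots ends up holding a short tail. A quick illustration of the rounding (4 KiB pages and the byte counts are just examples):

```c
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long data_len = 84464;	/* e.g. len minus the linear part */

	/* 84464 = 20 pages + a 2544-byte tail; aligned -> 21 full pages */
	printf("raw=%lu aligned=%lu\n", data_len, PAGE_ALIGN(data_len));
	return 0;
}
```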
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 16d08b399210..405f3c4cf70c 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -95,6 +95,43 @@ config CFG80211_CERTIFICATION_ONUS
95 you are a wireless researcher and are working in a controlled 95 you are a wireless researcher and are working in a controlled
96 and approved environment by your local regulatory agency. 96 and approved environment by your local regulatory agency.
97 97
98config CFG80211_REG_CELLULAR_HINTS
99 bool "cfg80211 regulatory support for cellular base station hints"
100 depends on CFG80211_CERTIFICATION_ONUS
101 ---help---
102 This option enables support for parsing regulatory hints
103 from cellular base stations. If enabled and at least one driver
104 claims support for parsing cellular base station hints the
105 regulatory core will allow and parse these regulatory hints.
106 The regulatory core will only apply these regulatory hints on
107 drivers that support this feature. You should only enable this
108 feature if you have tested and validated this feature on your
109 systems.
110
111config CFG80211_REG_RELAX_NO_IR
112 bool "cfg80211 support for NO_IR relaxation"
113 depends on CFG80211_CERTIFICATION_ONUS
114 ---help---
115 This option enables support for relaxation of the NO_IR flag for
116 situations that certain regulatory bodies have provided clarifications
117 on how relaxation can occur. This feature has an inherent dependency on
118 userspace features which must have been properly tested and as such is
119 not enabled by default.
120
121 A relaxation feature example is allowing the operation of a P2P group
122 owner (GO) on channels marked with NO_IR if there is an additional BSS
123 interface which is associated with an AP which userspace assumes or confirms
124 to be an authorized master, i.e., with radar detection support and DFS
125 capabilities. However, note that in order not to create daisy-chain
126 scenarios, this relaxation is not allowed in cases where the BSS client
127 is associated with a P2P GO; in addition, a P2P GO instantiated on
128 a channel due to this relaxation should not allow connections from
129 non-P2P clients.
130
131 The regulatory core will apply these relaxations only for drivers that
132 support this feature by declaring the appropriate channel flags and
133 capabilities in their registration flow.
134
98config CFG80211_DEFAULT_PS 135config CFG80211_DEFAULT_PS
99 bool "enable powersave by default" 136 bool "enable powersave by default"
100 depends on CFG80211 137 depends on CFG80211
diff --git a/net/wireless/ap.c b/net/wireless/ap.c
index 3e02ade508d8..bdad1f951561 100644
--- a/net/wireless/ap.c
+++ b/net/wireless/ap.c
@@ -6,8 +6,8 @@
6#include "rdev-ops.h" 6#include "rdev-ops.h"
7 7
8 8
9static int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev, 9int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
10 struct net_device *dev, bool notify) 10 struct net_device *dev, bool notify)
11{ 11{
12 struct wireless_dev *wdev = dev->ieee80211_ptr; 12 struct wireless_dev *wdev = dev->ieee80211_ptr;
13 int err; 13 int err;
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 9c9501a35fb5..992b34070bcb 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -326,28 +326,57 @@ static int cfg80211_get_chans_dfs_required(struct wiphy *wiphy,
326 326
327 327
328int cfg80211_chandef_dfs_required(struct wiphy *wiphy, 328int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
329 const struct cfg80211_chan_def *chandef) 329 const struct cfg80211_chan_def *chandef,
330 enum nl80211_iftype iftype)
330{ 331{
331 int width; 332 int width;
332 int r; 333 int ret;
333 334
334 if (WARN_ON(!cfg80211_chandef_valid(chandef))) 335 if (WARN_ON(!cfg80211_chandef_valid(chandef)))
335 return -EINVAL; 336 return -EINVAL;
336 337
337 width = cfg80211_chandef_get_width(chandef); 338 switch (iftype) {
338 if (width < 0) 339 case NL80211_IFTYPE_ADHOC:
339 return -EINVAL; 340 case NL80211_IFTYPE_AP:
341 case NL80211_IFTYPE_P2P_GO:
342 case NL80211_IFTYPE_MESH_POINT:
343 width = cfg80211_chandef_get_width(chandef);
344 if (width < 0)
345 return -EINVAL;
340 346
341 r = cfg80211_get_chans_dfs_required(wiphy, chandef->center_freq1, 347 ret = cfg80211_get_chans_dfs_required(wiphy,
342 width); 348 chandef->center_freq1,
343 if (r) 349 width);
344 return r; 350 if (ret < 0)
351 return ret;
352 else if (ret > 0)
353 return BIT(chandef->width);
345 354
346 if (!chandef->center_freq2) 355 if (!chandef->center_freq2)
347 return 0; 356 return 0;
357
358 ret = cfg80211_get_chans_dfs_required(wiphy,
359 chandef->center_freq2,
360 width);
361 if (ret < 0)
362 return ret;
363 else if (ret > 0)
364 return BIT(chandef->width);
348 365
349 return cfg80211_get_chans_dfs_required(wiphy, chandef->center_freq2, 366 break;
350 width); 367 case NL80211_IFTYPE_STATION:
368 case NL80211_IFTYPE_P2P_CLIENT:
369 case NL80211_IFTYPE_MONITOR:
370 case NL80211_IFTYPE_AP_VLAN:
371 case NL80211_IFTYPE_WDS:
372 case NL80211_IFTYPE_P2P_DEVICE:
373 break;
374 case NL80211_IFTYPE_UNSPECIFIED:
375 case NUM_NL80211_IFTYPES:
376 WARN_ON(1);
377 }
378
379 return 0;
351} 380}
352EXPORT_SYMBOL(cfg80211_chandef_dfs_required); 381EXPORT_SYMBOL(cfg80211_chandef_dfs_required);
353 382
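
cfg80211_chandef_dfs_required() now overloads its return value: negative for an invalid chandef, 0 when no DFS is needed, and BIT(chandef->width) otherwise, which callers can OR straight into a radar-detect bitmap (as cfg80211_get_chan_state does further down). A caller-side sketch (the wrapper is hypothetical):

```c
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/types.h>

/* stand-in for cfg80211_chandef_dfs_required() */
static int dfs_required(void)
{
	return BIT(3);	/* e.g. BIT(chandef->width) for an 80 MHz def */
}

static int update_radar_mask(u8 *radar_detect)
{
	int ret = dfs_required();

	if (WARN_ON(ret < 0))
		return ret;		/* invalid chandef */
	if (ret > 0)
		*radar_detect |= ret;	/* accumulate per-width bits */
	return 0;
}
```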
@@ -587,12 +616,14 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
587 width = 5; 616 width = 5;
588 break; 617 break;
589 case NL80211_CHAN_WIDTH_10: 618 case NL80211_CHAN_WIDTH_10:
619 prohibited_flags |= IEEE80211_CHAN_NO_10MHZ;
590 width = 10; 620 width = 10;
591 break; 621 break;
592 case NL80211_CHAN_WIDTH_20: 622 case NL80211_CHAN_WIDTH_20:
593 if (!ht_cap->ht_supported) 623 if (!ht_cap->ht_supported)
594 return false; 624 return false;
595 case NL80211_CHAN_WIDTH_20_NOHT: 625 case NL80211_CHAN_WIDTH_20_NOHT:
626 prohibited_flags |= IEEE80211_CHAN_NO_20MHZ;
596 width = 20; 627 width = 20;
597 break; 628 break;
598 case NL80211_CHAN_WIDTH_40: 629 case NL80211_CHAN_WIDTH_40:
@@ -661,17 +692,111 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
661} 692}
662EXPORT_SYMBOL(cfg80211_chandef_usable); 693EXPORT_SYMBOL(cfg80211_chandef_usable);
663 694
695/*
696 * For GO only, check if the channel can be used under permissive conditions
697 * mandated by some regulatory bodies, i.e., the channel is marked with
698 * IEEE80211_CHAN_GO_CONCURRENT and there is an additional station interface
699 * associated to an AP on the same channel or on the same UNII band
700 * (assuming that the AP is an authorized master).
701 * In addition allow the GO to operate on a channel on which indoor operation is
702 * allowed, iff we are currently operating in an indoor environment.
703 */
704static bool cfg80211_go_permissive_chan(struct cfg80211_registered_device *rdev,
705 struct ieee80211_channel *chan)
706{
707 struct wireless_dev *wdev_iter;
708 struct wiphy *wiphy = wiphy_idx_to_wiphy(rdev->wiphy_idx);
709
710 ASSERT_RTNL();
711
712 if (!config_enabled(CONFIG_CFG80211_REG_RELAX_NO_IR) ||
713 !(wiphy->regulatory_flags & REGULATORY_ENABLE_RELAX_NO_IR))
714 return false;
715
716 if (regulatory_indoor_allowed() &&
717 (chan->flags & IEEE80211_CHAN_INDOOR_ONLY))
718 return true;
719
720 if (!(chan->flags & IEEE80211_CHAN_GO_CONCURRENT))
721 return false;
722
723 /*
724 * Generally, it is possible to rely on another device/driver to allow
725 * the GO concurrent relaxation, however, since the device can further
726 * enforce the relaxation (by doing a similar verifications as this),
727 * and thus fail the GO instantiation, consider only the interfaces of
728 * the current registered device.
729 */
730 list_for_each_entry(wdev_iter, &rdev->wdev_list, list) {
731 struct ieee80211_channel *other_chan = NULL;
732 int r1, r2;
733
734 if (wdev_iter->iftype != NL80211_IFTYPE_STATION ||
735 !netif_running(wdev_iter->netdev))
736 continue;
737
738 wdev_lock(wdev_iter);
739 if (wdev_iter->current_bss)
740 other_chan = wdev_iter->current_bss->pub.channel;
741 wdev_unlock(wdev_iter);
742
743 if (!other_chan)
744 continue;
745
746 if (chan == other_chan)
747 return true;
748
749 if (chan->band != IEEE80211_BAND_5GHZ)
750 continue;
751
752 r1 = cfg80211_get_unii(chan->center_freq);
753 r2 = cfg80211_get_unii(other_chan->center_freq);
754
755 if (r1 != -EINVAL && r1 == r2) {
756 /*
757 * At some locations channels 149-165 are considered a
758 * bundle, but at other locations, e.g., Indonesia,
759 * channels 149-161 are considered a bundle while
760 * channel 165 is left out and considered to be in a
761 * different bundle. Thus, in case that there is a
762 * station interface connected to an AP on channel 165,
763 * it is assumed that channels 149-161 are allowed for
764 * GO operations. However, having a station interface
765 * connected to an AP on channels 149-161, does not
766 * allow GO operation on channel 165.
767 */
768 if (chan->center_freq == 5825 &&
769 other_chan->center_freq != 5825)
770 continue;
771 return true;
772 }
773 }
774
775 return false;
776}
777
664bool cfg80211_reg_can_beacon(struct wiphy *wiphy, 778bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
665 struct cfg80211_chan_def *chandef) 779 struct cfg80211_chan_def *chandef,
780 enum nl80211_iftype iftype)
666{ 781{
782 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
667 bool res; 783 bool res;
668 u32 prohibited_flags = IEEE80211_CHAN_DISABLED | 784 u32 prohibited_flags = IEEE80211_CHAN_DISABLED |
669 IEEE80211_CHAN_NO_IR |
670 IEEE80211_CHAN_RADAR; 785 IEEE80211_CHAN_RADAR;
671 786
672 trace_cfg80211_reg_can_beacon(wiphy, chandef); 787 trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype);
673 788
674 if (cfg80211_chandef_dfs_required(wiphy, chandef) > 0 && 789 /*
790 * Under certain conditions suggested by some regulatory bodies
791 * a GO can operate on channels marked with IEEE80211_CHAN_NO_IR,
792 * so set this flag only if such relaxations are not enabled and
793 * the conditions are not met.
794 */
795 if (iftype != NL80211_IFTYPE_P2P_GO ||
796 !cfg80211_go_permissive_chan(rdev, chandef->chan))
797 prohibited_flags |= IEEE80211_CHAN_NO_IR;
798
799 if (cfg80211_chandef_dfs_required(wiphy, chandef, iftype) > 0 &&
675 cfg80211_chandef_dfs_available(wiphy, chandef)) { 800 cfg80211_chandef_dfs_available(wiphy, chandef)) {
676 /* We can skip IEEE80211_CHAN_NO_IR if chandef dfs available */ 801 /* We can skip IEEE80211_CHAN_NO_IR if chandef dfs available */
677 prohibited_flags = IEEE80211_CHAN_DISABLED; 802 prohibited_flags = IEEE80211_CHAN_DISABLED;
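
The beacon check now starts from a reduced prohibited set and adds IEEE80211_CHAN_NO_IR back only when the GO relaxation does not apply, while a DFS-available channel strips everything but DISABLED. A condensed userspace model of the flag decision (illustrative bit values):

```c
#include <stdio.h>

#define CHAN_DISABLED	(1 << 0)
#define CHAN_NO_IR	(1 << 1)
#define CHAN_RADAR	(1 << 2)

static unsigned int prohibited(int p2p_go, int permissive,
			       int dfs_required, int dfs_available)
{
	unsigned int flags = CHAN_DISABLED | CHAN_RADAR;

	if (!p2p_go || !permissive)
		flags |= CHAN_NO_IR;	/* no relaxation: NO_IR stays */

	if (dfs_required && dfs_available)
		flags = CHAN_DISABLED;	/* DFS covers IR/radar concerns */

	return flags;
}

int main(void)
{
	printf("sta: %#x\n", prohibited(0, 0, 0, 0));	/* 0x7 */
	printf("go:  %#x\n", prohibited(1, 1, 0, 0));	/* 0x5 */
	printf("dfs: %#x\n", prohibited(0, 0, 1, 1));	/* 0x1 */
	return 0;
}
```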
@@ -701,6 +826,8 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
701 enum cfg80211_chan_mode *chanmode, 826 enum cfg80211_chan_mode *chanmode,
702 u8 *radar_detect) 827 u8 *radar_detect)
703{ 828{
829 int ret;
830
704 *chan = NULL; 831 *chan = NULL;
705 *chanmode = CHAN_MODE_UNDEFINED; 832 *chanmode = CHAN_MODE_UNDEFINED;
706 833
@@ -743,8 +870,11 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
743 *chan = wdev->chandef.chan; 870 *chan = wdev->chandef.chan;
744 *chanmode = CHAN_MODE_SHARED; 871 *chanmode = CHAN_MODE_SHARED;
745 872
746 if (cfg80211_chandef_dfs_required(wdev->wiphy, 873 ret = cfg80211_chandef_dfs_required(wdev->wiphy,
747 &wdev->chandef)) 874 &wdev->chandef,
875 wdev->iftype);
876 WARN_ON(ret < 0);
877 if (ret > 0)
748 *radar_detect |= BIT(wdev->chandef.width); 878 *radar_detect |= BIT(wdev->chandef.width);
749 } 879 }
750 return; 880 return;
@@ -753,8 +883,11 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
753 *chan = wdev->chandef.chan; 883 *chan = wdev->chandef.chan;
754 *chanmode = CHAN_MODE_SHARED; 884 *chanmode = CHAN_MODE_SHARED;
755 885
756 if (cfg80211_chandef_dfs_required(wdev->wiphy, 886 ret = cfg80211_chandef_dfs_required(wdev->wiphy,
757 &wdev->chandef)) 887 &wdev->chandef,
888 wdev->iftype);
889 WARN_ON(ret < 0);
890 if (ret > 0)
758 *radar_detect |= BIT(wdev->chandef.width); 891 *radar_detect |= BIT(wdev->chandef.width);
759 } 892 }
760 return; 893 return;
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 086cddd03ba6..a1c40654dd9b 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -69,7 +69,7 @@ struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx)
69 69
70int get_wiphy_idx(struct wiphy *wiphy) 70int get_wiphy_idx(struct wiphy *wiphy)
71{ 71{
72 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 72 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
73 73
74 return rdev->wiphy_idx; 74 return rdev->wiphy_idx;
75} 75}
@@ -130,7 +130,7 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
130 newname)) 130 newname))
131 pr_err("failed to rename debugfs dir to %s!\n", newname); 131 pr_err("failed to rename debugfs dir to %s!\n", newname);
132 132
133 nl80211_notify_dev_rename(rdev); 133 nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY);
134 134
135 return 0; 135 return 0;
136} 136}
@@ -210,15 +210,12 @@ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
210 } 210 }
211} 211}
212 212
213static int cfg80211_rfkill_set_block(void *data, bool blocked) 213void cfg80211_shutdown_all_interfaces(struct wiphy *wiphy)
214{ 214{
215 struct cfg80211_registered_device *rdev = data; 215 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
216 struct wireless_dev *wdev; 216 struct wireless_dev *wdev;
217 217
218 if (!blocked) 218 ASSERT_RTNL();
219 return 0;
220
221 rtnl_lock();
222 219
223 list_for_each_entry(wdev, &rdev->wdev_list, list) { 220 list_for_each_entry(wdev, &rdev->wdev_list, list) {
224 if (wdev->netdev) { 221 if (wdev->netdev) {
@@ -234,7 +231,18 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
234 break; 231 break;
235 } 232 }
236 } 233 }
234}
235EXPORT_SYMBOL_GPL(cfg80211_shutdown_all_interfaces);
236
237static int cfg80211_rfkill_set_block(void *data, bool blocked)
238{
239 struct cfg80211_registered_device *rdev = data;
240
241 if (!blocked)
242 return 0;
237 243
244 rtnl_lock();
245 cfg80211_shutdown_all_interfaces(&rdev->wiphy);
238 rtnl_unlock(); 246 rtnl_unlock();
239 247
240 return 0; 248 return 0;
@@ -260,6 +268,45 @@ static void cfg80211_event_work(struct work_struct *work)
260 rtnl_unlock(); 268 rtnl_unlock();
261} 269}
262 270
271void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev)
272{
273 struct cfg80211_iface_destroy *item;
274
275 ASSERT_RTNL();
276
277 spin_lock_irq(&rdev->destroy_list_lock);
278 while ((item = list_first_entry_or_null(&rdev->destroy_list,
279 struct cfg80211_iface_destroy,
280 list))) {
281 struct wireless_dev *wdev, *tmp;
282 u32 nlportid = item->nlportid;
283
284 list_del(&item->list);
285 kfree(item);
286 spin_unlock_irq(&rdev->destroy_list_lock);
287
288 list_for_each_entry_safe(wdev, tmp, &rdev->wdev_list, list) {
289 if (nlportid == wdev->owner_nlportid)
290 rdev_del_virtual_intf(rdev, wdev);
291 }
292
293 spin_lock_irq(&rdev->destroy_list_lock);
294 }
295 spin_unlock_irq(&rdev->destroy_list_lock);
296}
297
298static void cfg80211_destroy_iface_wk(struct work_struct *work)
299{
300 struct cfg80211_registered_device *rdev;
301
302 rdev = container_of(work, struct cfg80211_registered_device,
303 destroy_work);
304
305 rtnl_lock();
306 cfg80211_destroy_ifaces(rdev);
307 rtnl_unlock();
308}
309
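
cfg80211_destroy_ifaces() drains its list one entry at a time, dropping the spinlock around each deletion because rdev_del_virtual_intf() can sleep; list_first_entry_or_null() makes the restart after every relock safe. A skeletal version of that drain loop (item type reduced):

```c
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct destroy_item {
	struct list_head list;
	u32 owner;
};

/* stand-in for rdev_del_virtual_intf(): may sleep */
static void process_item(u32 owner)
{
}

static void drain(spinlock_t *lock, struct list_head *head)
{
	struct destroy_item *item;

	spin_lock_irq(lock);
	while ((item = list_first_entry_or_null(head,
						struct destroy_item, list))) {
		u32 owner = item->owner;

		list_del(&item->list);
		kfree(item);
		spin_unlock_irq(lock);

		process_item(owner);	/* sleepable work, lock dropped */

		spin_lock_irq(lock);	/* re-take and re-check the head */
	}
	spin_unlock_irq(lock);
}
```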
263/* exported functions */ 310/* exported functions */
264 311
265struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) 312struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
@@ -318,6 +365,10 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
318 rdev->wiphy.dev.class = &ieee80211_class; 365 rdev->wiphy.dev.class = &ieee80211_class;
319 rdev->wiphy.dev.platform_data = rdev; 366 rdev->wiphy.dev.platform_data = rdev;
320 367
368 INIT_LIST_HEAD(&rdev->destroy_list);
369 spin_lock_init(&rdev->destroy_list_lock);
370 INIT_WORK(&rdev->destroy_work, cfg80211_destroy_iface_wk);
371
321#ifdef CONFIG_CFG80211_DEFAULT_PS 372#ifdef CONFIG_CFG80211_DEFAULT_PS
322 rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; 373 rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
323#endif 374#endif
@@ -351,6 +402,8 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
351 rdev->wiphy.rts_threshold = (u32) -1; 402 rdev->wiphy.rts_threshold = (u32) -1;
352 rdev->wiphy.coverage_class = 0; 403 rdev->wiphy.coverage_class = 0;
353 404
405 rdev->wiphy.max_num_csa_counters = 1;
406
354 return &rdev->wiphy; 407 return &rdev->wiphy;
355} 408}
356EXPORT_SYMBOL(wiphy_new); 409EXPORT_SYMBOL(wiphy_new);
@@ -396,10 +449,7 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
396 for (j = 0; j < c->n_limits; j++) { 449 for (j = 0; j < c->n_limits; j++) {
397 u16 types = c->limits[j].types; 450 u16 types = c->limits[j].types;
398 451
399 /* 452 /* interface types shouldn't overlap */
400 * interface types shouldn't overlap, this is
401 * used in cfg80211_can_change_interface()
402 */
403 if (WARN_ON(types & all_iftypes)) 453 if (WARN_ON(types & all_iftypes))
404 return -EINVAL; 454 return -EINVAL;
405 all_iftypes |= types; 455 all_iftypes |= types;
@@ -435,7 +485,7 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
435 485
436int wiphy_register(struct wiphy *wiphy) 486int wiphy_register(struct wiphy *wiphy)
437{ 487{
438 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 488 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
439 int res; 489 int res;
440 enum ieee80211_band band; 490 enum ieee80211_band band;
441 struct ieee80211_supported_band *sband; 491 struct ieee80211_supported_band *sband;
@@ -610,13 +660,15 @@ int wiphy_register(struct wiphy *wiphy)
610 return res; 660 return res;
611 } 661 }
612 662
663 nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY);
664
613 return 0; 665 return 0;
614} 666}
615EXPORT_SYMBOL(wiphy_register); 667EXPORT_SYMBOL(wiphy_register);
616 668
617void wiphy_rfkill_start_polling(struct wiphy *wiphy) 669void wiphy_rfkill_start_polling(struct wiphy *wiphy)
618{ 670{
619 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 671 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
620 672
621 if (!rdev->ops->rfkill_poll) 673 if (!rdev->ops->rfkill_poll)
622 return; 674 return;
@@ -627,7 +679,7 @@ EXPORT_SYMBOL(wiphy_rfkill_start_polling);
627 679
628void wiphy_rfkill_stop_polling(struct wiphy *wiphy) 680void wiphy_rfkill_stop_polling(struct wiphy *wiphy)
629{ 681{
630 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 682 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
631 683
632 rfkill_pause_polling(rdev->rfkill); 684 rfkill_pause_polling(rdev->rfkill);
633} 685}
@@ -635,7 +687,7 @@ EXPORT_SYMBOL(wiphy_rfkill_stop_polling);
635 687
636void wiphy_unregister(struct wiphy *wiphy) 688void wiphy_unregister(struct wiphy *wiphy)
637{ 689{
638 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 690 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
639 691
640 wait_event(rdev->dev_wait, ({ 692 wait_event(rdev->dev_wait, ({
641 int __count; 693 int __count;
@@ -648,9 +700,10 @@ void wiphy_unregister(struct wiphy *wiphy)
648 rfkill_unregister(rdev->rfkill); 700 rfkill_unregister(rdev->rfkill);
649 701
650 rtnl_lock(); 702 rtnl_lock();
703 nl80211_notify_wiphy(rdev, NL80211_CMD_DEL_WIPHY);
651 rdev->wiphy.registered = false; 704 rdev->wiphy.registered = false;
652 705
653 BUG_ON(!list_empty(&rdev->wdev_list)); 706 WARN_ON(!list_empty(&rdev->wdev_list));
654 707
655 /* 708 /*
656 * First remove the hardware from everywhere, this makes 709 * First remove the hardware from everywhere, this makes
@@ -675,6 +728,7 @@ void wiphy_unregister(struct wiphy *wiphy)
675 cancel_work_sync(&rdev->conn_work); 728 cancel_work_sync(&rdev->conn_work);
676 flush_work(&rdev->event_work); 729 flush_work(&rdev->event_work);
677 cancel_delayed_work_sync(&rdev->dfs_update_channels_wk); 730 cancel_delayed_work_sync(&rdev->dfs_update_channels_wk);
731 flush_work(&rdev->destroy_work);
678 732
679#ifdef CONFIG_PM 733#ifdef CONFIG_PM
680 if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup) 734 if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup)
@@ -707,7 +761,7 @@ EXPORT_SYMBOL(wiphy_free);
707 761
708void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked) 762void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked)
709{ 763{
710 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 764 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
711 765
712 if (rfkill_set_hw_state(rdev->rfkill, blocked)) 766 if (rfkill_set_hw_state(rdev->rfkill, blocked))
713 schedule_work(&rdev->rfkill_sync); 767 schedule_work(&rdev->rfkill_sync);
@@ -716,7 +770,7 @@ EXPORT_SYMBOL(wiphy_rfkill_set_hw_state);
716 770
717void cfg80211_unregister_wdev(struct wireless_dev *wdev) 771void cfg80211_unregister_wdev(struct wireless_dev *wdev)
718{ 772{
719 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 773 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
720 774
721 ASSERT_RTNL(); 775 ASSERT_RTNL();
722 776
@@ -751,23 +805,23 @@ void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
751 rdev->num_running_monitor_ifaces += num; 805 rdev->num_running_monitor_ifaces += num;
752} 806}
753 807
754void cfg80211_leave(struct cfg80211_registered_device *rdev, 808void __cfg80211_leave(struct cfg80211_registered_device *rdev,
755 struct wireless_dev *wdev) 809 struct wireless_dev *wdev)
756{ 810{
757 struct net_device *dev = wdev->netdev; 811 struct net_device *dev = wdev->netdev;
758 812
759 ASSERT_RTNL(); 813 ASSERT_RTNL();
814 ASSERT_WDEV_LOCK(wdev);
760 815
761 switch (wdev->iftype) { 816 switch (wdev->iftype) {
762 case NL80211_IFTYPE_ADHOC: 817 case NL80211_IFTYPE_ADHOC:
763 cfg80211_leave_ibss(rdev, dev, true); 818 __cfg80211_leave_ibss(rdev, dev, true);
764 break; 819 break;
765 case NL80211_IFTYPE_P2P_CLIENT: 820 case NL80211_IFTYPE_P2P_CLIENT:
766 case NL80211_IFTYPE_STATION: 821 case NL80211_IFTYPE_STATION:
767 if (rdev->sched_scan_req && dev == rdev->sched_scan_req->dev) 822 if (rdev->sched_scan_req && dev == rdev->sched_scan_req->dev)
768 __cfg80211_stop_sched_scan(rdev, false); 823 __cfg80211_stop_sched_scan(rdev, false);
769 824
770 wdev_lock(wdev);
771#ifdef CONFIG_CFG80211_WEXT 825#ifdef CONFIG_CFG80211_WEXT
772 kfree(wdev->wext.ie); 826 kfree(wdev->wext.ie);
773 wdev->wext.ie = NULL; 827 wdev->wext.ie = NULL;
@@ -776,32 +830,60 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev,
776#endif 830#endif
777 cfg80211_disconnect(rdev, dev, 831 cfg80211_disconnect(rdev, dev,
778 WLAN_REASON_DEAUTH_LEAVING, true); 832 WLAN_REASON_DEAUTH_LEAVING, true);
779 wdev_unlock(wdev);
780 break; 833 break;
781 case NL80211_IFTYPE_MESH_POINT: 834 case NL80211_IFTYPE_MESH_POINT:
782 cfg80211_leave_mesh(rdev, dev); 835 __cfg80211_leave_mesh(rdev, dev);
783 break; 836 break;
784 case NL80211_IFTYPE_AP: 837 case NL80211_IFTYPE_AP:
785 case NL80211_IFTYPE_P2P_GO: 838 case NL80211_IFTYPE_P2P_GO:
786 cfg80211_stop_ap(rdev, dev, true); 839 __cfg80211_stop_ap(rdev, dev, true);
787 break; 840 break;
788 default: 841 default:
789 break; 842 break;
790 } 843 }
791} 844}
792 845
846void cfg80211_leave(struct cfg80211_registered_device *rdev,
847 struct wireless_dev *wdev)
848{
849 wdev_lock(wdev);
850 __cfg80211_leave(rdev, wdev);
851 wdev_unlock(wdev);
852}
853
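The hunk above follows the usual locked/unlocked naming split: __cfg80211_leave() asserts the wdev lock is held (ASSERT_WDEV_LOCK) and cfg80211_leave() is the thin wrapper that takes it, so callers already holding the lock can use the __variant without deadlocking. A minimal sketch of the convention, with a pthread mutex standing in for wdev_lock() (names hypothetical):

    #include <pthread.h>

    static pthread_mutex_t wdev_mtx = PTHREAD_MUTEX_INITIALIZER;

    /* __variant: caller must already hold wdev_mtx */
    static void __leave(void)
    {
            /* teardown that relies on the lock being held */
    }

    /* unlocked wrapper: takes the lock around the __variant */
    static void leave(void)
    {
            pthread_mutex_lock(&wdev_mtx);
            __leave();
            pthread_mutex_unlock(&wdev_mtx);
    }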
854void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev,
855 gfp_t gfp)
856{
857 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
858 struct cfg80211_event *ev;
859 unsigned long flags;
860
861 trace_cfg80211_stop_iface(wiphy, wdev);
862
863 ev = kzalloc(sizeof(*ev), gfp);
864 if (!ev)
865 return;
866
867 ev->type = EVENT_STOPPED;
868
869 spin_lock_irqsave(&wdev->event_lock, flags);
870 list_add_tail(&ev->list, &wdev->event_list);
871 spin_unlock_irqrestore(&wdev->event_lock, flags);
872 queue_work(cfg80211_wq, &rdev->event_work);
873}
874EXPORT_SYMBOL(cfg80211_stop_iface);
875
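cfg80211_stop_iface() takes a gfp_t precisely so drivers can request teardown from atomic context: the event is allocated with the caller's flags, queued under wdev->event_lock, and the actual leave/stop work is deferred to rdev->event_work. Hypothetical driver call sites, illustrative only and not from this patch:

    /* from an IRQ handler or other atomic context: must not sleep */
    cfg80211_stop_iface(wiphy, wdev, GFP_ATOMIC);

    /* from process context, where a sleeping allocation is fine */
    cfg80211_stop_iface(wiphy, wdev, GFP_KERNEL);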
793static int cfg80211_netdev_notifier_call(struct notifier_block *nb, 876static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
794 unsigned long state, void *ptr) 877 unsigned long state, void *ptr)
795{ 878{
796 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 879 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
797 struct wireless_dev *wdev = dev->ieee80211_ptr; 880 struct wireless_dev *wdev = dev->ieee80211_ptr;
798 struct cfg80211_registered_device *rdev; 881 struct cfg80211_registered_device *rdev;
799 int ret;
800 882
801 if (!wdev) 883 if (!wdev)
802 return NOTIFY_DONE; 884 return NOTIFY_DONE;
803 885
804 rdev = wiphy_to_dev(wdev->wiphy); 886 rdev = wiphy_to_rdev(wdev->wiphy);
805 887
806 WARN_ON(wdev->iftype == NL80211_IFTYPE_UNSPECIFIED); 888 WARN_ON(wdev->iftype == NL80211_IFTYPE_UNSPECIFIED);
807 889
@@ -959,13 +1041,14 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
959 case NETDEV_PRE_UP: 1041 case NETDEV_PRE_UP:
960 if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype))) 1042 if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
961 return notifier_from_errno(-EOPNOTSUPP); 1043 return notifier_from_errno(-EOPNOTSUPP);
962 ret = cfg80211_can_add_interface(rdev, wdev->iftype); 1044 if (rfkill_blocked(rdev->rfkill))
963 if (ret) 1045 return notifier_from_errno(-ERFKILL);
964 return notifier_from_errno(ret);
965 break; 1046 break;
1047 default:
1048 return NOTIFY_DONE;
966 } 1049 }
967 1050
968 return NOTIFY_DONE; 1051 return NOTIFY_OK;
969} 1052}
970 1053
971static struct notifier_block cfg80211_netdev_notifier = { 1054static struct notifier_block cfg80211_netdev_notifier = {
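The notifier now distinguishes events it actually handled (NOTIFY_OK) from ones it ignored (NOTIFY_DONE via the new default branch), and reports rfkill directly through notifier_from_errno() instead of going via the removed cfg80211_can_add_interface(). For reference, notifier_from_errno() packs an errno into a notifier return value roughly as below (paraphrased from include/linux/notifier.h of this era; treat as a sketch):

    static inline int notifier_from_errno(int err)
    {
            if (err)
                    return NOTIFY_STOP_MASK | (NOTIFY_BAD - err);
            return NOTIFY_OK;
    }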
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 5b1fdcadd469..e9afbf10e756 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -80,13 +80,17 @@ struct cfg80211_registered_device {
80 80
81 struct cfg80211_coalesce *coalesce; 81 struct cfg80211_coalesce *coalesce;
82 82
83 spinlock_t destroy_list_lock;
84 struct list_head destroy_list;
85 struct work_struct destroy_work;
86
83 /* must be last because of the way we do wiphy_priv(), 87 /* must be last because of the way we do wiphy_priv(),
84 * and it should at least be aligned to NETDEV_ALIGN */ 88 * and it should at least be aligned to NETDEV_ALIGN */
85 struct wiphy wiphy __aligned(NETDEV_ALIGN); 89 struct wiphy wiphy __aligned(NETDEV_ALIGN);
86}; 90};
87 91
88static inline 92static inline
89struct cfg80211_registered_device *wiphy_to_dev(struct wiphy *wiphy) 93struct cfg80211_registered_device *wiphy_to_rdev(struct wiphy *wiphy)
90{ 94{
91 BUG_ON(!wiphy); 95 BUG_ON(!wiphy);
92 return container_of(wiphy, struct cfg80211_registered_device, wiphy); 96 return container_of(wiphy, struct cfg80211_registered_device, wiphy);
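wiphy_to_dev() is renamed wiphy_to_rdev() throughout the patch; the mechanics are unchanged and are just the container_of() pattern: given a pointer to the embedded struct wiphy, subtract its offset to recover the enclosing registered device. A standalone illustration with simplified types and hypothetical field names:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct wiphy { int dummy; };

    struct registered_device {
            int idx;
            struct wiphy wiphy;     /* must be last in the real struct */
    };

    int main(void)
    {
            struct registered_device rdev = { .idx = 7 };
            struct wiphy *w = &rdev.wiphy;

            printf("%d\n", container_of(w, struct registered_device, wiphy)->idx);
            return 0;       /* prints 7 */
    }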
@@ -181,6 +185,7 @@ enum cfg80211_event_type {
181 EVENT_ROAMED, 185 EVENT_ROAMED,
182 EVENT_DISCONNECTED, 186 EVENT_DISCONNECTED,
183 EVENT_IBSS_JOINED, 187 EVENT_IBSS_JOINED,
188 EVENT_STOPPED,
184}; 189};
185 190
186struct cfg80211_event { 191struct cfg80211_event {
@@ -232,6 +237,13 @@ struct cfg80211_beacon_registration {
232 u32 nlportid; 237 u32 nlportid;
233}; 238};
234 239
240struct cfg80211_iface_destroy {
241 struct list_head list;
242 u32 nlportid;
243};
244
245void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev);
246
235/* free object */ 247/* free object */
236void cfg80211_dev_free(struct cfg80211_registered_device *rdev); 248void cfg80211_dev_free(struct cfg80211_registered_device *rdev);
237 249
@@ -240,8 +252,8 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
240 252
241void ieee80211_set_bitrate_flags(struct wiphy *wiphy); 253void ieee80211_set_bitrate_flags(struct wiphy *wiphy);
242 254
243void cfg80211_bss_expire(struct cfg80211_registered_device *dev); 255void cfg80211_bss_expire(struct cfg80211_registered_device *rdev);
244void cfg80211_bss_age(struct cfg80211_registered_device *dev, 256void cfg80211_bss_age(struct cfg80211_registered_device *rdev,
245 unsigned long age_secs); 257 unsigned long age_secs);
246 258
247/* IBSS */ 259/* IBSS */
@@ -270,6 +282,8 @@ int cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
270 struct net_device *dev, 282 struct net_device *dev,
271 struct mesh_setup *setup, 283 struct mesh_setup *setup,
272 const struct mesh_config *conf); 284 const struct mesh_config *conf);
285int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
286 struct net_device *dev);
273int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev, 287int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
274 struct net_device *dev); 288 struct net_device *dev);
275int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev, 289int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
@@ -277,6 +291,8 @@ int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
277 struct cfg80211_chan_def *chandef); 291 struct cfg80211_chan_def *chandef);
278 292
279/* AP */ 293/* AP */
294int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
295 struct net_device *dev, bool notify);
280int cfg80211_stop_ap(struct cfg80211_registered_device *rdev, 296int cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
281 struct net_device *dev, bool notify); 297 struct net_device *dev, bool notify);
282 298
@@ -401,35 +417,6 @@ unsigned int
401cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy, 417cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy,
402 const struct cfg80211_chan_def *chandef); 418 const struct cfg80211_chan_def *chandef);
403 419
404static inline int
405cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
406 struct wireless_dev *wdev,
407 enum nl80211_iftype iftype)
408{
409 return cfg80211_can_use_iftype_chan(rdev, wdev, iftype, NULL,
410 CHAN_MODE_UNDEFINED, 0);
411}
412
413static inline int
414cfg80211_can_add_interface(struct cfg80211_registered_device *rdev,
415 enum nl80211_iftype iftype)
416{
417 if (rfkill_blocked(rdev->rfkill))
418 return -ERFKILL;
419
420 return cfg80211_can_change_interface(rdev, NULL, iftype);
421}
422
423static inline int
424cfg80211_can_use_chan(struct cfg80211_registered_device *rdev,
425 struct wireless_dev *wdev,
426 struct ieee80211_channel *chan,
427 enum cfg80211_chan_mode chanmode)
428{
429 return cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
430 chan, chanmode, 0);
431}
432
433static inline unsigned int elapsed_jiffies_msecs(unsigned long start) 420static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
434{ 421{
435 unsigned long end = jiffies; 422 unsigned long end = jiffies;
@@ -459,6 +446,8 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
459void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, 446void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
460 enum nl80211_iftype iftype, int num); 447 enum nl80211_iftype iftype, int num);
461 448
449void __cfg80211_leave(struct cfg80211_registered_device *rdev,
450 struct wireless_dev *wdev);
462void cfg80211_leave(struct cfg80211_registered_device *rdev, 451void cfg80211_leave(struct cfg80211_registered_device *rdev,
463 struct wireless_dev *wdev); 452 struct wireless_dev *wdev);
464 453
diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c
index e37862f1b127..d4860bfc020e 100644
--- a/net/wireless/ethtool.c
+++ b/net/wireless/ethtool.c
@@ -43,7 +43,7 @@ static void cfg80211_get_ringparam(struct net_device *dev,
43 struct ethtool_ringparam *rp) 43 struct ethtool_ringparam *rp)
44{ 44{
45 struct wireless_dev *wdev = dev->ieee80211_ptr; 45 struct wireless_dev *wdev = dev->ieee80211_ptr;
46 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 46 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
47 47
48 memset(rp, 0, sizeof(*rp)); 48 memset(rp, 0, sizeof(*rp));
49 49
@@ -56,7 +56,7 @@ static int cfg80211_set_ringparam(struct net_device *dev,
56 struct ethtool_ringparam *rp) 56 struct ethtool_ringparam *rp)
57{ 57{
58 struct wireless_dev *wdev = dev->ieee80211_ptr; 58 struct wireless_dev *wdev = dev->ieee80211_ptr;
59 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 59 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
60 60
61 if (rp->rx_mini_pending != 0 || rp->rx_jumbo_pending != 0) 61 if (rp->rx_mini_pending != 0 || rp->rx_jumbo_pending != 0)
62 return -EINVAL; 62 return -EINVAL;
@@ -70,7 +70,7 @@ static int cfg80211_set_ringparam(struct net_device *dev,
70static int cfg80211_get_sset_count(struct net_device *dev, int sset) 70static int cfg80211_get_sset_count(struct net_device *dev, int sset)
71{ 71{
72 struct wireless_dev *wdev = dev->ieee80211_ptr; 72 struct wireless_dev *wdev = dev->ieee80211_ptr;
73 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 73 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
74 if (rdev->ops->get_et_sset_count) 74 if (rdev->ops->get_et_sset_count)
75 return rdev_get_et_sset_count(rdev, dev, sset); 75 return rdev_get_et_sset_count(rdev, dev, sset);
76 return -EOPNOTSUPP; 76 return -EOPNOTSUPP;
@@ -80,7 +80,7 @@ static void cfg80211_get_stats(struct net_device *dev,
80 struct ethtool_stats *stats, u64 *data) 80 struct ethtool_stats *stats, u64 *data)
81{ 81{
82 struct wireless_dev *wdev = dev->ieee80211_ptr; 82 struct wireless_dev *wdev = dev->ieee80211_ptr;
83 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 83 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
84 if (rdev->ops->get_et_stats) 84 if (rdev->ops->get_et_stats)
85 rdev_get_et_stats(rdev, dev, stats, data); 85 rdev_get_et_stats(rdev, dev, stats, data);
86} 86}
@@ -88,7 +88,7 @@ static void cfg80211_get_stats(struct net_device *dev,
88static void cfg80211_get_strings(struct net_device *dev, u32 sset, u8 *data) 88static void cfg80211_get_strings(struct net_device *dev, u32 sset, u8 *data)
89{ 89{
90 struct wireless_dev *wdev = dev->ieee80211_ptr; 90 struct wireless_dev *wdev = dev->ieee80211_ptr;
91 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 91 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
92 if (rdev->ops->get_et_strings) 92 if (rdev->ops->get_et_strings)
93 rdev_get_et_strings(rdev, dev, sset, data); 93 rdev_get_et_strings(rdev, dev, sset, data);
94} 94}
diff --git a/net/wireless/genregdb.awk b/net/wireless/genregdb.awk
index b35da8dc85de..40c37fc5b67c 100644
--- a/net/wireless/genregdb.awk
+++ b/net/wireless/genregdb.awk
@@ -68,17 +68,7 @@ function parse_reg_rule()
68 sub(/,/, "", units) 68 sub(/,/, "", units)
69 dfs_cac = $9 69 dfs_cac = $9
70 if (units == "mW") { 70 if (units == "mW") {
71 if (power == 100) { 71 power = 10 * log(power)/log(10)
72 power = 20
73 } else if (power == 200) {
74 power = 23
75 } else if (power == 500) {
76 power = 27
77 } else if (power == 1000) {
78 power = 30
79 } else {
80 print "Unknown power value in database!"
81 }
82 } else { 72 } else {
83 dfs_cac = $8 73 dfs_cac = $8
84 } 74 }
@@ -117,7 +107,7 @@ function parse_reg_rule()
117 107
118 } 108 }
119 flags = flags "0" 109 flags = flags "0"
120 printf "\t\tREG_RULE_EXT(%d, %d, %d, %d, %d, %d, %s),\n", start, end, bw, gain, power, dfs_cac, flags 110 printf "\t\tREG_RULE_EXT(%d, %d, %d, %d, %.0f, %d, %s),\n", start, end, bw, gain, power, dfs_cac, flags
121 rules++ 111 rules++
122} 112}
123 113
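The awk change replaces the hard-coded mW-to-dBm lookup with the closed form dBm = 10 * log10(mW) -- awk's log() is the natural log, hence log(power)/log(10) -- and the companion printf change to %.0f rounds the now-fractional result. The removed table falls out of the formula, and any other mW value now converts instead of hitting the "Unknown power value" branch:

    10 * log10(100)  = 20.00 -> 20 dBm
    10 * log10(200)  = 23.01 -> 23 dBm
    10 * log10(500)  = 26.99 -> 27 dBm
    10 * log10(1000) = 30.00 -> 30 dBm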
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index a6b5bdad039c..8f345da3ea5f 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -45,7 +45,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
45 45
46 cfg80211_upload_connect_keys(wdev); 46 cfg80211_upload_connect_keys(wdev);
47 47
48 nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, 48 nl80211_send_ibss_bssid(wiphy_to_rdev(wdev->wiphy), dev, bssid,
49 GFP_KERNEL); 49 GFP_KERNEL);
50#ifdef CONFIG_CFG80211_WEXT 50#ifdef CONFIG_CFG80211_WEXT
51 memset(&wrqu, 0, sizeof(wrqu)); 51 memset(&wrqu, 0, sizeof(wrqu));
@@ -58,7 +58,7 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
58 struct ieee80211_channel *channel, gfp_t gfp) 58 struct ieee80211_channel *channel, gfp_t gfp)
59{ 59{
60 struct wireless_dev *wdev = dev->ieee80211_ptr; 60 struct wireless_dev *wdev = dev->ieee80211_ptr;
61 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 61 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
62 struct cfg80211_event *ev; 62 struct cfg80211_event *ev;
63 unsigned long flags; 63 unsigned long flags;
64 64
@@ -88,8 +88,6 @@ static int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
88 struct cfg80211_cached_keys *connkeys) 88 struct cfg80211_cached_keys *connkeys)
89{ 89{
90 struct wireless_dev *wdev = dev->ieee80211_ptr; 90 struct wireless_dev *wdev = dev->ieee80211_ptr;
91 struct ieee80211_channel *check_chan;
92 u8 radar_detect_width = 0;
93 int err; 91 int err;
94 92
95 ASSERT_WDEV_LOCK(wdev); 93 ASSERT_WDEV_LOCK(wdev);
@@ -126,28 +124,6 @@ static int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
126#ifdef CONFIG_CFG80211_WEXT 124#ifdef CONFIG_CFG80211_WEXT
127 wdev->wext.ibss.chandef = params->chandef; 125 wdev->wext.ibss.chandef = params->chandef;
128#endif 126#endif
129 check_chan = params->chandef.chan;
130 if (params->userspace_handles_dfs) {
131 /* Check for radar even if the current channel is not
132 * a radar channel - it might decide to change to DFS
133 * channel later.
134 */
135 radar_detect_width = BIT(params->chandef.width);
136 }
137
138 err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
139 check_chan,
140 (params->channel_fixed &&
141 !radar_detect_width)
142 ? CHAN_MODE_SHARED
143 : CHAN_MODE_EXCLUSIVE,
144 radar_detect_width);
145
146 if (err) {
147 wdev->connect_keys = NULL;
148 return err;
149 }
150
151 err = rdev_join_ibss(rdev, dev, params); 127 err = rdev_join_ibss(rdev, dev, params);
152 if (err) { 128 if (err) {
153 wdev->connect_keys = NULL; 129 wdev->connect_keys = NULL;
@@ -180,7 +156,7 @@ int cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
180static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext) 156static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
181{ 157{
182 struct wireless_dev *wdev = dev->ieee80211_ptr; 158 struct wireless_dev *wdev = dev->ieee80211_ptr;
183 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 159 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
184 int i; 160 int i;
185 161
186 ASSERT_WDEV_LOCK(wdev); 162 ASSERT_WDEV_LOCK(wdev);
@@ -335,7 +311,7 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
335 struct iw_freq *wextfreq, char *extra) 311 struct iw_freq *wextfreq, char *extra)
336{ 312{
337 struct wireless_dev *wdev = dev->ieee80211_ptr; 313 struct wireless_dev *wdev = dev->ieee80211_ptr;
338 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 314 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
339 struct ieee80211_channel *chan = NULL; 315 struct ieee80211_channel *chan = NULL;
340 int err, freq; 316 int err, freq;
341 317
@@ -346,7 +322,7 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
346 if (!rdev->ops->join_ibss) 322 if (!rdev->ops->join_ibss)
347 return -EOPNOTSUPP; 323 return -EOPNOTSUPP;
348 324
349 freq = cfg80211_wext_freq(wdev->wiphy, wextfreq); 325 freq = cfg80211_wext_freq(wextfreq);
350 if (freq < 0) 326 if (freq < 0)
351 return freq; 327 return freq;
352 328
@@ -420,7 +396,7 @@ int cfg80211_ibss_wext_siwessid(struct net_device *dev,
420 struct iw_point *data, char *ssid) 396 struct iw_point *data, char *ssid)
421{ 397{
422 struct wireless_dev *wdev = dev->ieee80211_ptr; 398 struct wireless_dev *wdev = dev->ieee80211_ptr;
423 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 399 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
424 size_t len = data->length; 400 size_t len = data->length;
425 int err; 401 int err;
426 402
@@ -444,8 +420,8 @@ int cfg80211_ibss_wext_siwessid(struct net_device *dev,
444 if (len > 0 && ssid[len - 1] == '\0') 420 if (len > 0 && ssid[len - 1] == '\0')
445 len--; 421 len--;
446 422
423 memcpy(wdev->ssid, ssid, len);
447 wdev->wext.ibss.ssid = wdev->ssid; 424 wdev->wext.ibss.ssid = wdev->ssid;
448 memcpy(wdev->wext.ibss.ssid, ssid, len);
449 wdev->wext.ibss.ssid_len = len; 425 wdev->wext.ibss.ssid_len = len;
450 426
451 wdev_lock(wdev); 427 wdev_lock(wdev);
@@ -487,7 +463,7 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev,
487 struct sockaddr *ap_addr, char *extra) 463 struct sockaddr *ap_addr, char *extra)
488{ 464{
489 struct wireless_dev *wdev = dev->ieee80211_ptr; 465 struct wireless_dev *wdev = dev->ieee80211_ptr;
490 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 466 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
491 u8 *bssid = ap_addr->sa_data; 467 u8 *bssid = ap_addr->sa_data;
492 int err; 468 int err;
493 469
@@ -505,6 +481,9 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev,
505 if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid)) 481 if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid))
506 bssid = NULL; 482 bssid = NULL;
507 483
484 if (bssid && !is_valid_ether_addr(bssid))
485 return -EINVAL;
486
508 /* both automatic */ 487 /* both automatic */
509 if (!bssid && !wdev->wext.ibss.bssid) 488 if (!bssid && !wdev->wext.ibss.bssid)
510 return 0; 489 return 0;
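The added check rejects multicast and other non-unicast BSSIDs before they reach the driver (all-zero and broadcast were already mapped to "automatic" just above). The kernel's is_valid_ether_addr() accepts only a non-zero unicast address; a userspace restatement for illustration, a behavioral sketch rather than the kernel header:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* unicast (multicast bit clear in the first octet) and non-zero */
    static bool is_valid_ether_addr(const uint8_t a[6])
    {
            static const uint8_t zero[6];

            return !(a[0] & 0x01) && memcmp(a, zero, 6) != 0;
    }

    int main(void)
    {
            uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
            uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
            uint8_t ucast[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

            printf("%d %d %d\n", is_valid_ether_addr(bcast),
                   is_valid_ether_addr(mcast), is_valid_ether_addr(ucast));
            return 0;       /* prints 0 0 1 */
    }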
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 5af5cc6b2c4c..092300b30c37 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -99,7 +99,6 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
99 const struct mesh_config *conf) 99 const struct mesh_config *conf)
100{ 100{
101 struct wireless_dev *wdev = dev->ieee80211_ptr; 101 struct wireless_dev *wdev = dev->ieee80211_ptr;
102 u8 radar_detect_width = 0;
103 int err; 102 int err;
104 103
105 BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN != IEEE80211_MAX_MESH_ID_LEN); 104 BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN != IEEE80211_MAX_MESH_ID_LEN);
@@ -175,22 +174,10 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
175 scan_width); 174 scan_width);
176 } 175 }
177 176
178 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &setup->chandef)) 177 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &setup->chandef,
178 NL80211_IFTYPE_MESH_POINT))
179 return -EINVAL; 179 return -EINVAL;
180 180
181 err = cfg80211_chandef_dfs_required(wdev->wiphy, &setup->chandef);
182 if (err < 0)
183 return err;
184 if (err)
185 radar_detect_width = BIT(setup->chandef.width);
186
187 err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
188 setup->chandef.chan,
189 CHAN_MODE_SHARED,
190 radar_detect_width);
191 if (err)
192 return err;
193
194 err = rdev_join_mesh(rdev, dev, conf, setup); 181 err = rdev_join_mesh(rdev, dev, conf, setup);
195 if (!err) { 182 if (!err) {
196 memcpy(wdev->ssid, setup->mesh_id, setup->mesh_id_len); 183 memcpy(wdev->ssid, setup->mesh_id, setup->mesh_id_len);
@@ -236,17 +223,6 @@ int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
236 if (!netif_running(wdev->netdev)) 223 if (!netif_running(wdev->netdev))
237 return -ENETDOWN; 224 return -ENETDOWN;
238 225
239 /* cfg80211_can_use_chan() calls
240 * cfg80211_can_use_iftype_chan() with no radar
241 * detection, so if we're trying to use a radar
242 * channel here, something is wrong.
243 */
244 WARN_ON_ONCE(chandef->chan->flags & IEEE80211_CHAN_RADAR);
245 err = cfg80211_can_use_chan(rdev, wdev, chandef->chan,
246 CHAN_MODE_SHARED);
247 if (err)
248 return err;
249
250 err = rdev_libertas_set_mesh_channel(rdev, wdev->netdev, 226 err = rdev_libertas_set_mesh_channel(rdev, wdev->netdev,
251 chandef->chan); 227 chandef->chan);
252 if (!err) 228 if (!err)
@@ -262,8 +238,8 @@ int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
262 return 0; 238 return 0;
263} 239}
264 240
265static int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev, 241int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
266 struct net_device *dev) 242 struct net_device *dev)
267{ 243{
268 struct wireless_dev *wdev = dev->ieee80211_ptr; 244 struct wireless_dev *wdev = dev->ieee80211_ptr;
269 int err; 245 int err;
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index c52ff59a3e96..266766b8d80b 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -23,7 +23,7 @@ void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss,
23{ 23{
24 struct wireless_dev *wdev = dev->ieee80211_ptr; 24 struct wireless_dev *wdev = dev->ieee80211_ptr;
25 struct wiphy *wiphy = wdev->wiphy; 25 struct wiphy *wiphy = wdev->wiphy;
26 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 26 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
27 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; 27 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
28 u8 *ie = mgmt->u.assoc_resp.variable; 28 u8 *ie = mgmt->u.assoc_resp.variable;
29 int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable); 29 int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(cfg80211_rx_assoc_resp);
54static void cfg80211_process_auth(struct wireless_dev *wdev, 54static void cfg80211_process_auth(struct wireless_dev *wdev,
55 const u8 *buf, size_t len) 55 const u8 *buf, size_t len)
56{ 56{
57 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 57 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
58 58
59 nl80211_send_rx_auth(rdev, wdev->netdev, buf, len, GFP_KERNEL); 59 nl80211_send_rx_auth(rdev, wdev->netdev, buf, len, GFP_KERNEL);
60 cfg80211_sme_rx_auth(wdev, buf, len); 60 cfg80211_sme_rx_auth(wdev, buf, len);
@@ -63,7 +63,7 @@ static void cfg80211_process_auth(struct wireless_dev *wdev,
63static void cfg80211_process_deauth(struct wireless_dev *wdev, 63static void cfg80211_process_deauth(struct wireless_dev *wdev,
64 const u8 *buf, size_t len) 64 const u8 *buf, size_t len)
65{ 65{
66 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 66 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
67 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; 67 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
68 const u8 *bssid = mgmt->bssid; 68 const u8 *bssid = mgmt->bssid;
69 u16 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); 69 u16 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
@@ -82,7 +82,7 @@ static void cfg80211_process_deauth(struct wireless_dev *wdev,
82static void cfg80211_process_disassoc(struct wireless_dev *wdev, 82static void cfg80211_process_disassoc(struct wireless_dev *wdev,
83 const u8 *buf, size_t len) 83 const u8 *buf, size_t len)
84{ 84{
85 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 85 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
86 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; 86 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
87 const u8 *bssid = mgmt->bssid; 87 const u8 *bssid = mgmt->bssid;
88 u16 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); 88 u16 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
@@ -123,7 +123,7 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr)
123{ 123{
124 struct wireless_dev *wdev = dev->ieee80211_ptr; 124 struct wireless_dev *wdev = dev->ieee80211_ptr;
125 struct wiphy *wiphy = wdev->wiphy; 125 struct wiphy *wiphy = wdev->wiphy;
126 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 126 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
127 127
128 trace_cfg80211_send_auth_timeout(dev, addr); 128 trace_cfg80211_send_auth_timeout(dev, addr);
129 129
@@ -136,7 +136,7 @@ void cfg80211_assoc_timeout(struct net_device *dev, struct cfg80211_bss *bss)
136{ 136{
137 struct wireless_dev *wdev = dev->ieee80211_ptr; 137 struct wireless_dev *wdev = dev->ieee80211_ptr;
138 struct wiphy *wiphy = wdev->wiphy; 138 struct wiphy *wiphy = wdev->wiphy;
139 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 139 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
140 140
141 trace_cfg80211_send_assoc_timeout(dev, bss->bssid); 141 trace_cfg80211_send_assoc_timeout(dev, bss->bssid);
142 142
@@ -172,7 +172,7 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
172 const u8 *tsc, gfp_t gfp) 172 const u8 *tsc, gfp_t gfp)
173{ 173{
174 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; 174 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
175 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 175 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
176#ifdef CONFIG_CFG80211_WEXT 176#ifdef CONFIG_CFG80211_WEXT
177 union iwreq_data wrqu; 177 union iwreq_data wrqu;
178 char *buf = kmalloc(128, gfp); 178 char *buf = kmalloc(128, gfp);
@@ -233,14 +233,8 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
233 if (!req.bss) 233 if (!req.bss)
234 return -ENOENT; 234 return -ENOENT;
235 235
236 err = cfg80211_can_use_chan(rdev, wdev, req.bss->channel,
237 CHAN_MODE_SHARED);
238 if (err)
239 goto out;
240
241 err = rdev_auth(rdev, dev, &req); 236 err = rdev_auth(rdev, dev, &req);
242 237
243out:
244 cfg80211_put_bss(&rdev->wiphy, req.bss); 238 cfg80211_put_bss(&rdev->wiphy, req.bss);
245 return err; 239 return err;
246} 240}
@@ -306,16 +300,10 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
306 if (!req->bss) 300 if (!req->bss)
307 return -ENOENT; 301 return -ENOENT;
308 302
309 err = cfg80211_can_use_chan(rdev, wdev, chan, CHAN_MODE_SHARED);
310 if (err)
311 goto out;
312
313 err = rdev_assoc(rdev, dev, req); 303 err = rdev_assoc(rdev, dev, req);
314 if (!err) 304 if (!err)
315 cfg80211_hold_bss(bss_from_pub(req->bss)); 305 cfg80211_hold_bss(bss_from_pub(req->bss));
316 306 else
317out:
318 if (err)
319 cfg80211_put_bss(&rdev->wiphy, req->bss); 307 cfg80211_put_bss(&rdev->wiphy, req->bss);
320 308
321 return err; 309 return err;
@@ -414,7 +402,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
414 int match_len) 402 int match_len)
415{ 403{
416 struct wiphy *wiphy = wdev->wiphy; 404 struct wiphy *wiphy = wdev->wiphy;
417 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 405 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
418 struct cfg80211_mgmt_registration *reg, *nreg; 406 struct cfg80211_mgmt_registration *reg, *nreg;
419 int err = 0; 407 int err = 0;
420 u16 mgmt_type; 408 u16 mgmt_type;
@@ -473,7 +461,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
473void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid) 461void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid)
474{ 462{
475 struct wiphy *wiphy = wdev->wiphy; 463 struct wiphy *wiphy = wdev->wiphy;
476 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 464 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
477 struct cfg80211_mgmt_registration *reg, *tmp; 465 struct cfg80211_mgmt_registration *reg, *tmp;
478 466
479 spin_lock_bh(&wdev->mgmt_registrations_lock); 467 spin_lock_bh(&wdev->mgmt_registrations_lock);
@@ -620,7 +608,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
620 const u8 *buf, size_t len, u32 flags, gfp_t gfp) 608 const u8 *buf, size_t len, u32 flags, gfp_t gfp)
621{ 609{
622 struct wiphy *wiphy = wdev->wiphy; 610 struct wiphy *wiphy = wdev->wiphy;
623 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 611 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
624 struct cfg80211_mgmt_registration *reg; 612 struct cfg80211_mgmt_registration *reg;
625 const struct ieee80211_txrx_stypes *stypes = 613 const struct ieee80211_txrx_stypes *stypes =
626 &wiphy->mgmt_stypes[wdev->iftype]; 614 &wiphy->mgmt_stypes[wdev->iftype];
@@ -739,7 +727,7 @@ void cfg80211_radar_event(struct wiphy *wiphy,
739 struct cfg80211_chan_def *chandef, 727 struct cfg80211_chan_def *chandef,
740 gfp_t gfp) 728 gfp_t gfp)
741{ 729{
742 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 730 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
743 unsigned long timeout; 731 unsigned long timeout;
744 732
745 trace_cfg80211_radar_event(wiphy, chandef); 733 trace_cfg80211_radar_event(wiphy, chandef);
@@ -764,7 +752,7 @@ void cfg80211_cac_event(struct net_device *netdev,
764{ 752{
765 struct wireless_dev *wdev = netdev->ieee80211_ptr; 753 struct wireless_dev *wdev = netdev->ieee80211_ptr;
766 struct wiphy *wiphy = wdev->wiphy; 754 struct wiphy *wiphy = wdev->wiphy;
767 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 755 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
768 unsigned long timeout; 756 unsigned long timeout;
769 757
770 trace_cfg80211_cac_event(netdev, event); 758 trace_cfg80211_cac_event(netdev, event);
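In the assoc hunk above, dropping the can_use_chan check also lets the error handling collapse to a plain if/else: the reference taken by the BSS lookup is kept (held) on success and released (put) on failure, with no path that leaks or double-drops it. A toy refcount model of that invariant, with hypothetical names:

    #include <stdio.h>

    struct bss { int refs; };

    static void hold(struct bss *b) { b->refs++; }

    static void put(struct bss *b)
    {
            if (--b->refs == 0)
                    printf("bss freed\n");
    }

    /* lookup hands us one reference; every exit path must balance it */
    static int try_assoc(struct bss *b, int driver_err)
    {
            if (driver_err) {
                    put(b);         /* failure: drop the lookup reference */
                    return driver_err;
            }
            hold(b);                /* success: keep it held until disassoc */
            return 0;
    }

    int main(void)
    {
            struct bss b = { .refs = 1 };   /* as returned by a lookup */

            try_assoc(&b, -1);      /* prints "bss freed" */
            return 0;
    }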
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 052c1bf8ffac..ba4f1723c83a 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -168,8 +168,8 @@ __cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs)
168 netdev = __dev_get_by_index(netns, ifindex); 168 netdev = __dev_get_by_index(netns, ifindex);
169 if (netdev) { 169 if (netdev) {
170 if (netdev->ieee80211_ptr) 170 if (netdev->ieee80211_ptr)
171 tmp = wiphy_to_dev( 171 tmp = wiphy_to_rdev(
172 netdev->ieee80211_ptr->wiphy); 172 netdev->ieee80211_ptr->wiphy);
173 else 173 else
174 tmp = NULL; 174 tmp = NULL;
175 175
@@ -371,8 +371,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
371 [NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 }, 371 [NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 },
372 [NL80211_ATTR_CH_SWITCH_BLOCK_TX] = { .type = NLA_FLAG }, 372 [NL80211_ATTR_CH_SWITCH_BLOCK_TX] = { .type = NLA_FLAG },
373 [NL80211_ATTR_CSA_IES] = { .type = NLA_NESTED }, 373 [NL80211_ATTR_CSA_IES] = { .type = NLA_NESTED },
374 [NL80211_ATTR_CSA_C_OFF_BEACON] = { .type = NLA_U16 }, 374 [NL80211_ATTR_CSA_C_OFF_BEACON] = { .type = NLA_BINARY },
375 [NL80211_ATTR_CSA_C_OFF_PRESP] = { .type = NLA_U16 }, 375 [NL80211_ATTR_CSA_C_OFF_PRESP] = { .type = NLA_BINARY },
376 [NL80211_ATTR_STA_SUPPORTED_CHANNELS] = { .type = NLA_BINARY }, 376 [NL80211_ATTR_STA_SUPPORTED_CHANNELS] = { .type = NLA_BINARY },
377 [NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES] = { .type = NLA_BINARY }, 377 [NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES] = { .type = NLA_BINARY },
378 [NL80211_ATTR_HANDLE_DFS] = { .type = NLA_FLAG }, 378 [NL80211_ATTR_HANDLE_DFS] = { .type = NLA_FLAG },
@@ -385,6 +385,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
385 [NL80211_ATTR_MAC_HINT] = { .len = ETH_ALEN }, 385 [NL80211_ATTR_MAC_HINT] = { .len = ETH_ALEN },
386 [NL80211_ATTR_WIPHY_FREQ_HINT] = { .type = NLA_U32 }, 386 [NL80211_ATTR_WIPHY_FREQ_HINT] = { .type = NLA_U32 },
387 [NL80211_ATTR_TDLS_PEER_CAPABILITY] = { .type = NLA_U32 }, 387 [NL80211_ATTR_TDLS_PEER_CAPABILITY] = { .type = NLA_U32 },
388 [NL80211_ATTR_IFACE_SOCKET_OWNER] = { .type = NLA_FLAG },
389 [NL80211_ATTR_CSA_C_OFFSETS_TX] = { .type = NLA_BINARY },
388}; 390};
389 391
390/* policy for the key attributes */ 392/* policy for the key attributes */
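Three related bits land together here: the CSA counter-offset attributes switch from NLA_U16 to NLA_BINARY, a new NL80211_ATTR_CSA_C_OFFSETS_TX attribute appears, and earlier in the patch wiphy_new() defaults max_num_csa_counters to 1. The apparent intent is to let a frame carry several channel-switch counters, with the offsets passed as a u16 array. A hedged parsing sketch; the exact validation in nl80211 may differ:

    /* assumed context: attrs[] already parsed against nl80211_policy */
    if (attrs[NL80211_ATTR_CSA_C_OFF_BEACON]) {
            u16 *off = nla_data(attrs[NL80211_ATTR_CSA_C_OFF_BEACON]);
            int n = nla_len(attrs[NL80211_ATTR_CSA_C_OFF_BEACON]) / sizeof(u16);

            if (!n || n > rdev->wiphy.max_num_csa_counters)
                    return -EINVAL;
            /* off[0..n-1] are byte offsets of the counters in the beacon */
    }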
@@ -484,7 +486,7 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
484 err = PTR_ERR(*wdev); 486 err = PTR_ERR(*wdev);
485 goto out_unlock; 487 goto out_unlock;
486 } 488 }
487 *rdev = wiphy_to_dev((*wdev)->wiphy); 489 *rdev = wiphy_to_rdev((*wdev)->wiphy);
488 /* 0 is the first index - add 1 to parse only once */ 490 /* 0 is the first index - add 1 to parse only once */
489 cb->args[0] = (*rdev)->wiphy_idx + 1; 491 cb->args[0] = (*rdev)->wiphy_idx + 1;
490 cb->args[1] = (*wdev)->identifier; 492 cb->args[1] = (*wdev)->identifier;
@@ -497,7 +499,7 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
497 err = -ENODEV; 499 err = -ENODEV;
498 goto out_unlock; 500 goto out_unlock;
499 } 501 }
500 *rdev = wiphy_to_dev(wiphy); 502 *rdev = wiphy_to_rdev(wiphy);
501 *wdev = NULL; 503 *wdev = NULL;
502 504
503 list_for_each_entry(tmp, &(*rdev)->wdev_list, list) { 505 list_for_each_entry(tmp, &(*rdev)->wdev_list, list) {
@@ -566,6 +568,13 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
566 struct ieee80211_channel *chan, 568 struct ieee80211_channel *chan,
567 bool large) 569 bool large)
568{ 570{
571 /* Some channels must be completely excluded from the
572 * list to protect old user-space tools from breaking
573 */
574 if (!large && chan->flags &
575 (IEEE80211_CHAN_NO_10MHZ | IEEE80211_CHAN_NO_20MHZ))
576 return 0;
577
569 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_FREQ, 578 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_FREQ,
570 chan->center_freq)) 579 chan->center_freq))
571 goto nla_put_failure; 580 goto nla_put_failure;
@@ -613,6 +622,18 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
613 if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) && 622 if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) &&
614 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ)) 623 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ))
615 goto nla_put_failure; 624 goto nla_put_failure;
625 if ((chan->flags & IEEE80211_CHAN_INDOOR_ONLY) &&
626 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_INDOOR_ONLY))
627 goto nla_put_failure;
628 if ((chan->flags & IEEE80211_CHAN_GO_CONCURRENT) &&
629 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_GO_CONCURRENT))
630 goto nla_put_failure;
631 if ((chan->flags & IEEE80211_CHAN_NO_20MHZ) &&
632 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_20MHZ))
633 goto nla_put_failure;
634 if ((chan->flags & IEEE80211_CHAN_NO_10MHZ) &&
635 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_10MHZ))
636 goto nla_put_failure;
616 } 637 }
617 638
618 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, 639 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
@@ -950,8 +971,10 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy,
950 c->max_interfaces)) 971 c->max_interfaces))
951 goto nla_put_failure; 972 goto nla_put_failure;
952 if (large && 973 if (large &&
953 nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS, 974 (nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS,
954 c->radar_detect_widths)) 975 c->radar_detect_widths) ||
976 nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_REGIONS,
977 c->radar_detect_regions)))
955 goto nla_put_failure; 978 goto nla_put_failure;
956 979
957 nla_nest_end(msg, nl_combi); 980 nla_nest_end(msg, nl_combi);
@@ -1006,42 +1029,42 @@ static int nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev,
1006} 1029}
1007 1030
1008static int nl80211_send_wowlan(struct sk_buff *msg, 1031static int nl80211_send_wowlan(struct sk_buff *msg,
1009 struct cfg80211_registered_device *dev, 1032 struct cfg80211_registered_device *rdev,
1010 bool large) 1033 bool large)
1011{ 1034{
1012 struct nlattr *nl_wowlan; 1035 struct nlattr *nl_wowlan;
1013 1036
1014 if (!dev->wiphy.wowlan) 1037 if (!rdev->wiphy.wowlan)
1015 return 0; 1038 return 0;
1016 1039
1017 nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED); 1040 nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED);
1018 if (!nl_wowlan) 1041 if (!nl_wowlan)
1019 return -ENOBUFS; 1042 return -ENOBUFS;
1020 1043
1021 if (((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_ANY) && 1044 if (((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_ANY) &&
1022 nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) || 1045 nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) ||
1023 ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_DISCONNECT) && 1046 ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_DISCONNECT) &&
1024 nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) || 1047 nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) ||
1025 ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_MAGIC_PKT) && 1048 ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_MAGIC_PKT) &&
1026 nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) || 1049 nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) ||
1027 ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) && 1050 ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) &&
1028 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) || 1051 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) ||
1029 ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && 1052 ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
1030 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) || 1053 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) ||
1031 ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) && 1054 ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) &&
1032 nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) || 1055 nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) ||
1033 ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) && 1056 ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) &&
1034 nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) || 1057 nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) ||
1035 ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_RFKILL_RELEASE) && 1058 ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_RFKILL_RELEASE) &&
1036 nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE))) 1059 nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
1037 return -ENOBUFS; 1060 return -ENOBUFS;
1038 1061
1039 if (dev->wiphy.wowlan->n_patterns) { 1062 if (rdev->wiphy.wowlan->n_patterns) {
1040 struct nl80211_pattern_support pat = { 1063 struct nl80211_pattern_support pat = {
1041 .max_patterns = dev->wiphy.wowlan->n_patterns, 1064 .max_patterns = rdev->wiphy.wowlan->n_patterns,
1042 .min_pattern_len = dev->wiphy.wowlan->pattern_min_len, 1065 .min_pattern_len = rdev->wiphy.wowlan->pattern_min_len,
1043 .max_pattern_len = dev->wiphy.wowlan->pattern_max_len, 1066 .max_pattern_len = rdev->wiphy.wowlan->pattern_max_len,
1044 .max_pkt_offset = dev->wiphy.wowlan->max_pkt_offset, 1067 .max_pkt_offset = rdev->wiphy.wowlan->max_pkt_offset,
1045 }; 1068 };
1046 1069
1047 if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN, 1070 if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
@@ -1049,7 +1072,7 @@ static int nl80211_send_wowlan(struct sk_buff *msg,
1049 return -ENOBUFS; 1072 return -ENOBUFS;
1050 } 1073 }
1051 1074
1052 if (large && nl80211_send_wowlan_tcp_caps(dev, msg)) 1075 if (large && nl80211_send_wowlan_tcp_caps(rdev, msg))
1053 return -ENOBUFS; 1076 return -ENOBUFS;
1054 1077
1055 nla_nest_end(msg, nl_wowlan); 1078 nla_nest_end(msg, nl_wowlan);
@@ -1059,19 +1082,19 @@ static int nl80211_send_wowlan(struct sk_buff *msg,
1059#endif 1082#endif
1060 1083
1061static int nl80211_send_coalesce(struct sk_buff *msg, 1084static int nl80211_send_coalesce(struct sk_buff *msg,
1062 struct cfg80211_registered_device *dev) 1085 struct cfg80211_registered_device *rdev)
1063{ 1086{
1064 struct nl80211_coalesce_rule_support rule; 1087 struct nl80211_coalesce_rule_support rule;
1065 1088
1066 if (!dev->wiphy.coalesce) 1089 if (!rdev->wiphy.coalesce)
1067 return 0; 1090 return 0;
1068 1091
1069 rule.max_rules = dev->wiphy.coalesce->n_rules; 1092 rule.max_rules = rdev->wiphy.coalesce->n_rules;
1070 rule.max_delay = dev->wiphy.coalesce->max_delay; 1093 rule.max_delay = rdev->wiphy.coalesce->max_delay;
1071 rule.pat.max_patterns = dev->wiphy.coalesce->n_patterns; 1094 rule.pat.max_patterns = rdev->wiphy.coalesce->n_patterns;
1072 rule.pat.min_pattern_len = dev->wiphy.coalesce->pattern_min_len; 1095 rule.pat.min_pattern_len = rdev->wiphy.coalesce->pattern_min_len;
1073 rule.pat.max_pattern_len = dev->wiphy.coalesce->pattern_max_len; 1096 rule.pat.max_pattern_len = rdev->wiphy.coalesce->pattern_max_len;
1074 rule.pat.max_pkt_offset = dev->wiphy.coalesce->max_pkt_offset; 1097 rule.pat.max_pkt_offset = rdev->wiphy.coalesce->max_pkt_offset;
1075 1098
1076 if (nla_put(msg, NL80211_ATTR_COALESCE_RULE, sizeof(rule), &rule)) 1099 if (nla_put(msg, NL80211_ATTR_COALESCE_RULE, sizeof(rule), &rule))
1077 return -ENOBUFS; 1100 return -ENOBUFS;
@@ -1202,7 +1225,8 @@ struct nl80211_dump_wiphy_state {
1202 bool split; 1225 bool split;
1203}; 1226};
1204 1227
1205static int nl80211_send_wiphy(struct cfg80211_registered_device *dev, 1228static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
1229 enum nl80211_commands cmd,
1206 struct sk_buff *msg, u32 portid, u32 seq, 1230 struct sk_buff *msg, u32 portid, u32 seq,
1207 int flags, struct nl80211_dump_wiphy_state *state) 1231 int flags, struct nl80211_dump_wiphy_state *state)
1208{ 1232{
@@ -1214,63 +1238,66 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1214 struct ieee80211_channel *chan; 1238 struct ieee80211_channel *chan;
1215 int i; 1239 int i;
1216 const struct ieee80211_txrx_stypes *mgmt_stypes = 1240 const struct ieee80211_txrx_stypes *mgmt_stypes =
1217 dev->wiphy.mgmt_stypes; 1241 rdev->wiphy.mgmt_stypes;
1218 u32 features; 1242 u32 features;
1219 1243
1220 hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_WIPHY); 1244 hdr = nl80211hdr_put(msg, portid, seq, flags, cmd);
1221 if (!hdr) 1245 if (!hdr)
1222 return -ENOBUFS; 1246 return -ENOBUFS;
1223 1247
1224 if (WARN_ON(!state)) 1248 if (WARN_ON(!state))
1225 return -EINVAL; 1249 return -EINVAL;
1226 1250
1227 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx) || 1251 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
1228 nla_put_string(msg, NL80211_ATTR_WIPHY_NAME, 1252 nla_put_string(msg, NL80211_ATTR_WIPHY_NAME,
1229 wiphy_name(&dev->wiphy)) || 1253 wiphy_name(&rdev->wiphy)) ||
1230 nla_put_u32(msg, NL80211_ATTR_GENERATION, 1254 nla_put_u32(msg, NL80211_ATTR_GENERATION,
1231 cfg80211_rdev_list_generation)) 1255 cfg80211_rdev_list_generation))
1232 goto nla_put_failure; 1256 goto nla_put_failure;
1233 1257
1258 if (cmd != NL80211_CMD_NEW_WIPHY)
1259 goto finish;
1260
1234 switch (state->split_start) { 1261 switch (state->split_start) {
1235 case 0: 1262 case 0:
1236 if (nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT, 1263 if (nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT,
1237 dev->wiphy.retry_short) || 1264 rdev->wiphy.retry_short) ||
1238 nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_LONG, 1265 nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_LONG,
1239 dev->wiphy.retry_long) || 1266 rdev->wiphy.retry_long) ||
1240 nla_put_u32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD, 1267 nla_put_u32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
1241 dev->wiphy.frag_threshold) || 1268 rdev->wiphy.frag_threshold) ||
1242 nla_put_u32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD, 1269 nla_put_u32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
1243 dev->wiphy.rts_threshold) || 1270 rdev->wiphy.rts_threshold) ||
1244 nla_put_u8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS, 1271 nla_put_u8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
1245 dev->wiphy.coverage_class) || 1272 rdev->wiphy.coverage_class) ||
1246 nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS, 1273 nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
1247 dev->wiphy.max_scan_ssids) || 1274 rdev->wiphy.max_scan_ssids) ||
1248 nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS, 1275 nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS,
1249 dev->wiphy.max_sched_scan_ssids) || 1276 rdev->wiphy.max_sched_scan_ssids) ||
1250 nla_put_u16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN, 1277 nla_put_u16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN,
1251 dev->wiphy.max_scan_ie_len) || 1278 rdev->wiphy.max_scan_ie_len) ||
1252 nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN, 1279 nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
1253 dev->wiphy.max_sched_scan_ie_len) || 1280 rdev->wiphy.max_sched_scan_ie_len) ||
1254 nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS, 1281 nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS,
1255 dev->wiphy.max_match_sets)) 1282 rdev->wiphy.max_match_sets))
1256 goto nla_put_failure; 1283 goto nla_put_failure;
1257 1284
1258 if ((dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) && 1285 if ((rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) &&
1259 nla_put_flag(msg, NL80211_ATTR_SUPPORT_IBSS_RSN)) 1286 nla_put_flag(msg, NL80211_ATTR_SUPPORT_IBSS_RSN))
1260 goto nla_put_failure; 1287 goto nla_put_failure;
1261 if ((dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) && 1288 if ((rdev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) &&
1262 nla_put_flag(msg, NL80211_ATTR_SUPPORT_MESH_AUTH)) 1289 nla_put_flag(msg, NL80211_ATTR_SUPPORT_MESH_AUTH))
1263 goto nla_put_failure; 1290 goto nla_put_failure;
1264 if ((dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) && 1291 if ((rdev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
1265 nla_put_flag(msg, NL80211_ATTR_SUPPORT_AP_UAPSD)) 1292 nla_put_flag(msg, NL80211_ATTR_SUPPORT_AP_UAPSD))
1266 goto nla_put_failure; 1293 goto nla_put_failure;
1267 if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) && 1294 if ((rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) &&
1268 nla_put_flag(msg, NL80211_ATTR_ROAM_SUPPORT)) 1295 nla_put_flag(msg, NL80211_ATTR_ROAM_SUPPORT))
1269 goto nla_put_failure; 1296 goto nla_put_failure;
1270 if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) && 1297 if ((rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
1271 nla_put_flag(msg, NL80211_ATTR_TDLS_SUPPORT)) 1298 nla_put_flag(msg, NL80211_ATTR_TDLS_SUPPORT))
1272 goto nla_put_failure; 1299 goto nla_put_failure;
1273 if ((dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) && 1300 if ((rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) &&
1274 nla_put_flag(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP)) 1301 nla_put_flag(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP))
1275 goto nla_put_failure; 1302 goto nla_put_failure;
1276 state->split_start++; 1303 state->split_start++;
@@ -1278,35 +1305,35 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1278 break; 1305 break;
1279 case 1: 1306 case 1:
1280 if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES, 1307 if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES,
1281 sizeof(u32) * dev->wiphy.n_cipher_suites, 1308 sizeof(u32) * rdev->wiphy.n_cipher_suites,
1282 dev->wiphy.cipher_suites)) 1309 rdev->wiphy.cipher_suites))
1283 goto nla_put_failure; 1310 goto nla_put_failure;
1284 1311
1285 if (nla_put_u8(msg, NL80211_ATTR_MAX_NUM_PMKIDS, 1312 if (nla_put_u8(msg, NL80211_ATTR_MAX_NUM_PMKIDS,
1286 dev->wiphy.max_num_pmkids)) 1313 rdev->wiphy.max_num_pmkids))
1287 goto nla_put_failure; 1314 goto nla_put_failure;
1288 1315
1289 if ((dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) && 1316 if ((rdev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) &&
1290 nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE)) 1317 nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE))
1291 goto nla_put_failure; 1318 goto nla_put_failure;
1292 1319
1293 if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX, 1320 if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX,
1294 dev->wiphy.available_antennas_tx) || 1321 rdev->wiphy.available_antennas_tx) ||
1295 nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX, 1322 nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX,
1296 dev->wiphy.available_antennas_rx)) 1323 rdev->wiphy.available_antennas_rx))
1297 goto nla_put_failure; 1324 goto nla_put_failure;
1298 1325
1299 if ((dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) && 1326 if ((rdev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) &&
1300 nla_put_u32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD, 1327 nla_put_u32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD,
1301 dev->wiphy.probe_resp_offload)) 1328 rdev->wiphy.probe_resp_offload))
1302 goto nla_put_failure; 1329 goto nla_put_failure;
1303 1330
1304 if ((dev->wiphy.available_antennas_tx || 1331 if ((rdev->wiphy.available_antennas_tx ||
1305 dev->wiphy.available_antennas_rx) && 1332 rdev->wiphy.available_antennas_rx) &&
1306 dev->ops->get_antenna) { 1333 rdev->ops->get_antenna) {
1307 u32 tx_ant = 0, rx_ant = 0; 1334 u32 tx_ant = 0, rx_ant = 0;
1308 int res; 1335 int res;
1309 res = rdev_get_antenna(dev, &tx_ant, &rx_ant); 1336 res = rdev_get_antenna(rdev, &tx_ant, &rx_ant);
1310 if (!res) { 1337 if (!res) {
1311 if (nla_put_u32(msg, 1338 if (nla_put_u32(msg,
1312 NL80211_ATTR_WIPHY_ANTENNA_TX, 1339 NL80211_ATTR_WIPHY_ANTENNA_TX,
@@ -1323,7 +1350,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1323 break; 1350 break;
1324 case 2: 1351 case 2:
1325 if (nl80211_put_iftypes(msg, NL80211_ATTR_SUPPORTED_IFTYPES, 1352 if (nl80211_put_iftypes(msg, NL80211_ATTR_SUPPORTED_IFTYPES,
1326 dev->wiphy.interface_modes)) 1353 rdev->wiphy.interface_modes))
1327 goto nla_put_failure; 1354 goto nla_put_failure;
1328 state->split_start++; 1355 state->split_start++;
1329 if (state->split) 1356 if (state->split)
@@ -1337,7 +1364,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1337 band < IEEE80211_NUM_BANDS; band++) { 1364 band < IEEE80211_NUM_BANDS; band++) {
1338 struct ieee80211_supported_band *sband; 1365 struct ieee80211_supported_band *sband;
1339 1366
1340 sband = dev->wiphy.bands[band]; 1367 sband = rdev->wiphy.bands[band];
1341 1368
1342 if (!sband) 1369 if (!sband)
1343 continue; 1370 continue;
@@ -1414,7 +1441,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1414 i = 0; 1441 i = 0;
1415#define CMD(op, n) \ 1442#define CMD(op, n) \
1416 do { \ 1443 do { \
1417 if (dev->ops->op) { \ 1444 if (rdev->ops->op) { \
1418 i++; \ 1445 i++; \
1419 if (nla_put_u32(msg, i, NL80211_CMD_ ## n)) \ 1446 if (nla_put_u32(msg, i, NL80211_CMD_ ## n)) \
1420 goto nla_put_failure; \ 1447 goto nla_put_failure; \
@@ -1438,32 +1465,32 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
 		CMD(set_pmksa, SET_PMKSA);
 		CMD(del_pmksa, DEL_PMKSA);
 		CMD(flush_pmksa, FLUSH_PMKSA);
-		if (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
+		if (rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
 			CMD(remain_on_channel, REMAIN_ON_CHANNEL);
 		CMD(set_bitrate_mask, SET_TX_BITRATE_MASK);
 		CMD(mgmt_tx, FRAME);
 		CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL);
-		if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
+		if (rdev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
 			i++;
 			if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS))
 				goto nla_put_failure;
 		}
-		if (dev->ops->set_monitor_channel || dev->ops->start_ap ||
-		    dev->ops->join_mesh) {
+		if (rdev->ops->set_monitor_channel || rdev->ops->start_ap ||
+		    rdev->ops->join_mesh) {
 			i++;
 			if (nla_put_u32(msg, i, NL80211_CMD_SET_CHANNEL))
 				goto nla_put_failure;
 		}
 		CMD(set_wds_peer, SET_WDS_PEER);
-		if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) {
+		if (rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) {
 			CMD(tdls_mgmt, TDLS_MGMT);
 			CMD(tdls_oper, TDLS_OPER);
 		}
-		if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
+		if (rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
 			CMD(sched_scan_start, START_SCHED_SCAN);
 		CMD(probe_client, PROBE_CLIENT);
 		CMD(set_noack_map, SET_NOACK_MAP);
-		if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) {
+		if (rdev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) {
 			i++;
 			if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS))
 				goto nla_put_failure;
@@ -1473,7 +1500,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
 		if (state->split) {
 			CMD(crit_proto_start, CRIT_PROTOCOL_START);
 			CMD(crit_proto_stop, CRIT_PROTOCOL_STOP);
-			if (dev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
+			if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
 				CMD(channel_switch, CHANNEL_SWITCH);
 		}
 		CMD(set_qos_map, SET_QOS_MAP);
@@ -1484,13 +1511,13 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,

 #undef CMD

-		if (dev->ops->connect || dev->ops->auth) {
+		if (rdev->ops->connect || rdev->ops->auth) {
 			i++;
 			if (nla_put_u32(msg, i, NL80211_CMD_CONNECT))
 				goto nla_put_failure;
 		}

-		if (dev->ops->disconnect || dev->ops->deauth) {
+		if (rdev->ops->disconnect || rdev->ops->deauth) {
 			i++;
 			if (nla_put_u32(msg, i, NL80211_CMD_DISCONNECT))
 				goto nla_put_failure;
@@ -1501,14 +1528,14 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
 		if (state->split)
 			break;
 	case 5:
-		if (dev->ops->remain_on_channel &&
-		    (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) &&
+		if (rdev->ops->remain_on_channel &&
+		    (rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) &&
 		    nla_put_u32(msg,
 				NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
-				dev->wiphy.max_remain_on_channel_duration))
+				rdev->wiphy.max_remain_on_channel_duration))
 			goto nla_put_failure;

-		if ((dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) &&
+		if ((rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) &&
 		    nla_put_flag(msg, NL80211_ATTR_OFFCHANNEL_TX_OK))
 			goto nla_put_failure;

@@ -1519,7 +1546,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
 		break;
 	case 6:
 #ifdef CONFIG_PM
-		if (nl80211_send_wowlan(msg, dev, state->split))
+		if (nl80211_send_wowlan(msg, rdev, state->split))
 			goto nla_put_failure;
 		state->split_start++;
 		if (state->split)
@@ -1529,10 +1556,10 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
 #endif
 	case 7:
 		if (nl80211_put_iftypes(msg, NL80211_ATTR_SOFTWARE_IFTYPES,
-					dev->wiphy.software_iftypes))
+					rdev->wiphy.software_iftypes))
 			goto nla_put_failure;

-		if (nl80211_put_iface_combinations(&dev->wiphy, msg,
+		if (nl80211_put_iface_combinations(&rdev->wiphy, msg,
 						   state->split))
 			goto nla_put_failure;

@@ -1540,12 +1567,12 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
 		if (state->split)
 			break;
 	case 8:
-		if ((dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) &&
+		if ((rdev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) &&
 		    nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME,
-				dev->wiphy.ap_sme_capa))
+				rdev->wiphy.ap_sme_capa))
 			goto nla_put_failure;

-		features = dev->wiphy.features;
+		features = rdev->wiphy.features;
 		/*
 		 * We can only add the per-channel limit information if the
 		 * dump is split, otherwise it makes it too big. Therefore
@@ -1556,16 +1583,16 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
 		if (nla_put_u32(msg, NL80211_ATTR_FEATURE_FLAGS, features))
 			goto nla_put_failure;

-		if (dev->wiphy.ht_capa_mod_mask &&
+		if (rdev->wiphy.ht_capa_mod_mask &&
 		    nla_put(msg, NL80211_ATTR_HT_CAPABILITY_MASK,
-			    sizeof(*dev->wiphy.ht_capa_mod_mask),
-			    dev->wiphy.ht_capa_mod_mask))
+			    sizeof(*rdev->wiphy.ht_capa_mod_mask),
+			    rdev->wiphy.ht_capa_mod_mask))
 			goto nla_put_failure;

-		if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME &&
-		    dev->wiphy.max_acl_mac_addrs &&
+		if (rdev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME &&
+		    rdev->wiphy.max_acl_mac_addrs &&
 		    nla_put_u32(msg, NL80211_ATTR_MAC_ACL_MAX,
-				dev->wiphy.max_acl_mac_addrs))
+				rdev->wiphy.max_acl_mac_addrs))
 			goto nla_put_failure;

 		/*
@@ -1581,41 +1608,41 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
 		state->split_start++;
 		break;
 	case 9:
-		if (dev->wiphy.extended_capabilities &&
+		if (rdev->wiphy.extended_capabilities &&
 		    (nla_put(msg, NL80211_ATTR_EXT_CAPA,
-			     dev->wiphy.extended_capabilities_len,
-			     dev->wiphy.extended_capabilities) ||
+			     rdev->wiphy.extended_capabilities_len,
+			     rdev->wiphy.extended_capabilities) ||
 		     nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK,
-			     dev->wiphy.extended_capabilities_len,
-			     dev->wiphy.extended_capabilities_mask)))
+			     rdev->wiphy.extended_capabilities_len,
+			     rdev->wiphy.extended_capabilities_mask)))
 			goto nla_put_failure;

-		if (dev->wiphy.vht_capa_mod_mask &&
+		if (rdev->wiphy.vht_capa_mod_mask &&
 		    nla_put(msg, NL80211_ATTR_VHT_CAPABILITY_MASK,
-			    sizeof(*dev->wiphy.vht_capa_mod_mask),
-			    dev->wiphy.vht_capa_mod_mask))
+			    sizeof(*rdev->wiphy.vht_capa_mod_mask),
+			    rdev->wiphy.vht_capa_mod_mask))
 			goto nla_put_failure;

 		state->split_start++;
 		break;
 	case 10:
-		if (nl80211_send_coalesce(msg, dev))
+		if (nl80211_send_coalesce(msg, rdev))
 			goto nla_put_failure;

-		if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_5_10_MHZ) &&
+		if ((rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_5_10_MHZ) &&
 		    (nla_put_flag(msg, NL80211_ATTR_SUPPORT_5_MHZ) ||
 		     nla_put_flag(msg, NL80211_ATTR_SUPPORT_10_MHZ)))
 			goto nla_put_failure;

-		if (dev->wiphy.max_ap_assoc_sta &&
+		if (rdev->wiphy.max_ap_assoc_sta &&
 		    nla_put_u32(msg, NL80211_ATTR_MAX_AP_ASSOC_STA,
-				dev->wiphy.max_ap_assoc_sta))
+				rdev->wiphy.max_ap_assoc_sta))
 			goto nla_put_failure;

 		state->split_start++;
 		break;
 	case 11:
-		if (dev->wiphy.n_vendor_commands) {
+		if (rdev->wiphy.n_vendor_commands) {
 			const struct nl80211_vendor_cmd_info *info;
 			struct nlattr *nested;

@@ -1623,15 +1650,15 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
 			if (!nested)
 				goto nla_put_failure;

-			for (i = 0; i < dev->wiphy.n_vendor_commands; i++) {
-				info = &dev->wiphy.vendor_commands[i].info;
+			for (i = 0; i < rdev->wiphy.n_vendor_commands; i++) {
+				info = &rdev->wiphy.vendor_commands[i].info;
 				if (nla_put(msg, i + 1, sizeof(*info), info))
 					goto nla_put_failure;
 			}
 			nla_nest_end(msg, nested);
 		}

-		if (dev->wiphy.n_vendor_events) {
+		if (rdev->wiphy.n_vendor_events) {
 			const struct nl80211_vendor_cmd_info *info;
 			struct nlattr *nested;

@@ -1640,18 +1667,26 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
 			if (!nested)
 				goto nla_put_failure;

-			for (i = 0; i < dev->wiphy.n_vendor_events; i++) {
-				info = &dev->wiphy.vendor_events[i];
+			for (i = 0; i < rdev->wiphy.n_vendor_events; i++) {
+				info = &rdev->wiphy.vendor_events[i];
 				if (nla_put(msg, i + 1, sizeof(*info), info))
 					goto nla_put_failure;
 			}
 			nla_nest_end(msg, nested);
 		}
+		state->split_start++;
+		break;
+	case 12:
+		if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH &&
+		    nla_put_u8(msg, NL80211_ATTR_MAX_CSA_COUNTERS,
+			       rdev->wiphy.max_num_csa_counters))
+			goto nla_put_failure;

 		/* done */
 		state->split_start = 0;
 		break;
 	}
+ finish:
 	return genlmsg_end(msg, hdr);

 nla_put_failure:
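state->split_start is the resume cursor of a small state machine: each case emits one chunk of wiphy data, then either advances and breaks (split dumps, one chunk per message) or falls through (old unsplit dumps, everything in one message). The new case 12 and the finish label extend that machine. An illustrative skeleton, not the literal code:

switch (state->split_start) {
case 0:
	/* ... basic attributes ... */
	state->split_start++;
	if (state->split)
		break;		/* split dump: one chunk per message */
	/* unsplit dump: fall through and emit everything at once */
case 1:
	/* ... more attributes ... */
	state->split_start++;
	break;
/* ... */
case 12:
	/* ... final attributes ... */
	state->split_start = 0;	/* 0 = done; the dump loop moves on */
	break;
}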
@@ -1684,7 +1719,7 @@ static int nl80211_dump_wiphy_parse(struct sk_buff *skb,
 			if (!netdev)
 				return -ENODEV;
 			if (netdev->ieee80211_ptr) {
-				rdev = wiphy_to_dev(
+				rdev = wiphy_to_rdev(
 					netdev->ieee80211_ptr->wiphy);
 				state->filter_wiphy = rdev->wiphy_idx;
 			}
@@ -1697,7 +1732,7 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	int idx = 0, ret;
 	struct nl80211_dump_wiphy_state *state = (void *)cb->args[0];
-	struct cfg80211_registered_device *dev;
+	struct cfg80211_registered_device *rdev;

 	rtnl_lock();
 	if (!state) {
@@ -1716,17 +1751,18 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
 		cb->args[0] = (long)state;
 	}

-	list_for_each_entry(dev, &cfg80211_rdev_list, list) {
-		if (!net_eq(wiphy_net(&dev->wiphy), sock_net(skb->sk)))
+	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+		if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk)))
 			continue;
 		if (++idx <= state->start)
 			continue;
 		if (state->filter_wiphy != -1 &&
-		    state->filter_wiphy != dev->wiphy_idx)
+		    state->filter_wiphy != rdev->wiphy_idx)
 			continue;
 		/* attempt to fit multiple wiphy data chunks into the skb */
 		do {
-			ret = nl80211_send_wiphy(dev, skb,
+			ret = nl80211_send_wiphy(rdev, NL80211_CMD_NEW_WIPHY,
+						 skb,
 						 NETLINK_CB(cb->skb).portid,
 						 cb->nlh->nlmsg_seq,
 						 NLM_F_MULTI, state);
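Netlink dump callbacks are invoked repeatedly until they return 0, so all progress must be parked in cb->args[] between invocations; the do-loop above additionally packs as many split chunks as fit into each skb. A generic sketch of the resumption pattern (my_dump, n_items and fill_one() are illustrative stand-ins, not nl80211 names):

static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	long idx;

	for (idx = cb->args[1]; idx < n_items; idx++) {
		if (fill_one(skb, idx) < 0)
			break;		/* skb full, stop here */
	}
	cb->args[1] = idx;		/* resume point for the next call */
	return skb->len;		/* non-zero: call me again */
}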
@@ -1774,14 +1810,15 @@ static int nl80211_dump_wiphy_done(struct netlink_callback *cb)
 static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info)
 {
 	struct sk_buff *msg;
-	struct cfg80211_registered_device *dev = info->user_ptr[0];
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct nl80211_dump_wiphy_state state = {};

 	msg = nlmsg_new(4096, GFP_KERNEL);
 	if (!msg)
 		return -ENOMEM;

-	if (nl80211_send_wiphy(dev, msg, info->snd_portid, info->snd_seq, 0,
+	if (nl80211_send_wiphy(rdev, NL80211_CMD_NEW_WIPHY, msg,
+			       info->snd_portid, info->snd_seq, 0,
 			       &state) < 0) {
 		nlmsg_free(msg);
 		return -ENOBUFS;
@@ -1908,18 +1945,20 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
 }

 static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
-				 struct wireless_dev *wdev,
+				 struct net_device *dev,
 				 struct genl_info *info)
 {
 	struct cfg80211_chan_def chandef;
 	int result;
 	enum nl80211_iftype iftype = NL80211_IFTYPE_MONITOR;
+	struct wireless_dev *wdev = NULL;

-	if (wdev)
-		iftype = wdev->iftype;
-
+	if (dev)
+		wdev = dev->ieee80211_ptr;
 	if (!nl80211_can_set_dev_channel(wdev))
 		return -EOPNOTSUPP;
+	if (wdev)
+		iftype = wdev->iftype;

 	result = nl80211_parse_chandef(rdev, info, &chandef);
 	if (result)
@@ -1928,14 +1967,27 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
 	switch (iftype) {
 	case NL80211_IFTYPE_AP:
 	case NL80211_IFTYPE_P2P_GO:
-		if (wdev->beacon_interval) {
-			result = -EBUSY;
-			break;
-		}
-		if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef)) {
+		if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, iftype)) {
 			result = -EINVAL;
 			break;
 		}
+		if (wdev->beacon_interval) {
+			if (!dev || !rdev->ops->set_ap_chanwidth ||
+			    !(rdev->wiphy.features &
+			      NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE)) {
+				result = -EBUSY;
+				break;
+			}
+
+			/* Only allow dynamic channel width changes */
+			if (chandef.chan != wdev->preset_chandef.chan) {
+				result = -EBUSY;
+				break;
+			}
+			result = rdev_set_ap_chanwidth(rdev, dev, &chandef);
+			if (result)
+				break;
+		}
 		wdev->preset_chandef = chandef;
 		result = 0;
 		break;
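The rewritten AP/P2P-GO branch first validates the channel against regulatory rules, then permits a channel change on a running AP only when three things hold: a real netdev, a driver set_ap_chanwidth op, and the NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE feature flag, and even then only if the control channel stays the same, i.e. only the width may change. The same decision tree with early returns instead of result/break (simplified sketch):

if (wdev->beacon_interval) {		/* AP is already beaconing */
	if (!dev || !rdev->ops->set_ap_chanwidth ||
	    !(rdev->wiphy.features &
	      NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE))
		return -EBUSY;		/* no live changes supported */
	if (chandef.chan != wdev->preset_chandef.chan)
		return -EBUSY;		/* only the width may change */
	return rdev_set_ap_chanwidth(rdev, dev, &chandef);
}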
@@ -1957,7 +2009,7 @@ static int nl80211_set_channel(struct sk_buff *skb, struct genl_info *info)
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct net_device *netdev = info->user_ptr[1];

-	return __nl80211_set_channel(rdev, netdev->ieee80211_ptr, info);
+	return __nl80211_set_channel(rdev, netdev, info);
 }

 static int nl80211_set_wds_peer(struct sk_buff *skb, struct genl_info *info)
@@ -2013,7 +2065,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)

 		netdev = __dev_get_by_index(genl_info_net(info), ifindex);
 		if (netdev && netdev->ieee80211_ptr)
-			rdev = wiphy_to_dev(netdev->ieee80211_ptr->wiphy);
+			rdev = wiphy_to_rdev(netdev->ieee80211_ptr->wiphy);
 		else
 			netdev = NULL;
 	}
@@ -2079,9 +2131,10 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
 	}

 	if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
-		result = __nl80211_set_channel(rdev,
-			nl80211_can_set_dev_channel(wdev) ? wdev : NULL,
-			info);
+		result = __nl80211_set_channel(
+			rdev,
+			nl80211_can_set_dev_channel(wdev) ? netdev : NULL,
+			info);
 		if (result)
 			return result;
 	}
@@ -2229,7 +2282,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
 static inline u64 wdev_id(struct wireless_dev *wdev)
 {
 	return (u64)wdev->identifier |
-	       ((u64)wiphy_to_dev(wdev->wiphy)->wiphy_idx << 32);
+	       ((u64)wiphy_to_rdev(wdev->wiphy)->wiphy_idx << 32);
 }

 static int nl80211_send_chandef(struct sk_buff *msg,
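wdev_id() packs two 32-bit values into one u64 so a wireless_dev can be addressed globally over netlink: the wiphy index in the high word, the per-wiphy wdev identifier in the low word. A worked example:

/* wiphy_idx = 1, wdev->identifier = 3 */
u64 id = (u64)3 | ((u64)1 << 32);	/* 0x0000000100000003 */

u32 wdev_ident = (u32)id;		/* 3: low word  */
u32 wiphy_idx  = (u32)(id >> 32);	/* 1: high word */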
@@ -2355,7 +2408,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
 static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
 {
 	struct sk_buff *msg;
-	struct cfg80211_registered_device *dev = info->user_ptr[0];
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct wireless_dev *wdev = info->user_ptr[1];

 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
@@ -2363,7 +2416,7 @@ static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
 		return -ENOMEM;

 	if (nl80211_send_iface(msg, info->snd_portid, info->snd_seq, 0,
-			       dev, wdev) < 0) {
+			       rdev, wdev) < 0) {
 		nlmsg_free(msg);
 		return -ENOBUFS;
 	}
@@ -2514,6 +2567,9 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
 	enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED;
 	u32 flags;

+	/* to avoid failing a new interface creation due to pending removal */
+	cfg80211_destroy_ifaces(rdev);
+
 	memset(&params, 0, sizeof(params));

 	if (!info->attrs[NL80211_ATTR_IFNAME])
@@ -2563,6 +2619,9 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
 		return PTR_ERR(wdev);
 	}

+	if (info->attrs[NL80211_ATTR_IFACE_SOCKET_OWNER])
+		wdev->owner_nlportid = info->snd_portid;
+
 	switch (type) {
 	case NL80211_IFTYPE_MESH_POINT:
 		if (!info->attrs[NL80211_ATTR_MESH_ID])
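NL80211_ATTR_IFACE_SOCKET_OWNER is a flag attribute: when the creating socket sets it, its netlink portid is recorded on the new wdev, and the interface is torn down automatically when that socket closes. The two halves of the mechanism in this patch, side by side (the second fragment comes from the netlink notifier further down):

/* creation: remember who owns the interface (0 means unowned) */
if (info->attrs[NL80211_ATTR_IFACE_SOCKET_OWNER])
	wdev->owner_nlportid = info->snd_portid;

/* NETLINK_URELEASE notifier: match the departing socket */
if (wdev->owner_nlportid == notify->portid)
	schedule_destroy_work = true;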
@@ -3142,7 +3201,6 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_ap_settings params;
 	int err;
-	u8 radar_detect_width = 0;

 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
 	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
@@ -3258,24 +3316,10 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
 	} else if (!nl80211_get_ap_channel(rdev, &params))
 		return -EINVAL;

-	if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef))
+	if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef,
+				     wdev->iftype))
 		return -EINVAL;

-	err = cfg80211_chandef_dfs_required(wdev->wiphy, &params.chandef);
-	if (err < 0)
-		return err;
-	if (err) {
-		radar_detect_width = BIT(params.chandef.width);
-		params.radar_required = true;
-	}
-
-	err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
-					   params.chandef.chan,
-					   CHAN_MODE_SHARED,
-					   radar_detect_width);
-	if (err)
-		return err;
-
 	if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
 		params.acl = parse_acl_data(&rdev->wiphy, info);
 		if (IS_ERR(params.acl))
@@ -3613,6 +3657,10 @@ static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq,
 	    nla_put_u32(msg, NL80211_STA_INFO_TX_FAILED,
 			sinfo->tx_failed))
 		goto nla_put_failure;
+	if ((sinfo->filled & STATION_INFO_EXPECTED_THROUGHPUT) &&
+	    nla_put_u32(msg, NL80211_STA_INFO_EXPECTED_THROUGHPUT,
+			sinfo->expected_throughput))
+		goto nla_put_failure;
 	if ((sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT) &&
 	    nla_put_u32(msg, NL80211_STA_INFO_BEACON_LOSS,
 			sinfo->beacon_loss_count))
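Every optional field in station_info is gated by a bit in sinfo->filled, so userspace only sees values the driver actually produced. A driver exporting the new field would do something like the following sketch (expected_throughput is documented as kbps; tput_kbps is a placeholder):

/* in the driver's get_station()/dump_station() implementation */
sinfo->filled |= STATION_INFO_EXPECTED_THROUGHPUT;
sinfo->expected_throughput = tput_kbps;	/* e.g. from rate control */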
@@ -3675,13 +3723,13 @@ static int nl80211_dump_station(struct sk_buff *skb,
 			       struct netlink_callback *cb)
 {
 	struct station_info sinfo;
-	struct cfg80211_registered_device *dev;
+	struct cfg80211_registered_device *rdev;
 	struct wireless_dev *wdev;
 	u8 mac_addr[ETH_ALEN];
 	int sta_idx = cb->args[2];
 	int err;

-	err = nl80211_prepare_wdev_dump(skb, cb, &dev, &wdev);
+	err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
 	if (err)
 		return err;

@@ -3690,14 +3738,14 @@ static int nl80211_dump_station(struct sk_buff *skb,
 		goto out_err;
 	}

-	if (!dev->ops->dump_station) {
+	if (!rdev->ops->dump_station) {
 		err = -EOPNOTSUPP;
 		goto out_err;
 	}

 	while (1) {
 		memset(&sinfo, 0, sizeof(sinfo));
-		err = rdev_dump_station(dev, wdev->netdev, sta_idx,
+		err = rdev_dump_station(rdev, wdev->netdev, sta_idx,
 					mac_addr, &sinfo);
 		if (err == -ENOENT)
 			break;
@@ -3707,7 +3755,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
 		if (nl80211_send_station(skb,
 				NETLINK_CB(cb->skb).portid,
 				cb->nlh->nlmsg_seq, NLM_F_MULTI,
-				dev, wdev->netdev, mac_addr,
+				rdev, wdev->netdev, mac_addr,
 				&sinfo) < 0)
 			goto out;

@@ -3719,7 +3767,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
 	cb->args[2] = sta_idx;
 	err = skb->len;
  out_err:
-	nl80211_finish_wdev_dump(dev);
+	nl80211_finish_wdev_dump(rdev);

 	return err;
 }
@@ -4380,18 +4428,18 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
 			      struct netlink_callback *cb)
 {
 	struct mpath_info pinfo;
-	struct cfg80211_registered_device *dev;
+	struct cfg80211_registered_device *rdev;
 	struct wireless_dev *wdev;
 	u8 dst[ETH_ALEN];
 	u8 next_hop[ETH_ALEN];
 	int path_idx = cb->args[2];
 	int err;

-	err = nl80211_prepare_wdev_dump(skb, cb, &dev, &wdev);
+	err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
 	if (err)
 		return err;

-	if (!dev->ops->dump_mpath) {
+	if (!rdev->ops->dump_mpath) {
 		err = -EOPNOTSUPP;
 		goto out_err;
 	}
@@ -4402,7 +4450,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
 	}

 	while (1) {
-		err = rdev_dump_mpath(dev, wdev->netdev, path_idx, dst,
+		err = rdev_dump_mpath(rdev, wdev->netdev, path_idx, dst,
 				      next_hop, &pinfo);
 		if (err == -ENOENT)
 			break;
@@ -4423,7 +4471,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
 	cb->args[2] = path_idx;
 	err = skb->len;
  out_err:
-	nl80211_finish_wdev_dump(dev);
+	nl80211_finish_wdev_dump(rdev);
 	return err;
 }

@@ -4663,7 +4711,6 @@ static int parse_reg_rule(struct nlattr *tb[],

 static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
 {
-	int r;
 	char *data = NULL;
 	enum nl80211_user_reg_hint_type user_reg_hint_type;

@@ -4676,11 +4723,6 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
 	if (unlikely(!rcu_access_pointer(cfg80211_regdomain)))
 		return -EINPROGRESS;

-	if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
-		return -EINVAL;
-
-	data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
-
 	if (info->attrs[NL80211_ATTR_USER_REG_HINT_TYPE])
 		user_reg_hint_type =
 			nla_get_u32(info->attrs[NL80211_ATTR_USER_REG_HINT_TYPE]);
@@ -4690,14 +4732,16 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
 	switch (user_reg_hint_type) {
 	case NL80211_USER_REG_HINT_USER:
 	case NL80211_USER_REG_HINT_CELL_BASE:
-		break;
+		if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
+			return -EINVAL;
+
+		data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
+		return regulatory_hint_user(data, user_reg_hint_type);
+	case NL80211_USER_REG_HINT_INDOOR:
+		return regulatory_hint_indoor_user();
 	default:
 		return -EINVAL;
 	}
-
-	r = regulatory_hint_user(data, user_reg_hint_type);
-
-	return r;
 }

 static int nl80211_get_mesh_config(struct sk_buff *skb,
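After this change each hint type returns straight out of its switch case, which is what lets the alpha2 attribute become conditional: indoor hints carry no country code at all. Condensed control flow, same logic as the hunk above, with comments:

switch (user_reg_hint_type) {
case NL80211_USER_REG_HINT_USER:
case NL80211_USER_REG_HINT_CELL_BASE:
	if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
		return -EINVAL;		/* these need "US", "DE", ... */
	data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
	return regulatory_hint_user(data, user_reg_hint_type);
case NL80211_USER_REG_HINT_INDOOR:
	return regulatory_hint_indoor_user();	/* no alpha2 involved */
default:
	return -EINVAL;
}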
@@ -5796,7 +5840,8 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
 	if (wdev->cac_started)
 		return -EBUSY;

-	err = cfg80211_chandef_dfs_required(wdev->wiphy, &chandef);
+	err = cfg80211_chandef_dfs_required(wdev->wiphy, &chandef,
+					    wdev->iftype);
 	if (err < 0)
 		return err;

@@ -5809,12 +5854,6 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
 	if (!rdev->ops->start_radar_detection)
 		return -EOPNOTSUPP;

-	err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
-					   chandef.chan, CHAN_MODE_SHARED,
-					   BIT(chandef.width));
-	if (err)
-		return err;
-
 	cac_time_ms = cfg80211_chandef_dfs_cac_time(&rdev->wiphy, &chandef);
 	if (WARN_ON(!cac_time_ms))
 		cac_time_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
@@ -5843,6 +5882,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
 	u8 radar_detect_width = 0;
 	int err;
 	bool need_new_beacon = false;
+	int len, i;

 	if (!rdev->ops->channel_switch ||
 	    !(rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH))
@@ -5901,26 +5941,55 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
 	if (!csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON])
 		return -EINVAL;

-	params.counter_offset_beacon =
-		nla_get_u16(csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]);
-	if (params.counter_offset_beacon >= params.beacon_csa.tail_len)
+	len = nla_len(csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]);
+	if (!len || (len % sizeof(u16)))
 		return -EINVAL;

-	/* sanity check - counters should be the same */
-	if (params.beacon_csa.tail[params.counter_offset_beacon] !=
-	    params.count)
+	params.n_counter_offsets_beacon = len / sizeof(u16);
+	if (rdev->wiphy.max_num_csa_counters &&
+	    (params.n_counter_offsets_beacon >
+	     rdev->wiphy.max_num_csa_counters))
 		return -EINVAL;

+	params.counter_offsets_beacon =
+		nla_data(csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]);
+
+	/* sanity checks - counters should fit and be the same */
+	for (i = 0; i < params.n_counter_offsets_beacon; i++) {
+		u16 offset = params.counter_offsets_beacon[i];
+
+		if (offset >= params.beacon_csa.tail_len)
+			return -EINVAL;
+
+		if (params.beacon_csa.tail[offset] != params.count)
+			return -EINVAL;
+	}
+
 	if (csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]) {
-		params.counter_offset_presp =
-			nla_get_u16(csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]);
-		if (params.counter_offset_presp >=
-		    params.beacon_csa.probe_resp_len)
+		len = nla_len(csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]);
+		if (!len || (len % sizeof(u16)))
 			return -EINVAL;

-		if (params.beacon_csa.probe_resp[params.counter_offset_presp] !=
-		    params.count)
+		params.n_counter_offsets_presp = len / sizeof(u16);
+		if (rdev->wiphy.max_num_csa_counters &&
+		    (params.n_counter_offsets_beacon >
+		     rdev->wiphy.max_num_csa_counters))
 			return -EINVAL;
+
+		params.counter_offsets_presp =
+			nla_data(csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]);
+
+		/* sanity checks - counters should fit and be the same */
+		for (i = 0; i < params.n_counter_offsets_presp; i++) {
+			u16 offset = params.counter_offsets_presp[i];
+
+			if (offset >= params.beacon_csa.probe_resp_len)
+				return -EINVAL;
+
+			if (params.beacon_csa.probe_resp[offset] !=
+			    params.count)
+				return -EINVAL;
+		}
 	}

 skip_beacons:
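The single u16 counter offset becomes an array: the binary attribute is reinterpreted as u16[], its length must be a non-zero multiple of sizeof(u16), the element count is capped by the wiphy's advertised maximum, and every offset must point at a byte that currently holds the CSA count. (Note that the probe-response branch re-checks n_counter_offsets_beacon against the maximum, which reads like a copy-and-paste slip.) The generic shape of this validation, with attr, n, offsets, buf and buf_len as illustrative names:

len = nla_len(attr);
if (!len || (len % sizeof(u16)))
	return -EINVAL;			/* must be a whole u16 array */
n = len / sizeof(u16);
offsets = nla_data(attr);		/* points into attribute data */
for (i = 0; i < n; i++) {
	if (offsets[i] >= buf_len)	/* offset must fit the buffer */
		return -EINVAL;
	if (buf[offsets[i]] != params.count)
		return -EINVAL;		/* byte must hold the CSA count */
}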
@@ -5928,27 +5997,25 @@ skip_beacons:
 	if (err)
 		return err;

-	if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef))
+	if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef,
+				     wdev->iftype))
 		return -EINVAL;

-	switch (dev->ieee80211_ptr->iftype) {
-	case NL80211_IFTYPE_AP:
-	case NL80211_IFTYPE_P2P_GO:
-	case NL80211_IFTYPE_ADHOC:
-	case NL80211_IFTYPE_MESH_POINT:
-		err = cfg80211_chandef_dfs_required(wdev->wiphy,
-						    &params.chandef);
-		if (err < 0)
-			return err;
-		if (err) {
-			radar_detect_width = BIT(params.chandef.width);
-			params.radar_required = true;
-		}
-		break;
-	default:
-		break;
+	err = cfg80211_chandef_dfs_required(wdev->wiphy,
+					    &params.chandef,
+					    wdev->iftype);
+	if (err < 0)
+		return err;
+
+	if (err > 0) {
+		radar_detect_width = BIT(params.chandef.width);
+		params.radar_required = true;
 	}

+	/* TODO: I left this here for now. With channel switch, the
+	 * verification is a bit more complicated, because we only do
+	 * it later when the channel switch really happens.
+	 */
 	err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
 					   params.chandef.chan,
 					   CHAN_MODE_SHARED,
@@ -6175,12 +6242,12 @@ static int nl80211_dump_survey(struct sk_buff *skb,
 			       struct netlink_callback *cb)
 {
 	struct survey_info survey;
-	struct cfg80211_registered_device *dev;
+	struct cfg80211_registered_device *rdev;
 	struct wireless_dev *wdev;
 	int survey_idx = cb->args[2];
 	int res;

-	res = nl80211_prepare_wdev_dump(skb, cb, &dev, &wdev);
+	res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
 	if (res)
 		return res;

@@ -6189,7 +6256,7 @@ static int nl80211_dump_survey(struct sk_buff *skb,
 		goto out_err;
 	}

-	if (!dev->ops->dump_survey) {
+	if (!rdev->ops->dump_survey) {
 		res = -EOPNOTSUPP;
 		goto out_err;
 	}
@@ -6197,7 +6264,7 @@ static int nl80211_dump_survey(struct sk_buff *skb,
 	while (1) {
 		struct ieee80211_channel *chan;

-		res = rdev_dump_survey(dev, wdev->netdev, survey_idx, &survey);
+		res = rdev_dump_survey(rdev, wdev->netdev, survey_idx, &survey);
 		if (res == -ENOENT)
 			break;
 		if (res)
@@ -6209,7 +6276,7 @@ static int nl80211_dump_survey(struct sk_buff *skb,
 			goto out;
 		}

-		chan = ieee80211_get_channel(&dev->wiphy,
+		chan = ieee80211_get_channel(&rdev->wiphy,
 					     survey.channel->center_freq);
 		if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
 			survey_idx++;
@@ -6228,7 +6295,7 @@ static int nl80211_dump_survey(struct sk_buff *skb,
 	cb->args[2] = survey_idx;
 	res = skb->len;
  out_err:
-	nl80211_finish_wdev_dump(dev);
+	nl80211_finish_wdev_dump(rdev);
 	return res;
 }

@@ -6704,7 +6771,8 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
 	if (err)
 		return err;

-	if (!cfg80211_reg_can_beacon(&rdev->wiphy, &ibss.chandef))
+	if (!cfg80211_reg_can_beacon(&rdev->wiphy, &ibss.chandef,
+				     NL80211_IFTYPE_ADHOC))
 		return -EINVAL;

 	switch (ibss.chandef.width) {
@@ -6879,7 +6947,7 @@ struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
 					   int vendor_event_idx,
 					   int approxlen, gfp_t gfp)
 {
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 	const struct nl80211_vendor_cmd_info *info;

 	switch (cmd) {
@@ -7767,6 +7835,27 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
 	if (!chandef.chan && params.offchan)
 		return -EINVAL;

+	params.buf = nla_data(info->attrs[NL80211_ATTR_FRAME]);
+	params.len = nla_len(info->attrs[NL80211_ATTR_FRAME]);
+
+	if (info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]) {
+		int len = nla_len(info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]);
+		int i;
+
+		if (len % sizeof(u16))
+			return -EINVAL;
+
+		params.n_csa_offsets = len / sizeof(u16);
+		params.csa_offsets =
+			nla_data(info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]);
+
+		/* check that all the offsets fit the frame */
+		for (i = 0; i < params.n_csa_offsets; i++) {
+			if (params.csa_offsets[i] >= params.len)
+				return -EINVAL;
+		}
+	}
+
 	if (!params.dont_wait_for_ack) {
 		msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 		if (!msg)
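Note the ordering: params.buf and params.len are now assigned before the offsets are parsed (they used to be set further down, see the removal in the next hunk), because the bounds check needs the frame length to validate against:

/* each CSA counter offset must point inside the frame itself */
for (i = 0; i < params.n_csa_offsets; i++)
	if (params.csa_offsets[i] >= params.len)
		return -EINVAL;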
@@ -7780,8 +7869,6 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
 		}
 	}

-	params.buf = nla_data(info->attrs[NL80211_ATTR_FRAME]);
-	params.len = nla_len(info->attrs[NL80211_ATTR_FRAME]);
 	params.chan = chandef.chan;
 	err = cfg80211_mlme_mgmt_tx(rdev, wdev, &params, &cookie);
 	if (err)
@@ -8478,6 +8565,8 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)

 		nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN],
 				    rem) {
+			u8 *mask_pat;
+
 			nla_parse(pat_tb, MAX_NL80211_PKTPAT, nla_data(pat),
 				  nla_len(pat), NULL);
 			err = -EINVAL;
@@ -8501,19 +8590,18 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
 				goto error;
 			new_triggers.patterns[i].pkt_offset = pkt_offset;

-			new_triggers.patterns[i].mask =
-				kmalloc(mask_len + pat_len, GFP_KERNEL);
-			if (!new_triggers.patterns[i].mask) {
+			mask_pat = kmalloc(mask_len + pat_len, GFP_KERNEL);
+			if (!mask_pat) {
 				err = -ENOMEM;
 				goto error;
 			}
-			new_triggers.patterns[i].pattern =
-				new_triggers.patterns[i].mask + mask_len;
-			memcpy(new_triggers.patterns[i].mask,
-			       nla_data(pat_tb[NL80211_PKTPAT_MASK]),
+			new_triggers.patterns[i].mask = mask_pat;
+			memcpy(mask_pat, nla_data(pat_tb[NL80211_PKTPAT_MASK]),
 			       mask_len);
+			mask_pat += mask_len;
+			new_triggers.patterns[i].pattern = mask_pat;
 			new_triggers.patterns[i].pattern_len = pat_len;
-			memcpy(new_triggers.patterns[i].pattern,
+			memcpy(mask_pat,
 			       nla_data(pat_tb[NL80211_PKTPAT_PATTERN]),
 			       pat_len);
 			i++;
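Mask and pattern now share one allocation, with the pattern placed immediately after the mask, so a single kfree() of the stored mask pointer frees both. The layout the code builds, sketched with mask_src/pat_src/pat as placeholder names:

/*  mask_pat ──> [ mask: mask_len bytes ][ pattern: pat_len bytes ]  */
mask_pat = kmalloc(mask_len + pat_len, GFP_KERNEL);
if (!mask_pat)
	return -ENOMEM;
pat->mask = mask_pat;
memcpy(mask_pat, mask_src, mask_len);
mask_pat += mask_len;			/* step past the mask */
pat->pattern = mask_pat;
memcpy(mask_pat, pat_src, pat_len);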
@@ -8705,6 +8793,8 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,

 	nla_for_each_nested(pat, tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN],
 			    rem) {
+		u8 *mask_pat;
+
 		nla_parse(pat_tb, MAX_NL80211_PKTPAT, nla_data(pat),
 			  nla_len(pat), NULL);
 		if (!pat_tb[NL80211_PKTPAT_MASK] ||
@@ -8726,17 +8816,19 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
 			return -EINVAL;
 		new_rule->patterns[i].pkt_offset = pkt_offset;

-		new_rule->patterns[i].mask =
-			kmalloc(mask_len + pat_len, GFP_KERNEL);
-		if (!new_rule->patterns[i].mask)
+		mask_pat = kmalloc(mask_len + pat_len, GFP_KERNEL);
+		if (!mask_pat)
 			return -ENOMEM;
-		new_rule->patterns[i].pattern =
-			new_rule->patterns[i].mask + mask_len;
-		memcpy(new_rule->patterns[i].mask,
-		       nla_data(pat_tb[NL80211_PKTPAT_MASK]), mask_len);
+
+		new_rule->patterns[i].mask = mask_pat;
+		memcpy(mask_pat, nla_data(pat_tb[NL80211_PKTPAT_MASK]),
+		       mask_len);
+
+		mask_pat += mask_len;
+		new_rule->patterns[i].pattern = mask_pat;
 		new_rule->patterns[i].pattern_len = pat_len;
-		memcpy(new_rule->patterns[i].pattern,
-		       nla_data(pat_tb[NL80211_PKTPAT_PATTERN]), pat_len);
+		memcpy(mask_pat, nla_data(pat_tb[NL80211_PKTPAT_PATTERN]),
+		       pat_len);
 		i++;
 	}

@@ -8981,9 +9073,8 @@ static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info)
 	if (wdev->p2p_started)
 		return 0;

-	err = cfg80211_can_add_interface(rdev, wdev->iftype);
-	if (err)
-		return err;
+	if (rfkill_blocked(rdev->rfkill))
+		return -ERFKILL;

 	err = rdev_start_p2p_device(rdev, wdev);
 	if (err)
@@ -9192,7 +9283,7 @@ struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy,
 					   enum nl80211_attrs attr,
 					   int approxlen)
 {
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);

 	if (WARN_ON(!rdev->cur_cmd_info))
 		return NULL;
@@ -9316,7 +9407,7 @@ static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
 	}

 	dev = wdev->netdev;
-	rdev = wiphy_to_dev(wdev->wiphy);
+	rdev = wiphy_to_rdev(wdev->wiphy);

 	if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV) {
 		if (!dev) {
@@ -10017,16 +10108,20 @@ static const struct genl_ops nl80211_ops[] = {

 /* notification functions */

-void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev)
+void nl80211_notify_wiphy(struct cfg80211_registered_device *rdev,
+			  enum nl80211_commands cmd)
 {
 	struct sk_buff *msg;
 	struct nl80211_dump_wiphy_state state = {};

+	WARN_ON(cmd != NL80211_CMD_NEW_WIPHY &&
+		cmd != NL80211_CMD_DEL_WIPHY);
+
 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (!msg)
 		return;

-	if (nl80211_send_wiphy(rdev, msg, 0, 0, 0, &state) < 0) {
+	if (nl80211_send_wiphy(rdev, cmd, msg, 0, 0, 0, &state) < 0) {
 		nlmsg_free(msg);
 		return;
 	}
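The old rename-only notifier becomes a generic wiphy notifier: one helper builds a full wiphy message through the same send path the dumps use, then broadcasts it with the given command. Presumed call sites, names assumed since they are not shown in this hunk:

nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY);	/* register/rename */
nl80211_notify_wiphy(rdev, NL80211_CMD_DEL_WIPHY);	/* unregister */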
@@ -10345,7 +10440,7 @@ void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, const u8 *buf,
10345{ 10440{
10346 struct wireless_dev *wdev = dev->ieee80211_ptr; 10441 struct wireless_dev *wdev = dev->ieee80211_ptr;
10347 struct wiphy *wiphy = wdev->wiphy; 10442 struct wiphy *wiphy = wdev->wiphy;
10348 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 10443 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
10349 const struct ieee80211_mgmt *mgmt = (void *)buf; 10444 const struct ieee80211_mgmt *mgmt = (void *)buf;
10350 u32 cmd; 10445 u32 cmd;
10351 10446
@@ -10567,7 +10662,7 @@ void cfg80211_notify_new_peer_candidate(struct net_device *dev, const u8 *addr,
10567 const u8* ie, u8 ie_len, gfp_t gfp) 10662 const u8* ie, u8 ie_len, gfp_t gfp)
10568{ 10663{
10569 struct wireless_dev *wdev = dev->ieee80211_ptr; 10664 struct wireless_dev *wdev = dev->ieee80211_ptr;
10570 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 10665 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
10571 struct sk_buff *msg; 10666 struct sk_buff *msg;
10572 void *hdr; 10667 void *hdr;
10573 10668
@@ -10747,7 +10842,7 @@ void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie,
10747 unsigned int duration, gfp_t gfp) 10842 unsigned int duration, gfp_t gfp)
10748{ 10843{
10749 struct wiphy *wiphy = wdev->wiphy; 10844 struct wiphy *wiphy = wdev->wiphy;
10750 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 10845 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
10751 10846
10752 trace_cfg80211_ready_on_channel(wdev, cookie, chan, duration); 10847 trace_cfg80211_ready_on_channel(wdev, cookie, chan, duration);
10753 nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL, 10848 nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL,
@@ -10761,7 +10856,7 @@ void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie,
10761 gfp_t gfp) 10856 gfp_t gfp)
10762{ 10857{
10763 struct wiphy *wiphy = wdev->wiphy; 10858 struct wiphy *wiphy = wdev->wiphy;
10764 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 10859 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
10765 10860
10766 trace_cfg80211_ready_on_channel_expired(wdev, cookie, chan); 10861 trace_cfg80211_ready_on_channel_expired(wdev, cookie, chan);
10767 nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL, 10862 nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
@@ -10773,7 +10868,7 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
10773 struct station_info *sinfo, gfp_t gfp) 10868 struct station_info *sinfo, gfp_t gfp)
10774{ 10869{
10775 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; 10870 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
10776 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 10871 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
10777 struct sk_buff *msg; 10872 struct sk_buff *msg;
10778 10873
10779 trace_cfg80211_new_sta(dev, mac_addr, sinfo); 10874 trace_cfg80211_new_sta(dev, mac_addr, sinfo);
@@ -10796,7 +10891,7 @@ EXPORT_SYMBOL(cfg80211_new_sta);
10796void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp) 10891void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp)
10797{ 10892{
10798 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; 10893 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
10799 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 10894 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
10800 struct sk_buff *msg; 10895 struct sk_buff *msg;
10801 void *hdr; 10896 void *hdr;
10802 10897
@@ -10833,7 +10928,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
10833 gfp_t gfp) 10928 gfp_t gfp)
10834{ 10929{
10835 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; 10930 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
10836 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 10931 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
10837 struct sk_buff *msg; 10932 struct sk_buff *msg;
10838 void *hdr; 10933 void *hdr;
10839 10934
@@ -10868,7 +10963,7 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
10868 const u8 *addr, gfp_t gfp) 10963 const u8 *addr, gfp_t gfp)
10869{ 10964{
10870 struct wireless_dev *wdev = dev->ieee80211_ptr; 10965 struct wireless_dev *wdev = dev->ieee80211_ptr;
10871 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 10966 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
10872 struct sk_buff *msg; 10967 struct sk_buff *msg;
10873 void *hdr; 10968 void *hdr;
10874 u32 nlportid = ACCESS_ONCE(wdev->ap_unexpected_nlportid); 10969 u32 nlportid = ACCESS_ONCE(wdev->ap_unexpected_nlportid);
@@ -10988,7 +11083,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
10988 const u8 *buf, size_t len, bool ack, gfp_t gfp) 11083 const u8 *buf, size_t len, bool ack, gfp_t gfp)
10989{ 11084{
10990 struct wiphy *wiphy = wdev->wiphy; 11085 struct wiphy *wiphy = wdev->wiphy;
10991 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 11086 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
10992 struct net_device *netdev = wdev->netdev; 11087 struct net_device *netdev = wdev->netdev;
10993 struct sk_buff *msg; 11088 struct sk_buff *msg;
10994 void *hdr; 11089 void *hdr;
@@ -11032,7 +11127,7 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
11032{ 11127{
11033 struct wireless_dev *wdev = dev->ieee80211_ptr; 11128 struct wireless_dev *wdev = dev->ieee80211_ptr;
11034 struct wiphy *wiphy = wdev->wiphy; 11129 struct wiphy *wiphy = wdev->wiphy;
11035 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 11130 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
11036 struct sk_buff *msg; 11131 struct sk_buff *msg;
11037 struct nlattr *pinfoattr; 11132 struct nlattr *pinfoattr;
11038 void *hdr; 11133 void *hdr;
@@ -11124,7 +11219,7 @@ void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
11124{ 11219{
11125 struct wireless_dev *wdev = dev->ieee80211_ptr; 11220 struct wireless_dev *wdev = dev->ieee80211_ptr;
11126 struct wiphy *wiphy = wdev->wiphy; 11221 struct wiphy *wiphy = wdev->wiphy;
11127 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 11222 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
11128 11223
11129 trace_cfg80211_gtk_rekey_notify(dev, bssid); 11224 trace_cfg80211_gtk_rekey_notify(dev, bssid);
11130 nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp); 11225 nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp);
@@ -11182,7 +11277,7 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
11182{ 11277{
11183 struct wireless_dev *wdev = dev->ieee80211_ptr; 11278 struct wireless_dev *wdev = dev->ieee80211_ptr;
11184 struct wiphy *wiphy = wdev->wiphy; 11279 struct wiphy *wiphy = wdev->wiphy;
11185 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 11280 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
11186 11281
11187 trace_cfg80211_pmksa_candidate_notify(dev, index, bssid, preauth); 11282 trace_cfg80211_pmksa_candidate_notify(dev, index, bssid, preauth);
11188 nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp); 11283 nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp);
@@ -11229,7 +11324,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
11229{ 11324{
11230 struct wireless_dev *wdev = dev->ieee80211_ptr; 11325 struct wireless_dev *wdev = dev->ieee80211_ptr;
11231 struct wiphy *wiphy = wdev->wiphy; 11326 struct wiphy *wiphy = wdev->wiphy;
11232 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 11327 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
11233 11328
11234 ASSERT_WDEV_LOCK(wdev); 11329 ASSERT_WDEV_LOCK(wdev);
11235 11330
@@ -11253,7 +11348,7 @@ void cfg80211_cqm_txe_notify(struct net_device *dev,
11253{ 11348{
11254 struct wireless_dev *wdev = dev->ieee80211_ptr; 11349 struct wireless_dev *wdev = dev->ieee80211_ptr;
11255 struct wiphy *wiphy = wdev->wiphy; 11350 struct wiphy *wiphy = wdev->wiphy;
11256 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 11351 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
11257 struct sk_buff *msg; 11352 struct sk_buff *msg;
11258 struct nlattr *pinfoattr; 11353 struct nlattr *pinfoattr;
11259 void *hdr; 11354 void *hdr;
@@ -11353,7 +11448,7 @@ void cfg80211_cqm_pktloss_notify(struct net_device *dev,
11353{ 11448{
11354 struct wireless_dev *wdev = dev->ieee80211_ptr; 11449 struct wireless_dev *wdev = dev->ieee80211_ptr;
11355 struct wiphy *wiphy = wdev->wiphy; 11450 struct wiphy *wiphy = wdev->wiphy;
11356 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 11451 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
11357 struct sk_buff *msg; 11452 struct sk_buff *msg;
11358 struct nlattr *pinfoattr; 11453 struct nlattr *pinfoattr;
11359 void *hdr; 11454 void *hdr;
@@ -11400,7 +11495,7 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
11400 u64 cookie, bool acked, gfp_t gfp) 11495 u64 cookie, bool acked, gfp_t gfp)
11401{ 11496{
11402 struct wireless_dev *wdev = dev->ieee80211_ptr; 11497 struct wireless_dev *wdev = dev->ieee80211_ptr;
11403 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 11498 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
11404 struct sk_buff *msg; 11499 struct sk_buff *msg;
11405 void *hdr; 11500 void *hdr;
11406 11501
@@ -11440,7 +11535,7 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
11440 const u8 *frame, size_t len, 11535 const u8 *frame, size_t len,
11441 int freq, int sig_dbm) 11536 int freq, int sig_dbm)
11442{ 11537{
11443 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 11538 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
11444 struct sk_buff *msg; 11539 struct sk_buff *msg;
11445 void *hdr; 11540 void *hdr;
11446 struct cfg80211_beacon_registration *reg; 11541 struct cfg80211_beacon_registration *reg;
@@ -11487,7 +11582,7 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
11487 struct cfg80211_wowlan_wakeup *wakeup, 11582 struct cfg80211_wowlan_wakeup *wakeup,
11488 gfp_t gfp) 11583 gfp_t gfp)
11489{ 11584{
11490 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 11585 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
11491 struct sk_buff *msg; 11586 struct sk_buff *msg;
11492 void *hdr; 11587 void *hdr;
11493 int size = 200; 11588 int size = 200;
@@ -11597,7 +11692,7 @@ void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer,
11597 u16 reason_code, gfp_t gfp) 11692 u16 reason_code, gfp_t gfp)
11598{ 11693{
11599 struct wireless_dev *wdev = dev->ieee80211_ptr; 11694 struct wireless_dev *wdev = dev->ieee80211_ptr;
11600 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 11695 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
11601 struct sk_buff *msg; 11696 struct sk_buff *msg;
11602 void *hdr; 11697 void *hdr;
11603 11698
@@ -11649,9 +11744,15 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
11649 rcu_read_lock(); 11744 rcu_read_lock();
11650 11745
11651 list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) { 11746 list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
11652 list_for_each_entry_rcu(wdev, &rdev->wdev_list, list) 11747 bool schedule_destroy_work = false;
11748
11749 list_for_each_entry_rcu(wdev, &rdev->wdev_list, list) {
11653 cfg80211_mlme_unregister_socket(wdev, notify->portid); 11750 cfg80211_mlme_unregister_socket(wdev, notify->portid);
11654 11751
11752 if (wdev->owner_nlportid == notify->portid)
11753 schedule_destroy_work = true;
11754 }
11755
11655 spin_lock_bh(&rdev->beacon_registrations_lock); 11756 spin_lock_bh(&rdev->beacon_registrations_lock);
11656 list_for_each_entry_safe(reg, tmp, &rdev->beacon_registrations, 11757 list_for_each_entry_safe(reg, tmp, &rdev->beacon_registrations,
11657 list) { 11758 list) {
@@ -11662,11 +11763,24 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
11662 } 11763 }
11663 } 11764 }
11664 spin_unlock_bh(&rdev->beacon_registrations_lock); 11765 spin_unlock_bh(&rdev->beacon_registrations_lock);
11766
11767 if (schedule_destroy_work) {
11768 struct cfg80211_iface_destroy *destroy;
11769
11770 destroy = kzalloc(sizeof(*destroy), GFP_ATOMIC);
11771 if (destroy) {
11772 destroy->nlportid = notify->portid;
11773 spin_lock(&rdev->destroy_list_lock);
11774 list_add(&destroy->list, &rdev->destroy_list);
11775 spin_unlock(&rdev->destroy_list_lock);
11776 schedule_work(&rdev->destroy_work);
11777 }
11778 }
11665 } 11779 }
11666 11780
11667 rcu_read_unlock(); 11781 rcu_read_unlock();
11668 11782
11669 return NOTIFY_DONE; 11783 return NOTIFY_OK;
11670} 11784}
11671 11785
11672static struct notifier_block nl80211_netlink_notifier = { 11786static struct notifier_block nl80211_netlink_notifier = {
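The notifier change above has to defer the actual interface teardown: nl80211_netlink_notify() walks the device list under rcu_read_lock(), where it cannot sleep, so all it may do is record the owner's portid and kick a work item. A minimal sketch of that pattern, with a hypothetical destroy_item standing in for struct cfg80211_iface_destroy:

    /* Sketch only: simplified stand-in types, not the upstream structs. */
    struct destroy_item {
        struct list_head list;
        u32 nlportid;
    };

    static void queue_owner_destroy(struct my_rdev *rdev, u32 portid)
    {
        struct destroy_item *item;

        /* GFP_ATOMIC: we may be called under rcu_read_lock() */
        item = kzalloc(sizeof(*item), GFP_ATOMIC);
        if (!item)
            return; /* best effort: on failure the cleanup is skipped */

        item->nlportid = portid;
        spin_lock(&rdev->destroy_list_lock);
        list_add(&item->list, &rdev->destroy_list);
        spin_unlock(&rdev->destroy_list_lock);

        /* the work item runs in process context and may sleep */
        schedule_work(&rdev->destroy_work);
    }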
@@ -11677,7 +11791,7 @@ void cfg80211_ft_event(struct net_device *netdev,
11677 struct cfg80211_ft_event_params *ft_event) 11791 struct cfg80211_ft_event_params *ft_event)
11678{ 11792{
11679 struct wiphy *wiphy = netdev->ieee80211_ptr->wiphy; 11793 struct wiphy *wiphy = netdev->ieee80211_ptr->wiphy;
11680 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 11794 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
11681 struct sk_buff *msg; 11795 struct sk_buff *msg;
11682 void *hdr; 11796 void *hdr;
11683 11797
@@ -11724,7 +11838,7 @@ void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp)
11724 void *hdr; 11838 void *hdr;
11725 u32 nlportid; 11839 u32 nlportid;
11726 11840
11727 rdev = wiphy_to_dev(wdev->wiphy); 11841 rdev = wiphy_to_rdev(wdev->wiphy);
11728 if (!rdev->crit_proto_nlportid) 11842 if (!rdev->crit_proto_nlportid)
11729 return; 11843 return;
11730 11844
@@ -11759,7 +11873,7 @@ EXPORT_SYMBOL(cfg80211_crit_proto_stopped);
11759void nl80211_send_ap_stopped(struct wireless_dev *wdev) 11873void nl80211_send_ap_stopped(struct wireless_dev *wdev)
11760{ 11874{
11761 struct wiphy *wiphy = wdev->wiphy; 11875 struct wiphy *wiphy = wdev->wiphy;
11762 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 11876 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
11763 struct sk_buff *msg; 11877 struct sk_buff *msg;
11764 void *hdr; 11878 void *hdr;
11765 11879
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 1e6df9630f42..49c9a482dd12 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -5,7 +5,8 @@
5 5
6int nl80211_init(void); 6int nl80211_init(void);
7void nl80211_exit(void); 7void nl80211_exit(void);
8void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev); 8void nl80211_notify_wiphy(struct cfg80211_registered_device *rdev,
9 enum nl80211_commands cmd);
9void nl80211_send_scan_start(struct cfg80211_registered_device *rdev, 10void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
10 struct wireless_dev *wdev); 11 struct wireless_dev *wdev);
11struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev, 12struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 74d97d33c938..d95bbe348138 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -199,7 +199,7 @@ static inline int rdev_change_station(struct cfg80211_registered_device *rdev,
199} 199}
200 200
201static inline int rdev_get_station(struct cfg80211_registered_device *rdev, 201static inline int rdev_get_station(struct cfg80211_registered_device *rdev,
202 struct net_device *dev, u8 *mac, 202 struct net_device *dev, const u8 *mac,
203 struct station_info *sinfo) 203 struct station_info *sinfo)
204{ 204{
205 int ret; 205 int ret;
@@ -950,4 +950,17 @@ static inline int rdev_set_qos_map(struct cfg80211_registered_device *rdev,
950 return ret; 950 return ret;
951} 951}
952 952
953static inline int
954rdev_set_ap_chanwidth(struct cfg80211_registered_device *rdev,
955 struct net_device *dev, struct cfg80211_chan_def *chandef)
956{
957 int ret;
958
959 trace_rdev_set_ap_chanwidth(&rdev->wiphy, dev, chandef);
960 ret = rdev->ops->set_ap_chanwidth(&rdev->wiphy, dev, chandef);
961 trace_rdev_return_int(&rdev->wiphy, ret);
962
963 return ret;
964}
965
953#endif /* __CFG80211_RDEV_OPS */ 966#endif /* __CFG80211_RDEV_OPS */
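rdev_set_ap_chanwidth() follows the established rdev-ops convention of bracketing the driver callback with trace events. Note that, like its siblings, it dereferences rdev->ops->set_ap_chanwidth unconditionally, so a caller is presumably expected to verify the op (or the matching capability) first; a hedged caller sketch:

    /* Sketch: guard the optional op before using the traced wrapper. */
    if (!rdev->ops->set_ap_chanwidth)
        return -EOPNOTSUPP;

    ret = rdev_set_ap_chanwidth(rdev, dev, &chandef);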
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index f59aaac586f8..558b0e3a02d8 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -65,11 +65,26 @@
65#define REG_DBG_PRINT(args...) 65#define REG_DBG_PRINT(args...)
66#endif 66#endif
67 67
68/**
69 * enum reg_request_treatment - regulatory request treatment
70 *
71 * @REG_REQ_OK: continue processing the regulatory request
72 * @REG_REQ_IGNORE: ignore the regulatory request
73 * @REG_REQ_INTERSECT: the regulatory domain resulting from this request should
74 * be intersected with the current one.
75 * @REG_REQ_ALREADY_SET: the regulatory request will not change the current
76 * regulatory settings, and no further processing is required.
 77 * @REG_REQ_USER_HINT_HANDLED: a non-alpha2 user hint was handled and no
 78 * further processing is required, i.e., no need to update last_request
79 * etc. This should be used for user hints that do not provide an alpha2
80 * but some other type of regulatory hint, i.e., indoor operation.
81 */
68enum reg_request_treatment { 82enum reg_request_treatment {
69 REG_REQ_OK, 83 REG_REQ_OK,
70 REG_REQ_IGNORE, 84 REG_REQ_IGNORE,
71 REG_REQ_INTERSECT, 85 REG_REQ_INTERSECT,
72 REG_REQ_ALREADY_SET, 86 REG_REQ_ALREADY_SET,
87 REG_REQ_USER_HINT_HANDLED,
73}; 88};
74 89
75static struct regulatory_request core_request_world = { 90static struct regulatory_request core_request_world = {
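The new REG_REQ_USER_HINT_HANDLED value is terminal in the same way as REG_REQ_IGNORE and REG_REQ_ALREADY_SET: the request is consumed without ever becoming last_request. The dispatch shape that the later hunks implement, collected into one sketch (not the literal kernel code):

    switch (treatment) {
    case REG_REQ_OK:
        break;                        /* keep processing the hint */
    case REG_REQ_IGNORE:
    case REG_REQ_ALREADY_SET:
    case REG_REQ_USER_HINT_HANDLED:
        reg_free_request(request);    /* consumed; nothing to install */
        return treatment;
    case REG_REQ_INTERSECT:
        /* intersect with the current regdomain, then continue */
        break;
    }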
@@ -106,6 +121,14 @@ const struct ieee80211_regdomain __rcu *cfg80211_regdomain;
106 */ 121 */
107static int reg_num_devs_support_basehint; 122static int reg_num_devs_support_basehint;
108 123
124/*
 125 * State variable indicating whether the platform to which the devices
 126 * are attached is operating in an indoor environment. The state variable
 127 * is relevant for all registered devices.
128 * (protected by RTNL)
129 */
130static bool reg_is_indoor;
131
109static const struct ieee80211_regdomain *get_cfg80211_regdom(void) 132static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
110{ 133{
111 return rtnl_dereference(cfg80211_regdomain); 134 return rtnl_dereference(cfg80211_regdomain);
@@ -240,8 +263,16 @@ static char user_alpha2[2];
240module_param(ieee80211_regdom, charp, 0444); 263module_param(ieee80211_regdom, charp, 0444);
241MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code"); 264MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
242 265
243static void reg_free_request(struct regulatory_request *lr) 266static void reg_free_request(struct regulatory_request *request)
244{ 267{
268 if (request != get_last_request())
269 kfree(request);
270}
271
272static void reg_free_last_request(void)
273{
274 struct regulatory_request *lr = get_last_request();
275
245 if (lr != &core_request_world && lr) 276 if (lr != &core_request_world && lr)
246 kfree_rcu(lr, rcu_head); 277 kfree_rcu(lr, rcu_head);
247} 278}
@@ -254,7 +285,7 @@ static void reg_update_last_request(struct regulatory_request *request)
254 if (lr == request) 285 if (lr == request)
255 return; 286 return;
256 287
257 reg_free_request(lr); 288 reg_free_last_request();
258 rcu_assign_pointer(last_request, request); 289 rcu_assign_pointer(last_request, request);
259} 290}
260 291
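The reg_free_request()/reg_free_last_request() split reflects two object lifetimes: a request that was never installed can be kfree()d immediately, while last_request is published to RCU readers and must wait out a grace period. Condensed, under the same assumptions as the hunk above:

    /* Never-published request: immediate free is safe, but the
     * currently published pointer must never be freed from here. */
    if (request != get_last_request())
        kfree(request);

    /* Published request: readers may still hold it, defer the free. */
    if (lr && lr != &core_request_world)
        kfree_rcu(lr, rcu_head);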
@@ -873,6 +904,8 @@ static u32 map_regdom_flags(u32 rd_flags)
873 channel_flags |= IEEE80211_CHAN_RADAR; 904 channel_flags |= IEEE80211_CHAN_RADAR;
874 if (rd_flags & NL80211_RRF_NO_OFDM) 905 if (rd_flags & NL80211_RRF_NO_OFDM)
875 channel_flags |= IEEE80211_CHAN_NO_OFDM; 906 channel_flags |= IEEE80211_CHAN_NO_OFDM;
907 if (rd_flags & NL80211_RRF_NO_OUTDOOR)
908 channel_flags |= IEEE80211_CHAN_INDOOR_ONLY;
876 return channel_flags; 909 return channel_flags;
877} 910}
878 911
@@ -902,7 +935,7 @@ freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
902 if (!band_rule_found) 935 if (!band_rule_found)
903 band_rule_found = freq_in_rule_band(fr, center_freq); 936 band_rule_found = freq_in_rule_band(fr, center_freq);
904 937
905 bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(20)); 938 bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(5));
906 939
907 if (band_rule_found && bw_fits) 940 if (band_rule_found && bw_fits)
908 return rr; 941 return rr;
@@ -986,10 +1019,10 @@ static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
986} 1019}
987#endif 1020#endif
988 1021
989/* 1022/* Find an ieee80211_reg_rule such that a 5MHz channel with frequency
990 * Note that right now we assume the desired channel bandwidth 1023 * chan->center_freq fits there.
991 * is always 20 MHz for each individual channel (HT40 uses 20 MHz 1024 * If there is no such reg_rule, disable the channel, otherwise set the
992 * per channel, the primary and the extension channel). 1025 * flags corresponding to the bandwidths allowed in the particular reg_rule
993 */ 1026 */
994static void handle_channel(struct wiphy *wiphy, 1027static void handle_channel(struct wiphy *wiphy,
995 enum nl80211_reg_initiator initiator, 1028 enum nl80211_reg_initiator initiator,
@@ -1050,8 +1083,12 @@ static void handle_channel(struct wiphy *wiphy,
1050 if (reg_rule->flags & NL80211_RRF_AUTO_BW) 1083 if (reg_rule->flags & NL80211_RRF_AUTO_BW)
1051 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule); 1084 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
1052 1085
1086 if (max_bandwidth_khz < MHZ_TO_KHZ(10))
1087 bw_flags = IEEE80211_CHAN_NO_10MHZ;
1088 if (max_bandwidth_khz < MHZ_TO_KHZ(20))
1089 bw_flags |= IEEE80211_CHAN_NO_20MHZ;
1053 if (max_bandwidth_khz < MHZ_TO_KHZ(40)) 1090 if (max_bandwidth_khz < MHZ_TO_KHZ(40))
1054 bw_flags = IEEE80211_CHAN_NO_HT40; 1091 bw_flags |= IEEE80211_CHAN_NO_HT40;
1055 if (max_bandwidth_khz < MHZ_TO_KHZ(80)) 1092 if (max_bandwidth_khz < MHZ_TO_KHZ(80))
1056 bw_flags |= IEEE80211_CHAN_NO_80MHZ; 1093 bw_flags |= IEEE80211_CHAN_NO_80MHZ;
1057 if (max_bandwidth_khz < MHZ_TO_KHZ(160)) 1094 if (max_bandwidth_khz < MHZ_TO_KHZ(160))
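Together with the freq_reg_info_regd() change above (matching rules down to a 5 MHz channel instead of assuming 20 MHz), handle_channel() can now express 10 MHz and 20 MHz prohibitions as well. The mapping in isolation, assuming max_bandwidth_khz has already been resolved (including any NL80211_RRF_AUTO_BW expansion):

    static u32 bw_flags_from_max(u32 max_bandwidth_khz)
    {
        u32 bw_flags = 0;

        if (max_bandwidth_khz < MHZ_TO_KHZ(10))
            bw_flags |= IEEE80211_CHAN_NO_10MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(20))
            bw_flags |= IEEE80211_CHAN_NO_20MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(40))
            bw_flags |= IEEE80211_CHAN_NO_HT40;
        if (max_bandwidth_khz < MHZ_TO_KHZ(80))
            bw_flags |= IEEE80211_CHAN_NO_80MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(160))
            bw_flags |= IEEE80211_CHAN_NO_160MHZ;

        /* e.g. a 20 MHz rule forbids HT40, 80 and 160 MHz operation */
        return bw_flags;
    }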
@@ -1071,6 +1108,13 @@ static void handle_channel(struct wiphy *wiphy,
1071 (int) MBI_TO_DBI(power_rule->max_antenna_gain); 1108 (int) MBI_TO_DBI(power_rule->max_antenna_gain);
1072 chan->max_reg_power = chan->max_power = chan->orig_mpwr = 1109 chan->max_reg_power = chan->max_power = chan->orig_mpwr =
1073 (int) MBM_TO_DBM(power_rule->max_eirp); 1110 (int) MBM_TO_DBM(power_rule->max_eirp);
1111
1112 if (chan->flags & IEEE80211_CHAN_RADAR) {
1113 chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
1114 if (reg_rule->dfs_cac_ms)
1115 chan->dfs_cac_ms = reg_rule->dfs_cac_ms;
1116 }
1117
1074 return; 1118 return;
1075 } 1119 }
1076 1120
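The radar branch gives each DFS channel an explicit CAC duration rather than leaving callers to assume one global constant; a regulatory rule may mandate a longer check (weather-radar bands, for instance). The defaulting logic as a standalone helper (a sketch, not upstream code):

    static unsigned int dfs_cac_time_ms(const struct ieee80211_channel *chan,
                                        const struct ieee80211_reg_rule *rule)
    {
        if (!(chan->flags & IEEE80211_CHAN_RADAR))
            return 0; /* no CAC needed on non-radar channels */

        /* the rule may override the minimum with a longer check */
        return rule->dfs_cac_ms ?: IEEE80211_DFS_MIN_CAC_TIME_MS;
    }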
@@ -1126,12 +1170,19 @@ static bool reg_request_cell_base(struct regulatory_request *request)
1126 return request->user_reg_hint_type == NL80211_USER_REG_HINT_CELL_BASE; 1170 return request->user_reg_hint_type == NL80211_USER_REG_HINT_CELL_BASE;
1127} 1171}
1128 1172
1173static bool reg_request_indoor(struct regulatory_request *request)
1174{
1175 if (request->initiator != NL80211_REGDOM_SET_BY_USER)
1176 return false;
1177 return request->user_reg_hint_type == NL80211_USER_REG_HINT_INDOOR;
1178}
1179
1129bool reg_last_request_cell_base(void) 1180bool reg_last_request_cell_base(void)
1130{ 1181{
1131 return reg_request_cell_base(get_last_request()); 1182 return reg_request_cell_base(get_last_request());
1132} 1183}
1133 1184
1134#ifdef CONFIG_CFG80211_CERTIFICATION_ONUS 1185#ifdef CONFIG_CFG80211_REG_CELLULAR_HINTS
1135/* Core specific check */ 1186/* Core specific check */
1136static enum reg_request_treatment 1187static enum reg_request_treatment
1137reg_ignore_cell_hint(struct regulatory_request *pending_request) 1188reg_ignore_cell_hint(struct regulatory_request *pending_request)
@@ -1471,8 +1522,12 @@ static void handle_channel_custom(struct wiphy *wiphy,
1471 if (reg_rule->flags & NL80211_RRF_AUTO_BW) 1522 if (reg_rule->flags & NL80211_RRF_AUTO_BW)
1472 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule); 1523 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
1473 1524
1525 if (max_bandwidth_khz < MHZ_TO_KHZ(10))
1526 bw_flags = IEEE80211_CHAN_NO_10MHZ;
1527 if (max_bandwidth_khz < MHZ_TO_KHZ(20))
1528 bw_flags |= IEEE80211_CHAN_NO_20MHZ;
1474 if (max_bandwidth_khz < MHZ_TO_KHZ(40)) 1529 if (max_bandwidth_khz < MHZ_TO_KHZ(40))
1475 bw_flags = IEEE80211_CHAN_NO_HT40; 1530 bw_flags |= IEEE80211_CHAN_NO_HT40;
1476 if (max_bandwidth_khz < MHZ_TO_KHZ(80)) 1531 if (max_bandwidth_khz < MHZ_TO_KHZ(80))
1477 bw_flags |= IEEE80211_CHAN_NO_80MHZ; 1532 bw_flags |= IEEE80211_CHAN_NO_80MHZ;
1478 if (max_bandwidth_khz < MHZ_TO_KHZ(160)) 1533 if (max_bandwidth_khz < MHZ_TO_KHZ(160))
@@ -1568,6 +1623,11 @@ __reg_process_hint_user(struct regulatory_request *user_request)
1568{ 1623{
1569 struct regulatory_request *lr = get_last_request(); 1624 struct regulatory_request *lr = get_last_request();
1570 1625
1626 if (reg_request_indoor(user_request)) {
1627 reg_is_indoor = true;
1628 return REG_REQ_USER_HINT_HANDLED;
1629 }
1630
1571 if (reg_request_cell_base(user_request)) 1631 if (reg_request_cell_base(user_request))
1572 return reg_ignore_cell_hint(user_request); 1632 return reg_ignore_cell_hint(user_request);
1573 1633
@@ -1615,8 +1675,9 @@ reg_process_hint_user(struct regulatory_request *user_request)
1615 1675
1616 treatment = __reg_process_hint_user(user_request); 1676 treatment = __reg_process_hint_user(user_request);
1617 if (treatment == REG_REQ_IGNORE || 1677 if (treatment == REG_REQ_IGNORE ||
1618 treatment == REG_REQ_ALREADY_SET) { 1678 treatment == REG_REQ_ALREADY_SET ||
1619 kfree(user_request); 1679 treatment == REG_REQ_USER_HINT_HANDLED) {
1680 reg_free_request(user_request);
1620 return treatment; 1681 return treatment;
1621 } 1682 }
1622 1683
@@ -1676,14 +1737,15 @@ reg_process_hint_driver(struct wiphy *wiphy,
1676 case REG_REQ_OK: 1737 case REG_REQ_OK:
1677 break; 1738 break;
1678 case REG_REQ_IGNORE: 1739 case REG_REQ_IGNORE:
1679 kfree(driver_request); 1740 case REG_REQ_USER_HINT_HANDLED:
1741 reg_free_request(driver_request);
1680 return treatment; 1742 return treatment;
1681 case REG_REQ_INTERSECT: 1743 case REG_REQ_INTERSECT:
1682 /* fall through */ 1744 /* fall through */
1683 case REG_REQ_ALREADY_SET: 1745 case REG_REQ_ALREADY_SET:
1684 regd = reg_copy_regd(get_cfg80211_regdom()); 1746 regd = reg_copy_regd(get_cfg80211_regdom());
1685 if (IS_ERR(regd)) { 1747 if (IS_ERR(regd)) {
1686 kfree(driver_request); 1748 reg_free_request(driver_request);
1687 return REG_REQ_IGNORE; 1749 return REG_REQ_IGNORE;
1688 } 1750 }
1689 rcu_assign_pointer(wiphy->regd, regd); 1751 rcu_assign_pointer(wiphy->regd, regd);
@@ -1775,12 +1837,13 @@ reg_process_hint_country_ie(struct wiphy *wiphy,
1775 case REG_REQ_OK: 1837 case REG_REQ_OK:
1776 break; 1838 break;
1777 case REG_REQ_IGNORE: 1839 case REG_REQ_IGNORE:
1840 case REG_REQ_USER_HINT_HANDLED:
1778 /* fall through */ 1841 /* fall through */
1779 case REG_REQ_ALREADY_SET: 1842 case REG_REQ_ALREADY_SET:
1780 kfree(country_ie_request); 1843 reg_free_request(country_ie_request);
1781 return treatment; 1844 return treatment;
1782 case REG_REQ_INTERSECT: 1845 case REG_REQ_INTERSECT:
1783 kfree(country_ie_request); 1846 reg_free_request(country_ie_request);
1784 /* 1847 /*
1785 * This doesn't happen yet, not sure we 1848 * This doesn't happen yet, not sure we
1786 * ever want to support it for this case. 1849 * ever want to support it for this case.
@@ -1813,7 +1876,8 @@ static void reg_process_hint(struct regulatory_request *reg_request)
1813 case NL80211_REGDOM_SET_BY_USER: 1876 case NL80211_REGDOM_SET_BY_USER:
1814 treatment = reg_process_hint_user(reg_request); 1877 treatment = reg_process_hint_user(reg_request);
1815 if (treatment == REG_REQ_IGNORE || 1878 if (treatment == REG_REQ_IGNORE ||
1816 treatment == REG_REQ_ALREADY_SET) 1879 treatment == REG_REQ_ALREADY_SET ||
1880 treatment == REG_REQ_USER_HINT_HANDLED)
1817 return; 1881 return;
1818 queue_delayed_work(system_power_efficient_wq, 1882 queue_delayed_work(system_power_efficient_wq,
1819 &reg_timeout, msecs_to_jiffies(3142)); 1883 &reg_timeout, msecs_to_jiffies(3142));
@@ -1841,7 +1905,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
1841 return; 1905 return;
1842 1906
1843out_free: 1907out_free:
1844 kfree(reg_request); 1908 reg_free_request(reg_request);
1845} 1909}
1846 1910
1847/* 1911/*
@@ -1857,7 +1921,7 @@ static void reg_process_pending_hints(void)
1857 1921
1858 /* When last_request->processed becomes true this will be rescheduled */ 1922 /* When last_request->processed becomes true this will be rescheduled */
1859 if (lr && !lr->processed) { 1923 if (lr && !lr->processed) {
1860 REG_DBG_PRINT("Pending regulatory request, waiting for it to be processed...\n"); 1924 reg_process_hint(lr);
1861 return; 1925 return;
1862 } 1926 }
1863 1927
@@ -1967,6 +2031,22 @@ int regulatory_hint_user(const char *alpha2,
1967 return 0; 2031 return 0;
1968} 2032}
1969 2033
2034int regulatory_hint_indoor_user(void)
2035{
2036 struct regulatory_request *request;
2037
2038 request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
2039 if (!request)
2040 return -ENOMEM;
2041
2042 request->wiphy_idx = WIPHY_IDX_INVALID;
2043 request->initiator = NL80211_REGDOM_SET_BY_USER;
2044 request->user_reg_hint_type = NL80211_USER_REG_HINT_INDOOR;
2045 queue_regulatory_request(request);
2046
2047 return 0;
2048}
2049
1970/* Driver hints */ 2050/* Driver hints */
1971int regulatory_hint(struct wiphy *wiphy, const char *alpha2) 2051int regulatory_hint(struct wiphy *wiphy, const char *alpha2)
1972{ 2052{
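regulatory_hint_indoor_user() reuses the queue-and-process path of the other user hints but carries no alpha2 at all; that is precisely the case __reg_process_hint_user() short-circuits with REG_REQ_USER_HINT_HANDLED. A hypothetical caller (illustrative glue, not the upstream nl80211 handler):

    /* Hypothetical: forward a userspace "indoor" hint into regulatory. */
    static int demo_set_indoor_hint(void)
    {
        /* queues an NL80211_USER_REG_HINT_INDOOR request; reg_is_indoor
         * becomes true once the regulatory workqueue processes it */
        return regulatory_hint_indoor_user();
    }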
@@ -2134,6 +2214,8 @@ static void restore_regulatory_settings(bool reset_user)
2134 2214
2135 ASSERT_RTNL(); 2215 ASSERT_RTNL();
2136 2216
2217 reg_is_indoor = false;
2218
2137 reset_regdomains(true, &world_regdom); 2219 reset_regdomains(true, &world_regdom);
2138 restore_alpha2(alpha2, reset_user); 2220 restore_alpha2(alpha2, reset_user);
2139 2221
@@ -2594,7 +2676,7 @@ void wiphy_regulatory_deregister(struct wiphy *wiphy)
2594 reg_num_devs_support_basehint--; 2676 reg_num_devs_support_basehint--;
2595 2677
2596 rcu_free_regdom(get_wiphy_regdom(wiphy)); 2678 rcu_free_regdom(get_wiphy_regdom(wiphy));
2597 rcu_assign_pointer(wiphy->regd, NULL); 2679 RCU_INIT_POINTER(wiphy->regd, NULL);
2598 2680
2599 if (lr) 2681 if (lr)
2600 request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx); 2682 request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx);
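Replacing rcu_assign_pointer() with RCU_INIT_POINTER() here is a small correctness-of-intent fix: the assign variant includes the write barrier needed so readers observe a fully initialized object, but when the published value is NULL there is nothing to order. In general:

    rcu_assign_pointer(wiphy->regd, new_regd); /* publishing data: barrier */
    RCU_INIT_POINTER(wiphy->regd, NULL);       /* publishing NULL: none needed */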
@@ -2614,6 +2696,40 @@ static void reg_timeout_work(struct work_struct *work)
2614 rtnl_unlock(); 2696 rtnl_unlock();
2615} 2697}
2616 2698
2699/*
 2700 * See http://www.fcc.gov/document/5-ghz-unlicensed-spectrum-unii for
 2701 * the U-NII band definitions.
2702 */
2703int cfg80211_get_unii(int freq)
2704{
2705 /* UNII-1 */
2706 if (freq >= 5150 && freq <= 5250)
2707 return 0;
2708
2709 /* UNII-2A */
2710 if (freq > 5250 && freq <= 5350)
2711 return 1;
2712
2713 /* UNII-2B */
2714 if (freq > 5350 && freq <= 5470)
2715 return 2;
2716
2717 /* UNII-2C */
2718 if (freq > 5470 && freq <= 5725)
2719 return 3;
2720
2721 /* UNII-3 */
2722 if (freq > 5725 && freq <= 5825)
2723 return 4;
2724
2725 return -EINVAL;
2726}
2727
2728bool regulatory_indoor_allowed(void)
2729{
2730 return reg_is_indoor;
2731}
2732
2617int __init regulatory_init(void) 2733int __init regulatory_init(void)
2618{ 2734{
2619 int err = 0; 2735 int err = 0;
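cfg80211_get_unii() buckets a 5 GHz center frequency into the FCC's U-NII sub-bands, using a closed lower edge only for U-NII-1. A usage sketch (the helper name is illustrative):

    /* Sketch: UNII-2A (1), UNII-2B (2) and UNII-2C (3) together
     * span 5250-5725 MHz. */
    static bool freq_in_unii_2(int center_freq)
    {
        int unii = cfg80211_get_unii(center_freq);

        return unii >= 1 && unii <= 3;
    }

With this, freq_in_unii_2(5180) is false (U-NII-1) while freq_in_unii_2(5500) is true (U-NII-2C).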
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index 37c180df34b7..5e48031ccb9a 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -25,6 +25,7 @@ enum nl80211_dfs_regions reg_get_dfs_region(struct wiphy *wiphy);
25 25
26int regulatory_hint_user(const char *alpha2, 26int regulatory_hint_user(const char *alpha2,
27 enum nl80211_user_reg_hint_type user_reg_hint_type); 27 enum nl80211_user_reg_hint_type user_reg_hint_type);
28int regulatory_hint_indoor_user(void);
28 29
29void wiphy_regulatory_register(struct wiphy *wiphy); 30void wiphy_regulatory_register(struct wiphy *wiphy);
30void wiphy_regulatory_deregister(struct wiphy *wiphy); 31void wiphy_regulatory_deregister(struct wiphy *wiphy);
@@ -104,4 +105,21 @@ void regulatory_hint_country_ie(struct wiphy *wiphy,
104 */ 105 */
105void regulatory_hint_disconnect(void); 106void regulatory_hint_disconnect(void);
106 107
108/**
109 * cfg80211_get_unii - get the U-NII band for the frequency
110 * @freq: the frequency for which we want to get the UNII band.
 111 *
112 * Get a value specifying the U-NII band frequency belongs to.
113 * U-NII bands are defined by the FCC in C.F.R 47 part 15.
114 *
115 * Returns -EINVAL if freq is invalid, 0 for UNII-1, 1 for UNII-2A,
116 * 2 for UNII-2B, 3 for UNII-2C and 4 for UNII-3.
117 */
118int cfg80211_get_unii(int freq);
119
120/**
121 * regulatory_indoor_allowed - is indoor operation allowed
122 */
123bool regulatory_indoor_allowed(void);
124
107#endif /* __NET_WIRELESS_REG_H */ 125#endif /* __NET_WIRELESS_REG_H */
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 88f108edfb58..0798c62e6085 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -81,10 +81,10 @@ static void bss_free(struct cfg80211_internal_bss *bss)
81 kfree(bss); 81 kfree(bss);
82} 82}
83 83
84static inline void bss_ref_get(struct cfg80211_registered_device *dev, 84static inline void bss_ref_get(struct cfg80211_registered_device *rdev,
85 struct cfg80211_internal_bss *bss) 85 struct cfg80211_internal_bss *bss)
86{ 86{
87 lockdep_assert_held(&dev->bss_lock); 87 lockdep_assert_held(&rdev->bss_lock);
88 88
89 bss->refcount++; 89 bss->refcount++;
90 if (bss->pub.hidden_beacon_bss) { 90 if (bss->pub.hidden_beacon_bss) {
@@ -95,10 +95,10 @@ static inline void bss_ref_get(struct cfg80211_registered_device *dev,
95 } 95 }
96} 96}
97 97
98static inline void bss_ref_put(struct cfg80211_registered_device *dev, 98static inline void bss_ref_put(struct cfg80211_registered_device *rdev,
99 struct cfg80211_internal_bss *bss) 99 struct cfg80211_internal_bss *bss)
100{ 100{
101 lockdep_assert_held(&dev->bss_lock); 101 lockdep_assert_held(&rdev->bss_lock);
102 102
103 if (bss->pub.hidden_beacon_bss) { 103 if (bss->pub.hidden_beacon_bss) {
104 struct cfg80211_internal_bss *hbss; 104 struct cfg80211_internal_bss *hbss;
@@ -114,10 +114,10 @@ static inline void bss_ref_put(struct cfg80211_registered_device *dev,
114 bss_free(bss); 114 bss_free(bss);
115} 115}
116 116
117static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *dev, 117static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev,
118 struct cfg80211_internal_bss *bss) 118 struct cfg80211_internal_bss *bss)
119{ 119{
120 lockdep_assert_held(&dev->bss_lock); 120 lockdep_assert_held(&rdev->bss_lock);
121 121
122 if (!list_empty(&bss->hidden_list)) { 122 if (!list_empty(&bss->hidden_list)) {
123 /* 123 /*
@@ -134,31 +134,31 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *dev,
134 } 134 }
135 135
136 list_del_init(&bss->list); 136 list_del_init(&bss->list);
137 rb_erase(&bss->rbn, &dev->bss_tree); 137 rb_erase(&bss->rbn, &rdev->bss_tree);
138 bss_ref_put(dev, bss); 138 bss_ref_put(rdev, bss);
139 return true; 139 return true;
140} 140}
141 141
142static void __cfg80211_bss_expire(struct cfg80211_registered_device *dev, 142static void __cfg80211_bss_expire(struct cfg80211_registered_device *rdev,
143 unsigned long expire_time) 143 unsigned long expire_time)
144{ 144{
145 struct cfg80211_internal_bss *bss, *tmp; 145 struct cfg80211_internal_bss *bss, *tmp;
146 bool expired = false; 146 bool expired = false;
147 147
148 lockdep_assert_held(&dev->bss_lock); 148 lockdep_assert_held(&rdev->bss_lock);
149 149
150 list_for_each_entry_safe(bss, tmp, &dev->bss_list, list) { 150 list_for_each_entry_safe(bss, tmp, &rdev->bss_list, list) {
151 if (atomic_read(&bss->hold)) 151 if (atomic_read(&bss->hold))
152 continue; 152 continue;
153 if (!time_after(expire_time, bss->ts)) 153 if (!time_after(expire_time, bss->ts))
154 continue; 154 continue;
155 155
156 if (__cfg80211_unlink_bss(dev, bss)) 156 if (__cfg80211_unlink_bss(rdev, bss))
157 expired = true; 157 expired = true;
158 } 158 }
159 159
160 if (expired) 160 if (expired)
161 dev->bss_generation++; 161 rdev->bss_generation++;
162} 162}
163 163
164void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, 164void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
@@ -238,11 +238,11 @@ void __cfg80211_scan_done(struct work_struct *wk)
238void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) 238void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted)
239{ 239{
240 trace_cfg80211_scan_done(request, aborted); 240 trace_cfg80211_scan_done(request, aborted);
241 WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req); 241 WARN_ON(request != wiphy_to_rdev(request->wiphy)->scan_req);
242 242
243 request->aborted = aborted; 243 request->aborted = aborted;
244 request->notified = true; 244 request->notified = true;
245 queue_work(cfg80211_wq, &wiphy_to_dev(request->wiphy)->scan_done_wk); 245 queue_work(cfg80211_wq, &wiphy_to_rdev(request->wiphy)->scan_done_wk);
246} 246}
247EXPORT_SYMBOL(cfg80211_scan_done); 247EXPORT_SYMBOL(cfg80211_scan_done);
248 248
@@ -278,15 +278,15 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy)
278{ 278{
279 trace_cfg80211_sched_scan_results(wiphy); 279 trace_cfg80211_sched_scan_results(wiphy);
280 /* ignore if we're not scanning */ 280 /* ignore if we're not scanning */
281 if (wiphy_to_dev(wiphy)->sched_scan_req) 281 if (wiphy_to_rdev(wiphy)->sched_scan_req)
282 queue_work(cfg80211_wq, 282 queue_work(cfg80211_wq,
283 &wiphy_to_dev(wiphy)->sched_scan_results_wk); 283 &wiphy_to_rdev(wiphy)->sched_scan_results_wk);
284} 284}
285EXPORT_SYMBOL(cfg80211_sched_scan_results); 285EXPORT_SYMBOL(cfg80211_sched_scan_results);
286 286
287void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy) 287void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy)
288{ 288{
289 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 289 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
290 290
291 ASSERT_RTNL(); 291 ASSERT_RTNL();
292 292
@@ -330,21 +330,21 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
330 return 0; 330 return 0;
331} 331}
332 332
333void cfg80211_bss_age(struct cfg80211_registered_device *dev, 333void cfg80211_bss_age(struct cfg80211_registered_device *rdev,
334 unsigned long age_secs) 334 unsigned long age_secs)
335{ 335{
336 struct cfg80211_internal_bss *bss; 336 struct cfg80211_internal_bss *bss;
337 unsigned long age_jiffies = msecs_to_jiffies(age_secs * MSEC_PER_SEC); 337 unsigned long age_jiffies = msecs_to_jiffies(age_secs * MSEC_PER_SEC);
338 338
339 spin_lock_bh(&dev->bss_lock); 339 spin_lock_bh(&rdev->bss_lock);
340 list_for_each_entry(bss, &dev->bss_list, list) 340 list_for_each_entry(bss, &rdev->bss_list, list)
341 bss->ts -= age_jiffies; 341 bss->ts -= age_jiffies;
342 spin_unlock_bh(&dev->bss_lock); 342 spin_unlock_bh(&rdev->bss_lock);
343} 343}
344 344
345void cfg80211_bss_expire(struct cfg80211_registered_device *dev) 345void cfg80211_bss_expire(struct cfg80211_registered_device *rdev)
346{ 346{
347 __cfg80211_bss_expire(dev, jiffies - IEEE80211_SCAN_RESULT_EXPIRE); 347 __cfg80211_bss_expire(rdev, jiffies - IEEE80211_SCAN_RESULT_EXPIRE);
348} 348}
349 349
350const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len) 350const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
@@ -534,32 +534,34 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
534 const u8 *ssid, size_t ssid_len, 534 const u8 *ssid, size_t ssid_len,
535 u16 capa_mask, u16 capa_val) 535 u16 capa_mask, u16 capa_val)
536{ 536{
537 struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy); 537 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
538 struct cfg80211_internal_bss *bss, *res = NULL; 538 struct cfg80211_internal_bss *bss, *res = NULL;
539 unsigned long now = jiffies; 539 unsigned long now = jiffies;
540 540
541 trace_cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len, capa_mask, 541 trace_cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len, capa_mask,
542 capa_val); 542 capa_val);
543 543
544 spin_lock_bh(&dev->bss_lock); 544 spin_lock_bh(&rdev->bss_lock);
545 545
546 list_for_each_entry(bss, &dev->bss_list, list) { 546 list_for_each_entry(bss, &rdev->bss_list, list) {
547 if ((bss->pub.capability & capa_mask) != capa_val) 547 if ((bss->pub.capability & capa_mask) != capa_val)
548 continue; 548 continue;
549 if (channel && bss->pub.channel != channel) 549 if (channel && bss->pub.channel != channel)
550 continue; 550 continue;
551 if (!is_valid_ether_addr(bss->pub.bssid))
552 continue;
551 /* Don't get expired BSS structs */ 553 /* Don't get expired BSS structs */
552 if (time_after(now, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE) && 554 if (time_after(now, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE) &&
553 !atomic_read(&bss->hold)) 555 !atomic_read(&bss->hold))
554 continue; 556 continue;
555 if (is_bss(&bss->pub, bssid, ssid, ssid_len)) { 557 if (is_bss(&bss->pub, bssid, ssid, ssid_len)) {
556 res = bss; 558 res = bss;
557 bss_ref_get(dev, res); 559 bss_ref_get(rdev, res);
558 break; 560 break;
559 } 561 }
560 } 562 }
561 563
562 spin_unlock_bh(&dev->bss_lock); 564 spin_unlock_bh(&rdev->bss_lock);
563 if (!res) 565 if (!res)
564 return NULL; 566 return NULL;
565 trace_cfg80211_return_bss(&res->pub); 567 trace_cfg80211_return_bss(&res->pub);
@@ -567,10 +569,10 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
567} 569}
568EXPORT_SYMBOL(cfg80211_get_bss); 570EXPORT_SYMBOL(cfg80211_get_bss);
569 571
570static void rb_insert_bss(struct cfg80211_registered_device *dev, 572static void rb_insert_bss(struct cfg80211_registered_device *rdev,
571 struct cfg80211_internal_bss *bss) 573 struct cfg80211_internal_bss *bss)
572{ 574{
573 struct rb_node **p = &dev->bss_tree.rb_node; 575 struct rb_node **p = &rdev->bss_tree.rb_node;
574 struct rb_node *parent = NULL; 576 struct rb_node *parent = NULL;
575 struct cfg80211_internal_bss *tbss; 577 struct cfg80211_internal_bss *tbss;
576 int cmp; 578 int cmp;
@@ -593,15 +595,15 @@ static void rb_insert_bss(struct cfg80211_registered_device *dev,
593 } 595 }
594 596
595 rb_link_node(&bss->rbn, parent, p); 597 rb_link_node(&bss->rbn, parent, p);
596 rb_insert_color(&bss->rbn, &dev->bss_tree); 598 rb_insert_color(&bss->rbn, &rdev->bss_tree);
597} 599}
598 600
599static struct cfg80211_internal_bss * 601static struct cfg80211_internal_bss *
600rb_find_bss(struct cfg80211_registered_device *dev, 602rb_find_bss(struct cfg80211_registered_device *rdev,
601 struct cfg80211_internal_bss *res, 603 struct cfg80211_internal_bss *res,
602 enum bss_compare_mode mode) 604 enum bss_compare_mode mode)
603{ 605{
604 struct rb_node *n = dev->bss_tree.rb_node; 606 struct rb_node *n = rdev->bss_tree.rb_node;
605 struct cfg80211_internal_bss *bss; 607 struct cfg80211_internal_bss *bss;
606 int r; 608 int r;
607 609
@@ -620,7 +622,7 @@ rb_find_bss(struct cfg80211_registered_device *dev,
620 return NULL; 622 return NULL;
621} 623}
622 624
623static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev, 625static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
624 struct cfg80211_internal_bss *new) 626 struct cfg80211_internal_bss *new)
625{ 627{
626 const struct cfg80211_bss_ies *ies; 628 const struct cfg80211_bss_ies *ies;
@@ -650,7 +652,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
650 652
651 /* This is the bad part ... */ 653 /* This is the bad part ... */
652 654
653 list_for_each_entry(bss, &dev->bss_list, list) { 655 list_for_each_entry(bss, &rdev->bss_list, list) {
654 if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid)) 656 if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid))
655 continue; 657 continue;
656 if (bss->pub.channel != new->pub.channel) 658 if (bss->pub.channel != new->pub.channel)
@@ -684,7 +686,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
684 686
685/* Returned bss is reference counted and must be cleaned up appropriately. */ 687/* Returned bss is reference counted and must be cleaned up appropriately. */
686static struct cfg80211_internal_bss * 688static struct cfg80211_internal_bss *
687cfg80211_bss_update(struct cfg80211_registered_device *dev, 689cfg80211_bss_update(struct cfg80211_registered_device *rdev,
688 struct cfg80211_internal_bss *tmp, 690 struct cfg80211_internal_bss *tmp,
689 bool signal_valid) 691 bool signal_valid)
690{ 692{
@@ -695,14 +697,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
695 697
696 tmp->ts = jiffies; 698 tmp->ts = jiffies;
697 699
698 spin_lock_bh(&dev->bss_lock); 700 spin_lock_bh(&rdev->bss_lock);
699 701
700 if (WARN_ON(!rcu_access_pointer(tmp->pub.ies))) { 702 if (WARN_ON(!rcu_access_pointer(tmp->pub.ies))) {
701 spin_unlock_bh(&dev->bss_lock); 703 spin_unlock_bh(&rdev->bss_lock);
702 return NULL; 704 return NULL;
703 } 705 }
704 706
705 found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR); 707 found = rb_find_bss(rdev, tmp, BSS_CMP_REGULAR);
706 708
707 if (found) { 709 if (found) {
708 /* Update IEs */ 710 /* Update IEs */
@@ -789,7 +791,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
789 * is allocated on the stack since it's not needed in the 791 * is allocated on the stack since it's not needed in the
790 * more common case of an update 792 * more common case of an update
791 */ 793 */
792 new = kzalloc(sizeof(*new) + dev->wiphy.bss_priv_size, 794 new = kzalloc(sizeof(*new) + rdev->wiphy.bss_priv_size,
793 GFP_ATOMIC); 795 GFP_ATOMIC);
794 if (!new) { 796 if (!new) {
795 ies = (void *)rcu_dereference(tmp->pub.beacon_ies); 797 ies = (void *)rcu_dereference(tmp->pub.beacon_ies);
@@ -805,9 +807,9 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
805 INIT_LIST_HEAD(&new->hidden_list); 807 INIT_LIST_HEAD(&new->hidden_list);
806 808
807 if (rcu_access_pointer(tmp->pub.proberesp_ies)) { 809 if (rcu_access_pointer(tmp->pub.proberesp_ies)) {
808 hidden = rb_find_bss(dev, tmp, BSS_CMP_HIDE_ZLEN); 810 hidden = rb_find_bss(rdev, tmp, BSS_CMP_HIDE_ZLEN);
809 if (!hidden) 811 if (!hidden)
810 hidden = rb_find_bss(dev, tmp, 812 hidden = rb_find_bss(rdev, tmp,
811 BSS_CMP_HIDE_NUL); 813 BSS_CMP_HIDE_NUL);
812 if (hidden) { 814 if (hidden) {
813 new->pub.hidden_beacon_bss = &hidden->pub; 815 new->pub.hidden_beacon_bss = &hidden->pub;
@@ -824,24 +826,24 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
824 * expensive search for any probe responses that should 826 * expensive search for any probe responses that should
825 * be grouped with this beacon for updates ... 827 * be grouped with this beacon for updates ...
826 */ 828 */
827 if (!cfg80211_combine_bsses(dev, new)) { 829 if (!cfg80211_combine_bsses(rdev, new)) {
828 kfree(new); 830 kfree(new);
829 goto drop; 831 goto drop;
830 } 832 }
831 } 833 }
832 834
833 list_add_tail(&new->list, &dev->bss_list); 835 list_add_tail(&new->list, &rdev->bss_list);
834 rb_insert_bss(dev, new); 836 rb_insert_bss(rdev, new);
835 found = new; 837 found = new;
836 } 838 }
837 839
838 dev->bss_generation++; 840 rdev->bss_generation++;
839 bss_ref_get(dev, found); 841 bss_ref_get(rdev, found);
840 spin_unlock_bh(&dev->bss_lock); 842 spin_unlock_bh(&rdev->bss_lock);
841 843
842 return found; 844 return found;
843 drop: 845 drop:
844 spin_unlock_bh(&dev->bss_lock); 846 spin_unlock_bh(&rdev->bss_lock);
845 return NULL; 847 return NULL;
846} 848}
847 849
@@ -889,6 +891,7 @@ cfg80211_inform_bss_width(struct wiphy *wiphy,
889 struct cfg80211_bss_ies *ies; 891 struct cfg80211_bss_ies *ies;
890 struct ieee80211_channel *channel; 892 struct ieee80211_channel *channel;
891 struct cfg80211_internal_bss tmp = {}, *res; 893 struct cfg80211_internal_bss tmp = {}, *res;
894 bool signal_valid;
892 895
893 if (WARN_ON(!wiphy)) 896 if (WARN_ON(!wiphy))
894 return NULL; 897 return NULL;
@@ -925,8 +928,9 @@ cfg80211_inform_bss_width(struct wiphy *wiphy,
925 rcu_assign_pointer(tmp.pub.beacon_ies, ies); 928 rcu_assign_pointer(tmp.pub.beacon_ies, ies);
926 rcu_assign_pointer(tmp.pub.ies, ies); 929 rcu_assign_pointer(tmp.pub.ies, ies);
927 930
928 res = cfg80211_bss_update(wiphy_to_dev(wiphy), &tmp, 931 signal_valid = abs(rx_channel->center_freq - channel->center_freq) <=
929 rx_channel == channel); 932 wiphy->max_adj_channel_rssi_comp;
933 res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp, signal_valid);
930 if (!res) 934 if (!res)
931 return NULL; 935 return NULL;
932 936
@@ -950,6 +954,7 @@ cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
950 struct cfg80211_internal_bss tmp = {}, *res; 954 struct cfg80211_internal_bss tmp = {}, *res;
951 struct cfg80211_bss_ies *ies; 955 struct cfg80211_bss_ies *ies;
952 struct ieee80211_channel *channel; 956 struct ieee80211_channel *channel;
957 bool signal_valid;
953 size_t ielen = len - offsetof(struct ieee80211_mgmt, 958 size_t ielen = len - offsetof(struct ieee80211_mgmt,
954 u.probe_resp.variable); 959 u.probe_resp.variable);
955 960
@@ -997,8 +1002,9 @@ cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
997 tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int); 1002 tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
998 tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info); 1003 tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
999 1004
1000 res = cfg80211_bss_update(wiphy_to_dev(wiphy), &tmp, 1005 signal_valid = abs(rx_channel->center_freq - channel->center_freq) <=
1001 rx_channel == channel); 1006 wiphy->max_adj_channel_rssi_comp;
1007 res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp, signal_valid);
1002 if (!res) 1008 if (!res)
1003 return NULL; 1009 return NULL;
1004 1010
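Both cfg80211_inform_bss_width() and the frame-based variant now compute signal_valid identically: instead of demanding an exact channel match, the reported signal is accepted when the receive channel lies within wiphy->max_adj_channel_rssi_comp MHz of the BSS's actual channel. The predicate in isolation:

    /* Sketch: may the RSSI from this frame be attached to the BSS entry? */
    static bool rx_signal_valid(const struct wiphy *wiphy,
                                const struct ieee80211_channel *rx_chan,
                                const struct ieee80211_channel *bss_chan)
    {
        /* max_adj_channel_rssi_comp is 0 for drivers that never
         * report frames received on an adjacent channel */
        return abs(rx_chan->center_freq - bss_chan->center_freq) <=
               wiphy->max_adj_channel_rssi_comp;
    }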
@@ -1013,7 +1019,7 @@ EXPORT_SYMBOL(cfg80211_inform_bss_width_frame);
1013 1019
1014void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub) 1020void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
1015{ 1021{
1016 struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy); 1022 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
1017 struct cfg80211_internal_bss *bss; 1023 struct cfg80211_internal_bss *bss;
1018 1024
1019 if (!pub) 1025 if (!pub)
@@ -1021,15 +1027,15 @@ void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
1021 1027
1022 bss = container_of(pub, struct cfg80211_internal_bss, pub); 1028 bss = container_of(pub, struct cfg80211_internal_bss, pub);
1023 1029
1024 spin_lock_bh(&dev->bss_lock); 1030 spin_lock_bh(&rdev->bss_lock);
1025 bss_ref_get(dev, bss); 1031 bss_ref_get(rdev, bss);
1026 spin_unlock_bh(&dev->bss_lock); 1032 spin_unlock_bh(&rdev->bss_lock);
1027} 1033}
1028EXPORT_SYMBOL(cfg80211_ref_bss); 1034EXPORT_SYMBOL(cfg80211_ref_bss);
1029 1035
1030void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *pub) 1036void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
1031{ 1037{
1032 struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy); 1038 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
1033 struct cfg80211_internal_bss *bss; 1039 struct cfg80211_internal_bss *bss;
1034 1040
1035 if (!pub) 1041 if (!pub)
@@ -1037,15 +1043,15 @@ void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
1037 1043
1038 bss = container_of(pub, struct cfg80211_internal_bss, pub); 1044 bss = container_of(pub, struct cfg80211_internal_bss, pub);
1039 1045
1040 spin_lock_bh(&dev->bss_lock); 1046 spin_lock_bh(&rdev->bss_lock);
1041 bss_ref_put(dev, bss); 1047 bss_ref_put(rdev, bss);
1042 spin_unlock_bh(&dev->bss_lock); 1048 spin_unlock_bh(&rdev->bss_lock);
1043} 1049}
1044EXPORT_SYMBOL(cfg80211_put_bss); 1050EXPORT_SYMBOL(cfg80211_put_bss);
1045 1051
1046void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub) 1052void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
1047{ 1053{
1048 struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy); 1054 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
1049 struct cfg80211_internal_bss *bss; 1055 struct cfg80211_internal_bss *bss;
1050 1056
1051 if (WARN_ON(!pub)) 1057 if (WARN_ON(!pub))
@@ -1053,12 +1059,12 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
1053 1059
1054 bss = container_of(pub, struct cfg80211_internal_bss, pub); 1060 bss = container_of(pub, struct cfg80211_internal_bss, pub);
1055 1061
1056 spin_lock_bh(&dev->bss_lock); 1062 spin_lock_bh(&rdev->bss_lock);
1057 if (!list_empty(&bss->list)) { 1063 if (!list_empty(&bss->list)) {
1058 if (__cfg80211_unlink_bss(dev, bss)) 1064 if (__cfg80211_unlink_bss(rdev, bss))
1059 dev->bss_generation++; 1065 rdev->bss_generation++;
1060 } 1066 }
1061 spin_unlock_bh(&dev->bss_lock); 1067 spin_unlock_bh(&rdev->bss_lock);
1062} 1068}
1063EXPORT_SYMBOL(cfg80211_unlink_bss); 1069EXPORT_SYMBOL(cfg80211_unlink_bss);
1064 1070
@@ -1075,7 +1081,7 @@ cfg80211_get_dev_from_ifindex(struct net *net, int ifindex)
1075 if (!dev) 1081 if (!dev)
1076 return ERR_PTR(-ENODEV); 1082 return ERR_PTR(-ENODEV);
1077 if (dev->ieee80211_ptr) 1083 if (dev->ieee80211_ptr)
1078 rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy); 1084 rdev = wiphy_to_rdev(dev->ieee80211_ptr->wiphy);
1079 else 1085 else
1080 rdev = ERR_PTR(-ENODEV); 1086 rdev = ERR_PTR(-ENODEV);
1081 dev_put(dev); 1087 dev_put(dev);
@@ -1155,7 +1161,11 @@ int cfg80211_wext_siwscan(struct net_device *dev,
1155 int k; 1161 int k;
1156 int wiphy_freq = wiphy->bands[band]->channels[j].center_freq; 1162 int wiphy_freq = wiphy->bands[band]->channels[j].center_freq;
1157 for (k = 0; k < wreq->num_channels; k++) { 1163 for (k = 0; k < wreq->num_channels; k++) {
1158 int wext_freq = cfg80211_wext_freq(wiphy, &wreq->channel_list[k]); 1164 struct iw_freq *freq =
1165 &wreq->channel_list[k];
1166 int wext_freq =
1167 cfg80211_wext_freq(freq);
1168
1159 if (wext_freq == wiphy_freq) 1169 if (wext_freq == wiphy_freq)
1160 goto wext_freq_found; 1170 goto wext_freq_found;
1161 } 1171 }
@@ -1467,7 +1477,7 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
1467} 1477}
1468 1478
1469 1479
1470static int ieee80211_scan_results(struct cfg80211_registered_device *dev, 1480static int ieee80211_scan_results(struct cfg80211_registered_device *rdev,
1471 struct iw_request_info *info, 1481 struct iw_request_info *info,
1472 char *buf, size_t len) 1482 char *buf, size_t len)
1473{ 1483{
@@ -1475,18 +1485,18 @@ static int ieee80211_scan_results(struct cfg80211_registered_device *dev,
1475 char *end_buf = buf + len; 1485 char *end_buf = buf + len;
1476 struct cfg80211_internal_bss *bss; 1486 struct cfg80211_internal_bss *bss;
1477 1487
1478 spin_lock_bh(&dev->bss_lock); 1488 spin_lock_bh(&rdev->bss_lock);
1479 cfg80211_bss_expire(dev); 1489 cfg80211_bss_expire(rdev);
1480 1490
1481 list_for_each_entry(bss, &dev->bss_list, list) { 1491 list_for_each_entry(bss, &rdev->bss_list, list) {
1482 if (buf + len - current_ev <= IW_EV_ADDR_LEN) { 1492 if (buf + len - current_ev <= IW_EV_ADDR_LEN) {
1483 spin_unlock_bh(&dev->bss_lock); 1493 spin_unlock_bh(&rdev->bss_lock);
1484 return -E2BIG; 1494 return -E2BIG;
1485 } 1495 }
1486 current_ev = ieee80211_bss(&dev->wiphy, info, bss, 1496 current_ev = ieee80211_bss(&rdev->wiphy, info, bss,
1487 current_ev, end_buf); 1497 current_ev, end_buf);
1488 } 1498 }
1489 spin_unlock_bh(&dev->bss_lock); 1499 spin_unlock_bh(&rdev->bss_lock);
1490 return current_ev - buf; 1500 return current_ev - buf;
1491} 1501}
1492 1502
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 3546a77033de..8bbeeb302216 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -59,7 +59,7 @@ static void cfg80211_sme_free(struct wireless_dev *wdev)
59 59
60static int cfg80211_conn_scan(struct wireless_dev *wdev) 60static int cfg80211_conn_scan(struct wireless_dev *wdev)
61{ 61{
62 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 62 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
63 struct cfg80211_scan_request *request; 63 struct cfg80211_scan_request *request;
64 int n_channels, err; 64 int n_channels, err;
65 65
@@ -130,7 +130,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
130 130
131static int cfg80211_conn_do_work(struct wireless_dev *wdev) 131static int cfg80211_conn_do_work(struct wireless_dev *wdev)
132{ 132{
133 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 133 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
134 struct cfg80211_connect_params *params; 134 struct cfg80211_connect_params *params;
135 struct cfg80211_assoc_request req = {}; 135 struct cfg80211_assoc_request req = {};
136 int err; 136 int err;
@@ -149,7 +149,8 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
149 case CFG80211_CONN_SCAN_AGAIN: 149 case CFG80211_CONN_SCAN_AGAIN:
150 return cfg80211_conn_scan(wdev); 150 return cfg80211_conn_scan(wdev);
151 case CFG80211_CONN_AUTHENTICATE_NEXT: 151 case CFG80211_CONN_AUTHENTICATE_NEXT:
152 BUG_ON(!rdev->ops->auth); 152 if (WARN_ON(!rdev->ops->auth))
153 return -EOPNOTSUPP;
153 wdev->conn->state = CFG80211_CONN_AUTHENTICATING; 154 wdev->conn->state = CFG80211_CONN_AUTHENTICATING;
154 return cfg80211_mlme_auth(rdev, wdev->netdev, 155 return cfg80211_mlme_auth(rdev, wdev->netdev,
155 params->channel, params->auth_type, 156 params->channel, params->auth_type,
@@ -161,7 +162,8 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
161 case CFG80211_CONN_AUTH_FAILED: 162 case CFG80211_CONN_AUTH_FAILED:
162 return -ENOTCONN; 163 return -ENOTCONN;
163 case CFG80211_CONN_ASSOCIATE_NEXT: 164 case CFG80211_CONN_ASSOCIATE_NEXT:
164 BUG_ON(!rdev->ops->assoc); 165 if (WARN_ON(!rdev->ops->assoc))
166 return -EOPNOTSUPP;
165 wdev->conn->state = CFG80211_CONN_ASSOCIATING; 167 wdev->conn->state = CFG80211_CONN_ASSOCIATING;
166 if (wdev->conn->prev_bssid_valid) 168 if (wdev->conn->prev_bssid_valid)
167 req.prev_bssid = wdev->conn->prev_bssid; 169 req.prev_bssid = wdev->conn->prev_bssid;
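The two BUG_ON() conversions in cfg80211_conn_do_work() follow the usual hardening recipe: a driver advertising SME-based connect without auth/assoc ops deserves a loud warning, not a halted machine. The shape of the change:

    /* before: a missing driver op was fatal */
    BUG_ON(!rdev->ops->auth);

    /* after: warn, then fail just this connect attempt */
    if (WARN_ON(!rdev->ops->auth))
        return -EOPNOTSUPP;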
@@ -244,7 +246,7 @@ void cfg80211_conn_work(struct work_struct *work)
244/* Returned bss is reference counted and must be cleaned up appropriately. */ 246/* Returned bss is reference counted and must be cleaned up appropriately. */
245static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev) 247static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
246{ 248{
247 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 249 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
248 struct cfg80211_bss *bss; 250 struct cfg80211_bss *bss;
249 u16 capa = WLAN_CAPABILITY_ESS; 251 u16 capa = WLAN_CAPABILITY_ESS;
250 252
@@ -274,7 +276,7 @@ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
274static void __cfg80211_sme_scan_done(struct net_device *dev) 276static void __cfg80211_sme_scan_done(struct net_device *dev)
275{ 277{
276 struct wireless_dev *wdev = dev->ieee80211_ptr; 278 struct wireless_dev *wdev = dev->ieee80211_ptr;
277 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 279 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
278 struct cfg80211_bss *bss; 280 struct cfg80211_bss *bss;
279 281
280 ASSERT_WDEV_LOCK(wdev); 282 ASSERT_WDEV_LOCK(wdev);
@@ -305,7 +307,7 @@ void cfg80211_sme_scan_done(struct net_device *dev)
305void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len) 307void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len)
306{ 308{
307 struct wiphy *wiphy = wdev->wiphy; 309 struct wiphy *wiphy = wdev->wiphy;
308 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 310 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
309 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; 311 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
310 u16 status_code = le16_to_cpu(mgmt->u.auth.status_code); 312 u16 status_code = le16_to_cpu(mgmt->u.auth.status_code);
311 313
@@ -351,7 +353,7 @@ void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len)
351 353
352bool cfg80211_sme_rx_assoc_resp(struct wireless_dev *wdev, u16 status) 354bool cfg80211_sme_rx_assoc_resp(struct wireless_dev *wdev, u16 status)
353{ 355{
354 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 356 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
355 357
356 if (!wdev->conn) 358 if (!wdev->conn)
357 return false; 359 return false;
@@ -385,7 +387,7 @@ void cfg80211_sme_deauth(struct wireless_dev *wdev)
385 387
386void cfg80211_sme_auth_timeout(struct wireless_dev *wdev) 388void cfg80211_sme_auth_timeout(struct wireless_dev *wdev)
387{ 389{
388 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 390 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
389 391
390 if (!wdev->conn) 392 if (!wdev->conn)
391 return; 393 return;
@@ -396,7 +398,7 @@ void cfg80211_sme_auth_timeout(struct wireless_dev *wdev)
396 398
397void cfg80211_sme_disassoc(struct wireless_dev *wdev) 399void cfg80211_sme_disassoc(struct wireless_dev *wdev)
398{ 400{
399 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 401 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
400 402
401 if (!wdev->conn) 403 if (!wdev->conn)
402 return; 404 return;
@@ -407,7 +409,7 @@ void cfg80211_sme_disassoc(struct wireless_dev *wdev)
407 409
408void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev) 410void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev)
409{ 411{
410 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 412 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
411 413
412 if (!wdev->conn) 414 if (!wdev->conn)
413 return; 415 return;
@@ -420,7 +422,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
420 struct cfg80211_connect_params *connect, 422 struct cfg80211_connect_params *connect,
421 const u8 *prev_bssid) 423 const u8 *prev_bssid)
422{ 424{
423 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 425 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
424 struct cfg80211_bss *bss; 426 struct cfg80211_bss *bss;
425 int err; 427 int err;
426 428
@@ -467,7 +469,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
467 } 469 }
468 470
469 wdev->conn->params.ssid = wdev->ssid; 471 wdev->conn->params.ssid = wdev->ssid;
470 wdev->conn->params.ssid_len = connect->ssid_len; 472 wdev->conn->params.ssid_len = wdev->ssid_len;
471 473
472 /* see if we have the bss already */ 474 /* see if we have the bss already */
473 bss = cfg80211_get_conn_bss(wdev); 475 bss = cfg80211_get_conn_bss(wdev);
@@ -479,7 +481,6 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
479 481
480 /* we're good if we have a matching bss struct */ 482 /* we're good if we have a matching bss struct */
481 if (bss) { 483 if (bss) {
482 wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
483 err = cfg80211_conn_do_work(wdev); 484 err = cfg80211_conn_do_work(wdev);
484 cfg80211_put_bss(wdev->wiphy, bss); 485 cfg80211_put_bss(wdev->wiphy, bss);
485 } else { 486 } else {
@@ -505,7 +506,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
505 506
506static int cfg80211_sme_disconnect(struct wireless_dev *wdev, u16 reason) 507static int cfg80211_sme_disconnect(struct wireless_dev *wdev, u16 reason)
507{ 508{
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	int err;
 
 	if (!wdev->conn)
@@ -593,7 +594,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
 		return;
 	}
 
-	nl80211_send_connect_result(wiphy_to_dev(wdev->wiphy), dev,
+	nl80211_send_connect_result(wiphy_to_rdev(wdev->wiphy), dev,
 				    bssid, req_ie, req_ie_len,
 				    resp_ie, resp_ie_len,
 				    status, GFP_KERNEL);
@@ -624,7 +625,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
 #endif
 
 	if (!bss && (status == WLAN_STATUS_SUCCESS)) {
-		WARN_ON_ONCE(!wiphy_to_dev(wdev->wiphy)->ops->connect);
+		WARN_ON_ONCE(!wiphy_to_rdev(wdev->wiphy)->ops->connect);
 		bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
 				       wdev->ssid, wdev->ssid_len,
 				       WLAN_CAPABILITY_ESS,
@@ -687,7 +688,7 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
 			     u16 status, gfp_t gfp)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	struct cfg80211_event *ev;
 	unsigned long flags;
 
@@ -742,7 +743,8 @@ void __cfg80211_roamed(struct wireless_dev *wdev,
 	cfg80211_hold_bss(bss_from_pub(bss));
 	wdev->current_bss = bss_from_pub(bss);
 
-	nl80211_send_roamed(wiphy_to_dev(wdev->wiphy), wdev->netdev, bss->bssid,
+	nl80211_send_roamed(wiphy_to_rdev(wdev->wiphy),
+			    wdev->netdev, bss->bssid,
 			    req_ie, req_ie_len, resp_ie, resp_ie_len,
 			    GFP_KERNEL);
 
@@ -801,7 +803,7 @@ void cfg80211_roamed_bss(struct net_device *dev,
 			 size_t resp_ie_len, gfp_t gfp)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	struct cfg80211_event *ev;
 	unsigned long flags;
 
@@ -834,7 +836,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
 			     size_t ie_len, u16 reason, bool from_ap)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	int i;
 #ifdef CONFIG_CFG80211_WEXT
 	union iwreq_data wrqu;
@@ -877,10 +879,10 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
 }
 
 void cfg80211_disconnected(struct net_device *dev, u16 reason,
-			   u8 *ie, size_t ie_len, gfp_t gfp)
+			   const u8 *ie, size_t ie_len, gfp_t gfp)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	struct cfg80211_event *ev;
 	unsigned long flags;
 
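The sme.c hunks above are part of a tree-wide rename of wiphy_to_dev() to wiphy_to_rdev(), making it obvious that the accessor yields the registered device, not a net_device. As a reader aid, a minimal sketch of what such an accessor looks like, assuming the usual container_of() pattern (the authoritative definition lives in net/wireless/core.h and is not shown in this diff):

static inline struct cfg80211_registered_device *
wiphy_to_rdev(struct wiphy *wiphy)
{
	/* the wiphy is embedded inside the registered device */
	return container_of(wiphy, struct cfg80211_registered_device, wiphy);
}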
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index aabccf13e07b..560ed77084e9 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -1876,29 +1876,33 @@ TRACE_EVENT(rdev_channel_switch,
 		WIPHY_ENTRY
 		NETDEV_ENTRY
 		CHAN_DEF_ENTRY
-		__field(u16, counter_offset_beacon)
-		__field(u16, counter_offset_presp)
 		__field(bool, radar_required)
 		__field(bool, block_tx)
 		__field(u8, count)
+		__dynamic_array(u16, bcn_ofs, params->n_counter_offsets_beacon)
+		__dynamic_array(u16, pres_ofs, params->n_counter_offsets_presp)
 	),
 	TP_fast_assign(
 		WIPHY_ASSIGN;
 		NETDEV_ASSIGN;
 		CHAN_DEF_ASSIGN(&params->chandef);
-		__entry->counter_offset_beacon = params->counter_offset_beacon;
-		__entry->counter_offset_presp = params->counter_offset_presp;
 		__entry->radar_required = params->radar_required;
 		__entry->block_tx = params->block_tx;
 		__entry->count = params->count;
+		memcpy(__get_dynamic_array(bcn_ofs),
+		       params->counter_offsets_beacon,
+		       params->n_counter_offsets_beacon * sizeof(u16));
+
+		/* probe response offsets are optional */
+		if (params->n_counter_offsets_presp)
+			memcpy(__get_dynamic_array(pres_ofs),
+			       params->counter_offsets_presp,
+			       params->n_counter_offsets_presp * sizeof(u16));
 	),
 	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT
-		  ", block_tx: %d, count: %u, radar_required: %d"
-		  ", counter offsets (beacon/presp): %u/%u",
+		  ", block_tx: %d, count: %u, radar_required: %d",
 		  WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG,
-		  __entry->block_tx, __entry->count, __entry->radar_required,
-		  __entry->counter_offset_beacon,
-		  __entry->counter_offset_presp)
+		  __entry->block_tx, __entry->count, __entry->radar_required)
 );
 
 TRACE_EVENT(rdev_set_qos_map,
@@ -1919,6 +1923,24 @@ TRACE_EVENT(rdev_set_qos_map,
 		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->num_des)
 );
 
+TRACE_EVENT(rdev_set_ap_chanwidth,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 struct cfg80211_chan_def *chandef),
+	TP_ARGS(wiphy, netdev, chandef),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		CHAN_DEF_ENTRY
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		CHAN_DEF_ASSIGN(chandef);
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT,
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG)
+);
+
 /*************************************************************
  * cfg80211 exported functions traces                       *
  *************************************************************/
@@ -2193,18 +2215,21 @@ TRACE_EVENT(cfg80211_cqm_rssi_notify,
 );
 
 TRACE_EVENT(cfg80211_reg_can_beacon,
-	TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef),
-	TP_ARGS(wiphy, chandef),
+	TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef,
+		 enum nl80211_iftype iftype),
+	TP_ARGS(wiphy, chandef, iftype),
 	TP_STRUCT__entry(
 		WIPHY_ENTRY
 		CHAN_DEF_ENTRY
+		__field(enum nl80211_iftype, iftype)
 	),
 	TP_fast_assign(
 		WIPHY_ASSIGN;
 		CHAN_DEF_ASSIGN(chandef);
+		__entry->iftype = iftype;
 	),
-	TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT,
-		  WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
+	TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d",
+		  WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype)
 );
 
 TRACE_EVENT(cfg80211_chandef_dfs_required,
@@ -2615,6 +2640,21 @@ TRACE_EVENT(cfg80211_ft_event,
 		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(target_ap))
 );
 
+TRACE_EVENT(cfg80211_stop_iface,
+	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
+	TP_ARGS(wiphy, wdev),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		WDEV_ENTRY
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		WDEV_ASSIGN;
+	),
+	TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT,
+		  WIPHY_PR_ARG, WDEV_PR_ARG)
+);
+
 #endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
 
 #undef TRACE_INCLUDE_PATH
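The rdev_channel_switch change above replaces two fixed u16 fields with __dynamic_array() entries, since channel-switch announcements now carry one counter offset per beacon/probe-response counter. For readers unfamiliar with the macro, a stripped-down illustrative event following the same pattern (demo_csa_offsets is made up, not an event in the tree):

TRACE_EVENT(demo_csa_offsets,
	TP_PROTO(const u16 *ofs, int n),
	TP_ARGS(ofs, n),
	TP_STRUCT__entry(
		__field(int, n)
		__dynamic_array(u16, ofs, n)	/* sized per-record at trace time */
	),
	TP_fast_assign(
		__entry->n = n;
		memcpy(__get_dynamic_array(ofs), ofs, n * sizeof(u16));
	),
	TP_printk("n_offsets=%d", __entry->n)
);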
diff --git a/net/wireless/util.c b/net/wireless/util.c
index e5872ff2c27c..728f1c0dc70d 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -476,7 +476,8 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
 EXPORT_SYMBOL(ieee80211_data_to_8023);
 
 int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
-			     enum nl80211_iftype iftype, u8 *bssid, bool qos)
+			     enum nl80211_iftype iftype,
+			     const u8 *bssid, bool qos)
 {
 	struct ieee80211_hdr hdr;
 	u16 hdrlen, ethertype;
@@ -770,7 +771,7 @@ EXPORT_SYMBOL(ieee80211_bss_get_ie);
 
 void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
 {
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	struct net_device *dev = wdev->netdev;
 	int i;
 
@@ -839,6 +840,9 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev)
 			__cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid,
 					       ev->ij.channel);
 			break;
+		case EVENT_STOPPED:
+			__cfg80211_leave(wiphy_to_rdev(wdev->wiphy), wdev);
+			break;
 		}
 		wdev_unlock(wdev);
 
@@ -888,11 +892,6 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
 		return -EBUSY;
 
 	if (ntype != otype && netif_running(dev)) {
-		err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
-						    ntype);
-		if (err)
-			return err;
-
 		dev->ieee80211_ptr->use_4addr = false;
 		dev->ieee80211_ptr->mesh_id_up_len = 0;
 		wdev_lock(dev->ieee80211_ptr);
@@ -1268,6 +1267,120 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
 	return res;
 }
 
+int cfg80211_iter_combinations(struct wiphy *wiphy,
+			       const int num_different_channels,
+			       const u8 radar_detect,
+			       const int iftype_num[NUM_NL80211_IFTYPES],
+			       void (*iter)(const struct ieee80211_iface_combination *c,
+					    void *data),
+			       void *data)
+{
+	const struct ieee80211_regdomain *regdom;
+	enum nl80211_dfs_regions region = 0;
+	int i, j, iftype;
+	int num_interfaces = 0;
+	u32 used_iftypes = 0;
+
+	if (radar_detect) {
+		rcu_read_lock();
+		regdom = rcu_dereference(cfg80211_regdomain);
+		if (regdom)
+			region = regdom->dfs_region;
+		rcu_read_unlock();
+	}
+
+	for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
+		num_interfaces += iftype_num[iftype];
+		if (iftype_num[iftype] > 0 &&
+		    !(wiphy->software_iftypes & BIT(iftype)))
+			used_iftypes |= BIT(iftype);
+	}
+
+	for (i = 0; i < wiphy->n_iface_combinations; i++) {
+		const struct ieee80211_iface_combination *c;
+		struct ieee80211_iface_limit *limits;
+		u32 all_iftypes = 0;
+
+		c = &wiphy->iface_combinations[i];
+
+		if (num_interfaces > c->max_interfaces)
+			continue;
+		if (num_different_channels > c->num_different_channels)
+			continue;
+
+		limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits,
+				 GFP_KERNEL);
+		if (!limits)
+			return -ENOMEM;
+
+		for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
+			if (wiphy->software_iftypes & BIT(iftype))
+				continue;
+			for (j = 0; j < c->n_limits; j++) {
+				all_iftypes |= limits[j].types;
+				if (!(limits[j].types & BIT(iftype)))
+					continue;
+				if (limits[j].max < iftype_num[iftype])
+					goto cont;
+				limits[j].max -= iftype_num[iftype];
+			}
+		}
+
+		if (radar_detect != (c->radar_detect_widths & radar_detect))
+			goto cont;
+
+		if (radar_detect && c->radar_detect_regions &&
+		    !(c->radar_detect_regions & BIT(region)))
+			goto cont;
+
+		/* Finally check that all iftypes that we're currently
+		 * using are actually part of this combination. If they
+		 * aren't then we can't use this combination and have
+		 * to continue to the next.
+		 */
+		if ((all_iftypes & used_iftypes) != used_iftypes)
+			goto cont;
+
+		/* This combination covered all interface types and
+		 * supported the requested numbers, so we're good.
+		 */
+
+		(*iter)(c, data);
+ cont:
+		kfree(limits);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(cfg80211_iter_combinations);
+
+static void
+cfg80211_iter_sum_ifcombs(const struct ieee80211_iface_combination *c,
+			  void *data)
+{
+	int *num = data;
+	(*num)++;
+}
+
+int cfg80211_check_combinations(struct wiphy *wiphy,
+				const int num_different_channels,
+				const u8 radar_detect,
+				const int iftype_num[NUM_NL80211_IFTYPES])
+{
+	int err, num = 0;
+
+	err = cfg80211_iter_combinations(wiphy, num_different_channels,
+					 radar_detect, iftype_num,
+					 cfg80211_iter_sum_ifcombs, &num);
+	if (err)
+		return err;
+	if (num == 0)
+		return -EBUSY;
+
+	return 0;
+}
+EXPORT_SYMBOL(cfg80211_check_combinations);
+
 int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
 				 struct wireless_dev *wdev,
 				 enum nl80211_iftype iftype,
@@ -1276,7 +1389,6 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
 				 u8 radar_detect)
 {
 	struct wireless_dev *wdev_iter;
-	u32 used_iftypes = BIT(iftype);
 	int num[NUM_NL80211_IFTYPES];
 	struct ieee80211_channel
 			*used_channels[CFG80211_MAX_NUM_DIFFERENT_CHANNELS];
@@ -1284,7 +1396,7 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
 	enum cfg80211_chan_mode chmode;
 	int num_different_channels = 0;
 	int total = 1;
-	int i, j;
+	int i;
 
 	ASSERT_RTNL();
 
@@ -1306,6 +1418,11 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
 
 	num[iftype] = 1;
 
+	/* TODO: We'll probably not need this anymore, since this
+	 * should only be called with CHAN_MODE_UNDEFINED. There are
+	 * still a couple of pending calls where other chanmodes are
+	 * used, but we should get rid of them.
+	 */
 	switch (chanmode) {
 	case CHAN_MODE_UNDEFINED:
 		break;
@@ -1369,65 +1486,13 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
 
 		num[wdev_iter->iftype]++;
 		total++;
-		used_iftypes |= BIT(wdev_iter->iftype);
 	}
 
 	if (total == 1 && !radar_detect)
 		return 0;
 
-	for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
-		const struct ieee80211_iface_combination *c;
-		struct ieee80211_iface_limit *limits;
-		u32 all_iftypes = 0;
-
-		c = &rdev->wiphy.iface_combinations[i];
-
-		if (total > c->max_interfaces)
-			continue;
-		if (num_different_channels > c->num_different_channels)
-			continue;
-
-		limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits,
-				 GFP_KERNEL);
-		if (!limits)
-			return -ENOMEM;
-
-		for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
-			if (rdev->wiphy.software_iftypes & BIT(iftype))
-				continue;
-			for (j = 0; j < c->n_limits; j++) {
-				all_iftypes |= limits[j].types;
-				if (!(limits[j].types & BIT(iftype)))
-					continue;
-				if (limits[j].max < num[iftype])
-					goto cont;
-				limits[j].max -= num[iftype];
-			}
-		}
-
-		if (radar_detect && !(c->radar_detect_widths & radar_detect))
-			goto cont;
-
-		/*
-		 * Finally check that all iftypes that we're currently
-		 * using are actually part of this combination. If they
-		 * aren't then we can't use this combination and have
-		 * to continue to the next.
-		 */
-		if ((all_iftypes & used_iftypes) != used_iftypes)
-			goto cont;
-
-		/*
-		 * This combination covered all interface types and
-		 * supported the requested numbers, so we're good.
-		 */
-		kfree(limits);
-		return 0;
- cont:
-		kfree(limits);
-	}
-
-	return -EBUSY;
+	return cfg80211_check_combinations(&rdev->wiphy, num_different_channels,
+					   radar_detect, num);
 }
 
 int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
@@ -1481,6 +1546,24 @@ unsigned int ieee80211_get_num_supported_channels(struct wiphy *wiphy)
 }
 EXPORT_SYMBOL(ieee80211_get_num_supported_channels);
 
+int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
+			 struct station_info *sinfo)
+{
+	struct cfg80211_registered_device *rdev;
+	struct wireless_dev *wdev;
+
+	wdev = dev->ieee80211_ptr;
+	if (!wdev)
+		return -EOPNOTSUPP;
+
+	rdev = wiphy_to_rdev(wdev->wiphy);
+	if (!rdev->ops->get_station)
+		return -EOPNOTSUPP;
+
+	return rdev_get_station(rdev, dev, mac_addr, sinfo);
+}
+EXPORT_SYMBOL(cfg80211_get_station);
+
 /* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
 /* Ethernet-II snap header (RFC1042 for most EtherTypes) */
 const unsigned char rfc1042_header[] __aligned(2) =
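With cfg80211_check_combinations() now exported, a driver can validate an interface mix against the wiphy's advertised combinations without open-coding the limit walk. A hypothetical caller (names are illustrative, not from the patch):

static int demo_can_run_ap_plus_sta(struct wiphy *wiphy)
{
	int iftype_num[NUM_NL80211_IFTYPES] = { 0 };

	iftype_num[NL80211_IFTYPE_AP] = 1;
	iftype_num[NL80211_IFTYPE_STATION] = 1;

	/* one operating channel, no radar detection requested */
	return cfg80211_check_combinations(wiphy, 1, 0, iftype_num);
}

Per the code above, 0 means at least one advertised combination covers the request, while -EBUSY means none does.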
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 5661a54ac7ee..11120bb14162 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -73,7 +73,7 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
 	struct vif_params vifparams;
 	enum nl80211_iftype type;
 
-	rdev = wiphy_to_dev(wdev->wiphy);
+	rdev = wiphy_to_rdev(wdev->wiphy);
 
 	switch (*mode) {
 	case IW_MODE_INFRA:
@@ -253,12 +253,12 @@ EXPORT_SYMBOL_GPL(cfg80211_wext_giwrange);
 
 /**
  * cfg80211_wext_freq - get wext frequency for non-"auto"
- * @wiphy: the wiphy
+ * @dev: the net device
  * @freq: the wext freq encoding
 *
 * Returns a frequency, or a negative error code, or 0 for auto.
 */
-int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq)
+int cfg80211_wext_freq(struct iw_freq *freq)
 {
	/*
	 * Parse frequency - return 0 for auto and
@@ -286,7 +286,7 @@ int cfg80211_wext_siwrts(struct net_device *dev,
 			 struct iw_param *rts, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	u32 orts = wdev->wiphy->rts_threshold;
 	int err;
 
@@ -324,7 +324,7 @@ int cfg80211_wext_siwfrag(struct net_device *dev,
 			  struct iw_param *frag, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	u32 ofrag = wdev->wiphy->frag_threshold;
 	int err;
 
@@ -364,7 +364,7 @@ static int cfg80211_wext_siwretry(struct net_device *dev,
 				  struct iw_param *retry, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	u32 changed = 0;
 	u8 olong = wdev->wiphy->retry_long;
 	u8 oshort = wdev->wiphy->retry_short;
@@ -587,7 +587,7 @@ static int cfg80211_wext_siwencode(struct net_device *dev,
 				   struct iw_point *erq, char *keybuf)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	int idx, err;
 	bool remove = false;
 	struct key_params params;
@@ -647,7 +647,7 @@ static int cfg80211_wext_siwencodeext(struct net_device *dev,
 				      struct iw_point *erq, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	struct iw_encode_ext *ext = (struct iw_encode_ext *) extra;
 	const u8 *addr;
 	int idx;
@@ -775,7 +775,7 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
 				 struct iw_freq *wextfreq, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	struct cfg80211_chan_def chandef = {
 		.width = NL80211_CHAN_WIDTH_20_NOHT,
 	};
@@ -787,7 +787,7 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
 	case NL80211_IFTYPE_ADHOC:
 		return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra);
 	case NL80211_IFTYPE_MONITOR:
-		freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
+		freq = cfg80211_wext_freq(wextfreq);
 		if (freq < 0)
 			return freq;
 		if (freq == 0)
@@ -798,7 +798,7 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
 			return -EINVAL;
 		return cfg80211_set_monitor_channel(rdev, &chandef);
 	case NL80211_IFTYPE_MESH_POINT:
-		freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
+		freq = cfg80211_wext_freq(wextfreq);
 		if (freq < 0)
 			return freq;
 		if (freq == 0)
@@ -818,7 +818,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
 				 struct iw_freq *freq, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	struct cfg80211_chan_def chandef;
 	int ret;
 
@@ -847,7 +847,7 @@ static int cfg80211_wext_siwtxpower(struct net_device *dev,
 				    union iwreq_data *data, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	enum nl80211_tx_power_setting type;
 	int dbm = 0;
 
@@ -899,7 +899,7 @@ static int cfg80211_wext_giwtxpower(struct net_device *dev,
 				    union iwreq_data *data, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	int err, val;
 
 	if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM)
@@ -1119,7 +1119,7 @@ static int cfg80211_wext_siwpower(struct net_device *dev,
 				  struct iw_param *wrq, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	bool ps = wdev->ps;
 	int timeout = wdev->ps_timeout;
 	int err;
@@ -1177,7 +1177,7 @@ static int cfg80211_wds_wext_siwap(struct net_device *dev,
 				   struct sockaddr *addr, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	int err;
 
 	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_WDS))
@@ -1221,7 +1221,7 @@ static int cfg80211_wext_siwrate(struct net_device *dev,
 				 struct iw_param *rate, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	struct cfg80211_bitrate_mask mask;
 	u32 fixed, maxrate;
 	struct ieee80211_supported_band *sband;
@@ -1272,7 +1272,7 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
 				 struct iw_param *rate, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	/* we are under RTNL - globally locked - so can use a static struct */
 	static struct station_info sinfo;
 	u8 addr[ETH_ALEN];
@@ -1310,7 +1310,7 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
 static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	/* we are under RTNL - globally locked - so can use static structs */
 	static struct iw_statistics wstats;
 	static struct station_info sinfo;
@@ -1449,7 +1449,7 @@ static int cfg80211_wext_siwpmksa(struct net_device *dev,
 				  struct iw_point *data, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	struct cfg80211_pmksa cfg_pmksa;
 	struct iw_pmksa *pmksa = (struct iw_pmksa *)extra;
 
diff --git a/net/wireless/wext-compat.h b/net/wireless/wext-compat.h
index 5d766b0118e8..ebcacca2f731 100644
--- a/net/wireless/wext-compat.h
+++ b/net/wireless/wext-compat.h
@@ -50,7 +50,7 @@ int cfg80211_wext_siwgenie(struct net_device *dev,
 			   struct iw_point *data, char *extra);
 
 
-int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq);
+int cfg80211_wext_freq(struct iw_freq *freq);
 
 
 extern const struct iw_handler_def cfg80211_wext_handler;
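The wiphy argument to cfg80211_wext_freq() was never used, so the declaration above and every caller drop it. A hypothetical wrapper showing the calling convention that remains (demo_parse_freq is made up, not kernel code):

static int demo_parse_freq(struct iw_freq *wextfreq)
{
	int freq = cfg80211_wext_freq(wextfreq);

	if (freq < 0)
		return freq;	/* negative error code */
	if (freq == 0)
		return 0;	/* "auto" */
	return freq;		/* frequency in MHz */
}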
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 86c331a65664..c7e5c8eb4f24 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -67,7 +67,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
 			      struct iw_freq *wextfreq, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	struct ieee80211_channel *chan = NULL;
 	int err, freq;
 
@@ -75,7 +75,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
 	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
 		return -EINVAL;
 
-	freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
+	freq = cfg80211_wext_freq(wextfreq);
 	if (freq < 0)
 		return freq;
 
@@ -169,7 +169,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,
 			       struct iw_point *data, char *ssid)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	size_t len = data->length;
 	int err;
 
@@ -260,7 +260,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,
 			    struct sockaddr *ap_addr, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	u8 *bssid = ap_addr->sa_data;
 	int err;
 
@@ -333,7 +333,7 @@ int cfg80211_wext_siwgenie(struct net_device *dev,
 			   struct iw_point *data, char *extra)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	u8 *ie = extra;
 	int ie_len = data->length, err;
 
@@ -390,7 +390,7 @@ int cfg80211_wext_siwmlme(struct net_device *dev,
 	if (!wdev)
 		return -EOPNOTSUPP;
 
-	rdev = wiphy_to_dev(wdev->wiphy);
+	rdev = wiphy_to_rdev(wdev->wiphy);
 
 	if (wdev->iftype != NL80211_IFTYPE_STATION)
 		return -EINVAL;
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 3bb2cdc13b46..c51e8f7b8653 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -199,6 +199,7 @@ int xfrm_output(struct sk_buff *skb)
 
 	return xfrm_output2(skb);
 }
+EXPORT_SYMBOL_GPL(xfrm_output);
 
 int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
 {
@@ -213,6 +214,7 @@ int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
 		return -EAFNOSUPPORT;
 	return inner_mode->afinfo->extract_output(x, skb);
 }
+EXPORT_SYMBOL_GPL(xfrm_inner_extract_output);
 
 void xfrm_local_error(struct sk_buff *skb, int mtu)
 {
@@ -233,7 +235,4 @@ void xfrm_local_error(struct sk_buff *skb, int mtu)
 	afinfo->local_error(skb, mtu);
 	xfrm_state_put_afinfo(afinfo);
 }
-
-EXPORT_SYMBOL_GPL(xfrm_output);
-EXPORT_SYMBOL_GPL(xfrm_inner_extract_output);
 EXPORT_SYMBOL_GPL(xfrm_local_error);
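The xfrm_output.c hunks are purely cosmetic: each EXPORT_SYMBOL_GPL() moves to sit directly below the function it exports, the usual kernel convention, instead of being collected at the end of the file. Illustrative module code, not from the patch:

#include <linux/module.h>

int demo_exported_helper(int x)
{
	return x + 1;
}
EXPORT_SYMBOL_GPL(demo_exported_helper);	/* export next to the definition */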
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index c08fbd11ceff..a8ef5108e0d8 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -769,7 +769,7 @@ EXPORT_SYMBOL(xfrm_policy_byid);
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 static inline int
-xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
+xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
 {
 	int dir, err = 0;
 
@@ -783,10 +783,7 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
 				continue;
 			err = security_xfrm_policy_delete(pol->security);
 			if (err) {
-				xfrm_audit_policy_delete(pol, 0,
-							 audit_info->loginuid,
-							 audit_info->sessionid,
-							 audit_info->secid);
+				xfrm_audit_policy_delete(pol, 0, task_valid);
 				return err;
 			}
 		}
@@ -800,9 +797,7 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
 							pol->security);
 				if (err) {
 					xfrm_audit_policy_delete(pol, 0,
-								 audit_info->loginuid,
-								 audit_info->sessionid,
-								 audit_info->secid);
+								 task_valid);
 					return err;
 				}
 			}
@@ -812,19 +807,19 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
 }
 #else
 static inline int
-xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
+xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
 {
 	return 0;
 }
 #endif
 
-int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
+int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
 {
 	int dir, err = 0, cnt = 0;
 
 	write_lock_bh(&net->xfrm.xfrm_policy_lock);
 
-	err = xfrm_policy_flush_secctx_check(net, type, audit_info);
+	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
 	if (err)
 		goto out;
 
@@ -841,9 +836,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
 			write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 			cnt++;
 
-			xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
-						 audit_info->sessionid,
-						 audit_info->secid);
+			xfrm_audit_policy_delete(pol, 1, task_valid);
 
 			xfrm_policy_kill(pol);
 
@@ -862,10 +855,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
 				write_unlock_bh(&net->xfrm.xfrm_policy_lock);
 				cnt++;
 
-				xfrm_audit_policy_delete(pol, 1,
-							 audit_info->loginuid,
-							 audit_info->sessionid,
-							 audit_info->secid);
+				xfrm_audit_policy_delete(pol, 1, task_valid);
 				xfrm_policy_kill(pol);
 
 				write_lock_bh(&net->xfrm.xfrm_policy_lock);
@@ -2783,21 +2773,19 @@ static struct notifier_block xfrm_dev_notifier = {
 static int __net_init xfrm_statistics_init(struct net *net)
 {
 	int rv;
-
-	if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
-			  sizeof(struct linux_xfrm_mib),
-			  __alignof__(struct linux_xfrm_mib)) < 0)
+	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
+	if (!net->mib.xfrm_statistics)
 		return -ENOMEM;
 	rv = xfrm_proc_init(net);
 	if (rv < 0)
-		snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
+		free_percpu(net->mib.xfrm_statistics);
 	return rv;
 }
 
 static void xfrm_statistics_fini(struct net *net)
 {
 	xfrm_proc_fini(net);
-	snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
+	free_percpu(net->mib.xfrm_statistics);
 }
 #else
 static int __net_init xfrm_statistics_init(struct net *net)
@@ -2862,21 +2850,14 @@ out_byidx:
 
 static void xfrm_policy_fini(struct net *net)
 {
-	struct xfrm_audit audit_info;
 	unsigned int sz;
 	int dir;
 
 	flush_work(&net->xfrm.policy_hash_work);
 #ifdef CONFIG_XFRM_SUB_POLICY
-	audit_info.loginuid = INVALID_UID;
-	audit_info.sessionid = (unsigned int)-1;
-	audit_info.secid = 0;
-	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
+	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
 #endif
-	audit_info.loginuid = INVALID_UID;
-	audit_info.sessionid = (unsigned int)-1;
-	audit_info.secid = 0;
-	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
+	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
 
 	WARN_ON(!list_empty(&net->xfrm.policy_all));
 
@@ -2991,15 +2972,14 @@ static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
 	}
 }
 
-void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
-			   kuid_t auid, unsigned int sessionid, u32 secid)
+void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
 {
 	struct audit_buffer *audit_buf;
 
 	audit_buf = xfrm_audit_start("SPD-add");
 	if (audit_buf == NULL)
 		return;
-	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
+	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
 	audit_log_format(audit_buf, " res=%u", result);
 	xfrm_audit_common_policyinfo(xp, audit_buf);
 	audit_log_end(audit_buf);
@@ -3007,14 +2987,14 @@ void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
 
 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
-			      kuid_t auid, unsigned int sessionid, u32 secid)
+			      bool task_valid)
 {
 	struct audit_buffer *audit_buf;
 
 	audit_buf = xfrm_audit_start("SPD-delete");
 	if (audit_buf == NULL)
 		return;
-	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
+	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
 	audit_log_format(audit_buf, " res=%u", result);
 	xfrm_audit_common_policyinfo(xp, audit_buf);
 	audit_log_end(audit_buf);
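Throughout xfrm_policy.c the (loginuid, sessionid, secid) triple is collapsed into a single task_valid flag; the audit helper is expected to derive the identity from current itself. A hypothetical analogue of that pattern with made-up names (the real helper is xfrm_audit_helper_usrinfo(), whose body is not shown in this diff):

static void demo_audit_usrinfo(bool task_valid, struct audit_buffer *ab)
{
	if (task_valid) {
		/* identity comes from the calling task */
		audit_log_format(ab, " auid=%u ses=%u",
				 from_kuid(&init_user_ns,
					   audit_get_loginuid(current)),
				 audit_get_sessionid(current));
	} else {
		/* no task context, e.g. netns teardown */
		audit_log_format(ab, " auid=%u ses=%u",
				 (unsigned int)-1, (unsigned int)-1);
	}
}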
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index fc5abd0b456f..9c4fbd8935f4 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -54,8 +54,7 @@ static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
 	int i;
 	for (i = 0; xfrm_mib_list[i].name; i++)
 		seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name,
-			   snmp_fold_field((void __percpu **)
-					   net->mib.xfrm_statistics,
+			   snmp_fold_field(net->mib.xfrm_statistics,
 					   xfrm_mib_list[i].entry));
 	return 0;
 }
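The statistics hunks in xfrm_policy.c and the snmp_fold_field() call above reflect the switch from the old snmp_mib_init()/snmp_mib_free() pair to plain alloc_percpu()/free_percpu(). A minimal sketch of that per-cpu counter lifecycle, with demo_mib standing in for struct linux_xfrm_mib (names are illustrative):

struct demo_mib {
	unsigned long mibs[8];
};

static struct demo_mib __percpu *demo_stats;

static int demo_init(void)
{
	demo_stats = alloc_percpu(struct demo_mib);
	if (!demo_stats)
		return -ENOMEM;
	return 0;
}

/* roughly what snmp_fold_field() does: sum one counter over all CPUs */
static unsigned long demo_fold(int idx)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(demo_stats, cpu)->mibs[idx];
	return sum;
}

static void demo_fini(void)
{
	free_percpu(demo_stats);
}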
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 8e9c781a6bba..0ab54134bb40 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -463,9 +463,7 @@ expired:
 	if (!err)
 		km_state_expired(x, 1, 0);
 
-	xfrm_audit_state_delete(x, err ? 0 : 1,
-				audit_get_loginuid(current),
-				audit_get_sessionid(current), 0);
+	xfrm_audit_state_delete(x, err ? 0 : 1, true);
 
 out:
 	spin_unlock(&x->lock);
@@ -562,7 +560,7 @@ EXPORT_SYMBOL(xfrm_state_delete);
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 static inline int
-xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audit_info)
+xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
 {
 	int i, err = 0;
 
@@ -572,10 +570,7 @@ xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audi
 		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
 			if (xfrm_id_proto_match(x->id.proto, proto) &&
 			   (err = security_xfrm_state_delete(x)) != 0) {
-				xfrm_audit_state_delete(x, 0,
-							audit_info->loginuid,
-							audit_info->sessionid,
-							audit_info->secid);
+				xfrm_audit_state_delete(x, 0, task_valid);
 				return err;
 			}
 		}
@@ -585,18 +580,18 @@ xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audi
 }
 #else
 static inline int
-xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audit_info)
+xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
 {
 	return 0;
 }
 #endif
 
-int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info)
+int xfrm_state_flush(struct net *net, u8 proto, bool task_valid)
 {
 	int i, err = 0, cnt = 0;
 
 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
-	err = xfrm_state_flush_secctx_check(net, proto, audit_info);
+	err = xfrm_state_flush_secctx_check(net, proto, task_valid);
 	if (err)
 		goto out;
 
@@ -612,9 +607,7 @@ restart:
 
 				err = xfrm_state_delete(x);
 				xfrm_audit_state_delete(x, err ? 0 : 1,
-							audit_info->loginuid,
-							audit_info->sessionid,
-							audit_info->secid);
+							task_valid);
 				xfrm_state_put(x);
 				if (!err)
 					cnt++;
@@ -2128,14 +2121,10 @@ out_bydst:
 
 void xfrm_state_fini(struct net *net)
 {
-	struct xfrm_audit audit_info;
 	unsigned int sz;
 
 	flush_work(&net->xfrm.state_hash_work);
-	audit_info.loginuid = INVALID_UID;
-	audit_info.sessionid = (unsigned int)-1;
-	audit_info.secid = 0;
-	xfrm_state_flush(net, IPSEC_PROTO_ANY, &audit_info);
+	xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
 	flush_work(&net->xfrm.state_gc_work);
 
 	WARN_ON(!list_empty(&net->xfrm.state_all));
@@ -2198,30 +2187,28 @@ static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
 	}
 }
 
-void xfrm_audit_state_add(struct xfrm_state *x, int result,
-			  kuid_t auid, unsigned int sessionid, u32 secid)
+void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid)
 {
 	struct audit_buffer *audit_buf;
 
 	audit_buf = xfrm_audit_start("SAD-add");
 	if (audit_buf == NULL)
 		return;
-	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
+	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
 	xfrm_audit_helper_sainfo(x, audit_buf);
 	audit_log_format(audit_buf, " res=%u", result);
 	audit_log_end(audit_buf);
 }
 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
 
-void xfrm_audit_state_delete(struct xfrm_state *x, int result,
-			     kuid_t auid, unsigned int sessionid, u32 secid)
+void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid)
 {
 	struct audit_buffer *audit_buf;
 
 	audit_buf = xfrm_audit_start("SAD-delete");
 	if (audit_buf == NULL)
 		return;
-	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
+	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
 	xfrm_audit_helper_sainfo(x, audit_buf);
 	audit_log_format(audit_buf, " res=%u", result);
 	audit_log_end(audit_buf);
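A caller's-eye view of the task_valid convention used by the flush paths above: paths triggered from process context (the netlink handlers in xfrm_user.c below) pass true, while kernel-internal teardown such as xfrm_state_fini() passes false because there is no meaningful current task to audit. Hypothetical callers for illustration only:

static void demo_flush_from_netlink(struct net *net)
{
	xfrm_state_flush(net, IPSEC_PROTO_ANY, true);	/* task context */
}

static void demo_flush_on_netns_exit(struct net *net)
{
	xfrm_state_flush(net, IPSEC_PROTO_ANY, false);	/* no task */
}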
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 09336b268001..412d9dc3a873 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -597,9 +597,6 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
 	struct xfrm_state *x;
 	int err;
 	struct km_event c;
-	kuid_t loginuid = audit_get_loginuid(current);
-	unsigned int sessionid = audit_get_sessionid(current);
-	u32 sid;
 
 	err = verify_newsa_info(p, attrs);
 	if (err)
@@ -615,8 +612,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
 	else
 		err = xfrm_state_update(x);
 
-	security_task_getsecid(current, &sid);
-	xfrm_audit_state_add(x, err ? 0 : 1, loginuid, sessionid, sid);
+	xfrm_audit_state_add(x, err ? 0 : 1, true);
 
 	if (err < 0) {
 		x->km.state = XFRM_STATE_DEAD;
@@ -676,9 +672,6 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
 	int err = -ESRCH;
 	struct km_event c;
 	struct xfrm_usersa_id *p = nlmsg_data(nlh);
-	kuid_t loginuid = audit_get_loginuid(current);
-	unsigned int sessionid = audit_get_sessionid(current);
-	u32 sid;
 
 	x = xfrm_user_state_lookup(net, p, attrs, &err);
 	if (x == NULL)
@@ -703,8 +696,7 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
 	km_state_notify(x, &c);
 
 out:
-	security_task_getsecid(current, &sid);
-	xfrm_audit_state_delete(x, err ? 0 : 1, loginuid, sessionid, sid);
+	xfrm_audit_state_delete(x, err ? 0 : 1, true);
 	xfrm_state_put(x);
 	return err;
 }
@@ -1428,9 +1420,6 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
 	struct km_event c;
 	int err;
 	int excl;
-	kuid_t loginuid = audit_get_loginuid(current);
-	unsigned int sessionid = audit_get_sessionid(current);
-	u32 sid;
 
 	err = verify_newpolicy_info(p);
 	if (err)
@@ -1449,8 +1438,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
 	 * a type XFRM_MSG_UPDPOLICY - JHS */
 	excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
 	err = xfrm_policy_insert(p->dir, xp, excl);
-	security_task_getsecid(current, &sid);
-	xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid);
+	xfrm_audit_policy_add(xp, err ? 0 : 1, true);
 
 	if (err) {
 		security_xfrm_policy_free(xp->security);
@@ -1687,13 +1675,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
 					    NETLINK_CB(skb).portid);
 		}
 	} else {
-		kuid_t loginuid = audit_get_loginuid(current);
-		unsigned int sessionid = audit_get_sessionid(current);
-		u32 sid;
-
-		security_task_getsecid(current, &sid);
-		xfrm_audit_policy_delete(xp, err ? 0 : 1, loginuid, sessionid,
-					 sid);
+		xfrm_audit_policy_delete(xp, err ? 0 : 1, true);
 
 		if (err != 0)
 			goto out;
@@ -1718,13 +1700,9 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
 	struct net *net = sock_net(skb->sk);
 	struct km_event c;
 	struct xfrm_usersa_flush *p = nlmsg_data(nlh);
-	struct xfrm_audit audit_info;
 	int err;
 
-	audit_info.loginuid = audit_get_loginuid(current);
-	audit_info.sessionid = audit_get_sessionid(current);
-	security_task_getsecid(current, &audit_info.secid);
-	err = xfrm_state_flush(net, p->proto, &audit_info);
+	err = xfrm_state_flush(net, p->proto, true);
 	if (err) {
 		if (err == -ESRCH) /* empty table */
 			return 0;
@@ -1908,16 +1886,12 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
 	struct km_event c;
 	u8 type = XFRM_POLICY_TYPE_MAIN;
 	int err;
-	struct xfrm_audit audit_info;
 
 	err = copy_from_user_policy_type(&type, attrs);
 	if (err)
 		return err;
 
-	audit_info.loginuid = audit_get_loginuid(current);
-	audit_info.sessionid = audit_get_sessionid(current);
-	security_task_getsecid(current, &audit_info.secid);
-	err = xfrm_policy_flush(net, type, &audit_info);
+	err = xfrm_policy_flush(net, type, true);
 	if (err) {
 		if (err == -ESRCH) /* empty table */
 			return 0;
@@ -1983,14 +1957,8 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 	err = 0;
 	if (up->hard) {
-		kuid_t loginuid = audit_get_loginuid(current);
-		unsigned int sessionid = audit_get_sessionid(current);
-		u32 sid;
-
-		security_task_getsecid(current, &sid);
 		xfrm_policy_delete(xp, p->dir);
-		xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid);
-
+		xfrm_audit_policy_delete(xp, 1, true);
 	} else {
 		// reset the timers here?
 		WARN(1, "Dont know what to do with soft policy expire\n");
@@ -2026,13 +1994,8 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
 	km_state_expired(x, ue->hard, nlh->nlmsg_pid);
 
 	if (ue->hard) {
-		kuid_t loginuid = audit_get_loginuid(current);
-		unsigned int sessionid = audit_get_sessionid(current);
-		u32 sid;
-
-		security_task_getsecid(current, &sid);
 		__xfrm_state_delete(x);
-		xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid);
+		xfrm_audit_state_delete(x, 1, true);
 	}
 	err = 0;
 out: