aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
Diffstat (limited to 'net')
-rw-r--r--net/atm/clip.c17
-rw-r--r--net/atm/pppoatm.c2
-rw-r--r--net/batman-adv/Makefile2
-rw-r--r--net/batman-adv/bat_algo.h (renamed from net/batman-adv/bat_ogm.h)20
-rw-r--r--net/batman-adv/bat_debugfs.c24
-rw-r--r--net/batman-adv/bat_debugfs.h2
-rw-r--r--net/batman-adv/bat_iv_ogm.c304
-rw-r--r--net/batman-adv/bat_sysfs.c31
-rw-r--r--net/batman-adv/bat_sysfs.h2
-rw-r--r--net/batman-adv/bitarray.c10
-rw-r--r--net/batman-adv/bitarray.h2
-rw-r--r--net/batman-adv/gateway_client.c37
-rw-r--r--net/batman-adv/gateway_client.h2
-rw-r--r--net/batman-adv/gateway_common.c14
-rw-r--r--net/batman-adv/gateway_common.h2
-rw-r--r--net/batman-adv/hard-interface.c66
-rw-r--r--net/batman-adv/hard-interface.h2
-rw-r--r--net/batman-adv/hash.c2
-rw-r--r--net/batman-adv/hash.h2
-rw-r--r--net/batman-adv/icmp_socket.c20
-rw-r--r--net/batman-adv/icmp_socket.h2
-rw-r--r--net/batman-adv/main.c113
-rw-r--r--net/batman-adv/main.h45
-rw-r--r--net/batman-adv/originator.c33
-rw-r--r--net/batman-adv/originator.h2
-rw-r--r--net/batman-adv/packet.h40
-rw-r--r--net/batman-adv/ring_buffer.c2
-rw-r--r--net/batman-adv/ring_buffer.h2
-rw-r--r--net/batman-adv/routing.c67
-rw-r--r--net/batman-adv/routing.h2
-rw-r--r--net/batman-adv/send.c15
-rw-r--r--net/batman-adv/send.h2
-rw-r--r--net/batman-adv/soft-interface.c44
-rw-r--r--net/batman-adv/soft-interface.h2
-rw-r--r--net/batman-adv/translation-table.c251
-rw-r--r--net/batman-adv/translation-table.h2
-rw-r--r--net/batman-adv/types.h23
-rw-r--r--net/batman-adv/unicast.c22
-rw-r--r--net/batman-adv/unicast.h2
-rw-r--r--net/batman-adv/vis.c19
-rw-r--r--net/batman-adv/vis.h5
-rw-r--r--net/bluetooth/Kconfig1
-rw-r--r--net/bluetooth/bnep/sock.c6
-rw-r--r--net/bluetooth/cmtp/sock.c6
-rw-r--r--net/bluetooth/hci_conn.c73
-rw-r--r--net/bluetooth/hci_core.c645
-rw-r--r--net/bluetooth/hci_event.c631
-rw-r--r--net/bluetooth/hci_sock.c469
-rw-r--r--net/bluetooth/hci_sysfs.c53
-rw-r--r--net/bluetooth/hidp/sock.c6
-rw-r--r--net/bluetooth/l2cap_core.c641
-rw-r--r--net/bluetooth/l2cap_sock.c54
-rw-r--r--net/bluetooth/lib.c27
-rw-r--r--net/bluetooth/mgmt.c2647
-rw-r--r--net/bluetooth/rfcomm/tty.c6
-rw-r--r--net/bluetooth/smp.c108
-rw-r--r--net/bridge/br_device.c5
-rw-r--r--net/caif/caif_dev.c2
-rw-r--r--net/caif/caif_socket.c113
-rw-r--r--net/caif/cfdbgl.c4
-rw-r--r--net/caif/cfdgml.c9
-rw-r--r--net/caif/cfrfml.c25
-rw-r--r--net/caif/cfsrvl.c6
-rw-r--r--net/caif/cfutill.c5
-rw-r--r--net/caif/cfvidl.c6
-rw-r--r--net/caif/chnl_net.c24
-rw-r--r--net/compat.c2
-rw-r--r--net/core/datagram.c26
-rw-r--r--net/core/dev.c115
-rw-r--r--net/core/ethtool.c2
-rw-r--r--net/core/iovec.c2
-rw-r--r--net/core/neighbour.c90
-rw-r--r--net/core/netpoll.c71
-rw-r--r--net/core/rtnetlink.c11
-rw-r--r--net/core/skbuff.c4
-rw-r--r--net/core/sock.c21
-rw-r--r--net/dccp/ccids/ccid3.c3
-rw-r--r--net/dccp/ipv4.c8
-rw-r--r--net/dccp/ipv6.c8
-rw-r--r--net/dccp/minisocks.c18
-rw-r--r--net/dccp/output.c10
-rw-r--r--net/decnet/dn_neigh.c24
-rw-r--r--net/decnet/dn_route.c3
-rw-r--r--net/ethernet/eth.c2
-rw-r--r--net/ieee802154/6lowpan.c16
-rw-r--r--net/ipv4/af_inet.c28
-rw-r--r--net/ipv4/ah4.c17
-rw-r--r--net/ipv4/arp.c2
-rw-r--r--net/ipv4/esp4.c10
-rw-r--r--net/ipv4/fib_frontend.c6
-rw-r--r--net/ipv4/fib_semantics.c2
-rw-r--r--net/ipv4/fib_trie.c5
-rw-r--r--net/ipv4/gre.c6
-rw-r--r--net/ipv4/icmp.c21
-rw-r--r--net/ipv4/inet_diag.c18
-rw-r--r--net/ipv4/ip_fragment.c11
-rw-r--r--net/ipv4/ip_gre.c29
-rw-r--r--net/ipv4/ip_input.c20
-rw-r--r--net/ipv4/ip_options.c4
-rw-r--r--net/ipv4/ip_sockglue.c37
-rw-r--r--net/ipv4/ipcomp.c8
-rw-r--r--net/ipv4/ipconfig.c110
-rw-r--r--net/ipv4/ipip.c7
-rw-r--r--net/ipv4/ipmr.c4
-rw-r--r--net/ipv4/netfilter/Kconfig9
-rw-r--r--net/ipv4/netfilter/Makefile1
-rw-r--r--net/ipv4/netfilter/ipt_LOG.c516
-rw-r--r--net/ipv4/netfilter/nf_conntrack_proto_icmp.c60
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c8
-rw-r--r--net/ipv4/netfilter/nf_nat_h323.c14
-rw-r--r--net/ipv4/netfilter/nf_nat_sip.c7
-rw-r--r--net/ipv4/ping.c21
-rw-r--r--net/ipv4/proc.c2
-rw-r--r--net/ipv4/raw.c10
-rw-r--r--net/ipv4/route.c52
-rw-r--r--net/ipv4/tcp.c14
-rw-r--r--net/ipv4/tcp_cong.c9
-rw-r--r--net/ipv4/tcp_input.c241
-rw-r--r--net/ipv4/tcp_ipv4.c312
-rw-r--r--net/ipv4/tcp_minisocks.c12
-rw-r--r--net/ipv4/tcp_output.c4
-rw-r--r--net/ipv4/tcp_probe.c4
-rw-r--r--net/ipv4/tcp_timer.c14
-rw-r--r--net/ipv4/tunnel4.c8
-rw-r--r--net/ipv4/udp.c41
-rw-r--r--net/ipv4/udplite.c7
-rw-r--r--net/ipv4/xfrm4_tunnel.c16
-rw-r--r--net/ipv6/addrconf.c2
-rw-r--r--net/ipv6/af_inet6.c1
-rw-r--r--net/ipv6/anycast.c29
-rw-r--r--net/ipv6/datagram.c2
-rw-r--r--net/ipv6/icmp.c4
-rw-r--r--net/ipv6/ip6_fib.c19
-rw-r--r--net/ipv6/ip6_output.c21
-rw-r--r--net/ipv6/ipv6_sockglue.c38
-rw-r--r--net/ipv6/ndisc.c30
-rw-r--r--net/ipv6/netfilter/Kconfig9
-rw-r--r--net/ipv6/netfilter/Makefile1
-rw-r--r--net/ipv6/netfilter/ip6t_LOG.c527
-rw-r--r--net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c60
-rw-r--r--net/ipv6/raw.c2
-rw-r--r--net/ipv6/reassembly.c7
-rw-r--r--net/ipv6/route.c15
-rw-r--r--net/ipv6/sit.c20
-rw-r--r--net/ipv6/tcp_ipv6.c231
-rw-r--r--net/ipv6/udp.c7
-rw-r--r--net/ipv6/xfrm6_output.c2
-rw-r--r--net/irda/irnet/irnet.h2
-rw-r--r--net/iucv/af_iucv.c381
-rw-r--r--net/l2tp/l2tp_eth.c2
-rw-r--r--net/l2tp/l2tp_ppp.c4
-rw-r--r--net/mac80211/Makefile4
-rw-r--r--net/mac80211/cfg.c241
-rw-r--r--net/mac80211/chan.c55
-rw-r--r--net/mac80211/debugfs.c87
-rw-r--r--net/mac80211/debugfs_netdev.c115
-rw-r--r--net/mac80211/debugfs_sta.c5
-rw-r--r--net/mac80211/driver-ops.h68
-rw-r--r--net/mac80211/driver-trace.h77
-rw-r--r--net/mac80211/ibss.c109
-rw-r--r--net/mac80211/ieee80211_i.h173
-rw-r--r--net/mac80211/iface.c24
-rw-r--r--net/mac80211/key.c39
-rw-r--r--net/mac80211/main.c27
-rw-r--r--net/mac80211/mesh.c2
-rw-r--r--net/mac80211/mesh.h5
-rw-r--r--net/mac80211/mesh_hwmp.c57
-rw-r--r--net/mac80211/mesh_pathtbl.c40
-rw-r--r--net/mac80211/mesh_plink.c23
-rw-r--r--net/mac80211/mlme.c1712
-rw-r--r--net/mac80211/pm.c11
-rw-r--r--net/mac80211/rate.c151
-rw-r--r--net/mac80211/rate.h4
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c16
-rw-r--r--net/mac80211/rx.c123
-rw-r--r--net/mac80211/scan.c74
-rw-r--r--net/mac80211/sta_info.c341
-rw-r--r--net/mac80211/sta_info.h61
-rw-r--r--net/mac80211/status.c10
-rw-r--r--net/mac80211/tx.c47
-rw-r--r--net/mac80211/util.c80
-rw-r--r--net/mac80211/wep.c21
-rw-r--r--net/mac80211/wep.h1
-rw-r--r--net/mac80211/work.c814
-rw-r--r--net/mac80211/wpa.c22
-rw-r--r--net/netfilter/Kconfig30
-rw-r--r--net/netfilter/Makefile3
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ip.c4
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ipmac.c4
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_port.c4
-rw-r--r--net/netfilter/ipset/ip_set_core.c26
-rw-r--r--net/netfilter/ipset/ip_set_getport.c4
-rw-r--r--net/netfilter/ipset/ip_set_hash_ip.c18
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipport.c10
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportip.c10
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportnet.c147
-rw-r--r--net/netfilter/ipset/ip_set_hash_net.c89
-rw-r--r--net/netfilter/ipset/ip_set_hash_netiface.c84
-rw-r--r--net/netfilter/ipset/ip_set_hash_netport.c150
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c2
-rw-r--r--net/netfilter/nf_conntrack_core.c34
-rw-r--r--net/netfilter/nf_conntrack_ecache.c55
-rw-r--r--net/netfilter/nf_conntrack_helper.c54
-rw-r--r--net/netfilter/nf_conntrack_netlink.c218
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c86
-rw-r--r--net/netfilter/nf_conntrack_proto_generic.c77
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c82
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c83
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c168
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c106
-rw-r--r--net/netfilter/nf_conntrack_proto_udplite.c103
-rw-r--r--net/netfilter/nf_conntrack_timeout.c60
-rw-r--r--net/netfilter/nfnetlink_acct.c6
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c429
-rw-r--r--net/netfilter/xt_CT.c220
-rw-r--r--net/netfilter/xt_LOG.c925
-rw-r--r--net/netlink/af_netlink.c30
-rw-r--r--net/netlink/genetlink.c40
-rw-r--r--net/nfc/af_nfc.c2
-rw-r--r--net/nfc/core.c55
-rw-r--r--net/nfc/llcp/commands.c163
-rw-r--r--net/nfc/llcp/llcp.c201
-rw-r--r--net/nfc/llcp/llcp.h12
-rw-r--r--net/nfc/llcp/sock.c120
-rw-r--r--net/nfc/nci/core.c209
-rw-r--r--net/nfc/nci/data.c32
-rw-r--r--net/nfc/nci/ntf.c360
-rw-r--r--net/nfc/nci/rsp.c41
-rw-r--r--net/nfc/netlink.c73
-rw-r--r--net/nfc/nfc.h18
-rw-r--r--net/nfc/rawsock.c28
-rw-r--r--net/openvswitch/vport-internal_dev.c3
-rw-r--r--net/packet/af_packet.c32
-rw-r--r--net/rds/send.c1
-rw-r--r--net/sched/Kconfig26
-rw-r--r--net/sched/Makefile1
-rw-r--r--net/sched/sch_plug.c233
-rw-r--r--net/sctp/socket.c24
-rw-r--r--net/socket.c36
-rw-r--r--net/tipc/bcast.c336
-rw-r--r--net/tipc/bcast.h2
-rw-r--r--net/tipc/bearer.c5
-rw-r--r--net/tipc/config.c21
-rw-r--r--net/tipc/core.c10
-rw-r--r--net/tipc/core.h42
-rw-r--r--net/tipc/discover.c79
-rw-r--r--net/tipc/link.c299
-rw-r--r--net/tipc/log.c2
-rw-r--r--net/tipc/msg.c2
-rw-r--r--net/tipc/msg.h15
-rw-r--r--net/tipc/name_distr.c8
-rw-r--r--net/tipc/name_table.c48
-rw-r--r--net/tipc/name_table.h2
-rw-r--r--net/tipc/net.c11
-rw-r--r--net/tipc/node.c84
-rw-r--r--net/tipc/node.h37
-rw-r--r--net/tipc/port.c72
-rw-r--r--net/tipc/port.h42
-rw-r--r--net/tipc/socket.c11
-rw-r--r--net/tipc/subscr.c2
-rw-r--r--net/unix/af_unix.c52
-rw-r--r--net/unix/diag.c10
-rw-r--r--net/wireless/core.h14
-rw-r--r--net/wireless/mesh.c4
-rw-r--r--net/wireless/mlme.c333
-rw-r--r--net/wireless/nl80211.c397
-rw-r--r--net/wireless/nl80211.h3
-rw-r--r--net/wireless/reg.c19
-rw-r--r--net/wireless/scan.c19
-rw-r--r--net/wireless/sme.c41
-rw-r--r--net/wireless/util.c1
-rw-r--r--net/wireless/wext-sme.c3
-rw-r--r--net/xfrm/xfrm_user.c9
273 files changed, 13374 insertions, 8740 deletions
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 127fe70a1baa..5de42ea309bc 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -330,6 +330,8 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
330 struct atmarp_entry *entry; 330 struct atmarp_entry *entry;
331 struct neighbour *n; 331 struct neighbour *n;
332 struct atm_vcc *vcc; 332 struct atm_vcc *vcc;
333 struct rtable *rt;
334 __be32 *daddr;
333 int old; 335 int old;
334 unsigned long flags; 336 unsigned long flags;
335 337
@@ -340,7 +342,12 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
340 dev->stats.tx_dropped++; 342 dev->stats.tx_dropped++;
341 return NETDEV_TX_OK; 343 return NETDEV_TX_OK;
342 } 344 }
343 n = dst_get_neighbour_noref(dst); 345 rt = (struct rtable *) dst;
346 if (rt->rt_gateway)
347 daddr = &rt->rt_gateway;
348 else
349 daddr = &ip_hdr(skb)->daddr;
350 n = dst_neigh_lookup(dst, daddr);
344 if (!n) { 351 if (!n) {
345 pr_err("NO NEIGHBOUR !\n"); 352 pr_err("NO NEIGHBOUR !\n");
346 dev_kfree_skb(skb); 353 dev_kfree_skb(skb);
@@ -360,7 +367,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
360 dev_kfree_skb(skb); 367 dev_kfree_skb(skb);
361 dev->stats.tx_dropped++; 368 dev->stats.tx_dropped++;
362 } 369 }
363 return NETDEV_TX_OK; 370 goto out_release_neigh;
364 } 371 }
365 pr_debug("neigh %p, vccs %p\n", entry, entry->vccs); 372 pr_debug("neigh %p, vccs %p\n", entry, entry->vccs);
366 ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc; 373 ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc;
@@ -379,14 +386,14 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
379 old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */ 386 old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
380 if (old) { 387 if (old) {
381 pr_warning("XOFF->XOFF transition\n"); 388 pr_warning("XOFF->XOFF transition\n");
382 return NETDEV_TX_OK; 389 goto out_release_neigh;
383 } 390 }
384 dev->stats.tx_packets++; 391 dev->stats.tx_packets++;
385 dev->stats.tx_bytes += skb->len; 392 dev->stats.tx_bytes += skb->len;
386 vcc->send(vcc, skb); 393 vcc->send(vcc, skb);
387 if (atm_may_send(vcc, 0)) { 394 if (atm_may_send(vcc, 0)) {
388 entry->vccs->xoff = 0; 395 entry->vccs->xoff = 0;
389 return NETDEV_TX_OK; 396 goto out_release_neigh;
390 } 397 }
391 spin_lock_irqsave(&clip_priv->xoff_lock, flags); 398 spin_lock_irqsave(&clip_priv->xoff_lock, flags);
392 netif_stop_queue(dev); /* XOFF -> throttle immediately */ 399 netif_stop_queue(dev); /* XOFF -> throttle immediately */
@@ -398,6 +405,8 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
398 of the brief netif_stop_queue. If this isn't true or if it 405 of the brief netif_stop_queue. If this isn't true or if it
399 changes, use netif_wake_queue instead. */ 406 changes, use netif_wake_queue instead. */
400 spin_unlock_irqrestore(&clip_priv->xoff_lock, flags); 407 spin_unlock_irqrestore(&clip_priv->xoff_lock, flags);
408out_release_neigh:
409 neigh_release(n);
401 return NETDEV_TX_OK; 410 return NETDEV_TX_OK;
402} 411}
403 412
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index df35d9a3b5fe..614d3fc47ede 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -44,7 +44,7 @@
44#include <linux/atmdev.h> 44#include <linux/atmdev.h>
45#include <linux/capability.h> 45#include <linux/capability.h>
46#include <linux/ppp_defs.h> 46#include <linux/ppp_defs.h>
47#include <linux/if_ppp.h> 47#include <linux/ppp-ioctl.h>
48#include <linux/ppp_channel.h> 48#include <linux/ppp_channel.h>
49#include <linux/atmppp.h> 49#include <linux/atmppp.h>
50 50
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index ce6861166499..4e392ebedb64 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -1,5 +1,5 @@
1# 1#
2# Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2# Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3# 3#
4# Marek Lindner, Simon Wunderlich 4# Marek Lindner, Simon Wunderlich
5# 5#
diff --git a/net/batman-adv/bat_ogm.h b/net/batman-adv/bat_algo.h
index 69329c107e28..9852a688ba43 100644
--- a/net/batman-adv/bat_ogm.h
+++ b/net/batman-adv/bat_algo.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public 7 * modify it under the terms of version 2 of the GNU General Public
@@ -19,17 +19,9 @@
19 * 19 *
20 */ 20 */
21 21
22#ifndef _NET_BATMAN_ADV_OGM_H_ 22#ifndef _NET_BATMAN_ADV_BAT_ALGO_H_
23#define _NET_BATMAN_ADV_OGM_H_ 23#define _NET_BATMAN_ADV_BAT_ALGO_H_
24 24
25#include "main.h" 25int bat_iv_init(void);
26 26
27void bat_ogm_init(struct hard_iface *hard_iface); 27#endif /* _NET_BATMAN_ADV_BAT_ALGO_H_ */
28void bat_ogm_init_primary(struct hard_iface *hard_iface);
29void bat_ogm_update_mac(struct hard_iface *hard_iface);
30void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes);
31void bat_ogm_emit(struct forw_packet *forw_packet);
32void bat_ogm_receive(const struct ethhdr *ethhdr, unsigned char *packet_buff,
33 int packet_len, struct hard_iface *if_incoming);
34
35#endif /* _NET_BATMAN_ADV_OGM_H_ */
diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c
index d0af9bf69e46..c3b0548b175d 100644
--- a/net/batman-adv/bat_debugfs.c
+++ b/net/batman-adv/bat_debugfs.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
@@ -221,6 +221,11 @@ static void debug_log_cleanup(struct bat_priv *bat_priv)
221} 221}
222#endif 222#endif
223 223
224static int bat_algorithms_open(struct inode *inode, struct file *file)
225{
226 return single_open(file, bat_algo_seq_print_text, NULL);
227}
228
224static int originators_open(struct inode *inode, struct file *file) 229static int originators_open(struct inode *inode, struct file *file)
225{ 230{
226 struct net_device *net_dev = (struct net_device *)inode->i_private; 231 struct net_device *net_dev = (struct net_device *)inode->i_private;
@@ -274,6 +279,7 @@ struct bat_debuginfo bat_debuginfo_##_name = { \
274 } \ 279 } \
275}; 280};
276 281
282static BAT_DEBUGINFO(routing_algos, S_IRUGO, bat_algorithms_open);
277static BAT_DEBUGINFO(originators, S_IRUGO, originators_open); 283static BAT_DEBUGINFO(originators, S_IRUGO, originators_open);
278static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open); 284static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open);
279static BAT_DEBUGINFO(softif_neigh, S_IRUGO, softif_neigh_open); 285static BAT_DEBUGINFO(softif_neigh, S_IRUGO, softif_neigh_open);
@@ -293,9 +299,25 @@ static struct bat_debuginfo *mesh_debuginfos[] = {
293 299
294void debugfs_init(void) 300void debugfs_init(void)
295{ 301{
302 struct bat_debuginfo *bat_debug;
303 struct dentry *file;
304
296 bat_debugfs = debugfs_create_dir(DEBUGFS_BAT_SUBDIR, NULL); 305 bat_debugfs = debugfs_create_dir(DEBUGFS_BAT_SUBDIR, NULL);
297 if (bat_debugfs == ERR_PTR(-ENODEV)) 306 if (bat_debugfs == ERR_PTR(-ENODEV))
298 bat_debugfs = NULL; 307 bat_debugfs = NULL;
308
309 if (!bat_debugfs)
310 goto out;
311
312 bat_debug = &bat_debuginfo_routing_algos;
313 file = debugfs_create_file(bat_debug->attr.name,
314 S_IFREG | bat_debug->attr.mode,
315 bat_debugfs, NULL, &bat_debug->fops);
316 if (!file)
317 pr_err("Can't add debugfs file: %s\n", bat_debug->attr.name);
318
319out:
320 return;
299} 321}
300 322
301void debugfs_destroy(void) 323void debugfs_destroy(void)
diff --git a/net/batman-adv/bat_debugfs.h b/net/batman-adv/bat_debugfs.h
index bc9cda3f01e1..d605c6746428 100644
--- a/net/batman-adv/bat_debugfs.h
+++ b/net/batman-adv/bat_debugfs.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 3512e251545b..a6d5d63fb6ad 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
@@ -20,7 +20,6 @@
20 */ 20 */
21 21
22#include "main.h" 22#include "main.h"
23#include "bat_ogm.h"
24#include "translation-table.h" 23#include "translation-table.h"
25#include "ring_buffer.h" 24#include "ring_buffer.h"
26#include "originator.h" 25#include "originator.h"
@@ -29,8 +28,9 @@
29#include "gateway_client.h" 28#include "gateway_client.h"
30#include "hard-interface.h" 29#include "hard-interface.h"
31#include "send.h" 30#include "send.h"
31#include "bat_algo.h"
32 32
33void bat_ogm_init(struct hard_iface *hard_iface) 33static void bat_iv_ogm_init(struct hard_iface *hard_iface)
34{ 34{
35 struct batman_ogm_packet *batman_ogm_packet; 35 struct batman_ogm_packet *batman_ogm_packet;
36 36
@@ -38,25 +38,25 @@ void bat_ogm_init(struct hard_iface *hard_iface)
38 hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC); 38 hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
39 39
40 batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; 40 batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
41 batman_ogm_packet->packet_type = BAT_OGM; 41 batman_ogm_packet->header.packet_type = BAT_OGM;
42 batman_ogm_packet->version = COMPAT_VERSION; 42 batman_ogm_packet->header.version = COMPAT_VERSION;
43 batman_ogm_packet->header.ttl = 2;
43 batman_ogm_packet->flags = NO_FLAGS; 44 batman_ogm_packet->flags = NO_FLAGS;
44 batman_ogm_packet->ttl = 2;
45 batman_ogm_packet->tq = TQ_MAX_VALUE; 45 batman_ogm_packet->tq = TQ_MAX_VALUE;
46 batman_ogm_packet->tt_num_changes = 0; 46 batman_ogm_packet->tt_num_changes = 0;
47 batman_ogm_packet->ttvn = 0; 47 batman_ogm_packet->ttvn = 0;
48} 48}
49 49
50void bat_ogm_init_primary(struct hard_iface *hard_iface) 50static void bat_iv_ogm_init_primary(struct hard_iface *hard_iface)
51{ 51{
52 struct batman_ogm_packet *batman_ogm_packet; 52 struct batman_ogm_packet *batman_ogm_packet;
53 53
54 batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; 54 batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
55 batman_ogm_packet->flags = PRIMARIES_FIRST_HOP; 55 batman_ogm_packet->flags = PRIMARIES_FIRST_HOP;
56 batman_ogm_packet->ttl = TTL; 56 batman_ogm_packet->header.ttl = TTL;
57} 57}
58 58
59void bat_ogm_update_mac(struct hard_iface *hard_iface) 59static void bat_iv_ogm_update_mac(struct hard_iface *hard_iface)
60{ 60{
61 struct batman_ogm_packet *batman_ogm_packet; 61 struct batman_ogm_packet *batman_ogm_packet;
62 62
@@ -68,7 +68,7 @@ void bat_ogm_update_mac(struct hard_iface *hard_iface)
68} 68}
69 69
70/* when do we schedule our own ogm to be sent */ 70/* when do we schedule our own ogm to be sent */
71static unsigned long bat_ogm_emit_send_time(const struct bat_priv *bat_priv) 71static unsigned long bat_iv_ogm_emit_send_time(const struct bat_priv *bat_priv)
72{ 72{
73 return jiffies + msecs_to_jiffies( 73 return jiffies + msecs_to_jiffies(
74 atomic_read(&bat_priv->orig_interval) - 74 atomic_read(&bat_priv->orig_interval) -
@@ -76,7 +76,7 @@ static unsigned long bat_ogm_emit_send_time(const struct bat_priv *bat_priv)
76} 76}
77 77
78/* when do we schedule a ogm packet to be sent */ 78/* when do we schedule a ogm packet to be sent */
79static unsigned long bat_ogm_fwd_send_time(void) 79static unsigned long bat_iv_ogm_fwd_send_time(void)
80{ 80{
81 return jiffies + msecs_to_jiffies(random32() % (JITTER/2)); 81 return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
82} 82}
@@ -89,8 +89,8 @@ static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
89} 89}
90 90
91/* is there another aggregated packet here? */ 91/* is there another aggregated packet here? */
92static int bat_ogm_aggr_packet(int buff_pos, int packet_len, 92static int bat_iv_ogm_aggr_packet(int buff_pos, int packet_len,
93 int tt_num_changes) 93 int tt_num_changes)
94{ 94{
95 int next_buff_pos = buff_pos + BATMAN_OGM_LEN + tt_len(tt_num_changes); 95 int next_buff_pos = buff_pos + BATMAN_OGM_LEN + tt_len(tt_num_changes);
96 96
@@ -99,8 +99,8 @@ static int bat_ogm_aggr_packet(int buff_pos, int packet_len,
99} 99}
100 100
101/* send a batman ogm to a given interface */ 101/* send a batman ogm to a given interface */
102static void bat_ogm_send_to_if(struct forw_packet *forw_packet, 102static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet,
103 struct hard_iface *hard_iface) 103 struct hard_iface *hard_iface)
104{ 104{
105 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 105 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
106 char *fwd_str; 106 char *fwd_str;
@@ -117,8 +117,8 @@ static void bat_ogm_send_to_if(struct forw_packet *forw_packet,
117 batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data; 117 batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;
118 118
119 /* adjust all flags and log packets */ 119 /* adjust all flags and log packets */
120 while (bat_ogm_aggr_packet(buff_pos, forw_packet->packet_len, 120 while (bat_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
121 batman_ogm_packet->tt_num_changes)) { 121 batman_ogm_packet->tt_num_changes)) {
122 122
123 /* we might have aggregated direct link packets with an 123 /* we might have aggregated direct link packets with an
124 * ordinary base packet */ 124 * ordinary base packet */
@@ -132,12 +132,11 @@ static void bat_ogm_send_to_if(struct forw_packet *forw_packet,
132 "Sending own" : 132 "Sending own" :
133 "Forwarding")); 133 "Forwarding"));
134 bat_dbg(DBG_BATMAN, bat_priv, 134 bat_dbg(DBG_BATMAN, bat_priv,
135 "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d," 135 "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
136 " IDF %s, ttvn %d) on interface %s [%pM]\n",
137 fwd_str, (packet_num > 0 ? "aggregated " : ""), 136 fwd_str, (packet_num > 0 ? "aggregated " : ""),
138 batman_ogm_packet->orig, 137 batman_ogm_packet->orig,
139 ntohl(batman_ogm_packet->seqno), 138 ntohl(batman_ogm_packet->seqno),
140 batman_ogm_packet->tq, batman_ogm_packet->ttl, 139 batman_ogm_packet->tq, batman_ogm_packet->header.ttl,
141 (batman_ogm_packet->flags & DIRECTLINK ? 140 (batman_ogm_packet->flags & DIRECTLINK ?
142 "on" : "off"), 141 "on" : "off"),
143 batman_ogm_packet->ttvn, hard_iface->net_dev->name, 142 batman_ogm_packet->ttvn, hard_iface->net_dev->name,
@@ -157,7 +156,7 @@ static void bat_ogm_send_to_if(struct forw_packet *forw_packet,
157} 156}
158 157
159/* send a batman ogm packet */ 158/* send a batman ogm packet */
160void bat_ogm_emit(struct forw_packet *forw_packet) 159static void bat_iv_ogm_emit(struct forw_packet *forw_packet)
161{ 160{
162 struct hard_iface *hard_iface; 161 struct hard_iface *hard_iface;
163 struct net_device *soft_iface; 162 struct net_device *soft_iface;
@@ -171,8 +170,7 @@ void bat_ogm_emit(struct forw_packet *forw_packet)
171 directlink = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0); 170 directlink = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
172 171
173 if (!forw_packet->if_incoming) { 172 if (!forw_packet->if_incoming) {
174 pr_err("Error - can't forward packet: incoming iface not " 173 pr_err("Error - can't forward packet: incoming iface not specified\n");
175 "specified\n");
176 goto out; 174 goto out;
177 } 175 }
178 176
@@ -188,17 +186,16 @@ void bat_ogm_emit(struct forw_packet *forw_packet)
188 186
189 /* multihomed peer assumed */ 187 /* multihomed peer assumed */
190 /* non-primary OGMs are only broadcasted on their interface */ 188 /* non-primary OGMs are only broadcasted on their interface */
191 if ((directlink && (batman_ogm_packet->ttl == 1)) || 189 if ((directlink && (batman_ogm_packet->header.ttl == 1)) ||
192 (forw_packet->own && (forw_packet->if_incoming != primary_if))) { 190 (forw_packet->own && (forw_packet->if_incoming != primary_if))) {
193 191
194 /* FIXME: what about aggregated packets ? */ 192 /* FIXME: what about aggregated packets ? */
195 bat_dbg(DBG_BATMAN, bat_priv, 193 bat_dbg(DBG_BATMAN, bat_priv,
196 "%s packet (originator %pM, seqno %d, TTL %d) " 194 "%s packet (originator %pM, seqno %d, TTL %d) on interface %s [%pM]\n",
197 "on interface %s [%pM]\n",
198 (forw_packet->own ? "Sending own" : "Forwarding"), 195 (forw_packet->own ? "Sending own" : "Forwarding"),
199 batman_ogm_packet->orig, 196 batman_ogm_packet->orig,
200 ntohl(batman_ogm_packet->seqno), 197 ntohl(batman_ogm_packet->seqno),
201 batman_ogm_packet->ttl, 198 batman_ogm_packet->header.ttl,
202 forw_packet->if_incoming->net_dev->name, 199 forw_packet->if_incoming->net_dev->name,
203 forw_packet->if_incoming->net_dev->dev_addr); 200 forw_packet->if_incoming->net_dev->dev_addr);
204 201
@@ -216,7 +213,7 @@ void bat_ogm_emit(struct forw_packet *forw_packet)
216 if (hard_iface->soft_iface != soft_iface) 213 if (hard_iface->soft_iface != soft_iface)
217 continue; 214 continue;
218 215
219 bat_ogm_send_to_if(forw_packet, hard_iface); 216 bat_iv_ogm_send_to_if(forw_packet, hard_iface);
220 } 217 }
221 rcu_read_unlock(); 218 rcu_read_unlock();
222 219
@@ -226,13 +223,13 @@ out:
226} 223}
227 224
228/* return true if new_packet can be aggregated with forw_packet */ 225/* return true if new_packet can be aggregated with forw_packet */
229static bool bat_ogm_can_aggregate(const struct batman_ogm_packet 226static bool bat_iv_ogm_can_aggregate(const struct batman_ogm_packet
230 *new_batman_ogm_packet, 227 *new_batman_ogm_packet,
231 struct bat_priv *bat_priv, 228 struct bat_priv *bat_priv,
232 int packet_len, unsigned long send_time, 229 int packet_len, unsigned long send_time,
233 bool directlink, 230 bool directlink,
234 const struct hard_iface *if_incoming, 231 const struct hard_iface *if_incoming,
235 const struct forw_packet *forw_packet) 232 const struct forw_packet *forw_packet)
236{ 233{
237 struct batman_ogm_packet *batman_ogm_packet; 234 struct batman_ogm_packet *batman_ogm_packet;
238 int aggregated_bytes = forw_packet->packet_len + packet_len; 235 int aggregated_bytes = forw_packet->packet_len + packet_len;
@@ -272,7 +269,7 @@ static bool bat_ogm_can_aggregate(const struct batman_ogm_packet
272 * are flooded through the net */ 269 * are flooded through the net */
273 if ((!directlink) && 270 if ((!directlink) &&
274 (!(batman_ogm_packet->flags & DIRECTLINK)) && 271 (!(batman_ogm_packet->flags & DIRECTLINK)) &&
275 (batman_ogm_packet->ttl != 1) && 272 (batman_ogm_packet->header.ttl != 1) &&
276 273
277 /* own packets originating non-primary 274 /* own packets originating non-primary
278 * interfaces leave only that interface */ 275 * interfaces leave only that interface */
@@ -285,7 +282,7 @@ static bool bat_ogm_can_aggregate(const struct batman_ogm_packet
285 /* if the incoming packet is sent via this one 282 /* if the incoming packet is sent via this one
286 * interface only - we still can aggregate */ 283 * interface only - we still can aggregate */
287 if ((directlink) && 284 if ((directlink) &&
288 (new_batman_ogm_packet->ttl == 1) && 285 (new_batman_ogm_packet->header.ttl == 1) &&
289 (forw_packet->if_incoming == if_incoming) && 286 (forw_packet->if_incoming == if_incoming) &&
290 287
291 /* packets from direct neighbors or 288 /* packets from direct neighbors or
@@ -306,11 +303,11 @@ out:
306} 303}
307 304
308/* create a new aggregated packet and add this packet to it */ 305/* create a new aggregated packet and add this packet to it */
309static void bat_ogm_aggregate_new(const unsigned char *packet_buff, 306static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
310 int packet_len, unsigned long send_time, 307 int packet_len, unsigned long send_time,
311 bool direct_link, 308 bool direct_link,
312 struct hard_iface *if_incoming, 309 struct hard_iface *if_incoming,
313 int own_packet) 310 int own_packet)
314{ 311{
315 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 312 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
316 struct forw_packet *forw_packet_aggr; 313 struct forw_packet *forw_packet_aggr;
@@ -385,9 +382,9 @@ out:
385} 382}
386 383
387/* aggregate a new packet into the existing ogm packet */ 384/* aggregate a new packet into the existing ogm packet */
388static void bat_ogm_aggregate(struct forw_packet *forw_packet_aggr, 385static void bat_iv_ogm_aggregate(struct forw_packet *forw_packet_aggr,
389 const unsigned char *packet_buff, 386 const unsigned char *packet_buff,
390 int packet_len, bool direct_link) 387 int packet_len, bool direct_link)
391{ 388{
392 unsigned char *skb_buff; 389 unsigned char *skb_buff;
393 390
@@ -402,10 +399,10 @@ static void bat_ogm_aggregate(struct forw_packet *forw_packet_aggr,
402 (1 << forw_packet_aggr->num_packets); 399 (1 << forw_packet_aggr->num_packets);
403} 400}
404 401
405static void bat_ogm_queue_add(struct bat_priv *bat_priv, 402static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv,
406 unsigned char *packet_buff, 403 unsigned char *packet_buff,
407 int packet_len, struct hard_iface *if_incoming, 404 int packet_len, struct hard_iface *if_incoming,
408 int own_packet, unsigned long send_time) 405 int own_packet, unsigned long send_time)
409{ 406{
410 /** 407 /**
411 * _aggr -> pointer to the packet we want to aggregate with 408 * _aggr -> pointer to the packet we want to aggregate with
@@ -425,11 +422,11 @@ static void bat_ogm_queue_add(struct bat_priv *bat_priv,
425 if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) { 422 if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
426 hlist_for_each_entry(forw_packet_pos, tmp_node, 423 hlist_for_each_entry(forw_packet_pos, tmp_node,
427 &bat_priv->forw_bat_list, list) { 424 &bat_priv->forw_bat_list, list) {
428 if (bat_ogm_can_aggregate(batman_ogm_packet, 425 if (bat_iv_ogm_can_aggregate(batman_ogm_packet,
429 bat_priv, packet_len, 426 bat_priv, packet_len,
430 send_time, direct_link, 427 send_time, direct_link,
431 if_incoming, 428 if_incoming,
432 forw_packet_pos)) { 429 forw_packet_pos)) {
433 forw_packet_aggr = forw_packet_pos; 430 forw_packet_aggr = forw_packet_pos;
434 break; 431 break;
435 } 432 }
@@ -451,27 +448,27 @@ static void bat_ogm_queue_add(struct bat_priv *bat_priv,
451 (atomic_read(&bat_priv->aggregated_ogms))) 448 (atomic_read(&bat_priv->aggregated_ogms)))
452 send_time += msecs_to_jiffies(MAX_AGGREGATION_MS); 449 send_time += msecs_to_jiffies(MAX_AGGREGATION_MS);
453 450
454 bat_ogm_aggregate_new(packet_buff, packet_len, 451 bat_iv_ogm_aggregate_new(packet_buff, packet_len,
455 send_time, direct_link, 452 send_time, direct_link,
456 if_incoming, own_packet); 453 if_incoming, own_packet);
457 } else { 454 } else {
458 bat_ogm_aggregate(forw_packet_aggr, packet_buff, packet_len, 455 bat_iv_ogm_aggregate(forw_packet_aggr, packet_buff,
459 direct_link); 456 packet_len, direct_link);
460 spin_unlock_bh(&bat_priv->forw_bat_list_lock); 457 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
461 } 458 }
462} 459}
463 460
464static void bat_ogm_forward(struct orig_node *orig_node, 461static void bat_iv_ogm_forward(struct orig_node *orig_node,
465 const struct ethhdr *ethhdr, 462 const struct ethhdr *ethhdr,
466 struct batman_ogm_packet *batman_ogm_packet, 463 struct batman_ogm_packet *batman_ogm_packet,
467 int directlink, struct hard_iface *if_incoming) 464 int directlink, struct hard_iface *if_incoming)
468{ 465{
469 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 466 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
470 struct neigh_node *router; 467 struct neigh_node *router;
471 uint8_t in_tq, in_ttl, tq_avg = 0; 468 uint8_t in_tq, in_ttl, tq_avg = 0;
472 uint8_t tt_num_changes; 469 uint8_t tt_num_changes;
473 470
474 if (batman_ogm_packet->ttl <= 1) { 471 if (batman_ogm_packet->header.ttl <= 1) {
475 bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n"); 472 bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
476 return; 473 return;
477 } 474 }
@@ -479,10 +476,10 @@ static void bat_ogm_forward(struct orig_node *orig_node,
479 router = orig_node_get_router(orig_node); 476 router = orig_node_get_router(orig_node);
480 477
481 in_tq = batman_ogm_packet->tq; 478 in_tq = batman_ogm_packet->tq;
482 in_ttl = batman_ogm_packet->ttl; 479 in_ttl = batman_ogm_packet->header.ttl;
483 tt_num_changes = batman_ogm_packet->tt_num_changes; 480 tt_num_changes = batman_ogm_packet->tt_num_changes;
484 481
485 batman_ogm_packet->ttl--; 482 batman_ogm_packet->header.ttl--;
486 memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN); 483 memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
487 484
488 /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast 485 /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
@@ -494,7 +491,8 @@ static void bat_ogm_forward(struct orig_node *orig_node,
494 batman_ogm_packet->tq = router->tq_avg; 491 batman_ogm_packet->tq = router->tq_avg;
495 492
496 if (router->last_ttl) 493 if (router->last_ttl)
497 batman_ogm_packet->ttl = router->last_ttl - 1; 494 batman_ogm_packet->header.ttl =
495 router->last_ttl - 1;
498 } 496 }
499 497
500 tq_avg = router->tq_avg; 498 tq_avg = router->tq_avg;
@@ -507,10 +505,9 @@ static void bat_ogm_forward(struct orig_node *orig_node,
507 batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv); 505 batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv);
508 506
509 bat_dbg(DBG_BATMAN, bat_priv, 507 bat_dbg(DBG_BATMAN, bat_priv,
510 "Forwarding packet: tq_orig: %i, tq_avg: %i, " 508 "Forwarding packet: tq_orig: %i, tq_avg: %i, tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
511 "tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
512 in_tq, tq_avg, batman_ogm_packet->tq, in_ttl - 1, 509 in_tq, tq_avg, batman_ogm_packet->tq, in_ttl - 1,
513 batman_ogm_packet->ttl); 510 batman_ogm_packet->header.ttl);
514 511
515 batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno); 512 batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno);
516 batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc); 513 batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc);
@@ -522,12 +519,13 @@ static void bat_ogm_forward(struct orig_node *orig_node,
522 else 519 else
523 batman_ogm_packet->flags &= ~DIRECTLINK; 520 batman_ogm_packet->flags &= ~DIRECTLINK;
524 521
525 bat_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet, 522 bat_iv_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet,
526 BATMAN_OGM_LEN + tt_len(tt_num_changes), 523 BATMAN_OGM_LEN + tt_len(tt_num_changes),
527 if_incoming, 0, bat_ogm_fwd_send_time()); 524 if_incoming, 0, bat_iv_ogm_fwd_send_time());
528} 525}
529 526
530void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes) 527static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
528 int tt_num_changes)
531{ 529{
532 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 530 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
533 struct batman_ogm_packet *batman_ogm_packet; 531 struct batman_ogm_packet *batman_ogm_packet;
@@ -564,21 +562,22 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
564 atomic_inc(&hard_iface->seqno); 562 atomic_inc(&hard_iface->seqno);
565 563
566 slide_own_bcast_window(hard_iface); 564 slide_own_bcast_window(hard_iface);
567 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff, 565 bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
568 hard_iface->packet_len, hard_iface, 1, 566 hard_iface->packet_len, hard_iface, 1,
569 bat_ogm_emit_send_time(bat_priv)); 567 bat_iv_ogm_emit_send_time(bat_priv));
570 568
571 if (primary_if) 569 if (primary_if)
572 hardif_free_ref(primary_if); 570 hardif_free_ref(primary_if);
573} 571}
574 572
575static void bat_ogm_orig_update(struct bat_priv *bat_priv, 573static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
576 struct orig_node *orig_node, 574 struct orig_node *orig_node,
577 const struct ethhdr *ethhdr, 575 const struct ethhdr *ethhdr,
578 const struct batman_ogm_packet 576 const struct batman_ogm_packet
579 *batman_ogm_packet, 577 *batman_ogm_packet,
580 struct hard_iface *if_incoming, 578 struct hard_iface *if_incoming,
581 const unsigned char *tt_buff, int is_duplicate) 579 const unsigned char *tt_buff,
580 int is_duplicate)
582{ 581{
583 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; 582 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
584 struct neigh_node *router = NULL; 583 struct neigh_node *router = NULL;
@@ -586,8 +585,8 @@ static void bat_ogm_orig_update(struct bat_priv *bat_priv,
586 struct hlist_node *node; 585 struct hlist_node *node;
587 uint8_t bcast_own_sum_orig, bcast_own_sum_neigh; 586 uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
588 587
589 bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): " 588 bat_dbg(DBG_BATMAN, bat_priv,
590 "Searching and updating originator entry of received packet\n"); 589 "update_originator(): Searching and updating originator entry of received packet\n");
591 590
592 rcu_read_lock(); 591 rcu_read_lock();
593 hlist_for_each_entry_rcu(tmp_neigh_node, node, 592 hlist_for_each_entry_rcu(tmp_neigh_node, node,
@@ -642,8 +641,8 @@ static void bat_ogm_orig_update(struct bat_priv *bat_priv,
642 spin_unlock_bh(&neigh_node->tq_lock); 641 spin_unlock_bh(&neigh_node->tq_lock);
643 642
644 if (!is_duplicate) { 643 if (!is_duplicate) {
645 orig_node->last_ttl = batman_ogm_packet->ttl; 644 orig_node->last_ttl = batman_ogm_packet->header.ttl;
646 neigh_node->last_ttl = batman_ogm_packet->ttl; 645 neigh_node->last_ttl = batman_ogm_packet->header.ttl;
647 } 646 }
648 647
649 bonding_candidate_add(orig_node, neigh_node); 648 bonding_candidate_add(orig_node, neigh_node);
@@ -683,7 +682,7 @@ update_tt:
683 /* I have to check for transtable changes only if the OGM has been 682 /* I have to check for transtable changes only if the OGM has been
684 * sent through a primary interface */ 683 * sent through a primary interface */
685 if (((batman_ogm_packet->orig != ethhdr->h_source) && 684 if (((batman_ogm_packet->orig != ethhdr->h_source) &&
686 (batman_ogm_packet->ttl > 2)) || 685 (batman_ogm_packet->header.ttl > 2)) ||
687 (batman_ogm_packet->flags & PRIMARIES_FIRST_HOP)) 686 (batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
688 tt_update_orig(bat_priv, orig_node, tt_buff, 687 tt_update_orig(bat_priv, orig_node, tt_buff,
689 batman_ogm_packet->tt_num_changes, 688 batman_ogm_packet->tt_num_changes,
@@ -713,10 +712,10 @@ out:
713 neigh_node_free_ref(router); 712 neigh_node_free_ref(router);
714} 713}
715 714
716static int bat_ogm_calc_tq(struct orig_node *orig_node, 715static int bat_iv_ogm_calc_tq(struct orig_node *orig_node,
717 struct orig_node *orig_neigh_node, 716 struct orig_node *orig_neigh_node,
718 struct batman_ogm_packet *batman_ogm_packet, 717 struct batman_ogm_packet *batman_ogm_packet,
719 struct hard_iface *if_incoming) 718 struct hard_iface *if_incoming)
720{ 719{
721 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 720 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
722 struct neigh_node *neigh_node = NULL, *tmp_neigh_node; 721 struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
@@ -780,8 +779,7 @@ static int bat_ogm_calc_tq(struct orig_node *orig_node,
780 * information */ 779 * information */
781 tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count; 780 tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;
782 781
783 /* 782 /* 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
784 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
785 * affect the nearly-symmetric links only a little, but 783 * affect the nearly-symmetric links only a little, but
786 * punishes asymmetric links more. This will give a value 784 * punishes asymmetric links more. This will give a value
787 * between 0 and TQ_MAX_VALUE 785 * between 0 and TQ_MAX_VALUE
@@ -799,10 +797,7 @@ static int bat_ogm_calc_tq(struct orig_node *orig_node,
799 (TQ_MAX_VALUE * TQ_MAX_VALUE)); 797 (TQ_MAX_VALUE * TQ_MAX_VALUE));
800 798
801 bat_dbg(DBG_BATMAN, bat_priv, 799 bat_dbg(DBG_BATMAN, bat_priv,
802 "bidirectional: " 800 "bidirectional: orig = %-15pM neigh = %-15pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, total tq: %3i\n",
803 "orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
804 "real recv = %2i, local tq: %3i, asym_penalty: %3i, "
805 "total tq: %3i\n",
806 orig_node->orig, orig_neigh_node->orig, total_count, 801 orig_node->orig, orig_neigh_node->orig, total_count,
807 neigh_rq_count, tq_own, tq_asym_penalty, batman_ogm_packet->tq); 802 neigh_rq_count, tq_own, tq_asym_penalty, batman_ogm_packet->tq);
808 803
@@ -825,10 +820,10 @@ out:
825 * -1 the packet is old and has been received while the seqno window 820 * -1 the packet is old and has been received while the seqno window
826 * was protected. Caller should drop it. 821 * was protected. Caller should drop it.
827 */ 822 */
828static int bat_ogm_update_seqnos(const struct ethhdr *ethhdr, 823static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
829 const struct batman_ogm_packet 824 const struct batman_ogm_packet
830 *batman_ogm_packet, 825 *batman_ogm_packet,
831 const struct hard_iface *if_incoming) 826 const struct hard_iface *if_incoming)
832{ 827{
833 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 828 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
834 struct orig_node *orig_node; 829 struct orig_node *orig_node;
@@ -890,10 +885,10 @@ out:
890 return ret; 885 return ret;
891} 886}
892 887
893static void bat_ogm_process(const struct ethhdr *ethhdr, 888static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
894 struct batman_ogm_packet *batman_ogm_packet, 889 struct batman_ogm_packet *batman_ogm_packet,
895 const unsigned char *tt_buff, 890 const unsigned char *tt_buff,
896 struct hard_iface *if_incoming) 891 struct hard_iface *if_incoming)
897{ 892{
898 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 893 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
899 struct hard_iface *hard_iface; 894 struct hard_iface *hard_iface;
@@ -918,7 +913,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
918 * packet in an aggregation. Here we expect that the padding 913 * packet in an aggregation. Here we expect that the padding
919 * is always zero (or not 0x01) 914 * is always zero (or not 0x01)
920 */ 915 */
921 if (batman_ogm_packet->packet_type != BAT_OGM) 916 if (batman_ogm_packet->header.packet_type != BAT_OGM)
922 return; 917 return;
923 918
924 /* could be changed by schedule_own_packet() */ 919 /* could be changed by schedule_own_packet() */
@@ -930,16 +925,14 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
930 batman_ogm_packet->orig) ? 1 : 0); 925 batman_ogm_packet->orig) ? 1 : 0);
931 926
932 bat_dbg(DBG_BATMAN, bat_priv, 927 bat_dbg(DBG_BATMAN, bat_priv,
933 "Received BATMAN packet via NB: %pM, IF: %s [%pM] " 928 "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
934 "(from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, "
935 "crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
936 ethhdr->h_source, if_incoming->net_dev->name, 929 ethhdr->h_source, if_incoming->net_dev->name,
937 if_incoming->net_dev->dev_addr, batman_ogm_packet->orig, 930 if_incoming->net_dev->dev_addr, batman_ogm_packet->orig,
938 batman_ogm_packet->prev_sender, batman_ogm_packet->seqno, 931 batman_ogm_packet->prev_sender, batman_ogm_packet->seqno,
939 batman_ogm_packet->ttvn, batman_ogm_packet->tt_crc, 932 batman_ogm_packet->ttvn, batman_ogm_packet->tt_crc,
940 batman_ogm_packet->tt_num_changes, batman_ogm_packet->tq, 933 batman_ogm_packet->tt_num_changes, batman_ogm_packet->tq,
941 batman_ogm_packet->ttl, batman_ogm_packet->version, 934 batman_ogm_packet->header.ttl,
942 has_directlink_flag); 935 batman_ogm_packet->header.version, has_directlink_flag);
943 936
944 rcu_read_lock(); 937 rcu_read_lock();
945 list_for_each_entry_rcu(hard_iface, &hardif_list, list) { 938 list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
@@ -966,25 +959,24 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
966 } 959 }
967 rcu_read_unlock(); 960 rcu_read_unlock();
968 961
969 if (batman_ogm_packet->version != COMPAT_VERSION) { 962 if (batman_ogm_packet->header.version != COMPAT_VERSION) {
970 bat_dbg(DBG_BATMAN, bat_priv, 963 bat_dbg(DBG_BATMAN, bat_priv,
971 "Drop packet: incompatible batman version (%i)\n", 964 "Drop packet: incompatible batman version (%i)\n",
972 batman_ogm_packet->version); 965 batman_ogm_packet->header.version);
973 return; 966 return;
974 } 967 }
975 968
976 if (is_my_addr) { 969 if (is_my_addr) {
977 bat_dbg(DBG_BATMAN, bat_priv, 970 bat_dbg(DBG_BATMAN, bat_priv,
978 "Drop packet: received my own broadcast (sender: %pM" 971 "Drop packet: received my own broadcast (sender: %pM)\n",
979 ")\n",
980 ethhdr->h_source); 972 ethhdr->h_source);
981 return; 973 return;
982 } 974 }
983 975
984 if (is_broadcast) { 976 if (is_broadcast) {
985 bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: " 977 bat_dbg(DBG_BATMAN, bat_priv,
986 "ignoring all packets with broadcast source addr (sender: %pM" 978 "Drop packet: ignoring all packets with broadcast source addr (sender: %pM)\n",
987 ")\n", ethhdr->h_source); 979 ethhdr->h_source);
988 return; 980 return;
989 } 981 }
990 982
@@ -1014,16 +1006,16 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
1014 spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock); 1006 spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
1015 } 1007 }
1016 1008
1017 bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: " 1009 bat_dbg(DBG_BATMAN, bat_priv,
1018 "originator packet from myself (via neighbor)\n"); 1010 "Drop packet: originator packet from myself (via neighbor)\n");
1019 orig_node_free_ref(orig_neigh_node); 1011 orig_node_free_ref(orig_neigh_node);
1020 return; 1012 return;
1021 } 1013 }
1022 1014
1023 if (is_my_oldorig) { 1015 if (is_my_oldorig) {
1024 bat_dbg(DBG_BATMAN, bat_priv, 1016 bat_dbg(DBG_BATMAN, bat_priv,
1025 "Drop packet: ignoring all rebroadcast echos (sender: " 1017 "Drop packet: ignoring all rebroadcast echos (sender: %pM)\n",
1026 "%pM)\n", ethhdr->h_source); 1018 ethhdr->h_source);
1027 return; 1019 return;
1028 } 1020 }
1029 1021
@@ -1031,13 +1023,13 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
1031 if (!orig_node) 1023 if (!orig_node)
1032 return; 1024 return;
1033 1025
1034 is_duplicate = bat_ogm_update_seqnos(ethhdr, batman_ogm_packet, 1026 is_duplicate = bat_iv_ogm_update_seqnos(ethhdr, batman_ogm_packet,
1035 if_incoming); 1027 if_incoming);
1036 1028
1037 if (is_duplicate == -1) { 1029 if (is_duplicate == -1) {
1038 bat_dbg(DBG_BATMAN, bat_priv, 1030 bat_dbg(DBG_BATMAN, bat_priv,
1039 "Drop packet: packet within seqno protection time " 1031 "Drop packet: packet within seqno protection time (sender: %pM)\n",
1040 "(sender: %pM)\n", ethhdr->h_source); 1032 ethhdr->h_source);
1041 goto out; 1033 goto out;
1042 } 1034 }
1043 1035
@@ -1058,8 +1050,8 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
1058 batman_ogm_packet->prev_sender)) && 1050 batman_ogm_packet->prev_sender)) &&
1059 (compare_eth(router->addr, router_router->addr))) { 1051 (compare_eth(router->addr, router_router->addr))) {
1060 bat_dbg(DBG_BATMAN, bat_priv, 1052 bat_dbg(DBG_BATMAN, bat_priv,
1061 "Drop packet: ignoring all rebroadcast packets that " 1053 "Drop packet: ignoring all rebroadcast packets that may make me loop (sender: %pM)\n",
1062 "may make me loop (sender: %pM)\n", ethhdr->h_source); 1054 ethhdr->h_source);
1063 goto out; 1055 goto out;
1064 } 1056 }
1065 1057
@@ -1081,8 +1073,8 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
1081 goto out_neigh; 1073 goto out_neigh;
1082 } 1074 }
1083 1075
1084 is_bidirectional = bat_ogm_calc_tq(orig_node, orig_neigh_node, 1076 is_bidirectional = bat_iv_ogm_calc_tq(orig_node, orig_neigh_node,
1085 batman_ogm_packet, if_incoming); 1077 batman_ogm_packet, if_incoming);
1086 1078
1087 bonding_save_primary(orig_node, orig_neigh_node, batman_ogm_packet); 1079 bonding_save_primary(orig_node, orig_neigh_node, batman_ogm_packet);
1088 1080
@@ -1091,20 +1083,20 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
1091 if (is_bidirectional && 1083 if (is_bidirectional &&
1092 (!is_duplicate || 1084 (!is_duplicate ||
1093 ((orig_node->last_real_seqno == batman_ogm_packet->seqno) && 1085 ((orig_node->last_real_seqno == batman_ogm_packet->seqno) &&
1094 (orig_node->last_ttl - 3 <= batman_ogm_packet->ttl)))) 1086 (orig_node->last_ttl - 3 <= batman_ogm_packet->header.ttl))))
1095 bat_ogm_orig_update(bat_priv, orig_node, ethhdr, 1087 bat_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
1096 batman_ogm_packet, if_incoming, 1088 batman_ogm_packet, if_incoming,
1097 tt_buff, is_duplicate); 1089 tt_buff, is_duplicate);
1098 1090
1099 /* is single hop (direct) neighbor */ 1091 /* is single hop (direct) neighbor */
1100 if (is_single_hop_neigh) { 1092 if (is_single_hop_neigh) {
1101 1093
1102 /* mark direct link on incoming interface */ 1094 /* mark direct link on incoming interface */
1103 bat_ogm_forward(orig_node, ethhdr, batman_ogm_packet, 1095 bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
1104 1, if_incoming); 1096 1, if_incoming);
1105 1097
1106 bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: " 1098 bat_dbg(DBG_BATMAN, bat_priv,
1107 "rebroadcast neighbor packet with direct link flag\n"); 1099 "Forwarding packet: rebroadcast neighbor packet with direct link flag\n");
1108 goto out_neigh; 1100 goto out_neigh;
1109 } 1101 }
1110 1102
@@ -1123,7 +1115,8 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
1123 1115
1124 bat_dbg(DBG_BATMAN, bat_priv, 1116 bat_dbg(DBG_BATMAN, bat_priv,
1125 "Forwarding packet: rebroadcast originator packet\n"); 1117 "Forwarding packet: rebroadcast originator packet\n");
1126 bat_ogm_forward(orig_node, ethhdr, batman_ogm_packet, 0, if_incoming); 1118 bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
1119 0, if_incoming);
1127 1120
1128out_neigh: 1121out_neigh:
1129 if ((orig_neigh_node) && (!is_single_hop_neigh)) 1122 if ((orig_neigh_node) && (!is_single_hop_neigh))
@@ -1139,13 +1132,17 @@ out:
1139 orig_node_free_ref(orig_node); 1132 orig_node_free_ref(orig_node);
1140} 1133}
1141 1134
1142void bat_ogm_receive(const struct ethhdr *ethhdr, unsigned char *packet_buff, 1135static void bat_iv_ogm_receive(struct hard_iface *if_incoming,
1143 int packet_len, struct hard_iface *if_incoming) 1136 struct sk_buff *skb)
1144{ 1137{
1145 struct batman_ogm_packet *batman_ogm_packet; 1138 struct batman_ogm_packet *batman_ogm_packet;
1146 int buff_pos = 0; 1139 struct ethhdr *ethhdr;
1147 unsigned char *tt_buff; 1140 int buff_pos = 0, packet_len;
1141 unsigned char *tt_buff, *packet_buff;
1148 1142
1143 packet_len = skb_headlen(skb);
1144 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1145 packet_buff = skb->data;
1149 batman_ogm_packet = (struct batman_ogm_packet *)packet_buff; 1146 batman_ogm_packet = (struct batman_ogm_packet *)packet_buff;
1150 1147
1151 /* unpack the aggregated packets and process them one by one */ 1148 /* unpack the aggregated packets and process them one by one */
@@ -1157,14 +1154,29 @@ void bat_ogm_receive(const struct ethhdr *ethhdr, unsigned char *packet_buff,
1157 1154
1158 tt_buff = packet_buff + buff_pos + BATMAN_OGM_LEN; 1155 tt_buff = packet_buff + buff_pos + BATMAN_OGM_LEN;
1159 1156
1160 bat_ogm_process(ethhdr, batman_ogm_packet, 1157 bat_iv_ogm_process(ethhdr, batman_ogm_packet,
1161 tt_buff, if_incoming); 1158 tt_buff, if_incoming);
1162 1159
1163 buff_pos += BATMAN_OGM_LEN + 1160 buff_pos += BATMAN_OGM_LEN +
1164 tt_len(batman_ogm_packet->tt_num_changes); 1161 tt_len(batman_ogm_packet->tt_num_changes);
1165 1162
1166 batman_ogm_packet = (struct batman_ogm_packet *) 1163 batman_ogm_packet = (struct batman_ogm_packet *)
1167 (packet_buff + buff_pos); 1164 (packet_buff + buff_pos);
1168 } while (bat_ogm_aggr_packet(buff_pos, packet_len, 1165 } while (bat_iv_ogm_aggr_packet(buff_pos, packet_len,
1169 batman_ogm_packet->tt_num_changes)); 1166 batman_ogm_packet->tt_num_changes));
1167}
1168
1169static struct bat_algo_ops batman_iv __read_mostly = {
1170 .name = "BATMAN IV",
1171 .bat_ogm_init = bat_iv_ogm_init,
1172 .bat_ogm_init_primary = bat_iv_ogm_init_primary,
1173 .bat_ogm_update_mac = bat_iv_ogm_update_mac,
1174 .bat_ogm_schedule = bat_iv_ogm_schedule,
1175 .bat_ogm_emit = bat_iv_ogm_emit,
1176 .bat_ogm_receive = bat_iv_ogm_receive,
1177};
1178
1179int __init bat_iv_init(void)
1180{
1181 return bat_algo_register(&batman_iv);
1170} 1182}
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index c25492f7d665..68ff759fc304 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
@@ -255,8 +255,8 @@ static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr,
255 buff[count - 1] = '\0'; 255 buff[count - 1] = '\0';
256 256
257 bat_info(net_dev, 257 bat_info(net_dev,
258 "Invalid parameter for 'vis mode' setting received: " 258 "Invalid parameter for 'vis mode' setting received: %s\n",
259 "%s\n", buff); 259 buff);
260 return -EINVAL; 260 return -EINVAL;
261 } 261 }
262 262
@@ -272,6 +272,13 @@ static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr,
272 return count; 272 return count;
273} 273}
274 274
275static ssize_t show_bat_algo(struct kobject *kobj, struct attribute *attr,
276 char *buff)
277{
278 struct bat_priv *bat_priv = kobj_to_batpriv(kobj);
279 return sprintf(buff, "%s\n", bat_priv->bat_algo_ops->name);
280}
281
275static void post_gw_deselect(struct net_device *net_dev) 282static void post_gw_deselect(struct net_device *net_dev)
276{ 283{
277 struct bat_priv *bat_priv = netdev_priv(net_dev); 284 struct bat_priv *bat_priv = netdev_priv(net_dev);
@@ -314,17 +321,17 @@ static ssize_t store_gw_mode(struct kobject *kobj, struct attribute *attr,
314 gw_mode_tmp = GW_MODE_OFF; 321 gw_mode_tmp = GW_MODE_OFF;
315 322
316 if (strncmp(buff, GW_MODE_CLIENT_NAME, 323 if (strncmp(buff, GW_MODE_CLIENT_NAME,
317 strlen(GW_MODE_CLIENT_NAME)) == 0) 324 strlen(GW_MODE_CLIENT_NAME)) == 0)
318 gw_mode_tmp = GW_MODE_CLIENT; 325 gw_mode_tmp = GW_MODE_CLIENT;
319 326
320 if (strncmp(buff, GW_MODE_SERVER_NAME, 327 if (strncmp(buff, GW_MODE_SERVER_NAME,
321 strlen(GW_MODE_SERVER_NAME)) == 0) 328 strlen(GW_MODE_SERVER_NAME)) == 0)
322 gw_mode_tmp = GW_MODE_SERVER; 329 gw_mode_tmp = GW_MODE_SERVER;
323 330
324 if (gw_mode_tmp < 0) { 331 if (gw_mode_tmp < 0) {
325 bat_info(net_dev, 332 bat_info(net_dev,
326 "Invalid parameter for 'gw mode' setting received: " 333 "Invalid parameter for 'gw mode' setting received: %s\n",
327 "%s\n", buff); 334 buff);
328 return -EINVAL; 335 return -EINVAL;
329 } 336 }
330 337
@@ -382,6 +389,7 @@ BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
382BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu); 389BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu);
383BAT_ATTR_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL); 390BAT_ATTR_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
384static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode); 391static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
392static BAT_ATTR(routing_algo, S_IRUGO, show_bat_algo, NULL);
385static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode); 393static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode);
386BAT_ATTR_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL); 394BAT_ATTR_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL);
387BAT_ATTR_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL); 395BAT_ATTR_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL);
@@ -399,6 +407,7 @@ static struct bat_attribute *mesh_attrs[] = {
399 &bat_attr_fragmentation, 407 &bat_attr_fragmentation,
400 &bat_attr_ap_isolation, 408 &bat_attr_ap_isolation,
401 &bat_attr_vis_mode, 409 &bat_attr_vis_mode,
410 &bat_attr_routing_algo,
402 &bat_attr_gw_mode, 411 &bat_attr_gw_mode,
403 &bat_attr_orig_interval, 412 &bat_attr_orig_interval,
404 &bat_attr_hop_penalty, 413 &bat_attr_hop_penalty,
@@ -493,8 +502,8 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
493 buff[count - 1] = '\0'; 502 buff[count - 1] = '\0';
494 503
495 if (strlen(buff) >= IFNAMSIZ) { 504 if (strlen(buff) >= IFNAMSIZ) {
496 pr_err("Invalid parameter for 'mesh_iface' setting received: " 505 pr_err("Invalid parameter for 'mesh_iface' setting received: interface name too long '%s'\n",
497 "interface name too long '%s'\n", buff); 506 buff);
498 hardif_free_ref(hard_iface); 507 hardif_free_ref(hard_iface);
499 return -EINVAL; 508 return -EINVAL;
500 } 509 }
@@ -668,8 +677,8 @@ out:
668 hardif_free_ref(primary_if); 677 hardif_free_ref(primary_if);
669 678
670 if (ret) 679 if (ret)
671 bat_dbg(DBG_BATMAN, bat_priv, "Impossible to send " 680 bat_dbg(DBG_BATMAN, bat_priv,
672 "uevent for (%s,%s,%s) event (err: %d)\n", 681 "Impossible to send uevent for (%s,%s,%s) event (err: %d)\n",
673 uev_type_str[type], uev_action_str[action], 682 uev_type_str[type], uev_action_str[action],
674 (action == UEV_DEL ? "NULL" : data), ret); 683 (action == UEV_DEL ? "NULL" : data), ret);
675 return ret; 684 return ret;
diff --git a/net/batman-adv/bat_sysfs.h b/net/batman-adv/bat_sysfs.h
index a3f75a723c56..fece77ae586e 100644
--- a/net/batman-adv/bat_sysfs.h
+++ b/net/batman-adv/bat_sysfs.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index 9bc63b209b3f..6d0aa216b232 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Simon Wunderlich, Marek Lindner 4 * Simon Wunderlich, Marek Lindner
5 * 5 *
@@ -154,8 +154,8 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
154 154
155 /* sequence number is much newer, probably missed a lot of packets */ 155 /* sequence number is much newer, probably missed a lot of packets */
156 156
157 if ((seq_num_diff >= TQ_LOCAL_WINDOW_SIZE) 157 if ((seq_num_diff >= TQ_LOCAL_WINDOW_SIZE) &&
158 && (seq_num_diff < EXPECTED_SEQNO_RANGE)) { 158 (seq_num_diff < EXPECTED_SEQNO_RANGE)) {
159 bat_dbg(DBG_BATMAN, bat_priv, 159 bat_dbg(DBG_BATMAN, bat_priv,
160 "We missed a lot of packets (%i) !\n", 160 "We missed a lot of packets (%i) !\n",
161 seq_num_diff - 1); 161 seq_num_diff - 1);
@@ -170,8 +170,8 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
170 * packet should be dropped without calling this function if the 170 * packet should be dropped without calling this function if the
171 * seqno window is protected. */ 171 * seqno window is protected. */
172 172
173 if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) 173 if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) ||
174 || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) { 174 (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
175 175
176 bat_dbg(DBG_BATMAN, bat_priv, 176 bat_dbg(DBG_BATMAN, bat_priv,
177 "Other host probably restarted!\n"); 177 "Other host probably restarted!\n");
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index 9c04422aeb07..c6135728a680 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Simon Wunderlich, Marek Lindner 4 * Simon Wunderlich, Marek Lindner
5 * 5 *
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 24403a7350f7..6f9b9b78f77d 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
@@ -224,16 +224,13 @@ void gw_election(struct bat_priv *bat_priv)
224 } else if ((!curr_gw) && (next_gw)) { 224 } else if ((!curr_gw) && (next_gw)) {
225 bat_dbg(DBG_BATMAN, bat_priv, 225 bat_dbg(DBG_BATMAN, bat_priv,
226 "Adding route to gateway %pM (gw_flags: %i, tq: %i)\n", 226 "Adding route to gateway %pM (gw_flags: %i, tq: %i)\n",
227 next_gw->orig_node->orig, 227 next_gw->orig_node->orig, next_gw->orig_node->gw_flags,
228 next_gw->orig_node->gw_flags,
229 router->tq_avg); 228 router->tq_avg);
230 throw_uevent(bat_priv, UEV_GW, UEV_ADD, gw_addr); 229 throw_uevent(bat_priv, UEV_GW, UEV_ADD, gw_addr);
231 } else { 230 } else {
232 bat_dbg(DBG_BATMAN, bat_priv, 231 bat_dbg(DBG_BATMAN, bat_priv,
233 "Changing route to gateway %pM " 232 "Changing route to gateway %pM (gw_flags: %i, tq: %i)\n",
234 "(gw_flags: %i, tq: %i)\n", 233 next_gw->orig_node->orig, next_gw->orig_node->gw_flags,
235 next_gw->orig_node->orig,
236 next_gw->orig_node->gw_flags,
237 router->tq_avg); 234 router->tq_avg);
238 throw_uevent(bat_priv, UEV_GW, UEV_CHANGE, gw_addr); 235 throw_uevent(bat_priv, UEV_GW, UEV_CHANGE, gw_addr);
239 } 236 }
@@ -287,8 +284,7 @@ void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
287 goto out; 284 goto out;
288 285
289 bat_dbg(DBG_BATMAN, bat_priv, 286 bat_dbg(DBG_BATMAN, bat_priv,
290 "Restarting gateway selection: better gateway found (tq curr: " 287 "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n",
291 "%i, tq new: %i)\n",
292 gw_tq_avg, orig_tq_avg); 288 gw_tq_avg, orig_tq_avg);
293 289
294deselect: 290deselect:
@@ -352,8 +348,7 @@ void gw_node_update(struct bat_priv *bat_priv,
352 continue; 348 continue;
353 349
354 bat_dbg(DBG_BATMAN, bat_priv, 350 bat_dbg(DBG_BATMAN, bat_priv,
355 "Gateway class of originator %pM changed from " 351 "Gateway class of originator %pM changed from %i to %i\n",
356 "%i to %i\n",
357 orig_node->orig, gw_node->orig_node->gw_flags, 352 orig_node->orig, gw_node->orig_node->gw_flags,
358 new_gwflags); 353 new_gwflags);
359 354
@@ -396,7 +391,7 @@ void gw_node_purge(struct bat_priv *bat_priv)
396{ 391{
397 struct gw_node *gw_node, *curr_gw; 392 struct gw_node *gw_node, *curr_gw;
398 struct hlist_node *node, *node_tmp; 393 struct hlist_node *node, *node_tmp;
399 unsigned long timeout = 2 * PURGE_TIMEOUT * HZ; 394 unsigned long timeout = msecs_to_jiffies(2 * PURGE_TIMEOUT);
400 int do_deselect = 0; 395 int do_deselect = 0;
401 396
402 curr_gw = gw_get_selected_gw_node(bat_priv); 397 curr_gw = gw_get_selected_gw_node(bat_priv);
@@ -474,23 +469,23 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
474 469
475 primary_if = primary_if_get_selected(bat_priv); 470 primary_if = primary_if_get_selected(bat_priv);
476 if (!primary_if) { 471 if (!primary_if) {
477 ret = seq_printf(seq, "BATMAN mesh %s disabled - please " 472 ret = seq_printf(seq,
478 "specify interfaces to enable it\n", 473 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
479 net_dev->name); 474 net_dev->name);
480 goto out; 475 goto out;
481 } 476 }
482 477
483 if (primary_if->if_status != IF_ACTIVE) { 478 if (primary_if->if_status != IF_ACTIVE) {
484 ret = seq_printf(seq, "BATMAN mesh %s disabled - " 479 ret = seq_printf(seq,
485 "primary interface not active\n", 480 "BATMAN mesh %s disabled - primary interface not active\n",
486 net_dev->name); 481 net_dev->name);
487 goto out; 482 goto out;
488 } 483 }
489 484
490 seq_printf(seq, " %-12s (%s/%i) %17s [%10s]: gw_class ... " 485 seq_printf(seq,
491 "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n", 486 " %-12s (%s/%i) %17s [%10s]: gw_class ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
492 "Gateway", "#", TQ_MAX_VALUE, "Nexthop", 487 "Gateway", "#", TQ_MAX_VALUE, "Nexthop", "outgoingIF",
493 "outgoingIF", SOURCE_VERSION, primary_if->net_dev->name, 488 SOURCE_VERSION, primary_if->net_dev->name,
494 primary_if->net_dev->dev_addr, net_dev->name); 489 primary_if->net_dev->dev_addr, net_dev->name);
495 490
496 rcu_read_lock(); 491 rcu_read_lock();
@@ -629,7 +624,7 @@ bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
629 624
630 /* check for bootp port */ 625 /* check for bootp port */
631 if ((ntohs(ethhdr->h_proto) == ETH_P_IP) && 626 if ((ntohs(ethhdr->h_proto) == ETH_P_IP) &&
632 (ntohs(udphdr->dest) != 67)) 627 (ntohs(udphdr->dest) != 67))
633 return false; 628 return false;
634 629
635 if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) && 630 if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) &&
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index e1edba08eb1d..bf56a5aea10b 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index c4ac7b0a2a63..ca57ac7d73b2 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
@@ -93,7 +93,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
93 multi = 1024; 93 multi = 1024;
94 94
95 if ((strnicmp(tmp_ptr, "kbit", 4) == 0) || 95 if ((strnicmp(tmp_ptr, "kbit", 4) == 0) ||
96 (multi > 1)) 96 (multi > 1))
97 *tmp_ptr = '\0'; 97 *tmp_ptr = '\0';
98 } 98 }
99 99
@@ -118,15 +118,15 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
118 multi = 1024; 118 multi = 1024;
119 119
120 if ((strnicmp(tmp_ptr, "kbit", 4) == 0) || 120 if ((strnicmp(tmp_ptr, "kbit", 4) == 0) ||
121 (multi > 1)) 121 (multi > 1))
122 *tmp_ptr = '\0'; 122 *tmp_ptr = '\0';
123 } 123 }
124 124
125 ret = kstrtol(slash_ptr + 1, 10, &lup); 125 ret = kstrtol(slash_ptr + 1, 10, &lup);
126 if (ret) { 126 if (ret) {
127 bat_err(net_dev, 127 bat_err(net_dev,
128 "Upload speed of gateway mode invalid: " 128 "Upload speed of gateway mode invalid: %s\n",
129 "%s\n", slash_ptr + 1); 129 slash_ptr + 1);
130 return false; 130 return false;
131 } 131 }
132 132
@@ -163,8 +163,8 @@ ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count)
163 gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up); 163 gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up);
164 164
165 gw_deselect(bat_priv); 165 gw_deselect(bat_priv);
166 bat_info(net_dev, "Changing gateway bandwidth from: '%i' to: '%ld' " 166 bat_info(net_dev,
167 "(propagating: %d%s/%d%s)\n", 167 "Changing gateway bandwidth from: '%i' to: '%ld' (propagating: %d%s/%d%s)\n",
168 atomic_read(&bat_priv->gw_bandwidth), gw_bandwidth_tmp, 168 atomic_read(&bat_priv->gw_bandwidth), gw_bandwidth_tmp,
169 (down > 2048 ? down / 1024 : down), 169 (down > 2048 ? down / 1024 : down),
170 (down > 2048 ? "MBit" : "KBit"), 170 (down > 2048 ? "MBit" : "KBit"),
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index 55e527a489fe..b8fb11c4f927 100644
--- a/net/batman-adv/gateway_common.h
+++ b/net/batman-adv/gateway_common.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 7704df468e0b..377897701a85 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
@@ -28,7 +28,6 @@
28#include "bat_sysfs.h" 28#include "bat_sysfs.h"
29#include "originator.h" 29#include "originator.h"
30#include "hash.h" 30#include "hash.h"
31#include "bat_ogm.h"
32 31
33#include <linux/if_arp.h> 32#include <linux/if_arp.h>
34 33
@@ -147,7 +146,7 @@ static void primary_if_select(struct bat_priv *bat_priv,
147 if (!new_hard_iface) 146 if (!new_hard_iface)
148 return; 147 return;
149 148
150 bat_ogm_init_primary(new_hard_iface); 149 bat_priv->bat_algo_ops->bat_ogm_init_primary(new_hard_iface);
151 primary_if_update_addr(bat_priv); 150 primary_if_update_addr(bat_priv);
152} 151}
153 152
@@ -176,11 +175,9 @@ static void check_known_mac_addr(const struct net_device *net_dev)
176 net_dev->dev_addr)) 175 net_dev->dev_addr))
177 continue; 176 continue;
178 177
179 pr_warning("The newly added mac address (%pM) already exists " 178 pr_warning("The newly added mac address (%pM) already exists on: %s\n",
180 "on: %s\n", net_dev->dev_addr, 179 net_dev->dev_addr, hard_iface->net_dev->name);
181 hard_iface->net_dev->name); 180 pr_warning("It is strongly recommended to keep mac addresses unique to avoid problems!\n");
182 pr_warning("It is strongly recommended to keep mac addresses "
183 "unique to avoid problems!\n");
184 } 181 }
185 rcu_read_unlock(); 182 rcu_read_unlock();
186} 183}
@@ -233,7 +230,7 @@ static void hardif_activate_interface(struct hard_iface *hard_iface)
233 230
234 bat_priv = netdev_priv(hard_iface->soft_iface); 231 bat_priv = netdev_priv(hard_iface->soft_iface);
235 232
236 bat_ogm_update_mac(hard_iface); 233 bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface);
237 hard_iface->if_status = IF_TO_BE_ACTIVATED; 234 hard_iface->if_status = IF_TO_BE_ACTIVATED;
238 235
239 /** 236 /**
@@ -281,6 +278,11 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
281 if (!atomic_inc_not_zero(&hard_iface->refcount)) 278 if (!atomic_inc_not_zero(&hard_iface->refcount))
282 goto out; 279 goto out;
283 280
281 /* hard-interface is part of a bridge */
282 if (hard_iface->net_dev->priv_flags & IFF_BRIDGE_PORT)
283 pr_err("You are about to enable batman-adv on '%s' which already is part of a bridge. Unless you know exactly what you are doing this is probably wrong and won't work the way you think it would.\n",
284 hard_iface->net_dev->name);
285
284 soft_iface = dev_get_by_name(&init_net, iface_name); 286 soft_iface = dev_get_by_name(&init_net, iface_name);
285 287
286 if (!soft_iface) { 288 if (!soft_iface) {
@@ -296,8 +298,7 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
296 } 298 }
297 299
298 if (!softif_is_valid(soft_iface)) { 300 if (!softif_is_valid(soft_iface)) {
299 pr_err("Can't create batman mesh interface %s: " 301 pr_err("Can't create batman mesh interface %s: already exists as regular interface\n",
300 "already exists as regular interface\n",
301 soft_iface->name); 302 soft_iface->name);
302 dev_put(soft_iface); 303 dev_put(soft_iface);
303 ret = -EINVAL; 304 ret = -EINVAL;
@@ -307,11 +308,12 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
307 hard_iface->soft_iface = soft_iface; 308 hard_iface->soft_iface = soft_iface;
308 bat_priv = netdev_priv(hard_iface->soft_iface); 309 bat_priv = netdev_priv(hard_iface->soft_iface);
309 310
310 bat_ogm_init(hard_iface); 311 bat_priv->bat_algo_ops->bat_ogm_init(hard_iface);
311 312
312 if (!hard_iface->packet_buff) { 313 if (!hard_iface->packet_buff) {
313 bat_err(hard_iface->soft_iface, "Can't add interface packet " 314 bat_err(hard_iface->soft_iface,
314 "(%s): out of memory\n", hard_iface->net_dev->name); 315 "Can't add interface packet (%s): out of memory\n",
316 hard_iface->net_dev->name);
315 ret = -ENOMEM; 317 ret = -ENOMEM;
316 goto err; 318 goto err;
317 } 319 }
@@ -334,29 +336,22 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
334 if (atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu < 336 if (atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
335 ETH_DATA_LEN + BAT_HEADER_LEN) 337 ETH_DATA_LEN + BAT_HEADER_LEN)
336 bat_info(hard_iface->soft_iface, 338 bat_info(hard_iface->soft_iface,
337 "The MTU of interface %s is too small (%i) to handle " 339 "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %zi would solve the problem.\n",
338 "the transport of batman-adv packets. Packets going " 340 hard_iface->net_dev->name, hard_iface->net_dev->mtu,
339 "over this interface will be fragmented on layer2 " 341 ETH_DATA_LEN + BAT_HEADER_LEN);
340 "which could impact the performance. Setting the MTU "
341 "to %zi would solve the problem.\n",
342 hard_iface->net_dev->name, hard_iface->net_dev->mtu,
343 ETH_DATA_LEN + BAT_HEADER_LEN);
344 342
345 if (!atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu < 343 if (!atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
346 ETH_DATA_LEN + BAT_HEADER_LEN) 344 ETH_DATA_LEN + BAT_HEADER_LEN)
347 bat_info(hard_iface->soft_iface, 345 bat_info(hard_iface->soft_iface,
348 "The MTU of interface %s is too small (%i) to handle " 346 "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %zi.\n",
349 "the transport of batman-adv packets. If you experience" 347 hard_iface->net_dev->name, hard_iface->net_dev->mtu,
350 " problems getting traffic through try increasing the " 348 ETH_DATA_LEN + BAT_HEADER_LEN);
351 "MTU to %zi.\n",
352 hard_iface->net_dev->name, hard_iface->net_dev->mtu,
353 ETH_DATA_LEN + BAT_HEADER_LEN);
354 349
355 if (hardif_is_iface_up(hard_iface)) 350 if (hardif_is_iface_up(hard_iface))
356 hardif_activate_interface(hard_iface); 351 hardif_activate_interface(hard_iface);
357 else 352 else
358 bat_err(hard_iface->soft_iface, "Not using interface %s " 353 bat_err(hard_iface->soft_iface,
359 "(retrying later): interface not active\n", 354 "Not using interface %s (retrying later): interface not active\n",
360 hard_iface->net_dev->name); 355 hard_iface->net_dev->name);
361 356
362 /* begin scheduling originator messages on that interface */ 357 /* begin scheduling originator messages on that interface */
@@ -527,9 +522,10 @@ static int hard_if_event(struct notifier_block *this,
527 goto hardif_put; 522 goto hardif_put;
528 523
529 check_known_mac_addr(hard_iface->net_dev); 524 check_known_mac_addr(hard_iface->net_dev);
530 bat_ogm_update_mac(hard_iface);
531 525
532 bat_priv = netdev_priv(hard_iface->soft_iface); 526 bat_priv = netdev_priv(hard_iface->soft_iface);
527 bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface);
528
533 primary_if = primary_if_get_selected(bat_priv); 529 primary_if = primary_if_get_selected(bat_priv);
534 if (!primary_if) 530 if (!primary_if)
535 goto hardif_put; 531 goto hardif_put;
@@ -572,8 +568,8 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
572 goto err_free; 568 goto err_free;
573 569
574 /* expect a valid ethernet header here. */ 570 /* expect a valid ethernet header here. */
575 if (unlikely(skb->mac_len != sizeof(struct ethhdr) 571 if (unlikely(skb->mac_len != sizeof(struct ethhdr) ||
576 || !skb_mac_header(skb))) 572 !skb_mac_header(skb)))
577 goto err_free; 573 goto err_free;
578 574
579 if (!hard_iface->soft_iface) 575 if (!hard_iface->soft_iface)
@@ -590,17 +586,17 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
590 586
591 batman_ogm_packet = (struct batman_ogm_packet *)skb->data; 587 batman_ogm_packet = (struct batman_ogm_packet *)skb->data;
592 588
593 if (batman_ogm_packet->version != COMPAT_VERSION) { 589 if (batman_ogm_packet->header.version != COMPAT_VERSION) {
594 bat_dbg(DBG_BATMAN, bat_priv, 590 bat_dbg(DBG_BATMAN, bat_priv,
595 "Drop packet: incompatible batman version (%i)\n", 591 "Drop packet: incompatible batman version (%i)\n",
596 batman_ogm_packet->version); 592 batman_ogm_packet->header.version);
597 goto err_free; 593 goto err_free;
598 } 594 }
599 595
600 /* all receive handlers return whether they received or reused 596 /* all receive handlers return whether they received or reused
601 * the supplied skb. if not, we have to free the skb. */ 597 * the supplied skb. if not, we have to free the skb. */
602 598
603 switch (batman_ogm_packet->packet_type) { 599 switch (batman_ogm_packet->header.packet_type) {
604 /* batman originator packet */ 600 /* batman originator packet */
605 case BAT_OGM: 601 case BAT_OGM:
606 ret = recv_bat_ogm_packet(skb, hard_iface); 602 ret = recv_bat_ogm_packet(skb, hard_iface);
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 67f78d1a63b4..e68c5655e616 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index d1da29da333b..117687bedf25 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Simon Wunderlich, Marek Lindner 4 * Simon Wunderlich, Marek Lindner
5 * 5 *
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 4768717f07f9..d4bd7862719b 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Simon Wunderlich, Marek Lindner 4 * Simon Wunderlich, Marek Lindner
5 * 5 *
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index d9c1e7bb7fbf..b87518edcef9 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
@@ -59,8 +59,7 @@ static int bat_socket_open(struct inode *inode, struct file *file)
59 } 59 }
60 60
61 if (i == ARRAY_SIZE(socket_client_hash)) { 61 if (i == ARRAY_SIZE(socket_client_hash)) {
62 pr_err("Error - can't add another packet client: " 62 pr_err("Error - can't add another packet client: maximum number of clients reached\n");
63 "maximum number of clients reached\n");
64 kfree(socket_client); 63 kfree(socket_client);
65 return -EXFULL; 64 return -EXFULL;
66 } 65 }
@@ -162,8 +161,7 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
162 161
163 if (len < sizeof(struct icmp_packet)) { 162 if (len < sizeof(struct icmp_packet)) {
164 bat_dbg(DBG_BATMAN, bat_priv, 163 bat_dbg(DBG_BATMAN, bat_priv,
165 "Error - can't send packet from char device: " 164 "Error - can't send packet from char device: invalid packet size\n");
166 "invalid packet size\n");
167 return -EINVAL; 165 return -EINVAL;
168 } 166 }
169 167
@@ -191,27 +189,25 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
191 goto free_skb; 189 goto free_skb;
192 } 190 }
193 191
194 if (icmp_packet->packet_type != BAT_ICMP) { 192 if (icmp_packet->header.packet_type != BAT_ICMP) {
195 bat_dbg(DBG_BATMAN, bat_priv, 193 bat_dbg(DBG_BATMAN, bat_priv,
196 "Error - can't send packet from char device: " 194 "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n");
197 "got bogus packet type (expected: BAT_ICMP)\n");
198 len = -EINVAL; 195 len = -EINVAL;
199 goto free_skb; 196 goto free_skb;
200 } 197 }
201 198
202 if (icmp_packet->msg_type != ECHO_REQUEST) { 199 if (icmp_packet->msg_type != ECHO_REQUEST) {
203 bat_dbg(DBG_BATMAN, bat_priv, 200 bat_dbg(DBG_BATMAN, bat_priv,
204 "Error - can't send packet from char device: " 201 "Error - can't send packet from char device: got bogus message type (expected: ECHO_REQUEST)\n");
205 "got bogus message type (expected: ECHO_REQUEST)\n");
206 len = -EINVAL; 202 len = -EINVAL;
207 goto free_skb; 203 goto free_skb;
208 } 204 }
209 205
210 icmp_packet->uid = socket_client->index; 206 icmp_packet->uid = socket_client->index;
211 207
212 if (icmp_packet->version != COMPAT_VERSION) { 208 if (icmp_packet->header.version != COMPAT_VERSION) {
213 icmp_packet->msg_type = PARAMETER_PROBLEM; 209 icmp_packet->msg_type = PARAMETER_PROBLEM;
214 icmp_packet->version = COMPAT_VERSION; 210 icmp_packet->header.version = COMPAT_VERSION;
215 bat_socket_add_packet(socket_client, icmp_packet, packet_len); 211 bat_socket_add_packet(socket_client, icmp_packet, packet_len);
216 goto free_skb; 212 goto free_skb;
217 } 213 }
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index 462b190fa101..380ed4c2443a 100644
--- a/net/batman-adv/icmp_socket.h
+++ b/net/batman-adv/icmp_socket.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index fb87bdc2ce9b..6d51caaf8cec 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
@@ -32,11 +32,14 @@
32#include "gateway_client.h" 32#include "gateway_client.h"
33#include "vis.h" 33#include "vis.h"
34#include "hash.h" 34#include "hash.h"
35#include "bat_algo.h"
35 36
36 37
37/* List manipulations on hardif_list have to be rtnl_lock()'ed, 38/* List manipulations on hardif_list have to be rtnl_lock()'ed,
38 * list traversals just rcu-locked */ 39 * list traversals just rcu-locked */
39struct list_head hardif_list; 40struct list_head hardif_list;
41char bat_routing_algo[20] = "BATMAN IV";
42static struct hlist_head bat_algo_list;
40 43
41unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 44unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
42 45
@@ -45,6 +48,9 @@ struct workqueue_struct *bat_event_workqueue;
45static int __init batman_init(void) 48static int __init batman_init(void)
46{ 49{
47 INIT_LIST_HEAD(&hardif_list); 50 INIT_LIST_HEAD(&hardif_list);
51 INIT_HLIST_HEAD(&bat_algo_list);
52
53 bat_iv_init();
48 54
49 /* the name should not be longer than 10 chars - see 55 /* the name should not be longer than 10 chars - see
50 * http://lwn.net/Articles/23634/ */ 56 * http://lwn.net/Articles/23634/ */
@@ -58,8 +64,8 @@ static int __init batman_init(void)
58 64
59 register_netdevice_notifier(&hard_if_notifier); 65 register_netdevice_notifier(&hard_if_notifier);
60 66
61 pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) " 67 pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
62 "loaded\n", SOURCE_VERSION, COMPAT_VERSION); 68 SOURCE_VERSION, COMPAT_VERSION);
63 69
64 return 0; 70 return 0;
65} 71}
@@ -170,9 +176,110 @@ int is_my_mac(const uint8_t *addr)
170 } 176 }
171 rcu_read_unlock(); 177 rcu_read_unlock();
172 return 0; 178 return 0;
179}
180
181static struct bat_algo_ops *bat_algo_get(char *name)
182{
183 struct bat_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
184 struct hlist_node *node;
185
186 hlist_for_each_entry(bat_algo_ops_tmp, node, &bat_algo_list, list) {
187 if (strcmp(bat_algo_ops_tmp->name, name) != 0)
188 continue;
189
190 bat_algo_ops = bat_algo_ops_tmp;
191 break;
192 }
193
194 return bat_algo_ops;
195}
196
197int bat_algo_register(struct bat_algo_ops *bat_algo_ops)
198{
199 struct bat_algo_ops *bat_algo_ops_tmp;
200 int ret = -1;
201
202 bat_algo_ops_tmp = bat_algo_get(bat_algo_ops->name);
203 if (bat_algo_ops_tmp) {
204 pr_info("Trying to register already registered routing algorithm: %s\n",
205 bat_algo_ops->name);
206 goto out;
207 }
208
209 /* all algorithms must implement all ops (for now) */
210 if (!bat_algo_ops->bat_ogm_init ||
211 !bat_algo_ops->bat_ogm_init_primary ||
212 !bat_algo_ops->bat_ogm_update_mac ||
213 !bat_algo_ops->bat_ogm_schedule ||
214 !bat_algo_ops->bat_ogm_emit ||
215 !bat_algo_ops->bat_ogm_receive) {
216 pr_info("Routing algo '%s' does not implement required ops\n",
217 bat_algo_ops->name);
218 goto out;
219 }
220
221 INIT_HLIST_NODE(&bat_algo_ops->list);
222 hlist_add_head(&bat_algo_ops->list, &bat_algo_list);
223 ret = 0;
224
225out:
226 return ret;
227}
228
229int bat_algo_select(struct bat_priv *bat_priv, char *name)
230{
231 struct bat_algo_ops *bat_algo_ops;
232 int ret = -1;
233
234 bat_algo_ops = bat_algo_get(name);
235 if (!bat_algo_ops)
236 goto out;
237
238 bat_priv->bat_algo_ops = bat_algo_ops;
239 ret = 0;
240
241out:
242 return ret;
243}
244
245int bat_algo_seq_print_text(struct seq_file *seq, void *offset)
246{
247 struct bat_algo_ops *bat_algo_ops;
248 struct hlist_node *node;
249
250 seq_printf(seq, "Available routing algorithms:\n");
251
252 hlist_for_each_entry(bat_algo_ops, node, &bat_algo_list, list) {
253 seq_printf(seq, "%s\n", bat_algo_ops->name);
254 }
255
256 return 0;
257}
258
259static int param_set_ra(const char *val, const struct kernel_param *kp)
260{
261 struct bat_algo_ops *bat_algo_ops;
173 262
263 bat_algo_ops = bat_algo_get((char *)val);
264 if (!bat_algo_ops) {
265 pr_err("Routing algorithm '%s' is not supported\n", val);
266 return -EINVAL;
267 }
268
269 return param_set_copystring(val, kp);
174} 270}
175 271
272static const struct kernel_param_ops param_ops_ra = {
273 .set = param_set_ra,
274 .get = param_get_string,
275};
276
277static struct kparam_string __param_string_ra = {
278 .maxlen = sizeof(bat_routing_algo),
279 .string = bat_routing_algo,
280};
281
282module_param_cb(routing_algo, &param_ops_ra, &__param_string_ra, 0644);
176module_init(batman_init); 283module_init(batman_init);
177module_exit(batman_exit); 284module_exit(batman_exit);
178 285
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 86354e06eb48..94fa1c2393a6 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
@@ -28,7 +28,7 @@
28#define DRIVER_DEVICE "batman-adv" 28#define DRIVER_DEVICE "batman-adv"
29 29
30#ifndef SOURCE_VERSION 30#ifndef SOURCE_VERSION
31#define SOURCE_VERSION "2012.0.0" 31#define SOURCE_VERSION "2012.1.0"
32#endif 32#endif
33 33
34/* B.A.T.M.A.N. parameters */ 34/* B.A.T.M.A.N. parameters */
@@ -41,13 +41,14 @@
41 41
42/* purge originators after time in seconds if no valid packet comes in 42/* purge originators after time in seconds if no valid packet comes in
43 * -> TODO: check influence on TQ_LOCAL_WINDOW_SIZE */ 43 * -> TODO: check influence on TQ_LOCAL_WINDOW_SIZE */
44#define PURGE_TIMEOUT 200 44#define PURGE_TIMEOUT 200000 /* 200 seconds */
45#define TT_LOCAL_TIMEOUT 3600 /* in seconds */ 45#define TT_LOCAL_TIMEOUT 3600000 /* in miliseconds */
46#define TT_CLIENT_ROAM_TIMEOUT 600 46#define TT_CLIENT_ROAM_TIMEOUT 600000 /* in miliseconds */
47/* sliding packet range of received originator messages in sequence numbers 47/* sliding packet range of received originator messages in sequence numbers
48 * (should be a multiple of our word size) */ 48 * (should be a multiple of our word size) */
49#define TQ_LOCAL_WINDOW_SIZE 64 49#define TQ_LOCAL_WINDOW_SIZE 64
50#define TT_REQUEST_TIMEOUT 3 /* seconds we have to keep pending tt_req */ 50#define TT_REQUEST_TIMEOUT 3000 /* miliseconds we have to keep
51 * pending tt_req */
51 52
52#define TQ_GLOBAL_WINDOW_SIZE 5 53#define TQ_GLOBAL_WINDOW_SIZE 5
53#define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1 54#define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1
@@ -56,8 +57,8 @@
56 57
57#define TT_OGM_APPEND_MAX 3 /* number of OGMs sent with the last tt diff */ 58#define TT_OGM_APPEND_MAX 3 /* number of OGMs sent with the last tt diff */
58 59
59#define ROAMING_MAX_TIME 20 /* Time in which a client can roam at most 60#define ROAMING_MAX_TIME 20000 /* Time in which a client can roam at most
60 * ROAMING_MAX_COUNT times */ 61 * ROAMING_MAX_COUNT times in miliseconds*/
61#define ROAMING_MAX_COUNT 5 62#define ROAMING_MAX_COUNT 5
62 63
63#define NO_FLAGS 0 64#define NO_FLAGS 0
@@ -106,9 +107,7 @@ enum uev_type {
106 107
107#define GW_THRESHOLD 50 108#define GW_THRESHOLD 50
108 109
109/* 110/* Debug Messages */
110 * Debug Messages
111 */
112#ifdef pr_fmt 111#ifdef pr_fmt
113#undef pr_fmt 112#undef pr_fmt
114#endif 113#endif
@@ -123,14 +122,7 @@ enum dbg_level {
123 DBG_ALL = 7 122 DBG_ALL = 7
124}; 123};
125 124
126 125/* Kernel headers */
127/*
128 * Vis
129 */
130
131/*
132 * Kernel headers
133 */
134 126
135#include <linux/mutex.h> /* mutex */ 127#include <linux/mutex.h> /* mutex */
136#include <linux/module.h> /* needed by all modules */ 128#include <linux/module.h> /* needed by all modules */
@@ -147,6 +139,7 @@ enum dbg_level {
147#include <linux/seq_file.h> 139#include <linux/seq_file.h>
148#include "types.h" 140#include "types.h"
149 141
142extern char bat_routing_algo[];
150extern struct list_head hardif_list; 143extern struct list_head hardif_list;
151 144
152extern unsigned char broadcast_addr[]; 145extern unsigned char broadcast_addr[];
@@ -157,6 +150,9 @@ void mesh_free(struct net_device *soft_iface);
157void inc_module_count(void); 150void inc_module_count(void);
158void dec_module_count(void); 151void dec_module_count(void);
159int is_my_mac(const uint8_t *addr); 152int is_my_mac(const uint8_t *addr);
153int bat_algo_register(struct bat_algo_ops *bat_algo_ops);
154int bat_algo_select(struct bat_priv *bat_priv, char *name);
155int bat_algo_seq_print_text(struct seq_file *seq, void *offset);
160 156
161#ifdef CONFIG_BATMAN_ADV_DEBUG 157#ifdef CONFIG_BATMAN_ADV_DEBUG
162int debug_log(struct bat_priv *bat_priv, const char *fmt, ...) __printf(2, 3); 158int debug_log(struct bat_priv *bat_priv, const char *fmt, ...) __printf(2, 3);
@@ -202,6 +198,17 @@ static inline int compare_eth(const void *data1, const void *data2)
202 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); 198 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
203} 199}
204 200
201/**
202 * has_timed_out - compares current time (jiffies) and timestamp + timeout
203 * @timestamp: base value to compare with (in jiffies)
204 * @timeout: added to base value before comparing (in milliseconds)
205 *
206 * Returns true if current time is after timestamp + timeout
207 */
208static inline bool has_timed_out(unsigned long timestamp, unsigned int timeout)
209{
210 return time_is_before_jiffies(timestamp + msecs_to_jiffies(timeout));
211}
205 212
206#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0) 213#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
207 214
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 0bc2045a2f2e..43c0a4f1399e 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
@@ -143,7 +143,7 @@ static void orig_node_free_rcu(struct rcu_head *rcu)
143 143
144 frag_list_free(&orig_node->frag_list); 144 frag_list_free(&orig_node->frag_list);
145 tt_global_del_orig(orig_node->bat_priv, orig_node, 145 tt_global_del_orig(orig_node->bat_priv, orig_node,
146 "originator timed out"); 146 "originator timed out");
147 147
148 kfree(orig_node->tt_buff); 148 kfree(orig_node->tt_buff);
149 kfree(orig_node->bcast_own); 149 kfree(orig_node->bcast_own);
@@ -219,6 +219,7 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
219 /* extra reference for return */ 219 /* extra reference for return */
220 atomic_set(&orig_node->refcount, 2); 220 atomic_set(&orig_node->refcount, 2);
221 221
222 orig_node->tt_initialised = false;
222 orig_node->tt_poss_change = false; 223 orig_node->tt_poss_change = false;
223 orig_node->bat_priv = bat_priv; 224 orig_node->bat_priv = bat_priv;
224 memcpy(orig_node->orig, addr, ETH_ALEN); 225 memcpy(orig_node->orig, addr, ETH_ALEN);
@@ -281,8 +282,7 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
281 hlist_for_each_entry_safe(neigh_node, node, node_tmp, 282 hlist_for_each_entry_safe(neigh_node, node, node_tmp,
282 &orig_node->neigh_list, list) { 283 &orig_node->neigh_list, list) {
283 284
284 if ((time_after(jiffies, 285 if ((has_timed_out(neigh_node->last_valid, PURGE_TIMEOUT)) ||
285 neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
286 (neigh_node->if_incoming->if_status == IF_INACTIVE) || 286 (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
287 (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) || 287 (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) ||
288 (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) { 288 (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {
@@ -294,14 +294,12 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
294 (neigh_node->if_incoming->if_status == 294 (neigh_node->if_incoming->if_status ==
295 IF_TO_BE_REMOVED)) 295 IF_TO_BE_REMOVED))
296 bat_dbg(DBG_BATMAN, bat_priv, 296 bat_dbg(DBG_BATMAN, bat_priv,
297 "neighbor purge: originator %pM, " 297 "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
298 "neighbor: %pM, iface: %s\n",
299 orig_node->orig, neigh_node->addr, 298 orig_node->orig, neigh_node->addr,
300 neigh_node->if_incoming->net_dev->name); 299 neigh_node->if_incoming->net_dev->name);
301 else 300 else
302 bat_dbg(DBG_BATMAN, bat_priv, 301 bat_dbg(DBG_BATMAN, bat_priv,
303 "neighbor timeout: originator %pM, " 302 "neighbor timeout: originator %pM, neighbor: %pM, last_valid: %lu\n",
304 "neighbor: %pM, last_valid: %lu\n",
305 orig_node->orig, neigh_node->addr, 303 orig_node->orig, neigh_node->addr,
306 (neigh_node->last_valid / HZ)); 304 (neigh_node->last_valid / HZ));
307 305
@@ -326,18 +324,15 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
326{ 324{
327 struct neigh_node *best_neigh_node; 325 struct neigh_node *best_neigh_node;
328 326
329 if (time_after(jiffies, 327 if (has_timed_out(orig_node->last_valid, 2 * PURGE_TIMEOUT)) {
330 orig_node->last_valid + 2 * PURGE_TIMEOUT * HZ)) {
331
332 bat_dbg(DBG_BATMAN, bat_priv, 328 bat_dbg(DBG_BATMAN, bat_priv,
333 "Originator timeout: originator %pM, last_valid %lu\n", 329 "Originator timeout: originator %pM, last_valid %lu\n",
334 orig_node->orig, (orig_node->last_valid / HZ)); 330 orig_node->orig, (orig_node->last_valid / HZ));
335 return true; 331 return true;
336 } else { 332 } else {
337 if (purge_orig_neighbors(bat_priv, orig_node, 333 if (purge_orig_neighbors(bat_priv, orig_node,
338 &best_neigh_node)) { 334 &best_neigh_node))
339 update_route(bat_priv, orig_node, best_neigh_node); 335 update_route(bat_priv, orig_node, best_neigh_node);
340 }
341 } 336 }
342 337
343 return false; 338 return false;
@@ -371,8 +366,8 @@ static void _purge_orig(struct bat_priv *bat_priv)
371 continue; 366 continue;
372 } 367 }
373 368
374 if (time_after(jiffies, orig_node->last_frag_packet + 369 if (has_timed_out(orig_node->last_frag_packet,
375 msecs_to_jiffies(FRAG_TIMEOUT))) 370 FRAG_TIMEOUT))
376 frag_list_free(&orig_node->frag_list); 371 frag_list_free(&orig_node->frag_list);
377 } 372 }
378 spin_unlock_bh(list_lock); 373 spin_unlock_bh(list_lock);
@@ -419,15 +414,15 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
419 primary_if = primary_if_get_selected(bat_priv); 414 primary_if = primary_if_get_selected(bat_priv);
420 415
421 if (!primary_if) { 416 if (!primary_if) {
422 ret = seq_printf(seq, "BATMAN mesh %s disabled - " 417 ret = seq_printf(seq,
423 "please specify interfaces to enable it\n", 418 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
424 net_dev->name); 419 net_dev->name);
425 goto out; 420 goto out;
426 } 421 }
427 422
428 if (primary_if->if_status != IF_ACTIVE) { 423 if (primary_if->if_status != IF_ACTIVE) {
429 ret = seq_printf(seq, "BATMAN mesh %s " 424 ret = seq_printf(seq,
430 "disabled - primary interface not active\n", 425 "BATMAN mesh %s disabled - primary interface not active\n",
431 net_dev->name); 426 net_dev->name);
432 goto out; 427 goto out;
433 } 428 }
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 67765ffef731..3fe2eda85652 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 4d9e54c57a36..441f3db1bd91 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
@@ -90,10 +90,14 @@ enum tt_client_flags {
90 TT_CLIENT_PENDING = 1 << 10 90 TT_CLIENT_PENDING = 1 << 10
91}; 91};
92 92
93struct batman_ogm_packet { 93struct batman_header {
94 uint8_t packet_type; 94 uint8_t packet_type;
95 uint8_t version; /* batman version field */ 95 uint8_t version; /* batman version field */
96 uint8_t ttl; 96 uint8_t ttl;
97} __packed;
98
99struct batman_ogm_packet {
100 struct batman_header header;
97 uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */ 101 uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
98 uint32_t seqno; 102 uint32_t seqno;
99 uint8_t orig[6]; 103 uint8_t orig[6];
@@ -108,9 +112,7 @@ struct batman_ogm_packet {
108#define BATMAN_OGM_LEN sizeof(struct batman_ogm_packet) 112#define BATMAN_OGM_LEN sizeof(struct batman_ogm_packet)
109 113
110struct icmp_packet { 114struct icmp_packet {
111 uint8_t packet_type; 115 struct batman_header header;
112 uint8_t version; /* batman version field */
113 uint8_t ttl;
114 uint8_t msg_type; /* see ICMP message types above */ 116 uint8_t msg_type; /* see ICMP message types above */
115 uint8_t dst[6]; 117 uint8_t dst[6];
116 uint8_t orig[6]; 118 uint8_t orig[6];
@@ -124,9 +126,7 @@ struct icmp_packet {
124/* icmp_packet_rr must start with all fields from imcp_packet 126/* icmp_packet_rr must start with all fields from imcp_packet
125 * as this is assumed by code that handles ICMP packets */ 127 * as this is assumed by code that handles ICMP packets */
126struct icmp_packet_rr { 128struct icmp_packet_rr {
127 uint8_t packet_type; 129 struct batman_header header;
128 uint8_t version; /* batman version field */
129 uint8_t ttl;
130 uint8_t msg_type; /* see ICMP message types above */ 130 uint8_t msg_type; /* see ICMP message types above */
131 uint8_t dst[6]; 131 uint8_t dst[6];
132 uint8_t orig[6]; 132 uint8_t orig[6];
@@ -137,17 +137,13 @@ struct icmp_packet_rr {
137} __packed; 137} __packed;
138 138
139struct unicast_packet { 139struct unicast_packet {
140 uint8_t packet_type; 140 struct batman_header header;
141 uint8_t version; /* batman version field */
142 uint8_t ttl;
143 uint8_t ttvn; /* destination translation table version number */ 141 uint8_t ttvn; /* destination translation table version number */
144 uint8_t dest[6]; 142 uint8_t dest[6];
145} __packed; 143} __packed;
146 144
147struct unicast_frag_packet { 145struct unicast_frag_packet {
148 uint8_t packet_type; 146 struct batman_header header;
149 uint8_t version; /* batman version field */
150 uint8_t ttl;
151 uint8_t ttvn; /* destination translation table version number */ 147 uint8_t ttvn; /* destination translation table version number */
152 uint8_t dest[6]; 148 uint8_t dest[6];
153 uint8_t flags; 149 uint8_t flags;
@@ -157,18 +153,14 @@ struct unicast_frag_packet {
157} __packed; 153} __packed;
158 154
159struct bcast_packet { 155struct bcast_packet {
160 uint8_t packet_type; 156 struct batman_header header;
161 uint8_t version; /* batman version field */
162 uint8_t ttl;
163 uint8_t reserved; 157 uint8_t reserved;
164 uint32_t seqno; 158 uint32_t seqno;
165 uint8_t orig[6]; 159 uint8_t orig[6];
166} __packed; 160} __packed;
167 161
168struct vis_packet { 162struct vis_packet {
169 uint8_t packet_type; 163 struct batman_header header;
170 uint8_t version; /* batman version field */
171 uint8_t ttl; /* TTL */
172 uint8_t vis_type; /* which type of vis-participant sent this? */ 164 uint8_t vis_type; /* which type of vis-participant sent this? */
173 uint32_t seqno; /* sequence number */ 165 uint32_t seqno; /* sequence number */
174 uint8_t entries; /* number of entries behind this struct */ 166 uint8_t entries; /* number of entries behind this struct */
@@ -179,9 +171,7 @@ struct vis_packet {
179} __packed; 171} __packed;
180 172
181struct tt_query_packet { 173struct tt_query_packet {
182 uint8_t packet_type; 174 struct batman_header header;
183 uint8_t version; /* batman version field */
184 uint8_t ttl;
185 /* the flag field is a combination of: 175 /* the flag field is a combination of:
186 * - TT_REQUEST or TT_RESPONSE 176 * - TT_REQUEST or TT_RESPONSE
187 * - TT_FULL_TABLE */ 177 * - TT_FULL_TABLE */
@@ -202,9 +192,7 @@ struct tt_query_packet {
202} __packed; 192} __packed;
203 193
204struct roam_adv_packet { 194struct roam_adv_packet {
205 uint8_t packet_type; 195 struct batman_header header;
206 uint8_t version;
207 uint8_t ttl;
208 uint8_t reserved; 196 uint8_t reserved;
209 uint8_t dst[ETH_ALEN]; 197 uint8_t dst[ETH_ALEN];
210 uint8_t src[ETH_ALEN]; 198 uint8_t src[ETH_ALEN];
diff --git a/net/batman-adv/ring_buffer.c b/net/batman-adv/ring_buffer.c
index f1ccfa76ce8a..fd63951d118d 100644
--- a/net/batman-adv/ring_buffer.c
+++ b/net/batman-adv/ring_buffer.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
diff --git a/net/batman-adv/ring_buffer.h b/net/batman-adv/ring_buffer.h
index 7cdfe62b657c..8b58bd82767d 100644
--- a/net/batman-adv/ring_buffer.h
+++ b/net/batman-adv/ring_buffer.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 773e606f9702..7f8e15899417 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
@@ -29,7 +29,6 @@
29#include "originator.h" 29#include "originator.h"
30#include "vis.h" 30#include "vis.h"
31#include "unicast.h" 31#include "unicast.h"
32#include "bat_ogm.h"
33 32
34void slide_own_bcast_window(struct hard_iface *hard_iface) 33void slide_own_bcast_window(struct hard_iface *hard_iface)
35{ 34{
@@ -73,7 +72,7 @@ static void _update_route(struct bat_priv *bat_priv,
73 bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n", 72 bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
74 orig_node->orig); 73 orig_node->orig);
75 tt_global_del_orig(bat_priv, orig_node, 74 tt_global_del_orig(bat_priv, orig_node,
76 "Deleted route towards originator"); 75 "Deleted route towards originator");
77 76
78 /* route added */ 77 /* route added */
79 } else if ((!curr_router) && (neigh_node)) { 78 } else if ((!curr_router) && (neigh_node)) {
@@ -84,8 +83,7 @@ static void _update_route(struct bat_priv *bat_priv,
84 /* route changed */ 83 /* route changed */
85 } else if (neigh_node && curr_router) { 84 } else if (neigh_node && curr_router) {
86 bat_dbg(DBG_ROUTES, bat_priv, 85 bat_dbg(DBG_ROUTES, bat_priv,
87 "Changing route towards: %pM " 86 "Changing route towards: %pM (now via %pM - was via %pM)\n",
88 "(now via %pM - was via %pM)\n",
89 orig_node->orig, neigh_node->addr, 87 orig_node->orig, neigh_node->addr,
90 curr_router->addr); 88 curr_router->addr);
91 } 89 }
@@ -230,24 +228,25 @@ void bonding_save_primary(const struct orig_node *orig_node,
230int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff, 228int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff,
231 unsigned long *last_reset) 229 unsigned long *last_reset)
232{ 230{
233 if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) 231 if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) ||
234 || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) { 232 (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
235 if (time_after(jiffies, *last_reset + 233 if (has_timed_out(*last_reset, RESET_PROTECTION_MS)) {
236 msecs_to_jiffies(RESET_PROTECTION_MS))) {
237 234
238 *last_reset = jiffies; 235 *last_reset = jiffies;
239 bat_dbg(DBG_BATMAN, bat_priv, 236 bat_dbg(DBG_BATMAN, bat_priv,
240 "old packet received, start protection\n"); 237 "old packet received, start protection\n");
241 238
242 return 0; 239 return 0;
243 } else 240 } else {
244 return 1; 241 return 1;
242 }
245 } 243 }
246 return 0; 244 return 0;
247} 245}
248 246
249int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *hard_iface) 247int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
250{ 248{
249 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
251 struct ethhdr *ethhdr; 250 struct ethhdr *ethhdr;
252 251
253 /* drop packet if it has not necessary minimum size */ 252 /* drop packet if it has not necessary minimum size */
@@ -272,9 +271,7 @@ int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
272 if (skb_linearize(skb) < 0) 271 if (skb_linearize(skb) < 0)
273 return NET_RX_DROP; 272 return NET_RX_DROP;
274 273
275 ethhdr = (struct ethhdr *)skb_mac_header(skb); 274 bat_priv->bat_algo_ops->bat_ogm_receive(hard_iface, skb);
276
277 bat_ogm_receive(ethhdr, skb->data, skb_headlen(skb), hard_iface);
278 275
279 kfree_skb(skb); 276 kfree_skb(skb);
280 return NET_RX_SUCCESS; 277 return NET_RX_SUCCESS;
@@ -320,7 +317,7 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
320 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); 317 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
321 memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN); 318 memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
322 icmp_packet->msg_type = ECHO_REPLY; 319 icmp_packet->msg_type = ECHO_REPLY;
323 icmp_packet->ttl = TTL; 320 icmp_packet->header.ttl = TTL;
324 321
325 send_skb_packet(skb, router->if_incoming, router->addr); 322 send_skb_packet(skb, router->if_incoming, router->addr);
326 ret = NET_RX_SUCCESS; 323 ret = NET_RX_SUCCESS;
@@ -348,9 +345,8 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
348 345
349 /* send TTL exceeded if packet is an echo request (traceroute) */ 346 /* send TTL exceeded if packet is an echo request (traceroute) */
350 if (icmp_packet->msg_type != ECHO_REQUEST) { 347 if (icmp_packet->msg_type != ECHO_REQUEST) {
351 pr_debug("Warning - can't forward icmp packet from %pM to " 348 pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
352 "%pM: ttl exceeded\n", icmp_packet->orig, 349 icmp_packet->orig, icmp_packet->dst);
353 icmp_packet->dst);
354 goto out; 350 goto out;
355 } 351 }
356 352
@@ -376,7 +372,7 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
376 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); 372 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
377 memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN); 373 memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
378 icmp_packet->msg_type = TTL_EXCEEDED; 374 icmp_packet->msg_type = TTL_EXCEEDED;
379 icmp_packet->ttl = TTL; 375 icmp_packet->header.ttl = TTL;
380 376
381 send_skb_packet(skb, router->if_incoming, router->addr); 377 send_skb_packet(skb, router->if_incoming, router->addr);
382 ret = NET_RX_SUCCESS; 378 ret = NET_RX_SUCCESS;
@@ -432,7 +428,7 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
432 if ((hdr_size == sizeof(struct icmp_packet_rr)) && 428 if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
433 (icmp_packet->rr_cur < BAT_RR_LEN)) { 429 (icmp_packet->rr_cur < BAT_RR_LEN)) {
434 memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]), 430 memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
435 ethhdr->h_dest, ETH_ALEN); 431 ethhdr->h_dest, ETH_ALEN);
436 icmp_packet->rr_cur++; 432 icmp_packet->rr_cur++;
437 } 433 }
438 434
@@ -441,7 +437,7 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
441 return recv_my_icmp_packet(bat_priv, skb, hdr_size); 437 return recv_my_icmp_packet(bat_priv, skb, hdr_size);
442 438
443 /* TTL exceeded */ 439 /* TTL exceeded */
444 if (icmp_packet->ttl < 2) 440 if (icmp_packet->header.ttl < 2)
445 return recv_icmp_ttl_exceeded(bat_priv, skb); 441 return recv_icmp_ttl_exceeded(bat_priv, skb);
446 442
447 /* get routing information */ 443 /* get routing information */
@@ -460,7 +456,7 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
460 icmp_packet = (struct icmp_packet_rr *)skb->data; 456 icmp_packet = (struct icmp_packet_rr *)skb->data;
461 457
462 /* decrement ttl */ 458 /* decrement ttl */
463 icmp_packet->ttl--; 459 icmp_packet->header.ttl--;
464 460
465 /* route it */ 461 /* route it */
466 send_skb_packet(skb, router->if_incoming, router->addr); 462 send_skb_packet(skb, router->if_incoming, router->addr);
@@ -677,9 +673,9 @@ int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
677 if (!orig_node) 673 if (!orig_node)
678 goto out; 674 goto out;
679 675
680 bat_dbg(DBG_TT, bat_priv, "Received ROAMING_ADV from %pM " 676 bat_dbg(DBG_TT, bat_priv,
681 "(client %pM)\n", roam_adv_packet->src, 677 "Received ROAMING_ADV from %pM (client %pM)\n",
682 roam_adv_packet->client); 678 roam_adv_packet->src, roam_adv_packet->client);
683 679
684 tt_global_add(bat_priv, orig_node, roam_adv_packet->client, 680 tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
685 atomic_read(&orig_node->last_ttvn) + 1, true, false); 681 atomic_read(&orig_node->last_ttvn) + 1, true, false);
@@ -815,10 +811,9 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
815 unicast_packet = (struct unicast_packet *)skb->data; 811 unicast_packet = (struct unicast_packet *)skb->data;
816 812
817 /* TTL exceeded */ 813 /* TTL exceeded */
818 if (unicast_packet->ttl < 2) { 814 if (unicast_packet->header.ttl < 2) {
819 pr_debug("Warning - can't forward unicast packet from %pM to " 815 pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n",
820 "%pM: ttl exceeded\n", ethhdr->h_source, 816 ethhdr->h_source, unicast_packet->dest);
821 unicast_packet->dest);
822 goto out; 817 goto out;
823 } 818 }
824 819
@@ -840,7 +835,7 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
840 835
841 unicast_packet = (struct unicast_packet *)skb->data; 836 unicast_packet = (struct unicast_packet *)skb->data;
842 837
843 if (unicast_packet->packet_type == BAT_UNICAST && 838 if (unicast_packet->header.packet_type == BAT_UNICAST &&
844 atomic_read(&bat_priv->fragmentation) && 839 atomic_read(&bat_priv->fragmentation) &&
845 skb->len > neigh_node->if_incoming->net_dev->mtu) { 840 skb->len > neigh_node->if_incoming->net_dev->mtu) {
846 ret = frag_send_skb(skb, bat_priv, 841 ret = frag_send_skb(skb, bat_priv,
@@ -848,7 +843,7 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
848 goto out; 843 goto out;
849 } 844 }
850 845
851 if (unicast_packet->packet_type == BAT_UNICAST_FRAG && 846 if (unicast_packet->header.packet_type == BAT_UNICAST_FRAG &&
852 frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) { 847 frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {
853 848
854 ret = frag_reassemble_skb(skb, bat_priv, &new_skb); 849 ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
@@ -867,7 +862,7 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
867 } 862 }
868 863
869 /* decrement ttl */ 864 /* decrement ttl */
870 unicast_packet->ttl--; 865 unicast_packet->header.ttl--;
871 866
872 /* route it */ 867 /* route it */
873 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 868 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
@@ -937,10 +932,10 @@ static int check_unicast_ttvn(struct bat_priv *bat_priv,
937 orig_node_free_ref(orig_node); 932 orig_node_free_ref(orig_node);
938 } 933 }
939 934
940 bat_dbg(DBG_ROUTES, bat_priv, "TTVN mismatch (old_ttvn %u " 935 bat_dbg(DBG_ROUTES, bat_priv,
941 "new_ttvn %u)! Rerouting unicast packet (for %pM) to " 936 "TTVN mismatch (old_ttvn %u new_ttvn %u)! Rerouting unicast packet (for %pM) to %pM\n",
942 "%pM\n", unicast_packet->ttvn, curr_ttvn, 937 unicast_packet->ttvn, curr_ttvn, ethhdr->h_dest,
943 ethhdr->h_dest, unicast_packet->dest); 938 unicast_packet->dest);
944 939
945 unicast_packet->ttvn = curr_ttvn; 940 unicast_packet->ttvn = curr_ttvn;
946 } 941 }
@@ -1041,7 +1036,7 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1041 if (is_my_mac(bcast_packet->orig)) 1036 if (is_my_mac(bcast_packet->orig))
1042 goto out; 1037 goto out;
1043 1038
1044 if (bcast_packet->ttl < 2) 1039 if (bcast_packet->header.ttl < 2)
1045 goto out; 1040 goto out;
1046 1041
1047 orig_node = orig_hash_find(bat_priv, bcast_packet->orig); 1042 orig_node = orig_hash_find(bat_priv, bcast_packet->orig);
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index 7aaee0fb0fdc..92ac100d83da 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 8a684eb738ad..af7a6741a685 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
@@ -28,7 +28,6 @@
28#include "vis.h" 28#include "vis.h"
29#include "gateway_common.h" 29#include "gateway_common.h"
30#include "originator.h" 30#include "originator.h"
31#include "bat_ogm.h"
32 31
33static void send_outstanding_bcast_packet(struct work_struct *work); 32static void send_outstanding_bcast_packet(struct work_struct *work);
34 33
@@ -46,8 +45,8 @@ int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
46 goto send_skb_err; 45 goto send_skb_err;
47 46
48 if (!(hard_iface->net_dev->flags & IFF_UP)) { 47 if (!(hard_iface->net_dev->flags & IFF_UP)) {
49 pr_warning("Interface %s is not up - can't send packet via " 48 pr_warning("Interface %s is not up - can't send packet via that interface!\n",
50 "that interface!\n", hard_iface->net_dev->name); 49 hard_iface->net_dev->name);
51 goto send_skb_err; 50 goto send_skb_err;
52 } 51 }
53 52
@@ -57,7 +56,7 @@ int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
57 56
58 skb_reset_mac_header(skb); 57 skb_reset_mac_header(skb);
59 58
60 ethhdr = (struct ethhdr *) skb_mac_header(skb); 59 ethhdr = (struct ethhdr *)skb_mac_header(skb);
61 memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN); 60 memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
62 memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN); 61 memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
63 ethhdr->h_proto = __constant_htons(ETH_P_BATMAN); 62 ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
@@ -168,7 +167,7 @@ void schedule_bat_ogm(struct hard_iface *hard_iface)
168 if (primary_if) 167 if (primary_if)
169 hardif_free_ref(primary_if); 168 hardif_free_ref(primary_if);
170 169
171 bat_ogm_schedule(hard_iface, tt_num_changes); 170 bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface, tt_num_changes);
172} 171}
173 172
174static void forw_packet_free(struct forw_packet *forw_packet) 173static void forw_packet_free(struct forw_packet *forw_packet)
@@ -234,7 +233,7 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv,
234 233
235 /* as we have a copy now, it is safe to decrease the TTL */ 234 /* as we have a copy now, it is safe to decrease the TTL */
236 bcast_packet = (struct bcast_packet *)newskb->data; 235 bcast_packet = (struct bcast_packet *)newskb->data;
237 bcast_packet->ttl--; 236 bcast_packet->header.ttl--;
238 237
239 skb_reset_mac_header(newskb); 238 skb_reset_mac_header(newskb);
240 239
@@ -318,7 +317,7 @@ void send_outstanding_bat_ogm_packet(struct work_struct *work)
318 if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING) 317 if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
319 goto out; 318 goto out;
320 319
321 bat_ogm_emit(forw_packet); 320 bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);
322 321
323 /** 322 /**
324 * we have to have at least one packet in the queue 323 * we have to have at least one packet in the queue
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index c8ca3ef7385b..824ef06f9b01 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 987c75a775f9..a5590f4193f1 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
@@ -252,8 +252,8 @@ static void softif_neigh_vid_select(struct bat_priv *bat_priv,
252 vid, curr_neigh->addr); 252 vid, curr_neigh->addr);
253 else if ((curr_neigh) && (new_neigh)) 253 else if ((curr_neigh) && (new_neigh))
254 bat_dbg(DBG_ROUTES, bat_priv, 254 bat_dbg(DBG_ROUTES, bat_priv,
255 "Changing mesh exit point on vid: %d from %pM " 255 "Changing mesh exit point on vid: %d from %pM to %pM.\n",
256 "to %pM.\n", vid, curr_neigh->addr, new_neigh->addr); 256 vid, curr_neigh->addr, new_neigh->addr);
257 else if ((!curr_neigh) && (new_neigh)) 257 else if ((!curr_neigh) && (new_neigh))
258 bat_dbg(DBG_ROUTES, bat_priv, 258 bat_dbg(DBG_ROUTES, bat_priv,
259 "Setting mesh exit point on vid: %d to %pM.\n", 259 "Setting mesh exit point on vid: %d to %pM.\n",
@@ -327,15 +327,15 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
327 327
328 primary_if = primary_if_get_selected(bat_priv); 328 primary_if = primary_if_get_selected(bat_priv);
329 if (!primary_if) { 329 if (!primary_if) {
330 ret = seq_printf(seq, "BATMAN mesh %s disabled - " 330 ret = seq_printf(seq,
331 "please specify interfaces to enable it\n", 331 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
332 net_dev->name); 332 net_dev->name);
333 goto out; 333 goto out;
334 } 334 }
335 335
336 if (primary_if->if_status != IF_ACTIVE) { 336 if (primary_if->if_status != IF_ACTIVE) {
337 ret = seq_printf(seq, "BATMAN mesh %s " 337 ret = seq_printf(seq,
338 "disabled - primary interface not active\n", 338 "BATMAN mesh %s disabled - primary interface not active\n",
339 net_dev->name); 339 net_dev->name);
340 goto out; 340 goto out;
341 } 341 }
@@ -396,15 +396,14 @@ void softif_neigh_purge(struct bat_priv *bat_priv)
396 hlist_for_each_entry_safe(softif_neigh, node_tmp, node_tmp2, 396 hlist_for_each_entry_safe(softif_neigh, node_tmp, node_tmp2,
397 &softif_neigh_vid->softif_neigh_list, 397 &softif_neigh_vid->softif_neigh_list,
398 list) { 398 list) {
399 if ((!time_after(jiffies, softif_neigh->last_seen + 399 if ((!has_timed_out(softif_neigh->last_seen,
400 msecs_to_jiffies(SOFTIF_NEIGH_TIMEOUT))) && 400 SOFTIF_NEIGH_TIMEOUT)) &&
401 (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)) 401 (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE))
402 continue; 402 continue;
403 403
404 if (curr_softif_neigh == softif_neigh) { 404 if (curr_softif_neigh == softif_neigh) {
405 bat_dbg(DBG_ROUTES, bat_priv, 405 bat_dbg(DBG_ROUTES, bat_priv,
406 "Current mesh exit point on vid: %d " 406 "Current mesh exit point on vid: %d '%pM' vanished.\n",
407 "'%pM' vanished.\n",
408 softif_neigh_vid->vid, 407 softif_neigh_vid->vid,
409 softif_neigh->addr); 408 softif_neigh->addr);
410 do_deselect = 1; 409 do_deselect = 1;
@@ -457,10 +456,10 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
457 batman_ogm_packet = (struct batman_ogm_packet *) 456 batman_ogm_packet = (struct batman_ogm_packet *)
458 (skb->data + ETH_HLEN); 457 (skb->data + ETH_HLEN);
459 458
460 if (batman_ogm_packet->version != COMPAT_VERSION) 459 if (batman_ogm_packet->header.version != COMPAT_VERSION)
461 goto out; 460 goto out;
462 461
463 if (batman_ogm_packet->packet_type != BAT_OGM) 462 if (batman_ogm_packet->header.packet_type != BAT_OGM)
464 goto out; 463 goto out;
465 464
466 if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP)) 465 if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
@@ -541,6 +540,7 @@ static int interface_set_mac_addr(struct net_device *dev, void *p)
541 } 540 }
542 541
543 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 542 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
543 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
544 return 0; 544 return 0;
545} 545}
546 546
@@ -632,11 +632,11 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
632 goto dropped; 632 goto dropped;
633 633
634 bcast_packet = (struct bcast_packet *)skb->data; 634 bcast_packet = (struct bcast_packet *)skb->data;
635 bcast_packet->version = COMPAT_VERSION; 635 bcast_packet->header.version = COMPAT_VERSION;
636 bcast_packet->ttl = TTL; 636 bcast_packet->header.ttl = TTL;
637 637
638 /* batman packet type: broadcast */ 638 /* batman packet type: broadcast */
639 bcast_packet->packet_type = BAT_BCAST; 639 bcast_packet->header.packet_type = BAT_BCAST;
640 640
641 /* hw address of first interface is the orig mac because only 641 /* hw address of first interface is the orig mac because only
642 * this mac is known throughout the mesh */ 642 * this mac is known throughout the mesh */
@@ -725,8 +725,8 @@ void interface_rx(struct net_device *soft_iface,
725 skb_push(skb, hdr_size); 725 skb_push(skb, hdr_size);
726 unicast_packet = (struct unicast_packet *)skb->data; 726 unicast_packet = (struct unicast_packet *)skb->data;
727 727
728 if ((unicast_packet->packet_type != BAT_UNICAST) && 728 if ((unicast_packet->header.packet_type != BAT_UNICAST) &&
729 (unicast_packet->packet_type != BAT_UNICAST_FRAG)) 729 (unicast_packet->header.packet_type != BAT_UNICAST_FRAG))
730 goto dropped; 730 goto dropped;
731 731
732 skb_reset_mac_header(skb); 732 skb_reset_mac_header(skb);
@@ -783,7 +783,6 @@ static const struct net_device_ops bat_netdev_ops = {
783static void interface_setup(struct net_device *dev) 783static void interface_setup(struct net_device *dev)
784{ 784{
785 struct bat_priv *priv = netdev_priv(dev); 785 struct bat_priv *priv = netdev_priv(dev);
786 char dev_addr[ETH_ALEN];
787 786
788 ether_setup(dev); 787 ether_setup(dev);
789 788
@@ -800,8 +799,7 @@ static void interface_setup(struct net_device *dev)
800 dev->hard_header_len = BAT_HEADER_LEN; 799 dev->hard_header_len = BAT_HEADER_LEN;
801 800
802 /* generate random address */ 801 /* generate random address */
803 random_ether_addr(dev_addr); 802 eth_hw_addr_random(dev);
804 memcpy(dev->dev_addr, dev_addr, ETH_ALEN);
805 803
806 SET_ETHTOOL_OPS(dev, &bat_ethtool_ops); 804 SET_ETHTOOL_OPS(dev, &bat_ethtool_ops);
807 805
@@ -855,6 +853,10 @@ struct net_device *softif_create(const char *name)
855 bat_priv->primary_if = NULL; 853 bat_priv->primary_if = NULL;
856 bat_priv->num_ifaces = 0; 854 bat_priv->num_ifaces = 0;
857 855
856 ret = bat_algo_select(bat_priv, bat_routing_algo);
857 if (ret < 0)
858 goto unreg_soft_iface;
859
858 ret = sysfs_add_meshif(soft_iface); 860 ret = sysfs_add_meshif(soft_iface);
859 if (ret < 0) 861 if (ret < 0)
860 goto unreg_soft_iface; 862 goto unreg_soft_iface;
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 001546fc96f1..756eab5b8dd4 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner 4 * Marek Lindner
5 * 5 *
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index ab8dea8b0b2e..1f8692127840 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
@@ -108,14 +108,6 @@ static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
108 108
109} 109}
110 110
111static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
112{
113 unsigned long deadline;
114 deadline = starting_time + msecs_to_jiffies(timeout);
115
116 return time_after(jiffies, deadline);
117}
118
119static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry) 111static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
120{ 112{
121 if (atomic_dec_and_test(&tt_local_entry->common.refcount)) 113 if (atomic_dec_and_test(&tt_local_entry->common.refcount))
@@ -218,6 +210,11 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
218 if (compare_eth(addr, soft_iface->dev_addr)) 210 if (compare_eth(addr, soft_iface->dev_addr))
219 tt_local_entry->common.flags |= TT_CLIENT_NOPURGE; 211 tt_local_entry->common.flags |= TT_CLIENT_NOPURGE;
220 212
213 /* The local entry has to be marked as NEW to avoid to send it in
214 * a full table response going out before the next ttvn increment
215 * (consistency check) */
216 tt_local_entry->common.flags |= TT_CLIENT_NEW;
217
221 hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig, 218 hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig,
222 &tt_local_entry->common, 219 &tt_local_entry->common,
223 &tt_local_entry->common.hash_entry); 220 &tt_local_entry->common.hash_entry);
@@ -230,11 +227,6 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
230 227
231 tt_local_event(bat_priv, addr, tt_local_entry->common.flags); 228 tt_local_event(bat_priv, addr, tt_local_entry->common.flags);
232 229
233 /* The local entry has to be marked as NEW to avoid to send it in
234 * a full table response going out before the next ttvn increment
235 * (consistency check) */
236 tt_local_entry->common.flags |= TT_CLIENT_NEW;
237
238 /* remove address from global hash if present */ 230 /* remove address from global hash if present */
239 tt_global_entry = tt_global_hash_find(bat_priv, addr); 231 tt_global_entry = tt_global_hash_find(bat_priv, addr);
240 232
@@ -269,7 +261,7 @@ int tt_changes_fill_buffer(struct bat_priv *bat_priv,
269 atomic_set(&bat_priv->tt_local_changes, 0); 261 atomic_set(&bat_priv->tt_local_changes, 0);
270 262
271 list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list, 263 list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
272 list) { 264 list) {
273 if (count < tot_changes) { 265 if (count < tot_changes) {
274 memcpy(buff + tt_len(count), 266 memcpy(buff + tt_len(count),
275 &entry->change, sizeof(struct tt_change)); 267 &entry->change, sizeof(struct tt_change));
@@ -317,21 +309,21 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
317 309
318 primary_if = primary_if_get_selected(bat_priv); 310 primary_if = primary_if_get_selected(bat_priv);
319 if (!primary_if) { 311 if (!primary_if) {
320 ret = seq_printf(seq, "BATMAN mesh %s disabled - " 312 ret = seq_printf(seq,
321 "please specify interfaces to enable it\n", 313 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
322 net_dev->name); 314 net_dev->name);
323 goto out; 315 goto out;
324 } 316 }
325 317
326 if (primary_if->if_status != IF_ACTIVE) { 318 if (primary_if->if_status != IF_ACTIVE) {
327 ret = seq_printf(seq, "BATMAN mesh %s disabled - " 319 ret = seq_printf(seq,
328 "primary interface not active\n", 320 "BATMAN mesh %s disabled - primary interface not active\n",
329 net_dev->name); 321 net_dev->name);
330 goto out; 322 goto out;
331 } 323 }
332 324
333 seq_printf(seq, "Locally retrieved addresses (from %s) " 325 seq_printf(seq,
334 "announced via TT (TTVN: %u):\n", 326 "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
335 net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn)); 327 net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
336 328
337 for (i = 0; i < hash->size; i++) { 329 for (i = 0; i < hash->size; i++) {
@@ -341,17 +333,17 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
341 hlist_for_each_entry_rcu(tt_common_entry, node, 333 hlist_for_each_entry_rcu(tt_common_entry, node,
342 head, hash_entry) { 334 head, hash_entry) {
343 seq_printf(seq, " * %pM [%c%c%c%c%c]\n", 335 seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
344 tt_common_entry->addr, 336 tt_common_entry->addr,
345 (tt_common_entry->flags & 337 (tt_common_entry->flags &
346 TT_CLIENT_ROAM ? 'R' : '.'), 338 TT_CLIENT_ROAM ? 'R' : '.'),
347 (tt_common_entry->flags & 339 (tt_common_entry->flags &
348 TT_CLIENT_NOPURGE ? 'P' : '.'), 340 TT_CLIENT_NOPURGE ? 'P' : '.'),
349 (tt_common_entry->flags & 341 (tt_common_entry->flags &
350 TT_CLIENT_NEW ? 'N' : '.'), 342 TT_CLIENT_NEW ? 'N' : '.'),
351 (tt_common_entry->flags & 343 (tt_common_entry->flags &
352 TT_CLIENT_PENDING ? 'X' : '.'), 344 TT_CLIENT_PENDING ? 'X' : '.'),
353 (tt_common_entry->flags & 345 (tt_common_entry->flags &
354 TT_CLIENT_WIFI ? 'W' : '.')); 346 TT_CLIENT_WIFI ? 'W' : '.'));
355 } 347 }
356 rcu_read_unlock(); 348 rcu_read_unlock();
357 } 349 }
@@ -363,7 +355,7 @@ out:
363 355
364static void tt_local_set_pending(struct bat_priv *bat_priv, 356static void tt_local_set_pending(struct bat_priv *bat_priv,
365 struct tt_local_entry *tt_local_entry, 357 struct tt_local_entry *tt_local_entry,
366 uint16_t flags) 358 uint16_t flags, const char *message)
367{ 359{
368 tt_local_event(bat_priv, tt_local_entry->common.addr, 360 tt_local_event(bat_priv, tt_local_entry->common.addr,
369 tt_local_entry->common.flags | flags); 361 tt_local_entry->common.flags | flags);
@@ -372,6 +364,10 @@ static void tt_local_set_pending(struct bat_priv *bat_priv,
372 * to be kept in the table in order to send it in a full table 364 * to be kept in the table in order to send it in a full table
373 * response issued before the net ttvn increment (consistency check) */ 365 * response issued before the net ttvn increment (consistency check) */
374 tt_local_entry->common.flags |= TT_CLIENT_PENDING; 366 tt_local_entry->common.flags |= TT_CLIENT_PENDING;
367
368 bat_dbg(DBG_TT, bat_priv,
369 "Local tt entry (%pM) pending to be removed: %s\n",
370 tt_local_entry->common.addr, message);
375} 371}
376 372
377void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr, 373void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
@@ -384,10 +380,7 @@ void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
384 goto out; 380 goto out;
385 381
386 tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL | 382 tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL |
387 (roaming ? TT_CLIENT_ROAM : NO_FLAGS)); 383 (roaming ? TT_CLIENT_ROAM : NO_FLAGS), message);
388
389 bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) pending to be removed: "
390 "%s\n", tt_local_entry->common.addr, message);
391out: 384out:
392 if (tt_local_entry) 385 if (tt_local_entry)
393 tt_local_entry_free_ref(tt_local_entry); 386 tt_local_entry_free_ref(tt_local_entry);
@@ -420,15 +413,12 @@ static void tt_local_purge(struct bat_priv *bat_priv)
420 if (tt_local_entry->common.flags & TT_CLIENT_PENDING) 413 if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
421 continue; 414 continue;
422 415
423 if (!is_out_of_time(tt_local_entry->last_seen, 416 if (!has_timed_out(tt_local_entry->last_seen,
424 TT_LOCAL_TIMEOUT * 1000)) 417 TT_LOCAL_TIMEOUT))
425 continue; 418 continue;
426 419
427 tt_local_set_pending(bat_priv, tt_local_entry, 420 tt_local_set_pending(bat_priv, tt_local_entry,
428 TT_CLIENT_DEL); 421 TT_CLIENT_DEL, "timed out");
429 bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) "
430 "pending to be removed: timed out\n",
431 tt_local_entry->common.addr);
432 } 422 }
433 spin_unlock_bh(list_lock); 423 spin_unlock_bh(list_lock);
434 } 424 }
@@ -585,15 +575,15 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
585 575
586 primary_if = primary_if_get_selected(bat_priv); 576 primary_if = primary_if_get_selected(bat_priv);
587 if (!primary_if) { 577 if (!primary_if) {
588 ret = seq_printf(seq, "BATMAN mesh %s disabled - please " 578 ret = seq_printf(seq,
589 "specify interfaces to enable it\n", 579 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
590 net_dev->name); 580 net_dev->name);
591 goto out; 581 goto out;
592 } 582 }
593 583
594 if (primary_if->if_status != IF_ACTIVE) { 584 if (primary_if->if_status != IF_ACTIVE) {
595 ret = seq_printf(seq, "BATMAN mesh %s disabled - " 585 ret = seq_printf(seq,
596 "primary interface not active\n", 586 "BATMAN mesh %s disabled - primary interface not active\n",
597 net_dev->name); 587 net_dev->name);
598 goto out; 588 goto out;
599 } 589 }
@@ -613,20 +603,18 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
613 tt_global_entry = container_of(tt_common_entry, 603 tt_global_entry = container_of(tt_common_entry,
614 struct tt_global_entry, 604 struct tt_global_entry,
615 common); 605 common);
616 seq_printf(seq, " * %pM (%3u) via %pM (%3u) " 606 seq_printf(seq,
617 "[%c%c%c]\n", 607 " * %pM (%3u) via %pM (%3u) [%c%c]\n",
618 tt_global_entry->common.addr, 608 tt_global_entry->common.addr,
619 tt_global_entry->ttvn, 609 tt_global_entry->ttvn,
620 tt_global_entry->orig_node->orig, 610 tt_global_entry->orig_node->orig,
621 (uint8_t) atomic_read( 611 (uint8_t) atomic_read(
622 &tt_global_entry->orig_node-> 612 &tt_global_entry->orig_node->
623 last_ttvn), 613 last_ttvn),
624 (tt_global_entry->common.flags & 614 (tt_global_entry->common.flags &
625 TT_CLIENT_ROAM ? 'R' : '.'), 615 TT_CLIENT_ROAM ? 'R' : '.'),
626 (tt_global_entry->common.flags & 616 (tt_global_entry->common.flags &
627 TT_CLIENT_PENDING ? 'X' : '.'), 617 TT_CLIENT_WIFI ? 'W' : '.'));
628 (tt_global_entry->common.flags &
629 TT_CLIENT_WIFI ? 'W' : '.'));
630 } 618 }
631 rcu_read_unlock(); 619 rcu_read_unlock();
632 } 620 }
@@ -665,29 +653,31 @@ void tt_global_del(struct bat_priv *bat_priv,
665 struct tt_local_entry *tt_local_entry = NULL; 653 struct tt_local_entry *tt_local_entry = NULL;
666 654
667 tt_global_entry = tt_global_hash_find(bat_priv, addr); 655 tt_global_entry = tt_global_hash_find(bat_priv, addr);
668 if (!tt_global_entry) 656 if (!tt_global_entry || tt_global_entry->orig_node != orig_node)
669 goto out; 657 goto out;
670 658
671 if (tt_global_entry->orig_node == orig_node) { 659 if (!roaming)
672 if (roaming) { 660 goto out_del;
673 /* if we are deleting a global entry due to a roam 661
674 * event, there are two possibilities: 662 /* if we are deleting a global entry due to a roam
675 * 1) the client roamed from node A to node B => we mark 663 * event, there are two possibilities:
676 * it with TT_CLIENT_ROAM, we start a timer and we 664 * 1) the client roamed from node A to node B => we mark
677 * wait for node B to claim it. In case of timeout 665 * it with TT_CLIENT_ROAM, we start a timer and we
678 * the entry is purged. 666 * wait for node B to claim it. In case of timeout
679 * 2) the client roamed to us => we can directly delete 667 * the entry is purged.
680 * the global entry, since it is useless now. */ 668 * 2) the client roamed to us => we can directly delete
681 tt_local_entry = tt_local_hash_find(bat_priv, 669 * the global entry, since it is useless now. */
682 tt_global_entry->common.addr); 670 tt_local_entry = tt_local_hash_find(bat_priv,
683 if (!tt_local_entry) { 671 tt_global_entry->common.addr);
684 tt_global_entry->common.flags |= TT_CLIENT_ROAM; 672 if (!tt_local_entry) {
685 tt_global_entry->roam_at = jiffies; 673 tt_global_entry->common.flags |= TT_CLIENT_ROAM;
686 goto out; 674 tt_global_entry->roam_at = jiffies;
687 } 675 goto out;
688 }
689 _tt_global_del(bat_priv, tt_global_entry, message);
690 } 676 }
677
678out_del:
679 _tt_global_del(bat_priv, tt_global_entry, message);
680
691out: 681out:
692 if (tt_global_entry) 682 if (tt_global_entry)
693 tt_global_entry_free_ref(tt_global_entry); 683 tt_global_entry_free_ref(tt_global_entry);
@@ -715,14 +705,13 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
715 705
716 spin_lock_bh(list_lock); 706 spin_lock_bh(list_lock);
717 hlist_for_each_entry_safe(tt_common_entry, node, safe, 707 hlist_for_each_entry_safe(tt_common_entry, node, safe,
718 head, hash_entry) { 708 head, hash_entry) {
719 tt_global_entry = container_of(tt_common_entry, 709 tt_global_entry = container_of(tt_common_entry,
720 struct tt_global_entry, 710 struct tt_global_entry,
721 common); 711 common);
722 if (tt_global_entry->orig_node == orig_node) { 712 if (tt_global_entry->orig_node == orig_node) {
723 bat_dbg(DBG_TT, bat_priv, 713 bat_dbg(DBG_TT, bat_priv,
724 "Deleting global tt entry %pM " 714 "Deleting global tt entry %pM (via %pM): %s\n",
725 "(via %pM): %s\n",
726 tt_global_entry->common.addr, 715 tt_global_entry->common.addr,
727 tt_global_entry->orig_node->orig, 716 tt_global_entry->orig_node->orig,
728 message); 717 message);
@@ -733,6 +722,7 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
733 spin_unlock_bh(list_lock); 722 spin_unlock_bh(list_lock);
734 } 723 }
735 atomic_set(&orig_node->tt_size, 0); 724 atomic_set(&orig_node->tt_size, 0);
725 orig_node->tt_initialised = false;
736} 726}
737 727
738static void tt_global_roam_purge(struct bat_priv *bat_priv) 728static void tt_global_roam_purge(struct bat_priv *bat_priv)
@@ -757,12 +747,12 @@ static void tt_global_roam_purge(struct bat_priv *bat_priv)
757 common); 747 common);
758 if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM)) 748 if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
759 continue; 749 continue;
760 if (!is_out_of_time(tt_global_entry->roam_at, 750 if (!has_timed_out(tt_global_entry->roam_at,
761 TT_CLIENT_ROAM_TIMEOUT * 1000)) 751 TT_CLIENT_ROAM_TIMEOUT))
762 continue; 752 continue;
763 753
764 bat_dbg(DBG_TT, bat_priv, "Deleting global " 754 bat_dbg(DBG_TT, bat_priv,
765 "tt entry (%pM): Roaming timeout\n", 755 "Deleting global tt entry (%pM): Roaming timeout\n",
766 tt_global_entry->common.addr); 756 tt_global_entry->common.addr);
767 atomic_dec(&tt_global_entry->orig_node->tt_size); 757 atomic_dec(&tt_global_entry->orig_node->tt_size);
768 hlist_del_rcu(node); 758 hlist_del_rcu(node);
@@ -846,11 +836,6 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
846 if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount)) 836 if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
847 goto out; 837 goto out;
848 838
849 /* A global client marked as PENDING has already moved from that
850 * originator */
851 if (tt_global_entry->common.flags & TT_CLIENT_PENDING)
852 goto out;
853
854 orig_node = tt_global_entry->orig_node; 839 orig_node = tt_global_entry->orig_node;
855 840
856out: 841out:
@@ -977,8 +962,7 @@ static void tt_req_purge(struct bat_priv *bat_priv)
977 962
978 spin_lock_bh(&bat_priv->tt_req_list_lock); 963 spin_lock_bh(&bat_priv->tt_req_list_lock);
979 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) { 964 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
980 if (is_out_of_time(node->issued_at, 965 if (has_timed_out(node->issued_at, TT_REQUEST_TIMEOUT)) {
981 TT_REQUEST_TIMEOUT * 1000)) {
982 list_del(&node->list); 966 list_del(&node->list);
983 kfree(node); 967 kfree(node);
984 } 968 }
@@ -996,8 +980,8 @@ static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
996 spin_lock_bh(&bat_priv->tt_req_list_lock); 980 spin_lock_bh(&bat_priv->tt_req_list_lock);
997 list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) { 981 list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
998 if (compare_eth(tt_req_node_tmp, orig_node) && 982 if (compare_eth(tt_req_node_tmp, orig_node) &&
999 !is_out_of_time(tt_req_node_tmp->issued_at, 983 !has_timed_out(tt_req_node_tmp->issued_at,
1000 TT_REQUEST_TIMEOUT * 1000)) 984 TT_REQUEST_TIMEOUT))
1001 goto unlock; 985 goto unlock;
1002 } 986 }
1003 987
@@ -1134,11 +1118,11 @@ static int send_tt_request(struct bat_priv *bat_priv,
1134 tt_request = (struct tt_query_packet *)skb_put(skb, 1118 tt_request = (struct tt_query_packet *)skb_put(skb,
1135 sizeof(struct tt_query_packet)); 1119 sizeof(struct tt_query_packet));
1136 1120
1137 tt_request->packet_type = BAT_TT_QUERY; 1121 tt_request->header.packet_type = BAT_TT_QUERY;
1138 tt_request->version = COMPAT_VERSION; 1122 tt_request->header.version = COMPAT_VERSION;
1139 memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN); 1123 memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1140 memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN); 1124 memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
1141 tt_request->ttl = TTL; 1125 tt_request->header.ttl = TTL;
1142 tt_request->ttvn = ttvn; 1126 tt_request->ttvn = ttvn;
1143 tt_request->tt_data = tt_crc; 1127 tt_request->tt_data = tt_crc;
1144 tt_request->flags = TT_REQUEST; 1128 tt_request->flags = TT_REQUEST;
@@ -1150,8 +1134,9 @@ static int send_tt_request(struct bat_priv *bat_priv,
1150 if (!neigh_node) 1134 if (!neigh_node)
1151 goto out; 1135 goto out;
1152 1136
1153 bat_dbg(DBG_TT, bat_priv, "Sending TT_REQUEST to %pM via %pM " 1137 bat_dbg(DBG_TT, bat_priv,
1154 "[%c]\n", dst_orig_node->orig, neigh_node->addr, 1138 "Sending TT_REQUEST to %pM via %pM [%c]\n",
1139 dst_orig_node->orig, neigh_node->addr,
1155 (full_table ? 'F' : '.')); 1140 (full_table ? 'F' : '.'));
1156 1141
1157 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 1142 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
@@ -1188,9 +1173,8 @@ static bool send_other_tt_response(struct bat_priv *bat_priv,
1188 struct tt_query_packet *tt_response; 1173 struct tt_query_packet *tt_response;
1189 1174
1190 bat_dbg(DBG_TT, bat_priv, 1175 bat_dbg(DBG_TT, bat_priv,
1191 "Received TT_REQUEST from %pM for " 1176 "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
1192 "ttvn: %u (%pM) [%c]\n", tt_request->src, 1177 tt_request->src, tt_request->ttvn, tt_request->dst,
1193 tt_request->ttvn, tt_request->dst,
1194 (tt_request->flags & TT_FULL_TABLE ? 'F' : '.')); 1178 (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
1195 1179
1196 /* Let's get the orig node of the REAL destination */ 1180 /* Let's get the orig node of the REAL destination */
@@ -1264,9 +1248,9 @@ static bool send_other_tt_response(struct bat_priv *bat_priv,
1264 tt_response = (struct tt_query_packet *)skb->data; 1248 tt_response = (struct tt_query_packet *)skb->data;
1265 } 1249 }
1266 1250
1267 tt_response->packet_type = BAT_TT_QUERY; 1251 tt_response->header.packet_type = BAT_TT_QUERY;
1268 tt_response->version = COMPAT_VERSION; 1252 tt_response->header.version = COMPAT_VERSION;
1269 tt_response->ttl = TTL; 1253 tt_response->header.ttl = TTL;
1270 memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN); 1254 memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
1271 memcpy(tt_response->dst, tt_request->src, ETH_ALEN); 1255 memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
1272 tt_response->flags = TT_RESPONSE; 1256 tt_response->flags = TT_RESPONSE;
@@ -1315,9 +1299,8 @@ static bool send_my_tt_response(struct bat_priv *bat_priv,
1315 struct tt_query_packet *tt_response; 1299 struct tt_query_packet *tt_response;
1316 1300
1317 bat_dbg(DBG_TT, bat_priv, 1301 bat_dbg(DBG_TT, bat_priv,
1318 "Received TT_REQUEST from %pM for " 1302 "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
1319 "ttvn: %u (me) [%c]\n", tt_request->src, 1303 tt_request->src, tt_request->ttvn,
1320 tt_request->ttvn,
1321 (tt_request->flags & TT_FULL_TABLE ? 'F' : '.')); 1304 (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
1322 1305
1323 1306
@@ -1381,9 +1364,9 @@ static bool send_my_tt_response(struct bat_priv *bat_priv,
1381 tt_response = (struct tt_query_packet *)skb->data; 1364 tt_response = (struct tt_query_packet *)skb->data;
1382 } 1365 }
1383 1366
1384 tt_response->packet_type = BAT_TT_QUERY; 1367 tt_response->header.packet_type = BAT_TT_QUERY;
1385 tt_response->version = COMPAT_VERSION; 1368 tt_response->header.version = COMPAT_VERSION;
1386 tt_response->ttl = TTL; 1369 tt_response->header.ttl = TTL;
1387 memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN); 1370 memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1388 memcpy(tt_response->dst, tt_request->src, ETH_ALEN); 1371 memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
1389 tt_response->flags = TT_RESPONSE; 1372 tt_response->flags = TT_RESPONSE;
@@ -1450,6 +1433,7 @@ static void _tt_update_changes(struct bat_priv *bat_priv,
1450 */ 1433 */
1451 return; 1434 return;
1452 } 1435 }
1436 orig_node->tt_initialised = true;
1453} 1437}
1454 1438
1455static void tt_fill_gtable(struct bat_priv *bat_priv, 1439static void tt_fill_gtable(struct bat_priv *bat_priv,
@@ -1519,10 +1503,9 @@ void handle_tt_response(struct bat_priv *bat_priv,
1519 struct tt_req_node *node, *safe; 1503 struct tt_req_node *node, *safe;
1520 struct orig_node *orig_node = NULL; 1504 struct orig_node *orig_node = NULL;
1521 1505
1522 bat_dbg(DBG_TT, bat_priv, "Received TT_RESPONSE from %pM for " 1506 bat_dbg(DBG_TT, bat_priv,
1523 "ttvn %d t_size: %d [%c]\n", 1507 "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
1524 tt_response->src, tt_response->ttvn, 1508 tt_response->src, tt_response->ttvn, tt_response->tt_data,
1525 tt_response->tt_data,
1526 (tt_response->flags & TT_FULL_TABLE ? 'F' : '.')); 1509 (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
1527 1510
1528 orig_node = orig_hash_find(bat_priv, tt_response->src); 1511 orig_node = orig_hash_find(bat_priv, tt_response->src);
@@ -1589,8 +1572,7 @@ static void tt_roam_purge(struct bat_priv *bat_priv)
1589 1572
1590 spin_lock_bh(&bat_priv->tt_roam_list_lock); 1573 spin_lock_bh(&bat_priv->tt_roam_list_lock);
1591 list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) { 1574 list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
1592 if (!is_out_of_time(node->first_time, 1575 if (!has_timed_out(node->first_time, ROAMING_MAX_TIME))
1593 ROAMING_MAX_TIME * 1000))
1594 continue; 1576 continue;
1595 1577
1596 list_del(&node->list); 1578 list_del(&node->list);
@@ -1617,8 +1599,7 @@ static bool tt_check_roam_count(struct bat_priv *bat_priv,
1617 if (!compare_eth(tt_roam_node->addr, client)) 1599 if (!compare_eth(tt_roam_node->addr, client))
1618 continue; 1600 continue;
1619 1601
1620 if (is_out_of_time(tt_roam_node->first_time, 1602 if (has_timed_out(tt_roam_node->first_time, ROAMING_MAX_TIME))
1621 ROAMING_MAX_TIME * 1000))
1622 continue; 1603 continue;
1623 1604
1624 if (!atomic_dec_not_zero(&tt_roam_node->counter)) 1605 if (!atomic_dec_not_zero(&tt_roam_node->counter))
@@ -1669,9 +1650,9 @@ void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
1669 roam_adv_packet = (struct roam_adv_packet *)skb_put(skb, 1650 roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
1670 sizeof(struct roam_adv_packet)); 1651 sizeof(struct roam_adv_packet));
1671 1652
1672 roam_adv_packet->packet_type = BAT_ROAM_ADV; 1653 roam_adv_packet->header.packet_type = BAT_ROAM_ADV;
1673 roam_adv_packet->version = COMPAT_VERSION; 1654 roam_adv_packet->header.version = COMPAT_VERSION;
1674 roam_adv_packet->ttl = TTL; 1655 roam_adv_packet->header.ttl = TTL;
1675 primary_if = primary_if_get_selected(bat_priv); 1656 primary_if = primary_if_get_selected(bat_priv);
1676 if (!primary_if) 1657 if (!primary_if)
1677 goto out; 1658 goto out;
@@ -1788,8 +1769,9 @@ static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
1788 if (!(tt_common_entry->flags & TT_CLIENT_PENDING)) 1769 if (!(tt_common_entry->flags & TT_CLIENT_PENDING))
1789 continue; 1770 continue;
1790 1771
1791 bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry " 1772 bat_dbg(DBG_TT, bat_priv,
1792 "(%pM): pending\n", tt_common_entry->addr); 1773 "Deleting local tt entry (%pM): pending\n",
1774 tt_common_entry->addr);
1793 1775
1794 atomic_dec(&bat_priv->num_local_tt); 1776 atomic_dec(&bat_priv->num_local_tt);
1795 hlist_del_rcu(node); 1777 hlist_del_rcu(node);
@@ -1854,8 +1836,10 @@ void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
1854 uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); 1836 uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
1855 bool full_table = true; 1837 bool full_table = true;
1856 1838
1857 /* the ttvn increased by one -> we can apply the attached changes */ 1839 /* orig table not initialised AND first diff is in the OGM OR the ttvn
1858 if (ttvn - orig_ttvn == 1) { 1840 * increased by one -> we can apply the attached changes */
1841 if ((!orig_node->tt_initialised && ttvn == 1) ||
1842 ttvn - orig_ttvn == 1) {
1859 /* the OGM could not contain the changes due to their size or 1843 /* the OGM could not contain the changes due to their size or
1860 * because they have already been sent TT_OGM_APPEND_MAX times. 1844 * because they have already been sent TT_OGM_APPEND_MAX times.
1861 * In this case send a tt request */ 1845 * In this case send a tt request */
@@ -1889,14 +1873,13 @@ void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
1889 } else { 1873 } else {
1890 /* if we missed more than one change or our tables are not 1874 /* if we missed more than one change or our tables are not
1891 * in sync anymore -> request fresh tt data */ 1875 * in sync anymore -> request fresh tt data */
1892 if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) { 1876 if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
1877 orig_node->tt_crc != tt_crc) {
1893request_table: 1878request_table:
1894 bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. " 1879 bat_dbg(DBG_TT, bat_priv,
1895 "Need to retrieve the correct information " 1880 "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
1896 "(ttvn: %u last_ttvn: %u crc: %u last_crc: " 1881 orig_node->orig, ttvn, orig_ttvn, tt_crc,
1897 "%u num_changes: %u)\n", orig_node->orig, ttvn, 1882 orig_node->tt_crc, tt_num_changes);
1898 orig_ttvn, tt_crc, orig_node->tt_crc,
1899 tt_num_changes);
1900 send_tt_request(bat_priv, orig_node, ttvn, tt_crc, 1883 send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
1901 full_table); 1884 full_table);
1902 return; 1885 return;
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index 30efd49881a3..c753633b1da1 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index e9eb043719ac..302efb523475 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Marek Lindner, Simon Wunderlich 4 * Marek Lindner, Simon Wunderlich
5 * 5 *
@@ -81,6 +81,7 @@ struct orig_node {
81 int16_t tt_buff_len; 81 int16_t tt_buff_len;
82 spinlock_t tt_buff_lock; /* protects tt_buff */ 82 spinlock_t tt_buff_lock; /* protects tt_buff */
83 atomic_t tt_size; 83 atomic_t tt_size;
84 bool tt_initialised;
84 /* The tt_poss_change flag is used to detect an ongoing roaming phase. 85 /* The tt_poss_change flag is used to detect an ongoing roaming phase.
85 * If true, then I sent a Roaming_adv to this orig_node and I have to 86 * If true, then I sent a Roaming_adv to this orig_node and I have to
86 * inspect every packet directed to it to check whether it is still 87 * inspect every packet directed to it to check whether it is still
@@ -205,6 +206,7 @@ struct bat_priv {
205 atomic_t gw_reselect; 206 atomic_t gw_reselect;
206 struct hard_iface __rcu *primary_if; /* rcu protected pointer */ 207 struct hard_iface __rcu *primary_if; /* rcu protected pointer */
207 struct vis_info *my_vis_info; 208 struct vis_info *my_vis_info;
209 struct bat_algo_ops *bat_algo_ops;
208}; 210};
209 211
210struct socket_client { 212struct socket_client {
@@ -343,4 +345,23 @@ struct softif_neigh {
343 struct rcu_head rcu; 345 struct rcu_head rcu;
344}; 346};
345 347
348struct bat_algo_ops {
349 struct hlist_node list;
350 char *name;
351 /* init OGM when hard-interface is enabled */
352 void (*bat_ogm_init)(struct hard_iface *hard_iface);
353 /* init primary OGM when primary interface is selected */
354 void (*bat_ogm_init_primary)(struct hard_iface *hard_iface);
355 /* init mac addresses of the OGM belonging to this hard-interface */
356 void (*bat_ogm_update_mac)(struct hard_iface *hard_iface);
357 /* prepare a new outgoing OGM for the send queue */
358 void (*bat_ogm_schedule)(struct hard_iface *hard_iface,
359 int tt_num_changes);
360 /* send scheduled OGM */
361 void (*bat_ogm_emit)(struct forw_packet *forw_packet);
362 /* receive incoming OGM */
363 void (*bat_ogm_receive)(struct hard_iface *if_incoming,
364 struct sk_buff *skb);
365};
366
346#endif /* _NET_BATMAN_ADV_TYPES_H_ */ 367#endif /* _NET_BATMAN_ADV_TYPES_H_ */
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 07d1c1da89dd..676f6a626b2c 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Andreas Langer 4 * Andreas Langer
5 * 5 *
@@ -66,8 +66,8 @@ static struct sk_buff *frag_merge_packet(struct list_head *head,
66 kfree_skb(tmp_skb); 66 kfree_skb(tmp_skb);
67 67
68 memmove(skb->data + uni_diff, skb->data, hdr_len); 68 memmove(skb->data + uni_diff, skb->data, hdr_len);
69 unicast_packet = (struct unicast_packet *) skb_pull(skb, uni_diff); 69 unicast_packet = (struct unicast_packet *)skb_pull(skb, uni_diff);
70 unicast_packet->packet_type = BAT_UNICAST; 70 unicast_packet->header.packet_type = BAT_UNICAST;
71 71
72 return skb; 72 return skb;
73 73
@@ -238,7 +238,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
238 goto dropped; 238 goto dropped;
239 skb_reserve(frag_skb, ucf_hdr_len); 239 skb_reserve(frag_skb, ucf_hdr_len);
240 240
241 unicast_packet = (struct unicast_packet *) skb->data; 241 unicast_packet = (struct unicast_packet *)skb->data;
242 memcpy(&tmp_uc, unicast_packet, uc_hdr_len); 242 memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
243 skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len); 243 skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len);
244 244
@@ -251,9 +251,9 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
251 251
252 memcpy(frag1, &tmp_uc, sizeof(tmp_uc)); 252 memcpy(frag1, &tmp_uc, sizeof(tmp_uc));
253 253
254 frag1->ttl--; 254 frag1->header.ttl--;
255 frag1->version = COMPAT_VERSION; 255 frag1->header.version = COMPAT_VERSION;
256 frag1->packet_type = BAT_UNICAST_FRAG; 256 frag1->header.packet_type = BAT_UNICAST_FRAG;
257 257
258 memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN); 258 memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
259 memcpy(frag2, frag1, sizeof(*frag2)); 259 memcpy(frag2, frag1, sizeof(*frag2));
@@ -320,11 +320,11 @@ find_router:
320 320
321 unicast_packet = (struct unicast_packet *)skb->data; 321 unicast_packet = (struct unicast_packet *)skb->data;
322 322
323 unicast_packet->version = COMPAT_VERSION; 323 unicast_packet->header.version = COMPAT_VERSION;
324 /* batman packet type: unicast */ 324 /* batman packet type: unicast */
325 unicast_packet->packet_type = BAT_UNICAST; 325 unicast_packet->header.packet_type = BAT_UNICAST;
326 /* set unicast ttl */ 326 /* set unicast ttl */
327 unicast_packet->ttl = TTL; 327 unicast_packet->header.ttl = TTL;
328 /* copy the destination for faster routing */ 328 /* copy the destination for faster routing */
329 memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN); 329 memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
330 /* set the destination tt version number */ 330 /* set the destination tt version number */
@@ -335,7 +335,7 @@ find_router:
335 data_len + sizeof(*unicast_packet) > 335 data_len + sizeof(*unicast_packet) >
336 neigh_node->if_incoming->net_dev->mtu) { 336 neigh_node->if_incoming->net_dev->mtu) {
337 /* send frag skb decreases ttl */ 337 /* send frag skb decreases ttl */
338 unicast_packet->ttl++; 338 unicast_packet->header.ttl++;
339 ret = frag_send_skb(skb, bat_priv, 339 ret = frag_send_skb(skb, bat_priv,
340 neigh_node->if_incoming, neigh_node->addr); 340 neigh_node->if_incoming, neigh_node->addr);
341 goto out; 341 goto out;
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
index 8fd5535544b9..a9faf6b1db19 100644
--- a/net/batman-adv/unicast.h
+++ b/net/batman-adv/unicast.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Andreas Langer 4 * Andreas Langer
5 * 5 *
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index cc3b9f2f3b5d..c4a5b8cafada 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2008-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2008-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Simon Wunderlich 4 * Simon Wunderlich
5 * 5 *
@@ -617,7 +617,7 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
617 packet->vis_type = atomic_read(&bat_priv->vis_mode); 617 packet->vis_type = atomic_read(&bat_priv->vis_mode);
618 618
619 memcpy(packet->target_orig, broadcast_addr, ETH_ALEN); 619 memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
620 packet->ttl = TTL; 620 packet->header.ttl = TTL;
621 packet->seqno = htonl(ntohl(packet->seqno) + 1); 621 packet->seqno = htonl(ntohl(packet->seqno) + 1);
622 packet->entries = 0; 622 packet->entries = 0;
623 skb_trim(info->skb_packet, sizeof(*packet)); 623 skb_trim(info->skb_packet, sizeof(*packet));
@@ -714,8 +714,7 @@ static void purge_vis_packets(struct bat_priv *bat_priv)
714 if (info == bat_priv->my_vis_info) 714 if (info == bat_priv->my_vis_info)
715 continue; 715 continue;
716 716
717 if (time_after(jiffies, 717 if (has_timed_out(info->first_seen, VIS_TIMEOUT)) {
718 info->first_seen + VIS_TIMEOUT * HZ)) {
719 hlist_del(node); 718 hlist_del(node);
720 send_list_del(info); 719 send_list_del(info);
721 kref_put(&info->refcount, free_info); 720 kref_put(&info->refcount, free_info);
@@ -818,19 +817,19 @@ static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
818 goto out; 817 goto out;
819 818
820 packet = (struct vis_packet *)info->skb_packet->data; 819 packet = (struct vis_packet *)info->skb_packet->data;
821 if (packet->ttl < 2) { 820 if (packet->header.ttl < 2) {
822 pr_debug("Error - can't send vis packet: ttl exceeded\n"); 821 pr_debug("Error - can't send vis packet: ttl exceeded\n");
823 goto out; 822 goto out;
824 } 823 }
825 824
826 memcpy(packet->sender_orig, primary_if->net_dev->dev_addr, ETH_ALEN); 825 memcpy(packet->sender_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
827 packet->ttl--; 826 packet->header.ttl--;
828 827
829 if (is_broadcast_ether_addr(packet->target_orig)) 828 if (is_broadcast_ether_addr(packet->target_orig))
830 broadcast_vis_packet(bat_priv, info); 829 broadcast_vis_packet(bat_priv, info);
831 else 830 else
832 unicast_vis_packet(bat_priv, info); 831 unicast_vis_packet(bat_priv, info);
833 packet->ttl++; /* restore TTL */ 832 packet->header.ttl++; /* restore TTL */
834 833
835out: 834out:
836 if (primary_if) 835 if (primary_if)
@@ -910,9 +909,9 @@ int vis_init(struct bat_priv *bat_priv)
910 INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list); 909 INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
911 kref_init(&bat_priv->my_vis_info->refcount); 910 kref_init(&bat_priv->my_vis_info->refcount);
912 bat_priv->my_vis_info->bat_priv = bat_priv; 911 bat_priv->my_vis_info->bat_priv = bat_priv;
913 packet->version = COMPAT_VERSION; 912 packet->header.version = COMPAT_VERSION;
914 packet->packet_type = BAT_VIS; 913 packet->header.packet_type = BAT_VIS;
915 packet->ttl = TTL; 914 packet->header.ttl = TTL;
916 packet->seqno = 0; 915 packet->seqno = 0;
917 packet->entries = 0; 916 packet->entries = 0;
918 917
diff --git a/net/batman-adv/vis.h b/net/batman-adv/vis.h
index 31b820d07f23..ee2e46e5347b 100644
--- a/net/batman-adv/vis.h
+++ b/net/batman-adv/vis.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2008-2011 B.A.T.M.A.N. contributors: 2 * Copyright (C) 2008-2012 B.A.T.M.A.N. contributors:
3 * 3 *
4 * Simon Wunderlich, Marek Lindner 4 * Simon Wunderlich, Marek Lindner
5 * 5 *
@@ -22,7 +22,8 @@
22#ifndef _NET_BATMAN_ADV_VIS_H_ 22#ifndef _NET_BATMAN_ADV_VIS_H_
23#define _NET_BATMAN_ADV_VIS_H_ 23#define _NET_BATMAN_ADV_VIS_H_
24 24
25#define VIS_TIMEOUT 200 /* timeout of vis packets in seconds */ 25#define VIS_TIMEOUT 200000 /* timeout of vis packets
26 * in miliseconds */
26 27
27int vis_seq_print_text(struct seq_file *seq, void *offset); 28int vis_seq_print_text(struct seq_file *seq, void *offset);
28void receive_server_sync_packet(struct bat_priv *bat_priv, 29void receive_server_sync_packet(struct bat_priv *bat_priv,
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 9ec85eb8853d..3537d385035e 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -29,7 +29,6 @@ menuconfig BT
29 BNEP Module (Bluetooth Network Encapsulation Protocol) 29 BNEP Module (Bluetooth Network Encapsulation Protocol)
30 CMTP Module (CAPI Message Transport Protocol) 30 CMTP Module (CAPI Message Transport Protocol)
31 HIDP Module (Human Interface Device Protocol) 31 HIDP Module (Human Interface Device Protocol)
32 SMP Module (Security Manager Protocol)
33 32
34 Say Y here to compile Bluetooth support into the kernel or say M to 33 Say Y here to compile Bluetooth support into the kernel or say M to
35 compile it as module (bluetooth). 34 compile it as module (bluetooth).
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 17800b1d28ea..9f9c8dcd8af0 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -143,10 +143,10 @@ static int bnep_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
143{ 143{
144 if (cmd == BNEPGETCONNLIST) { 144 if (cmd == BNEPGETCONNLIST) {
145 struct bnep_connlist_req cl; 145 struct bnep_connlist_req cl;
146 uint32_t uci; 146 u32 uci;
147 int err; 147 int err;
148 148
149 if (get_user(cl.cnum, (uint32_t __user *) arg) || 149 if (get_user(cl.cnum, (u32 __user *) arg) ||
150 get_user(uci, (u32 __user *) (arg + 4))) 150 get_user(uci, (u32 __user *) (arg + 4)))
151 return -EFAULT; 151 return -EFAULT;
152 152
@@ -157,7 +157,7 @@ static int bnep_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
157 157
158 err = bnep_get_connlist(&cl); 158 err = bnep_get_connlist(&cl);
159 159
160 if (!err && put_user(cl.cnum, (uint32_t __user *) arg)) 160 if (!err && put_user(cl.cnum, (u32 __user *) arg))
161 err = -EFAULT; 161 err = -EFAULT;
162 162
163 return err; 163 return err;
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index 3f2dd5c25ae5..1230faaac29b 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -137,10 +137,10 @@ static int cmtp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
137{ 137{
138 if (cmd == CMTPGETCONNLIST) { 138 if (cmd == CMTPGETCONNLIST) {
139 struct cmtp_connlist_req cl; 139 struct cmtp_connlist_req cl;
140 uint32_t uci; 140 u32 uci;
141 int err; 141 int err;
142 142
143 if (get_user(cl.cnum, (uint32_t __user *) arg) || 143 if (get_user(cl.cnum, (u32 __user *) arg) ||
144 get_user(uci, (u32 __user *) (arg + 4))) 144 get_user(uci, (u32 __user *) (arg + 4)))
145 return -EFAULT; 145 return -EFAULT;
146 146
@@ -151,7 +151,7 @@ static int cmtp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
151 151
152 err = cmtp_get_connlist(&cl); 152 err = cmtp_get_connlist(&cl);
153 153
154 if (!err && put_user(cl.cnum, (uint32_t __user *) arg)) 154 if (!err && put_user(cl.cnum, (u32 __user *) arg))
155 err = -EFAULT; 155 err = -EFAULT;
156 156
157 return err; 157 return err;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 07bc69ed9498..947172bf1621 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -35,7 +35,6 @@
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/skbuff.h> 36#include <linux/skbuff.h>
37#include <linux/interrupt.h> 37#include <linux/interrupt.h>
38#include <linux/notifier.h>
39#include <net/sock.h> 38#include <net/sock.h>
40 39
41#include <asm/system.h> 40#include <asm/system.h>
@@ -51,7 +50,7 @@ static void hci_le_connect(struct hci_conn *conn)
51 struct hci_cp_le_create_conn cp; 50 struct hci_cp_le_create_conn cp;
52 51
53 conn->state = BT_CONNECT; 52 conn->state = BT_CONNECT;
54 conn->out = 1; 53 conn->out = true;
55 conn->link_mode |= HCI_LM_MASTER; 54 conn->link_mode |= HCI_LM_MASTER;
56 conn->sec_level = BT_SECURITY_LOW; 55 conn->sec_level = BT_SECURITY_LOW;
57 56
@@ -80,10 +79,10 @@ void hci_acl_connect(struct hci_conn *conn)
80 struct inquiry_entry *ie; 79 struct inquiry_entry *ie;
81 struct hci_cp_create_conn cp; 80 struct hci_cp_create_conn cp;
82 81
83 BT_DBG("%p", conn); 82 BT_DBG("hcon %p", conn);
84 83
85 conn->state = BT_CONNECT; 84 conn->state = BT_CONNECT;
86 conn->out = 1; 85 conn->out = true;
87 86
88 conn->link_mode = HCI_LM_MASTER; 87 conn->link_mode = HCI_LM_MASTER;
89 88
@@ -105,7 +104,8 @@ void hci_acl_connect(struct hci_conn *conn)
105 } 104 }
106 105
107 memcpy(conn->dev_class, ie->data.dev_class, 3); 106 memcpy(conn->dev_class, ie->data.dev_class, 3);
108 conn->ssp_mode = ie->data.ssp_mode; 107 if (ie->data.ssp_mode > 0)
108 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
109 } 109 }
110 110
111 cp.pkt_type = cpu_to_le16(conn->pkt_type); 111 cp.pkt_type = cpu_to_le16(conn->pkt_type);
@@ -151,7 +151,7 @@ void hci_add_sco(struct hci_conn *conn, __u16 handle)
151 BT_DBG("%p", conn); 151 BT_DBG("%p", conn);
152 152
153 conn->state = BT_CONNECT; 153 conn->state = BT_CONNECT;
154 conn->out = 1; 154 conn->out = true;
155 155
156 conn->attempt++; 156 conn->attempt++;
157 157
@@ -169,7 +169,7 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
169 BT_DBG("%p", conn); 169 BT_DBG("%p", conn);
170 170
171 conn->state = BT_CONNECT; 171 conn->state = BT_CONNECT;
172 conn->out = 1; 172 conn->out = true;
173 173
174 conn->attempt++; 174 conn->attempt++;
175 175
@@ -279,16 +279,13 @@ static void hci_conn_timeout(struct work_struct *work)
279{ 279{
280 struct hci_conn *conn = container_of(work, struct hci_conn, 280 struct hci_conn *conn = container_of(work, struct hci_conn,
281 disc_work.work); 281 disc_work.work);
282 struct hci_dev *hdev = conn->hdev;
283 __u8 reason; 282 __u8 reason;
284 283
285 BT_DBG("conn %p state %d", conn, conn->state); 284 BT_DBG("conn %p state %s", conn, state_to_string(conn->state));
286 285
287 if (atomic_read(&conn->refcnt)) 286 if (atomic_read(&conn->refcnt))
288 return; 287 return;
289 288
290 hci_dev_lock(hdev);
291
292 switch (conn->state) { 289 switch (conn->state) {
293 case BT_CONNECT: 290 case BT_CONNECT:
294 case BT_CONNECT2: 291 case BT_CONNECT2:
@@ -308,8 +305,6 @@ static void hci_conn_timeout(struct work_struct *work)
308 conn->state = BT_CLOSED; 305 conn->state = BT_CLOSED;
309 break; 306 break;
310 } 307 }
311
312 hci_dev_unlock(hdev);
313} 308}
314 309
315/* Enter sniff mode */ 310/* Enter sniff mode */
@@ -337,7 +332,7 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
337 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp); 332 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
338 } 333 }
339 334
340 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { 335 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
341 struct hci_cp_sniff_mode cp; 336 struct hci_cp_sniff_mode cp;
342 cp.handle = cpu_to_le16(conn->handle); 337 cp.handle = cpu_to_le16(conn->handle);
343 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval); 338 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
@@ -372,7 +367,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
372 367
373 BT_DBG("%s dst %s", hdev->name, batostr(dst)); 368 BT_DBG("%s dst %s", hdev->name, batostr(dst));
374 369
375 conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC); 370 conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
376 if (!conn) 371 if (!conn)
377 return NULL; 372 return NULL;
378 373
@@ -386,7 +381,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
386 conn->remote_auth = 0xff; 381 conn->remote_auth = 0xff;
387 conn->key_type = 0xff; 382 conn->key_type = 0xff;
388 383
389 conn->power_save = 1; 384 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
390 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 385 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
391 386
392 switch (type) { 387 switch (type) {
@@ -407,7 +402,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
407 402
408 skb_queue_head_init(&conn->data_q); 403 skb_queue_head_init(&conn->data_q);
409 404
410 INIT_LIST_HEAD(&conn->chan_list);; 405 INIT_LIST_HEAD(&conn->chan_list);
411 406
412 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout); 407 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
413 setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn); 408 setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
@@ -555,7 +550,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
555 if (!acl) { 550 if (!acl) {
556 acl = hci_conn_add(hdev, ACL_LINK, dst); 551 acl = hci_conn_add(hdev, ACL_LINK, dst);
557 if (!acl) 552 if (!acl)
558 return NULL; 553 return ERR_PTR(-ENOMEM);
559 } 554 }
560 555
561 hci_conn_hold(acl); 556 hci_conn_hold(acl);
@@ -575,7 +570,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
575 sco = hci_conn_add(hdev, type, dst); 570 sco = hci_conn_add(hdev, type, dst);
576 if (!sco) { 571 if (!sco) {
577 hci_conn_put(acl); 572 hci_conn_put(acl);
578 return NULL; 573 return ERR_PTR(-ENOMEM);
579 } 574 }
580 } 575 }
581 576
@@ -586,12 +581,12 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
586 581
587 if (acl->state == BT_CONNECTED && 582 if (acl->state == BT_CONNECTED &&
588 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { 583 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
589 acl->power_save = 1; 584 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
590 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON); 585 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
591 586
592 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) { 587 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
593 /* defer SCO setup until mode change completed */ 588 /* defer SCO setup until mode change completed */
594 set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->pend); 589 set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
595 return sco; 590 return sco;
596 } 591 }
597 592
@@ -607,8 +602,7 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
607{ 602{
608 BT_DBG("conn %p", conn); 603 BT_DBG("conn %p", conn);
609 604
610 if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 && 605 if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
611 !(conn->link_mode & HCI_LM_ENCRYPT))
612 return 0; 606 return 0;
613 607
614 return 1; 608 return 1;
@@ -633,17 +627,17 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
633 627
634 conn->auth_type = auth_type; 628 conn->auth_type = auth_type;
635 629
636 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { 630 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
637 struct hci_cp_auth_requested cp; 631 struct hci_cp_auth_requested cp;
638 632
639 /* encrypt must be pending if auth is also pending */ 633 /* encrypt must be pending if auth is also pending */
640 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend); 634 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
641 635
642 cp.handle = cpu_to_le16(conn->handle); 636 cp.handle = cpu_to_le16(conn->handle);
643 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, 637 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
644 sizeof(cp), &cp); 638 sizeof(cp), &cp);
645 if (conn->key_type != 0xff) 639 if (conn->key_type != 0xff)
646 set_bit(HCI_CONN_REAUTH_PEND, &conn->pend); 640 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
647 } 641 }
648 642
649 return 0; 643 return 0;
@@ -654,7 +648,7 @@ static void hci_conn_encrypt(struct hci_conn *conn)
654{ 648{
655 BT_DBG("conn %p", conn); 649 BT_DBG("conn %p", conn);
656 650
657 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) { 651 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
658 struct hci_cp_set_conn_encrypt cp; 652 struct hci_cp_set_conn_encrypt cp;
659 cp.handle = cpu_to_le16(conn->handle); 653 cp.handle = cpu_to_le16(conn->handle);
660 cp.encrypt = 0x01; 654 cp.encrypt = 0x01;
@@ -674,8 +668,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
674 668
675 /* For non 2.1 devices and low security level we don't need the link 669 /* For non 2.1 devices and low security level we don't need the link
676 key. */ 670 key. */
677 if (sec_level == BT_SECURITY_LOW && 671 if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
678 (!conn->ssp_mode || !conn->hdev->ssp_mode))
679 return 1; 672 return 1;
680 673
681 /* For other security levels we need the link key. */ 674 /* For other security levels we need the link key. */
@@ -704,7 +697,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
704 goto encrypt; 697 goto encrypt;
705 698
706auth: 699auth:
707 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) 700 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
708 return 0; 701 return 0;
709 702
710 if (!hci_conn_auth(conn, sec_level, auth_type)) 703 if (!hci_conn_auth(conn, sec_level, auth_type))
@@ -739,7 +732,7 @@ int hci_conn_change_link_key(struct hci_conn *conn)
739{ 732{
740 BT_DBG("conn %p", conn); 733 BT_DBG("conn %p", conn);
741 734
742 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { 735 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
743 struct hci_cp_change_conn_link_key cp; 736 struct hci_cp_change_conn_link_key cp;
744 cp.handle = cpu_to_le16(conn->handle); 737 cp.handle = cpu_to_le16(conn->handle);
745 hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY, 738 hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
@@ -758,7 +751,7 @@ int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
758 if (!role && conn->link_mode & HCI_LM_MASTER) 751 if (!role && conn->link_mode & HCI_LM_MASTER)
759 return 1; 752 return 1;
760 753
761 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) { 754 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
762 struct hci_cp_switch_role cp; 755 struct hci_cp_switch_role cp;
763 bacpy(&cp.bdaddr, &conn->dst); 756 bacpy(&cp.bdaddr, &conn->dst);
764 cp.role = role; 757 cp.role = role;
@@ -782,10 +775,10 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
782 if (conn->mode != HCI_CM_SNIFF) 775 if (conn->mode != HCI_CM_SNIFF)
783 goto timer; 776 goto timer;
784 777
785 if (!conn->power_save && !force_active) 778 if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
786 goto timer; 779 goto timer;
787 780
788 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { 781 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
789 struct hci_cp_exit_sniff_mode cp; 782 struct hci_cp_exit_sniff_mode cp;
790 cp.handle = cpu_to_le16(conn->handle); 783 cp.handle = cpu_to_le16(conn->handle);
791 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp); 784 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
@@ -801,11 +794,11 @@ timer:
801void hci_conn_hash_flush(struct hci_dev *hdev) 794void hci_conn_hash_flush(struct hci_dev *hdev)
802{ 795{
803 struct hci_conn_hash *h = &hdev->conn_hash; 796 struct hci_conn_hash *h = &hdev->conn_hash;
804 struct hci_conn *c; 797 struct hci_conn *c, *n;
805 798
806 BT_DBG("hdev %s", hdev->name); 799 BT_DBG("hdev %s", hdev->name);
807 800
808 list_for_each_entry_rcu(c, &h->list, list) { 801 list_for_each_entry_safe(c, n, &h->list, list) {
809 c->state = BT_CLOSED; 802 c->state = BT_CLOSED;
810 803
811 hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM); 804 hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
@@ -950,7 +943,7 @@ struct hci_chan *hci_chan_create(struct hci_conn *conn)
950 943
951 BT_DBG("%s conn %p", hdev->name, conn); 944 BT_DBG("%s conn %p", hdev->name, conn);
952 945
953 chan = kzalloc(sizeof(struct hci_chan), GFP_ATOMIC); 946 chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
954 if (!chan) 947 if (!chan)
955 return NULL; 948 return NULL;
956 949
@@ -981,10 +974,10 @@ int hci_chan_del(struct hci_chan *chan)
981 974
982void hci_chan_list_flush(struct hci_conn *conn) 975void hci_chan_list_flush(struct hci_conn *conn)
983{ 976{
984 struct hci_chan *chan; 977 struct hci_chan *chan, *n;
985 978
986 BT_DBG("conn %p", conn); 979 BT_DBG("conn %p", conn);
987 980
988 list_for_each_entry_rcu(chan, &conn->chan_list, list) 981 list_for_each_entry_safe(chan, n, &conn->chan_list, list)
989 hci_chan_del(chan); 982 hci_chan_del(chan);
990} 983}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 5aeb62491198..59ec99eb739b 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -40,7 +40,6 @@
40#include <linux/skbuff.h> 40#include <linux/skbuff.h>
41#include <linux/workqueue.h> 41#include <linux/workqueue.h>
42#include <linux/interrupt.h> 42#include <linux/interrupt.h>
43#include <linux/notifier.h>
44#include <linux/rfkill.h> 43#include <linux/rfkill.h>
45#include <linux/timer.h> 44#include <linux/timer.h>
46#include <linux/crypto.h> 45#include <linux/crypto.h>
@@ -55,8 +54,6 @@
55 54
56#define AUTO_OFF_TIMEOUT 2000 55#define AUTO_OFF_TIMEOUT 2000
57 56
58bool enable_hs;
59
60static void hci_rx_work(struct work_struct *work); 57static void hci_rx_work(struct work_struct *work);
61static void hci_cmd_work(struct work_struct *work); 58static void hci_cmd_work(struct work_struct *work);
62static void hci_tx_work(struct work_struct *work); 59static void hci_tx_work(struct work_struct *work);
@@ -69,24 +66,11 @@ DEFINE_RWLOCK(hci_dev_list_lock);
69LIST_HEAD(hci_cb_list); 66LIST_HEAD(hci_cb_list);
70DEFINE_RWLOCK(hci_cb_list_lock); 67DEFINE_RWLOCK(hci_cb_list_lock);
71 68
72/* HCI notifiers list */
73static ATOMIC_NOTIFIER_HEAD(hci_notifier);
74
75/* ---- HCI notifications ---- */ 69/* ---- HCI notifications ---- */
76 70
77int hci_register_notifier(struct notifier_block *nb)
78{
79 return atomic_notifier_chain_register(&hci_notifier, nb);
80}
81
82int hci_unregister_notifier(struct notifier_block *nb)
83{
84 return atomic_notifier_chain_unregister(&hci_notifier, nb);
85}
86
87static void hci_notify(struct hci_dev *hdev, int event) 71static void hci_notify(struct hci_dev *hdev, int event)
88{ 72{
89 atomic_notifier_call_chain(&hci_notifier, event, hdev); 73 hci_sock_dev_event(hdev, event);
90} 74}
91 75
92/* ---- HCI requests ---- */ 76/* ---- HCI requests ---- */
@@ -98,8 +82,28 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
98 /* If this is the init phase check if the completed command matches 82 /* If this is the init phase check if the completed command matches
99 * the last init command, and if not just return. 83 * the last init command, and if not just return.
100 */ 84 */
101 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) 85 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
86 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
87 struct sk_buff *skb;
88
89 /* Some CSR based controllers generate a spontaneous
90 * reset complete event during init and any pending
91 * command will never be completed. In such a case we
92 * need to resend whatever was the last sent
93 * command.
94 */
95
96 if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
97 return;
98
99 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
100 if (skb) {
101 skb_queue_head(&hdev->cmd_q, skb);
102 queue_work(hdev->workqueue, &hdev->cmd_work);
103 }
104
102 return; 105 return;
106 }
103 107
104 if (hdev->req_status == HCI_REQ_PEND) { 108 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = result; 109 hdev->req_result = result;
@@ -355,72 +359,209 @@ struct hci_dev *hci_dev_get(int index)
355} 359}
356 360
357/* ---- Inquiry support ---- */ 361/* ---- Inquiry support ---- */
358static void inquiry_cache_flush(struct hci_dev *hdev) 362
363bool hci_discovery_active(struct hci_dev *hdev)
359{ 364{
360 struct inquiry_cache *cache = &hdev->inq_cache; 365 struct discovery_state *discov = &hdev->discovery;
361 struct inquiry_entry *next = cache->list, *e; 366
367 switch (discov->state) {
368 case DISCOVERY_FINDING:
369 case DISCOVERY_RESOLVING:
370 return true;
371
372 default:
373 return false;
374 }
375}
376
377void hci_discovery_set_state(struct hci_dev *hdev, int state)
378{
379 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
380
381 if (hdev->discovery.state == state)
382 return;
383
384 switch (state) {
385 case DISCOVERY_STOPPED:
386 if (hdev->discovery.state != DISCOVERY_STARTING)
387 mgmt_discovering(hdev, 0);
388 hdev->discovery.type = 0;
389 break;
390 case DISCOVERY_STARTING:
391 break;
392 case DISCOVERY_FINDING:
393 mgmt_discovering(hdev, 1);
394 break;
395 case DISCOVERY_RESOLVING:
396 break;
397 case DISCOVERY_STOPPING:
398 break;
399 }
400
401 hdev->discovery.state = state;
402}
362 403
363 BT_DBG("cache %p", cache); 404static void inquiry_cache_flush(struct hci_dev *hdev)
405{
406 struct discovery_state *cache = &hdev->discovery;
407 struct inquiry_entry *p, *n;
364 408
365 cache->list = NULL; 409 list_for_each_entry_safe(p, n, &cache->all, all) {
366 while ((e = next)) { 410 list_del(&p->all);
367 next = e->next; 411 kfree(p);
368 kfree(e);
369 } 412 }
413
414 INIT_LIST_HEAD(&cache->unknown);
415 INIT_LIST_HEAD(&cache->resolve);
370} 416}
371 417
372struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) 418struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
373{ 419{
374 struct inquiry_cache *cache = &hdev->inq_cache; 420 struct discovery_state *cache = &hdev->discovery;
375 struct inquiry_entry *e; 421 struct inquiry_entry *e;
376 422
377 BT_DBG("cache %p, %s", cache, batostr(bdaddr)); 423 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
378 424
379 for (e = cache->list; e; e = e->next) 425 list_for_each_entry(e, &cache->all, all) {
426 if (!bacmp(&e->data.bdaddr, bdaddr))
427 return e;
428 }
429
430 return NULL;
431}
432
433struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
434 bdaddr_t *bdaddr)
435{
436 struct discovery_state *cache = &hdev->discovery;
437 struct inquiry_entry *e;
438
439 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
440
441 list_for_each_entry(e, &cache->unknown, list) {
442 if (!bacmp(&e->data.bdaddr, bdaddr))
443 return e;
444 }
445
446 return NULL;
447}
448
449struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
450 bdaddr_t *bdaddr,
451 int state)
452{
453 struct discovery_state *cache = &hdev->discovery;
454 struct inquiry_entry *e;
455
456 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
457
458 list_for_each_entry(e, &cache->resolve, list) {
459 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
460 return e;
380 if (!bacmp(&e->data.bdaddr, bdaddr)) 461 if (!bacmp(&e->data.bdaddr, bdaddr))
462 return e;
463 }
464
465 return NULL;
466}
467
468void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
469 struct inquiry_entry *ie)
470{
471 struct discovery_state *cache = &hdev->discovery;
472 struct list_head *pos = &cache->resolve;
473 struct inquiry_entry *p;
474
475 list_del(&ie->list);
476
477 list_for_each_entry(p, &cache->resolve, list) {
478 if (p->name_state != NAME_PENDING &&
479 abs(p->data.rssi) >= abs(ie->data.rssi))
381 break; 480 break;
382 return e; 481 pos = &p->list;
482 }
483
484 list_add(&ie->list, pos);
383} 485}
384 486
385void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data) 487bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
488 bool name_known, bool *ssp)
386{ 489{
387 struct inquiry_cache *cache = &hdev->inq_cache; 490 struct discovery_state *cache = &hdev->discovery;
388 struct inquiry_entry *ie; 491 struct inquiry_entry *ie;
389 492
390 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr)); 493 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
391 494
495 if (ssp)
496 *ssp = data->ssp_mode;
497
392 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr); 498 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
393 if (!ie) { 499 if (ie) {
394 /* Entry not in the cache. Add new one. */ 500 if (ie->data.ssp_mode && ssp)
395 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC); 501 *ssp = true;
396 if (!ie) 502
397 return; 503 if (ie->name_state == NAME_NEEDED &&
504 data->rssi != ie->data.rssi) {
505 ie->data.rssi = data->rssi;
506 hci_inquiry_cache_update_resolve(hdev, ie);
507 }
398 508
399 ie->next = cache->list; 509 goto update;
400 cache->list = ie; 510 }
511
512 /* Entry not in the cache. Add new one. */
513 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
514 if (!ie)
515 return false;
516
517 list_add(&ie->all, &cache->all);
518
519 if (name_known) {
520 ie->name_state = NAME_KNOWN;
521 } else {
522 ie->name_state = NAME_NOT_KNOWN;
523 list_add(&ie->list, &cache->unknown);
524 }
525
526update:
527 if (name_known && ie->name_state != NAME_KNOWN &&
528 ie->name_state != NAME_PENDING) {
529 ie->name_state = NAME_KNOWN;
530 list_del(&ie->list);
401 } 531 }
402 532
403 memcpy(&ie->data, data, sizeof(*data)); 533 memcpy(&ie->data, data, sizeof(*data));
404 ie->timestamp = jiffies; 534 ie->timestamp = jiffies;
405 cache->timestamp = jiffies; 535 cache->timestamp = jiffies;
536
537 if (ie->name_state == NAME_NOT_KNOWN)
538 return false;
539
540 return true;
406} 541}
407 542
408static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf) 543static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
409{ 544{
410 struct inquiry_cache *cache = &hdev->inq_cache; 545 struct discovery_state *cache = &hdev->discovery;
411 struct inquiry_info *info = (struct inquiry_info *) buf; 546 struct inquiry_info *info = (struct inquiry_info *) buf;
412 struct inquiry_entry *e; 547 struct inquiry_entry *e;
413 int copied = 0; 548 int copied = 0;
414 549
415 for (e = cache->list; e && copied < num; e = e->next, copied++) { 550 list_for_each_entry(e, &cache->all, all) {
416 struct inquiry_data *data = &e->data; 551 struct inquiry_data *data = &e->data;
552
553 if (copied >= num)
554 break;
555
417 bacpy(&info->bdaddr, &data->bdaddr); 556 bacpy(&info->bdaddr, &data->bdaddr);
418 info->pscan_rep_mode = data->pscan_rep_mode; 557 info->pscan_rep_mode = data->pscan_rep_mode;
419 info->pscan_period_mode = data->pscan_period_mode; 558 info->pscan_period_mode = data->pscan_period_mode;
420 info->pscan_mode = data->pscan_mode; 559 info->pscan_mode = data->pscan_mode;
421 memcpy(info->dev_class, data->dev_class, 3); 560 memcpy(info->dev_class, data->dev_class, 3);
422 info->clock_offset = data->clock_offset; 561 info->clock_offset = data->clock_offset;
562
423 info++; 563 info++;
564 copied++;
424 } 565 }
425 566
426 BT_DBG("cache %p, copied %d", cache, copied); 567 BT_DBG("cache %p, copied %d", cache, copied);
@@ -567,7 +708,7 @@ int hci_dev_open(__u16 dev)
567 hci_dev_hold(hdev); 708 hci_dev_hold(hdev);
568 set_bit(HCI_UP, &hdev->flags); 709 set_bit(HCI_UP, &hdev->flags);
569 hci_notify(hdev, HCI_DEV_UP); 710 hci_notify(hdev, HCI_DEV_UP);
570 if (!test_bit(HCI_SETUP, &hdev->flags)) { 711 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
571 hci_dev_lock(hdev); 712 hci_dev_lock(hdev);
572 mgmt_powered(hdev, 1); 713 mgmt_powered(hdev, 1);
573 hci_dev_unlock(hdev); 714 hci_dev_unlock(hdev);
@@ -603,6 +744,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
603{ 744{
604 BT_DBG("%s %p", hdev->name, hdev); 745 BT_DBG("%s %p", hdev->name, hdev);
605 746
747 cancel_work_sync(&hdev->le_scan);
748
606 hci_req_cancel(hdev, ENODEV); 749 hci_req_cancel(hdev, ENODEV);
607 hci_req_lock(hdev); 750 hci_req_lock(hdev);
608 751
@@ -619,14 +762,14 @@ static int hci_dev_do_close(struct hci_dev *hdev)
619 if (hdev->discov_timeout > 0) { 762 if (hdev->discov_timeout > 0) {
620 cancel_delayed_work(&hdev->discov_off); 763 cancel_delayed_work(&hdev->discov_off);
621 hdev->discov_timeout = 0; 764 hdev->discov_timeout = 0;
765 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
622 } 766 }
623 767
624 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags)) 768 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
625 cancel_delayed_work(&hdev->power_off);
626
627 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
628 cancel_delayed_work(&hdev->service_cache); 769 cancel_delayed_work(&hdev->service_cache);
629 770
771 cancel_delayed_work_sync(&hdev->le_scan_disable);
772
630 hci_dev_lock(hdev); 773 hci_dev_lock(hdev);
631 inquiry_cache_flush(hdev); 774 inquiry_cache_flush(hdev);
632 hci_conn_hash_flush(hdev); 775 hci_conn_hash_flush(hdev);
@@ -667,13 +810,18 @@ static int hci_dev_do_close(struct hci_dev *hdev)
667 * and no tasks are scheduled. */ 810 * and no tasks are scheduled. */
668 hdev->close(hdev); 811 hdev->close(hdev);
669 812
670 hci_dev_lock(hdev); 813 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
671 mgmt_powered(hdev, 0); 814 hci_dev_lock(hdev);
672 hci_dev_unlock(hdev); 815 mgmt_powered(hdev, 0);
816 hci_dev_unlock(hdev);
817 }
673 818
674 /* Clear flags */ 819 /* Clear flags */
675 hdev->flags = 0; 820 hdev->flags = 0;
676 821
822 memset(hdev->eir, 0, sizeof(hdev->eir));
823 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
824
677 hci_req_unlock(hdev); 825 hci_req_unlock(hdev);
678 826
679 hci_dev_put(hdev); 827 hci_dev_put(hdev);
@@ -688,7 +836,12 @@ int hci_dev_close(__u16 dev)
688 hdev = hci_dev_get(dev); 836 hdev = hci_dev_get(dev);
689 if (!hdev) 837 if (!hdev)
690 return -ENODEV; 838 return -ENODEV;
839
840 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
841 cancel_delayed_work(&hdev->power_off);
842
691 err = hci_dev_do_close(hdev); 843 err = hci_dev_do_close(hdev);
844
692 hci_dev_put(hdev); 845 hci_dev_put(hdev);
693 return err; 846 return err;
694} 847}
@@ -847,11 +1000,11 @@ int hci_get_dev_list(void __user *arg)
847 1000
848 read_lock(&hci_dev_list_lock); 1001 read_lock(&hci_dev_list_lock);
849 list_for_each_entry(hdev, &hci_dev_list, list) { 1002 list_for_each_entry(hdev, &hci_dev_list, list) {
850 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags)) 1003 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
851 cancel_delayed_work(&hdev->power_off); 1004 cancel_delayed_work(&hdev->power_off);
852 1005
853 if (!test_bit(HCI_MGMT, &hdev->flags)) 1006 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
854 set_bit(HCI_PAIRABLE, &hdev->flags); 1007 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
855 1008
856 (dr + n)->dev_id = hdev->id; 1009 (dr + n)->dev_id = hdev->id;
857 (dr + n)->dev_opt = hdev->flags; 1010 (dr + n)->dev_opt = hdev->flags;
@@ -883,11 +1036,11 @@ int hci_get_dev_info(void __user *arg)
883 if (!hdev) 1036 if (!hdev)
884 return -ENODEV; 1037 return -ENODEV;
885 1038
886 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags)) 1039 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
887 cancel_delayed_work_sync(&hdev->power_off); 1040 cancel_delayed_work_sync(&hdev->power_off);
888 1041
889 if (!test_bit(HCI_MGMT, &hdev->flags)) 1042 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
890 set_bit(HCI_PAIRABLE, &hdev->flags); 1043 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
891 1044
892 strcpy(di.name, hdev->name); 1045 strcpy(di.name, hdev->name);
893 di.bdaddr = hdev->bdaddr; 1046 di.bdaddr = hdev->bdaddr;
@@ -967,11 +1120,11 @@ static void hci_power_on(struct work_struct *work)
967 if (hci_dev_open(hdev->id) < 0) 1120 if (hci_dev_open(hdev->id) < 0)
968 return; 1121 return;
969 1122
970 if (test_bit(HCI_AUTO_OFF, &hdev->flags)) 1123 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
971 schedule_delayed_work(&hdev->power_off, 1124 schedule_delayed_work(&hdev->power_off,
972 msecs_to_jiffies(AUTO_OFF_TIMEOUT)); 1125 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
973 1126
974 if (test_and_clear_bit(HCI_SETUP, &hdev->flags)) 1127 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
975 mgmt_index_added(hdev); 1128 mgmt_index_added(hdev);
976} 1129}
977 1130
@@ -982,9 +1135,7 @@ static void hci_power_off(struct work_struct *work)
982 1135
983 BT_DBG("%s", hdev->name); 1136 BT_DBG("%s", hdev->name);
984 1137
985 clear_bit(HCI_AUTO_OFF, &hdev->flags); 1138 hci_dev_do_close(hdev);
986
987 hci_dev_close(hdev->id);
988} 1139}
989 1140
990static void hci_discov_off(struct work_struct *work) 1141static void hci_discov_off(struct work_struct *work)
@@ -1037,6 +1188,18 @@ int hci_link_keys_clear(struct hci_dev *hdev)
1037 return 0; 1188 return 0;
1038} 1189}
1039 1190
1191int hci_smp_ltks_clear(struct hci_dev *hdev)
1192{
1193 struct smp_ltk *k, *tmp;
1194
1195 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1196 list_del(&k->list);
1197 kfree(k);
1198 }
1199
1200 return 0;
1201}
1202
1040struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) 1203struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1041{ 1204{
1042 struct link_key *k; 1205 struct link_key *k;
@@ -1084,44 +1247,38 @@ static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1084 return 0; 1247 return 0;
1085} 1248}
1086 1249
1087struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]) 1250struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1088{ 1251{
1089 struct link_key *k; 1252 struct smp_ltk *k;
1090 1253
1091 list_for_each_entry(k, &hdev->link_keys, list) { 1254 list_for_each_entry(k, &hdev->long_term_keys, list) {
1092 struct key_master_id *id; 1255 if (k->ediv != ediv ||
1093 1256 memcmp(rand, k->rand, sizeof(k->rand)))
1094 if (k->type != HCI_LK_SMP_LTK)
1095 continue; 1257 continue;
1096 1258
1097 if (k->dlen != sizeof(*id)) 1259 return k;
1098 continue;
1099
1100 id = (void *) &k->data;
1101 if (id->ediv == ediv &&
1102 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1103 return k;
1104 } 1260 }
1105 1261
1106 return NULL; 1262 return NULL;
1107} 1263}
1108EXPORT_SYMBOL(hci_find_ltk); 1264EXPORT_SYMBOL(hci_find_ltk);
1109 1265
1110struct link_key *hci_find_link_key_type(struct hci_dev *hdev, 1266struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1111 bdaddr_t *bdaddr, u8 type) 1267 u8 addr_type)
1112{ 1268{
1113 struct link_key *k; 1269 struct smp_ltk *k;
1114 1270
1115 list_for_each_entry(k, &hdev->link_keys, list) 1271 list_for_each_entry(k, &hdev->long_term_keys, list)
1116 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0) 1272 if (addr_type == k->bdaddr_type &&
1273 bacmp(bdaddr, &k->bdaddr) == 0)
1117 return k; 1274 return k;
1118 1275
1119 return NULL; 1276 return NULL;
1120} 1277}
1121EXPORT_SYMBOL(hci_find_link_key_type); 1278EXPORT_SYMBOL(hci_find_ltk_by_addr);
1122 1279
1123int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, 1280int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1124 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len) 1281 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1125{ 1282{
1126 struct link_key *key, *old_key; 1283 struct link_key *key, *old_key;
1127 u8 old_key_type, persistent; 1284 u8 old_key_type, persistent;
@@ -1175,40 +1332,39 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1175 return 0; 1332 return 0;
1176} 1333}
1177 1334
1178int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr, 1335int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1179 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16]) 1336 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16
1337 ediv, u8 rand[8])
1180{ 1338{
1181 struct link_key *key, *old_key; 1339 struct smp_ltk *key, *old_key;
1182 struct key_master_id *id;
1183 u8 old_key_type;
1184 1340
1185 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr)); 1341 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1342 return 0;
1186 1343
1187 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK); 1344 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1188 if (old_key) { 1345 if (old_key)
1189 key = old_key; 1346 key = old_key;
1190 old_key_type = old_key->type; 1347 else {
1191 } else { 1348 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1192 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1193 if (!key) 1349 if (!key)
1194 return -ENOMEM; 1350 return -ENOMEM;
1195 list_add(&key->list, &hdev->link_keys); 1351 list_add(&key->list, &hdev->long_term_keys);
1196 old_key_type = 0xff;
1197 } 1352 }
1198 1353
1199 key->dlen = sizeof(*id);
1200
1201 bacpy(&key->bdaddr, bdaddr); 1354 bacpy(&key->bdaddr, bdaddr);
1202 memcpy(key->val, ltk, sizeof(key->val)); 1355 key->bdaddr_type = addr_type;
1203 key->type = HCI_LK_SMP_LTK; 1356 memcpy(key->val, tk, sizeof(key->val));
1204 key->pin_len = key_size; 1357 key->authenticated = authenticated;
1358 key->ediv = ediv;
1359 key->enc_size = enc_size;
1360 key->type = type;
1361 memcpy(key->rand, rand, sizeof(key->rand));
1205 1362
1206 id = (void *) &key->data; 1363 if (!new_key)
1207 id->ediv = ediv; 1364 return 0;
1208 memcpy(id->rand, rand, sizeof(id->rand));
1209 1365
1210 if (new_key) 1366 if (type & HCI_SMP_LTK)
1211 mgmt_new_link_key(hdev, key, old_key_type); 1367 mgmt_new_ltk(hdev, key, 1);
1212 1368
1213 return 0; 1369 return 0;
1214} 1370}
@@ -1229,6 +1385,23 @@ int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1229 return 0; 1385 return 0;
1230} 1386}
1231 1387
1388int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1389{
1390 struct smp_ltk *k, *tmp;
1391
1392 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1393 if (bacmp(bdaddr, &k->bdaddr))
1394 continue;
1395
1396 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1397
1398 list_del(&k->list);
1399 kfree(k);
1400 }
1401
1402 return 0;
1403}
1404
1232/* HCI command timer function */ 1405/* HCI command timer function */
1233static void hci_cmd_timer(unsigned long arg) 1406static void hci_cmd_timer(unsigned long arg)
1234{ 1407{
@@ -1240,7 +1413,7 @@ static void hci_cmd_timer(unsigned long arg)
1240} 1413}
1241 1414
1242struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, 1415struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1243 bdaddr_t *bdaddr) 1416 bdaddr_t *bdaddr)
1244{ 1417{
1245 struct oob_data *data; 1418 struct oob_data *data;
1246 1419
@@ -1280,7 +1453,7 @@ int hci_remote_oob_data_clear(struct hci_dev *hdev)
1280} 1453}
1281 1454
1282int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash, 1455int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1283 u8 *randomizer) 1456 u8 *randomizer)
1284{ 1457{
1285 struct oob_data *data; 1458 struct oob_data *data;
1286 1459
@@ -1303,8 +1476,7 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1303 return 0; 1476 return 0;
1304} 1477}
1305 1478
1306struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, 1479struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1307 bdaddr_t *bdaddr)
1308{ 1480{
1309 struct bdaddr_list *b; 1481 struct bdaddr_list *b;
1310 1482
@@ -1331,7 +1503,7 @@ int hci_blacklist_clear(struct hci_dev *hdev)
1331 return 0; 1503 return 0;
1332} 1504}
1333 1505
1334int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr) 1506int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1335{ 1507{
1336 struct bdaddr_list *entry; 1508 struct bdaddr_list *entry;
1337 1509
@@ -1349,10 +1521,10 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1349 1521
1350 list_add(&entry->list, &hdev->blacklist); 1522 list_add(&entry->list, &hdev->blacklist);
1351 1523
1352 return mgmt_device_blocked(hdev, bdaddr); 1524 return mgmt_device_blocked(hdev, bdaddr, type);
1353} 1525}
1354 1526
1355int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr) 1527int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1356{ 1528{
1357 struct bdaddr_list *entry; 1529 struct bdaddr_list *entry;
1358 1530
@@ -1366,13 +1538,13 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1366 list_del(&entry->list); 1538 list_del(&entry->list);
1367 kfree(entry); 1539 kfree(entry);
1368 1540
1369 return mgmt_device_unblocked(hdev, bdaddr); 1541 return mgmt_device_unblocked(hdev, bdaddr, type);
1370} 1542}
1371 1543
1372static void hci_clear_adv_cache(struct work_struct *work) 1544static void hci_clear_adv_cache(struct work_struct *work)
1373{ 1545{
1374 struct hci_dev *hdev = container_of(work, struct hci_dev, 1546 struct hci_dev *hdev = container_of(work, struct hci_dev,
1375 adv_work.work); 1547 adv_work.work);
1376 1548
1377 hci_dev_lock(hdev); 1549 hci_dev_lock(hdev);
1378 1550
@@ -1415,11 +1587,7 @@ static inline int is_connectable_adv(u8 evt_type)
1415} 1587}
1416 1588
1417int hci_add_adv_entry(struct hci_dev *hdev, 1589int hci_add_adv_entry(struct hci_dev *hdev,
1418 struct hci_ev_le_advertising_info *ev) 1590 struct hci_ev_le_advertising_info *ev) { struct adv_entry *entry; if (!is_connectable_adv(ev->evt_type))
1419{
1420 struct adv_entry *entry;
1421
1422 if (!is_connectable_adv(ev->evt_type))
1423 return -EINVAL; 1591 return -EINVAL;
1424 1592
1425 /* Only new entries should be added to adv_entries. So, if 1593 /* Only new entries should be added to adv_entries. So, if
@@ -1427,7 +1595,7 @@ int hci_add_adv_entry(struct hci_dev *hdev,
1427 if (hci_find_adv_entry(hdev, &ev->bdaddr)) 1595 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1428 return 0; 1596 return 0;
1429 1597
1430 entry = kzalloc(sizeof(*entry), GFP_ATOMIC); 1598 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1431 if (!entry) 1599 if (!entry)
1432 return -ENOMEM; 1600 return -ENOMEM;
1433 1601
@@ -1442,16 +1610,116 @@ int hci_add_adv_entry(struct hci_dev *hdev,
1442 return 0; 1610 return 0;
1443} 1611}
1444 1612
1613static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1614{
1615 struct le_scan_params *param = (struct le_scan_params *) opt;
1616 struct hci_cp_le_set_scan_param cp;
1617
1618 memset(&cp, 0, sizeof(cp));
1619 cp.type = param->type;
1620 cp.interval = cpu_to_le16(param->interval);
1621 cp.window = cpu_to_le16(param->window);
1622
1623 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1624}
1625
1626static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1627{
1628 struct hci_cp_le_set_scan_enable cp;
1629
1630 memset(&cp, 0, sizeof(cp));
1631 cp.enable = 1;
1632
1633 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1634}
1635
1636static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1637 u16 window, int timeout)
1638{
1639 long timeo = msecs_to_jiffies(3000);
1640 struct le_scan_params param;
1641 int err;
1642
1643 BT_DBG("%s", hdev->name);
1644
1645 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1646 return -EINPROGRESS;
1647
1648 param.type = type;
1649 param.interval = interval;
1650 param.window = window;
1651
1652 hci_req_lock(hdev);
1653
1654 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1655 timeo);
1656 if (!err)
1657 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1658
1659 hci_req_unlock(hdev);
1660
1661 if (err < 0)
1662 return err;
1663
1664 schedule_delayed_work(&hdev->le_scan_disable,
1665 msecs_to_jiffies(timeout));
1666
1667 return 0;
1668}
1669
1670static void le_scan_disable_work(struct work_struct *work)
1671{
1672 struct hci_dev *hdev = container_of(work, struct hci_dev,
1673 le_scan_disable.work);
1674 struct hci_cp_le_set_scan_enable cp;
1675
1676 BT_DBG("%s", hdev->name);
1677
1678 memset(&cp, 0, sizeof(cp));
1679
1680 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1681}
1682
1683static void le_scan_work(struct work_struct *work)
1684{
1685 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1686 struct le_scan_params *param = &hdev->le_scan_params;
1687
1688 BT_DBG("%s", hdev->name);
1689
1690 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1691 param->timeout);
1692}
1693
1694int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1695 int timeout)
1696{
1697 struct le_scan_params *param = &hdev->le_scan_params;
1698
1699 BT_DBG("%s", hdev->name);
1700
1701 if (work_busy(&hdev->le_scan))
1702 return -EINPROGRESS;
1703
1704 param->type = type;
1705 param->interval = interval;
1706 param->window = window;
1707 param->timeout = timeout;
1708
1709 queue_work(system_long_wq, &hdev->le_scan);
1710
1711 return 0;
1712}
1713
1445/* Register HCI device */ 1714/* Register HCI device */
1446int hci_register_dev(struct hci_dev *hdev) 1715int hci_register_dev(struct hci_dev *hdev)
1447{ 1716{
1448 struct list_head *head = &hci_dev_list, *p; 1717 struct list_head *head = &hci_dev_list, *p;
1449 int i, id, error; 1718 int i, id, error;
1450 1719
1451 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name, 1720 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1452 hdev->bus, hdev->owner);
1453 1721
1454 if (!hdev->open || !hdev->close || !hdev->destruct) 1722 if (!hdev->open || !hdev->close)
1455 return -EINVAL; 1723 return -EINVAL;
1456 1724
1457 /* Do not allow HCI_AMP devices to register at index 0, 1725 /* Do not allow HCI_AMP devices to register at index 0,
@@ -1472,7 +1740,6 @@ int hci_register_dev(struct hci_dev *hdev)
1472 hdev->id = id; 1740 hdev->id = id;
1473 list_add_tail(&hdev->list, head); 1741 list_add_tail(&hdev->list, head);
1474 1742
1475 atomic_set(&hdev->refcnt, 1);
1476 mutex_init(&hdev->lock); 1743 mutex_init(&hdev->lock);
1477 1744
1478 hdev->flags = 0; 1745 hdev->flags = 0;
@@ -1503,7 +1770,7 @@ int hci_register_dev(struct hci_dev *hdev)
1503 init_waitqueue_head(&hdev->req_wait_q); 1770 init_waitqueue_head(&hdev->req_wait_q);
1504 mutex_init(&hdev->req_lock); 1771 mutex_init(&hdev->req_lock);
1505 1772
1506 inquiry_cache_init(hdev); 1773 discovery_init(hdev);
1507 1774
1508 hci_conn_hash_init(hdev); 1775 hci_conn_hash_init(hdev);
1509 1776
@@ -1514,6 +1781,7 @@ int hci_register_dev(struct hci_dev *hdev)
1514 INIT_LIST_HEAD(&hdev->uuids); 1781 INIT_LIST_HEAD(&hdev->uuids);
1515 1782
1516 INIT_LIST_HEAD(&hdev->link_keys); 1783 INIT_LIST_HEAD(&hdev->link_keys);
1784 INIT_LIST_HEAD(&hdev->long_term_keys);
1517 1785
1518 INIT_LIST_HEAD(&hdev->remote_oob_data); 1786 INIT_LIST_HEAD(&hdev->remote_oob_data);
1519 1787
@@ -1529,6 +1797,10 @@ int hci_register_dev(struct hci_dev *hdev)
1529 1797
1530 atomic_set(&hdev->promisc, 0); 1798 atomic_set(&hdev->promisc, 0);
1531 1799
1800 INIT_WORK(&hdev->le_scan, le_scan_work);
1801
1802 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1803
1532 write_unlock(&hci_dev_list_lock); 1804 write_unlock(&hci_dev_list_lock);
1533 1805
1534 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND | 1806 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
@@ -1551,11 +1823,12 @@ int hci_register_dev(struct hci_dev *hdev)
1551 } 1823 }
1552 } 1824 }
1553 1825
1554 set_bit(HCI_AUTO_OFF, &hdev->flags); 1826 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1555 set_bit(HCI_SETUP, &hdev->flags); 1827 set_bit(HCI_SETUP, &hdev->dev_flags);
1556 schedule_work(&hdev->power_on); 1828 schedule_work(&hdev->power_on);
1557 1829
1558 hci_notify(hdev, HCI_DEV_REG); 1830 hci_notify(hdev, HCI_DEV_REG);
1831 hci_dev_hold(hdev);
1559 1832
1560 return id; 1833 return id;
1561 1834
@@ -1587,7 +1860,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
1587 kfree_skb(hdev->reassembly[i]); 1860 kfree_skb(hdev->reassembly[i]);
1588 1861
1589 if (!test_bit(HCI_INIT, &hdev->flags) && 1862 if (!test_bit(HCI_INIT, &hdev->flags) &&
1590 !test_bit(HCI_SETUP, &hdev->flags)) { 1863 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1591 hci_dev_lock(hdev); 1864 hci_dev_lock(hdev);
1592 mgmt_index_removed(hdev); 1865 mgmt_index_removed(hdev);
1593 hci_dev_unlock(hdev); 1866 hci_dev_unlock(hdev);
@@ -1614,11 +1887,12 @@ void hci_unregister_dev(struct hci_dev *hdev)
1614 hci_blacklist_clear(hdev); 1887 hci_blacklist_clear(hdev);
1615 hci_uuids_clear(hdev); 1888 hci_uuids_clear(hdev);
1616 hci_link_keys_clear(hdev); 1889 hci_link_keys_clear(hdev);
1890 hci_smp_ltks_clear(hdev);
1617 hci_remote_oob_data_clear(hdev); 1891 hci_remote_oob_data_clear(hdev);
1618 hci_adv_entries_clear(hdev); 1892 hci_adv_entries_clear(hdev);
1619 hci_dev_unlock(hdev); 1893 hci_dev_unlock(hdev);
1620 1894
1621 __hci_dev_put(hdev); 1895 hci_dev_put(hdev);
1622} 1896}
1623EXPORT_SYMBOL(hci_unregister_dev); 1897EXPORT_SYMBOL(hci_unregister_dev);
1624 1898
@@ -1706,7 +1980,7 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1706 1980
1707 while (count) { 1981 while (count) {
1708 scb = (void *) skb->cb; 1982 scb = (void *) skb->cb;
1709 len = min(scb->expect, (__u16)count); 1983 len = min_t(uint, scb->expect, count);
1710 1984
1711 memcpy(skb_put(skb, len), data, len); 1985 memcpy(skb_put(skb, len), data, len);
1712 1986
@@ -1862,11 +2136,15 @@ static int hci_send_frame(struct sk_buff *skb)
1862 2136
1863 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len); 2137 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1864 2138
1865 if (atomic_read(&hdev->promisc)) { 2139 /* Time stamp */
1866 /* Time stamp */ 2140 __net_timestamp(skb);
1867 __net_timestamp(skb); 2141
2142 /* Send copy to monitor */
2143 hci_send_to_monitor(hdev, skb);
1868 2144
1869 hci_send_to_sock(hdev, skb, NULL); 2145 if (atomic_read(&hdev->promisc)) {
2146 /* Send copy to the sockets */
2147 hci_send_to_sock(hdev, skb);
1870 } 2148 }
1871 2149
1872 /* Get rid of skb owner, prior to sending to the driver. */ 2150 /* Get rid of skb owner, prior to sending to the driver. */
@@ -2235,26 +2513,31 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2235 2513
2236} 2514}
2237 2515
2238static inline void hci_sched_acl(struct hci_dev *hdev) 2516static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2239{ 2517{
2240 struct hci_chan *chan; 2518 /* Calculate count of blocks used by this packet */
2241 struct sk_buff *skb; 2519 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2242 int quote; 2520}
2243 unsigned int cnt;
2244
2245 BT_DBG("%s", hdev->name);
2246
2247 if (!hci_conn_num(hdev, ACL_LINK))
2248 return;
2249 2521
2522static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2523{
2250 if (!test_bit(HCI_RAW, &hdev->flags)) { 2524 if (!test_bit(HCI_RAW, &hdev->flags)) {
2251 /* ACL tx timeout must be longer than maximum 2525 /* ACL tx timeout must be longer than maximum
2252 * link supervision timeout (40.9 seconds) */ 2526 * link supervision timeout (40.9 seconds) */
2253 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45)) 2527 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2528 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2254 hci_link_tx_to(hdev, ACL_LINK); 2529 hci_link_tx_to(hdev, ACL_LINK);
2255 } 2530 }
2531}
2532
2533static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2534{
2535 unsigned int cnt = hdev->acl_cnt;
2536 struct hci_chan *chan;
2537 struct sk_buff *skb;
2538 int quote;
2256 2539
2257 cnt = hdev->acl_cnt; 2540 __check_timeout(hdev, cnt);
2258 2541
2259 while (hdev->acl_cnt && 2542 while (hdev->acl_cnt &&
2260 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) { 2543 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
@@ -2270,7 +2553,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
2270 skb = skb_dequeue(&chan->data_q); 2553 skb = skb_dequeue(&chan->data_q);
2271 2554
2272 hci_conn_enter_active_mode(chan->conn, 2555 hci_conn_enter_active_mode(chan->conn,
2273 bt_cb(skb)->force_active); 2556 bt_cb(skb)->force_active);
2274 2557
2275 hci_send_frame(skb); 2558 hci_send_frame(skb);
2276 hdev->acl_last_tx = jiffies; 2559 hdev->acl_last_tx = jiffies;
@@ -2285,6 +2568,70 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
2285 hci_prio_recalculate(hdev, ACL_LINK); 2568 hci_prio_recalculate(hdev, ACL_LINK);
2286} 2569}
2287 2570
2571static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2572{
2573 unsigned int cnt = hdev->block_cnt;
2574 struct hci_chan *chan;
2575 struct sk_buff *skb;
2576 int quote;
2577
2578 __check_timeout(hdev, cnt);
2579
2580 while (hdev->block_cnt > 0 &&
2581 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2582 u32 priority = (skb_peek(&chan->data_q))->priority;
2583 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2584 int blocks;
2585
2586 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2587 skb->len, skb->priority);
2588
2589 /* Stop if priority has changed */
2590 if (skb->priority < priority)
2591 break;
2592
2593 skb = skb_dequeue(&chan->data_q);
2594
2595 blocks = __get_blocks(hdev, skb);
2596 if (blocks > hdev->block_cnt)
2597 return;
2598
2599 hci_conn_enter_active_mode(chan->conn,
2600 bt_cb(skb)->force_active);
2601
2602 hci_send_frame(skb);
2603 hdev->acl_last_tx = jiffies;
2604
2605 hdev->block_cnt -= blocks;
2606 quote -= blocks;
2607
2608 chan->sent += blocks;
2609 chan->conn->sent += blocks;
2610 }
2611 }
2612
2613 if (cnt != hdev->block_cnt)
2614 hci_prio_recalculate(hdev, ACL_LINK);
2615}
2616
2617static inline void hci_sched_acl(struct hci_dev *hdev)
2618{
2619 BT_DBG("%s", hdev->name);
2620
2621 if (!hci_conn_num(hdev, ACL_LINK))
2622 return;
2623
2624 switch (hdev->flow_ctl_mode) {
2625 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2626 hci_sched_acl_pkt(hdev);
2627 break;
2628
2629 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2630 hci_sched_acl_blk(hdev);
2631 break;
2632 }
2633}
2634
2288/* Schedule SCO */ 2635/* Schedule SCO */
2289static inline void hci_sched_sco(struct hci_dev *hdev) 2636static inline void hci_sched_sco(struct hci_dev *hdev)
2290{ 2637{
@@ -2482,9 +2829,12 @@ static void hci_rx_work(struct work_struct *work)
2482 BT_DBG("%s", hdev->name); 2829 BT_DBG("%s", hdev->name);
2483 2830
2484 while ((skb = skb_dequeue(&hdev->rx_q))) { 2831 while ((skb = skb_dequeue(&hdev->rx_q))) {
2832 /* Send copy to monitor */
2833 hci_send_to_monitor(hdev, skb);
2834
2485 if (atomic_read(&hdev->promisc)) { 2835 if (atomic_read(&hdev->promisc)) {
2486 /* Send copy to the sockets */ 2836 /* Send copy to the sockets */
2487 hci_send_to_sock(hdev, skb, NULL); 2837 hci_send_to_sock(hdev, skb);
2488 } 2838 }
2489 2839
2490 if (test_bit(HCI_RAW, &hdev->flags)) { 2840 if (test_bit(HCI_RAW, &hdev->flags)) {
@@ -2568,6 +2918,8 @@ int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2568 if (test_bit(HCI_INQUIRY, &hdev->flags)) 2918 if (test_bit(HCI_INQUIRY, &hdev->flags))
2569 return -EINPROGRESS; 2919 return -EINPROGRESS;
2570 2920
2921 inquiry_cache_flush(hdev);
2922
2571 memset(&cp, 0, sizeof(cp)); 2923 memset(&cp, 0, sizeof(cp));
2572 memcpy(&cp.lap, lap, sizeof(cp.lap)); 2924 memcpy(&cp.lap, lap, sizeof(cp.lap));
2573 cp.length = length; 2925 cp.length = length;
@@ -2584,6 +2936,3 @@ int hci_cancel_inquiry(struct hci_dev *hdev)
2584 2936
2585 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL); 2937 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2586} 2938}
2587
2588module_param(enable_hs, bool, 0644);
2589MODULE_PARM_DESC(enable_hs, "Enable High Speed");
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 001307f81057..badb7851d116 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -35,7 +35,6 @@
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/skbuff.h> 36#include <linux/skbuff.h>
37#include <linux/interrupt.h> 37#include <linux/interrupt.h>
38#include <linux/notifier.h>
39#include <net/sock.h> 38#include <net/sock.h>
40 39
41#include <asm/system.h> 40#include <asm/system.h>
@@ -45,8 +44,6 @@
45#include <net/bluetooth/bluetooth.h> 44#include <net/bluetooth/bluetooth.h>
46#include <net/bluetooth/hci_core.h> 45#include <net/bluetooth/hci_core.h>
47 46
48static bool enable_le;
49
50/* Handle HCI Event packets */ 47/* Handle HCI Event packets */
51 48
52static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) 49static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
@@ -65,7 +62,7 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
65 clear_bit(HCI_INQUIRY, &hdev->flags); 62 clear_bit(HCI_INQUIRY, &hdev->flags);
66 63
67 hci_dev_lock(hdev); 64 hci_dev_lock(hdev);
68 mgmt_discovering(hdev, 0); 65 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
69 hci_dev_unlock(hdev); 66 hci_dev_unlock(hdev);
70 67
71 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status); 68 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
@@ -195,7 +192,10 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
195 192
196 hci_req_complete(hdev, HCI_OP_RESET, status); 193 hci_req_complete(hdev, HCI_OP_RESET, status);
197 194
198 hdev->dev_flags = 0; 195 /* Reset all non-persistent flags */
196 hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS));
197
198 hdev->discovery.state = DISCOVERY_STOPPED;
199} 199}
200 200
201static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) 201static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -211,13 +211,14 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
211 211
212 hci_dev_lock(hdev); 212 hci_dev_lock(hdev);
213 213
214 if (test_bit(HCI_MGMT, &hdev->flags)) 214 if (test_bit(HCI_MGMT, &hdev->dev_flags))
215 mgmt_set_local_name_complete(hdev, sent, status); 215 mgmt_set_local_name_complete(hdev, sent, status);
216 216 else if (!status)
217 if (status == 0)
218 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH); 217 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
219 218
220 hci_dev_unlock(hdev); 219 hci_dev_unlock(hdev);
220
221 hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
221} 222}
222 223
223static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb) 224static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -229,7 +230,8 @@ static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
229 if (rp->status) 230 if (rp->status)
230 return; 231 return;
231 232
232 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH); 233 if (test_bit(HCI_SETUP, &hdev->dev_flags))
234 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
233} 235}
234 236
235static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb) 237static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
@@ -252,6 +254,9 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
252 clear_bit(HCI_AUTH, &hdev->flags); 254 clear_bit(HCI_AUTH, &hdev->flags);
253 } 255 }
254 256
257 if (test_bit(HCI_MGMT, &hdev->dev_flags))
258 mgmt_auth_enable_complete(hdev, status);
259
255 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status); 260 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
256} 261}
257 262
@@ -349,14 +354,19 @@ static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
349 354
350 BT_DBG("%s status 0x%x", hdev->name, status); 355 BT_DBG("%s status 0x%x", hdev->name, status);
351 356
352 if (status)
353 return;
354
355 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV); 357 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
356 if (!sent) 358 if (!sent)
357 return; 359 return;
358 360
359 memcpy(hdev->dev_class, sent, 3); 361 hci_dev_lock(hdev);
362
363 if (status == 0)
364 memcpy(hdev->dev_class, sent, 3);
365
366 if (test_bit(HCI_MGMT, &hdev->dev_flags))
367 mgmt_set_class_of_dev_complete(hdev, sent, status);
368
369 hci_dev_unlock(hdev);
360} 370}
361 371
362static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) 372static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
@@ -419,18 +429,6 @@ static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
419 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status); 429 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
420} 430}
421 431
422static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
423{
424 struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
425
426 BT_DBG("%s status 0x%x", hdev->name, rp->status);
427
428 if (rp->status)
429 return;
430
431 hdev->ssp_mode = rp->mode;
432}
433
434static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) 432static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
435{ 433{
436 __u8 status = *((__u8 *) skb->data); 434 __u8 status = *((__u8 *) skb->data);
@@ -438,14 +436,18 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
438 436
439 BT_DBG("%s status 0x%x", hdev->name, status); 437 BT_DBG("%s status 0x%x", hdev->name, status);
440 438
441 if (status)
442 return;
443
444 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE); 439 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
445 if (!sent) 440 if (!sent)
446 return; 441 return;
447 442
448 hdev->ssp_mode = *((__u8 *) sent); 443 if (test_bit(HCI_MGMT, &hdev->dev_flags))
444 mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
445 else if (!status) {
446 if (*((u8 *) sent))
447 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
448 else
449 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
450 }
449} 451}
450 452
451static u8 hci_get_inquiry_mode(struct hci_dev *hdev) 453static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
@@ -540,20 +542,6 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
540 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events); 542 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
541} 543}
542 544
543static void hci_set_le_support(struct hci_dev *hdev)
544{
545 struct hci_cp_write_le_host_supported cp;
546
547 memset(&cp, 0, sizeof(cp));
548
549 if (enable_le) {
550 cp.le = 1;
551 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
552 }
553
554 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
555}
556
557static void hci_setup(struct hci_dev *hdev) 545static void hci_setup(struct hci_dev *hdev)
558{ 546{
559 if (hdev->dev_type != HCI_BREDR) 547 if (hdev->dev_type != HCI_BREDR)
@@ -565,8 +553,18 @@ static void hci_setup(struct hci_dev *hdev)
565 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); 553 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
566 554
567 if (hdev->features[6] & LMP_SIMPLE_PAIR) { 555 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
568 u8 mode = 0x01; 556 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
569 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode); 557 u8 mode = 0x01;
558 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
559 sizeof(mode), &mode);
560 } else {
561 struct hci_cp_write_eir cp;
562
563 memset(hdev->eir, 0, sizeof(hdev->eir));
564 memset(&cp, 0, sizeof(cp));
565
566 hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
567 }
570 } 568 }
571 569
572 if (hdev->features[3] & LMP_RSSI_INQ) 570 if (hdev->features[3] & LMP_RSSI_INQ)
@@ -579,12 +577,15 @@ static void hci_setup(struct hci_dev *hdev)
579 struct hci_cp_read_local_ext_features cp; 577 struct hci_cp_read_local_ext_features cp;
580 578
581 cp.page = 0x01; 579 cp.page = 0x01;
582 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, 580 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
583 sizeof(cp), &cp); 581 &cp);
584 } 582 }
585 583
586 if (hdev->features[4] & LMP_LE) 584 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
587 hci_set_le_support(hdev); 585 u8 enable = 1;
586 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
587 &enable);
588 }
588} 589}
589 590
590static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) 591static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
@@ -594,7 +595,7 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
594 BT_DBG("%s status 0x%x", hdev->name, rp->status); 595 BT_DBG("%s status 0x%x", hdev->name, rp->status);
595 596
596 if (rp->status) 597 if (rp->status)
597 return; 598 goto done;
598 599
599 hdev->hci_ver = rp->hci_ver; 600 hdev->hci_ver = rp->hci_ver;
600 hdev->hci_rev = __le16_to_cpu(rp->hci_rev); 601 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
@@ -608,6 +609,9 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
608 609
609 if (test_bit(HCI_INIT, &hdev->flags)) 610 if (test_bit(HCI_INIT, &hdev->flags))
610 hci_setup(hdev); 611 hci_setup(hdev);
612
613done:
614 hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
611} 615}
612 616
613static void hci_setup_link_policy(struct hci_dev *hdev) 617static void hci_setup_link_policy(struct hci_dev *hdev)
@@ -624,8 +628,8 @@ static void hci_setup_link_policy(struct hci_dev *hdev)
624 link_policy |= HCI_LP_PARK; 628 link_policy |= HCI_LP_PARK;
625 629
626 link_policy = cpu_to_le16(link_policy); 630 link_policy = cpu_to_le16(link_policy);
627 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 631 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(link_policy),
628 sizeof(link_policy), &link_policy); 632 &link_policy);
629} 633}
630 634
631static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb) 635static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
@@ -701,6 +705,22 @@ static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb
701 hdev->features[6], hdev->features[7]); 705 hdev->features[6], hdev->features[7]);
702} 706}
703 707
708static void hci_set_le_support(struct hci_dev *hdev)
709{
710 struct hci_cp_write_le_host_supported cp;
711
712 memset(&cp, 0, sizeof(cp));
713
714 if (enable_le && test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
715 cp.le = 1;
716 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
717 }
718
719 if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
720 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
721 &cp);
722}
723
704static void hci_cc_read_local_ext_features(struct hci_dev *hdev, 724static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
705 struct sk_buff *skb) 725 struct sk_buff *skb)
706{ 726{
@@ -709,7 +729,7 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
709 BT_DBG("%s status 0x%x", hdev->name, rp->status); 729 BT_DBG("%s status 0x%x", hdev->name, rp->status);
710 730
711 if (rp->status) 731 if (rp->status)
712 return; 732 goto done;
713 733
714 switch (rp->page) { 734 switch (rp->page) {
715 case 0: 735 case 0:
@@ -720,6 +740,10 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
720 break; 740 break;
721 } 741 }
722 742
743 if (test_bit(HCI_INIT, &hdev->flags) && hdev->features[4] & LMP_LE)
744 hci_set_le_support(hdev);
745
746done:
723 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status); 747 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
724} 748}
725 749
@@ -890,7 +914,7 @@ static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
890 914
891 hci_dev_lock(hdev); 915 hci_dev_lock(hdev);
892 916
893 if (test_bit(HCI_MGMT, &hdev->flags)) 917 if (test_bit(HCI_MGMT, &hdev->dev_flags))
894 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status); 918 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
895 919
896 if (rp->status != 0) 920 if (rp->status != 0)
@@ -916,7 +940,7 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
916 940
917 hci_dev_lock(hdev); 941 hci_dev_lock(hdev);
918 942
919 if (test_bit(HCI_MGMT, &hdev->flags)) 943 if (test_bit(HCI_MGMT, &hdev->dev_flags))
920 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr, 944 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
921 rp->status); 945 rp->status);
922 946
@@ -951,9 +975,9 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
951 975
952 hci_dev_lock(hdev); 976 hci_dev_lock(hdev);
953 977
954 if (test_bit(HCI_MGMT, &hdev->flags)) 978 if (test_bit(HCI_MGMT, &hdev->dev_flags))
955 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, 979 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
956 rp->status); 980 rp->status);
957 981
958 hci_dev_unlock(hdev); 982 hci_dev_unlock(hdev);
959} 983}
@@ -967,9 +991,9 @@ static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
967 991
968 hci_dev_lock(hdev); 992 hci_dev_lock(hdev);
969 993
970 if (test_bit(HCI_MGMT, &hdev->flags)) 994 if (test_bit(HCI_MGMT, &hdev->dev_flags))
971 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr, 995 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
972 rp->status); 996 ACL_LINK, 0, rp->status);
973 997
974 hci_dev_unlock(hdev); 998 hci_dev_unlock(hdev);
975} 999}
@@ -982,9 +1006,9 @@ static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
982 1006
983 hci_dev_lock(hdev); 1007 hci_dev_lock(hdev);
984 1008
985 if (test_bit(HCI_MGMT, &hdev->flags)) 1009 if (test_bit(HCI_MGMT, &hdev->dev_flags))
986 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, 1010 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
987 rp->status); 1011 0, rp->status);
988 1012
989 hci_dev_unlock(hdev); 1013 hci_dev_unlock(hdev);
990} 1014}
@@ -998,9 +1022,9 @@ static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
998 1022
999 hci_dev_lock(hdev); 1023 hci_dev_lock(hdev);
1000 1024
1001 if (test_bit(HCI_MGMT, &hdev->flags)) 1025 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1002 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr, 1026 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1003 rp->status); 1027 ACL_LINK, 0, rp->status);
1004 1028
1005 hci_dev_unlock(hdev); 1029 hci_dev_unlock(hdev);
1006} 1030}
@@ -1023,6 +1047,15 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1023 __u8 status = *((__u8 *) skb->data); 1047 __u8 status = *((__u8 *) skb->data);
1024 1048
1025 BT_DBG("%s status 0x%x", hdev->name, status); 1049 BT_DBG("%s status 0x%x", hdev->name, status);
1050
1051 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1052
1053 if (status) {
1054 hci_dev_lock(hdev);
1055 mgmt_start_discovery_failed(hdev, status);
1056 hci_dev_unlock(hdev);
1057 return;
1058 }
1026} 1059}
1027 1060
1028static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, 1061static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
@@ -1033,28 +1066,47 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1033 1066
1034 BT_DBG("%s status 0x%x", hdev->name, status); 1067 BT_DBG("%s status 0x%x", hdev->name, status);
1035 1068
1036 if (status)
1037 return;
1038
1039 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE); 1069 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1040 if (!cp) 1070 if (!cp)
1041 return; 1071 return;
1042 1072
1043 switch (cp->enable) { 1073 switch (cp->enable) {
1044 case LE_SCANNING_ENABLED: 1074 case LE_SCANNING_ENABLED:
1075 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1076
1077 if (status) {
1078 hci_dev_lock(hdev);
1079 mgmt_start_discovery_failed(hdev, status);
1080 hci_dev_unlock(hdev);
1081 return;
1082 }
1083
1045 set_bit(HCI_LE_SCAN, &hdev->dev_flags); 1084 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1046 1085
1047 cancel_delayed_work_sync(&hdev->adv_work); 1086 cancel_delayed_work_sync(&hdev->adv_work);
1048 1087
1049 hci_dev_lock(hdev); 1088 hci_dev_lock(hdev);
1050 hci_adv_entries_clear(hdev); 1089 hci_adv_entries_clear(hdev);
1090 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1051 hci_dev_unlock(hdev); 1091 hci_dev_unlock(hdev);
1052 break; 1092 break;
1053 1093
1054 case LE_SCANNING_DISABLED: 1094 case LE_SCANNING_DISABLED:
1095 if (status)
1096 return;
1097
1055 clear_bit(HCI_LE_SCAN, &hdev->dev_flags); 1098 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1056 1099
1057 schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT); 1100 schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
1101
1102 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED) {
1103 mgmt_interleaved_discovery(hdev);
1104 } else {
1105 hci_dev_lock(hdev);
1106 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1107 hci_dev_unlock(hdev);
1108 }
1109
1058 break; 1110 break;
1059 1111
1060 default: 1112 default:
@@ -1090,16 +1142,27 @@ static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1090static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev, 1142static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1091 struct sk_buff *skb) 1143 struct sk_buff *skb)
1092{ 1144{
1093 struct hci_cp_read_local_ext_features cp; 1145 struct hci_cp_write_le_host_supported *sent;
1094 __u8 status = *((__u8 *) skb->data); 1146 __u8 status = *((__u8 *) skb->data);
1095 1147
1096 BT_DBG("%s status 0x%x", hdev->name, status); 1148 BT_DBG("%s status 0x%x", hdev->name, status);
1097 1149
1098 if (status) 1150 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1151 if (!sent)
1099 return; 1152 return;
1100 1153
1101 cp.page = 0x01; 1154 if (!status) {
1102 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp); 1155 if (sent->le)
1156 hdev->host_features[0] |= LMP_HOST_LE;
1157 else
1158 hdev->host_features[0] &= ~LMP_HOST_LE;
1159 }
1160
1161 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1162 !test_bit(HCI_INIT, &hdev->flags))
1163 mgmt_le_enable_complete(hdev, sent->le, status);
1164
1165 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
1103} 1166}
1104 1167
1105static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 1168static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
@@ -1110,7 +1173,7 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1110 hci_req_complete(hdev, HCI_OP_INQUIRY, status); 1173 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1111 hci_conn_check_pending(hdev); 1174 hci_conn_check_pending(hdev);
1112 hci_dev_lock(hdev); 1175 hci_dev_lock(hdev);
1113 if (test_bit(HCI_MGMT, &hdev->flags)) 1176 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1114 mgmt_start_discovery_failed(hdev, status); 1177 mgmt_start_discovery_failed(hdev, status);
1115 hci_dev_unlock(hdev); 1178 hci_dev_unlock(hdev);
1116 return; 1179 return;
@@ -1119,7 +1182,7 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1119 set_bit(HCI_INQUIRY, &hdev->flags); 1182 set_bit(HCI_INQUIRY, &hdev->flags);
1120 1183
1121 hci_dev_lock(hdev); 1184 hci_dev_lock(hdev);
1122 mgmt_discovering(hdev, 1); 1185 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1123 hci_dev_unlock(hdev); 1186 hci_dev_unlock(hdev);
1124} 1187}
1125 1188
@@ -1153,7 +1216,7 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1153 if (!conn) { 1216 if (!conn) {
1154 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr); 1217 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1155 if (conn) { 1218 if (conn) {
1156 conn->out = 1; 1219 conn->out = true;
1157 conn->link_mode |= HCI_LM_MASTER; 1220 conn->link_mode |= HCI_LM_MASTER;
1158 } else 1221 } else
1159 BT_ERR("No memory for new connection"); 1222 BT_ERR("No memory for new connection");
@@ -1263,7 +1326,7 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1263 1326
1264 /* Only request authentication for SSP connections or non-SSP 1327 /* Only request authentication for SSP connections or non-SSP
1265 * devices with sec_level HIGH or if MITM protection is requested */ 1328 * devices with sec_level HIGH or if MITM protection is requested */
1266 if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) && 1329 if (!hci_conn_ssp_enabled(conn) &&
1267 conn->pending_sec_level != BT_SECURITY_HIGH && 1330 conn->pending_sec_level != BT_SECURITY_HIGH &&
1268 !(conn->auth_type & 0x01)) 1331 !(conn->auth_type & 0x01))
1269 return 0; 1332 return 0;
@@ -1271,6 +1334,73 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1271 return 1; 1334 return 1;
1272} 1335}
1273 1336
1337static inline int hci_resolve_name(struct hci_dev *hdev,
1338 struct inquiry_entry *e)
1339{
1340 struct hci_cp_remote_name_req cp;
1341
1342 memset(&cp, 0, sizeof(cp));
1343
1344 bacpy(&cp.bdaddr, &e->data.bdaddr);
1345 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1346 cp.pscan_mode = e->data.pscan_mode;
1347 cp.clock_offset = e->data.clock_offset;
1348
1349 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1350}
1351
1352static bool hci_resolve_next_name(struct hci_dev *hdev)
1353{
1354 struct discovery_state *discov = &hdev->discovery;
1355 struct inquiry_entry *e;
1356
1357 if (list_empty(&discov->resolve))
1358 return false;
1359
1360 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1361 if (hci_resolve_name(hdev, e) == 0) {
1362 e->name_state = NAME_PENDING;
1363 return true;
1364 }
1365
1366 return false;
1367}
1368
1369static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1370 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1371{
1372 struct discovery_state *discov = &hdev->discovery;
1373 struct inquiry_entry *e;
1374
1375 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1376 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1377 name_len, conn->dev_class);
1378
1379 if (discov->state == DISCOVERY_STOPPED)
1380 return;
1381
1382 if (discov->state == DISCOVERY_STOPPING)
1383 goto discov_complete;
1384
1385 if (discov->state != DISCOVERY_RESOLVING)
1386 return;
1387
1388 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1389 if (e) {
1390 e->name_state = NAME_KNOWN;
1391 list_del(&e->list);
1392 if (name)
1393 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1394 e->data.rssi, name, name_len);
1395 }
1396
1397 if (hci_resolve_next_name(hdev))
1398 return;
1399
1400discov_complete:
1401 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1402}
1403
1274static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) 1404static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1275{ 1405{
1276 struct hci_cp_remote_name_req *cp; 1406 struct hci_cp_remote_name_req *cp;
@@ -1290,13 +1420,17 @@ static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1290 hci_dev_lock(hdev); 1420 hci_dev_lock(hdev);
1291 1421
1292 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1422 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1423
1424 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1425 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1426
1293 if (!conn) 1427 if (!conn)
1294 goto unlock; 1428 goto unlock;
1295 1429
1296 if (!hci_outgoing_auth_needed(hdev, conn)) 1430 if (!hci_outgoing_auth_needed(hdev, conn))
1297 goto unlock; 1431 goto unlock;
1298 1432
1299 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { 1433 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1300 struct hci_cp_auth_requested cp; 1434 struct hci_cp_auth_requested cp;
1301 cp.handle = __cpu_to_le16(conn->handle); 1435 cp.handle = __cpu_to_le16(conn->handle);
1302 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 1436 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
@@ -1413,9 +1547,9 @@ static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1413 1547
1414 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1548 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1415 if (conn) { 1549 if (conn) {
1416 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend); 1550 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1417 1551
1418 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend)) 1552 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1419 hci_sco_setup(conn, status); 1553 hci_sco_setup(conn, status);
1420 } 1554 }
1421 1555
@@ -1440,15 +1574,37 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1440 1574
1441 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1575 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1442 if (conn) { 1576 if (conn) {
1443 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend); 1577 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1444 1578
1445 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend)) 1579 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1446 hci_sco_setup(conn, status); 1580 hci_sco_setup(conn, status);
1447 } 1581 }
1448 1582
1449 hci_dev_unlock(hdev); 1583 hci_dev_unlock(hdev);
1450} 1584}
1451 1585
1586static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1587{
1588 struct hci_cp_disconnect *cp;
1589 struct hci_conn *conn;
1590
1591 if (!status)
1592 return;
1593
1594 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1595 if (!cp)
1596 return;
1597
1598 hci_dev_lock(hdev);
1599
1600 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1601 if (conn)
1602 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1603 conn->dst_type, status);
1604
1605 hci_dev_unlock(hdev);
1606}
1607
1452static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status) 1608static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1453{ 1609{
1454 struct hci_cp_le_create_conn *cp; 1610 struct hci_cp_le_create_conn *cp;
@@ -1478,7 +1634,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1478 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr); 1634 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1479 if (conn) { 1635 if (conn) {
1480 conn->dst_type = cp->peer_addr_type; 1636 conn->dst_type = cp->peer_addr_type;
1481 conn->out = 1; 1637 conn->out = true;
1482 } else { 1638 } else {
1483 BT_ERR("No memory for new connection"); 1639 BT_ERR("No memory for new connection");
1484 } 1640 }
@@ -1496,6 +1652,8 @@ static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1496static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1652static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1497{ 1653{
1498 __u8 status = *((__u8 *) skb->data); 1654 __u8 status = *((__u8 *) skb->data);
1655 struct discovery_state *discov = &hdev->discovery;
1656 struct inquiry_entry *e;
1499 1657
1500 BT_DBG("%s status %d", hdev->name, status); 1658 BT_DBG("%s status %d", hdev->name, status);
1501 1659
@@ -1506,8 +1664,28 @@ static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff
1506 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 1664 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1507 return; 1665 return;
1508 1666
1667 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1668 return;
1669
1509 hci_dev_lock(hdev); 1670 hci_dev_lock(hdev);
1510 mgmt_discovering(hdev, 0); 1671
1672 if (discov->state != DISCOVERY_FINDING)
1673 goto unlock;
1674
1675 if (list_empty(&discov->resolve)) {
1676 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1677 goto unlock;
1678 }
1679
1680 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1681 if (e && hci_resolve_name(hdev, e) == 0) {
1682 e->name_state = NAME_PENDING;
1683 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1684 } else {
1685 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1686 }
1687
1688unlock:
1511 hci_dev_unlock(hdev); 1689 hci_dev_unlock(hdev);
1512} 1690}
1513 1691
@@ -1525,6 +1703,8 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
1525 hci_dev_lock(hdev); 1703 hci_dev_lock(hdev);
1526 1704
1527 for (; num_rsp; num_rsp--, info++) { 1705 for (; num_rsp; num_rsp--, info++) {
1706 bool name_known, ssp;
1707
1528 bacpy(&data.bdaddr, &info->bdaddr); 1708 bacpy(&data.bdaddr, &info->bdaddr);
1529 data.pscan_rep_mode = info->pscan_rep_mode; 1709 data.pscan_rep_mode = info->pscan_rep_mode;
1530 data.pscan_period_mode = info->pscan_period_mode; 1710 data.pscan_period_mode = info->pscan_period_mode;
@@ -1533,9 +1713,11 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
1533 data.clock_offset = info->clock_offset; 1713 data.clock_offset = info->clock_offset;
1534 data.rssi = 0x00; 1714 data.rssi = 0x00;
1535 data.ssp_mode = 0x00; 1715 data.ssp_mode = 0x00;
1536 hci_inquiry_cache_update(hdev, &data); 1716
1717 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1537 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 1718 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1538 info->dev_class, 0, NULL); 1719 info->dev_class, 0, !name_known, ssp, NULL,
1720 0);
1539 } 1721 }
1540 1722
1541 hci_dev_unlock(hdev); 1723 hci_dev_unlock(hdev);
@@ -1569,8 +1751,6 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1569 conn->state = BT_CONFIG; 1751 conn->state = BT_CONFIG;
1570 hci_conn_hold(conn); 1752 hci_conn_hold(conn);
1571 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 1753 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1572 mgmt_connected(hdev, &ev->bdaddr, conn->type,
1573 conn->dst_type);
1574 } else 1754 } else
1575 conn->state = BT_CONNECTED; 1755 conn->state = BT_CONNECTED;
1576 1756
@@ -1588,7 +1768,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1588 struct hci_cp_read_remote_features cp; 1768 struct hci_cp_read_remote_features cp;
1589 cp.handle = ev->handle; 1769 cp.handle = ev->handle;
1590 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, 1770 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1591 sizeof(cp), &cp); 1771 sizeof(cp), &cp);
1592 } 1772 }
1593 1773
1594 /* Set packet type for incoming connection */ 1774 /* Set packet type for incoming connection */
@@ -1596,14 +1776,14 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1596 struct hci_cp_change_conn_ptype cp; 1776 struct hci_cp_change_conn_ptype cp;
1597 cp.handle = ev->handle; 1777 cp.handle = ev->handle;
1598 cp.pkt_type = cpu_to_le16(conn->pkt_type); 1778 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1599 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, 1779 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1600 sizeof(cp), &cp); 1780 &cp);
1601 } 1781 }
1602 } else { 1782 } else {
1603 conn->state = BT_CLOSED; 1783 conn->state = BT_CLOSED;
1604 if (conn->type == ACL_LINK) 1784 if (conn->type == ACL_LINK)
1605 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type, 1785 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1606 conn->dst_type, ev->status); 1786 conn->dst_type, ev->status);
1607 } 1787 }
1608 1788
1609 if (conn->type == ACL_LINK) 1789 if (conn->type == ACL_LINK)
@@ -1668,8 +1848,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
1668 else 1848 else
1669 cp.role = 0x01; /* Remain slave */ 1849 cp.role = 0x01; /* Remain slave */
1670 1850
1671 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, 1851 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1672 sizeof(cp), &cp); 1852 &cp);
1673 } else { 1853 } else {
1674 struct hci_cp_accept_sync_conn_req cp; 1854 struct hci_cp_accept_sync_conn_req cp;
1675 1855
@@ -1683,7 +1863,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
1683 cp.retrans_effort = 0xff; 1863 cp.retrans_effort = 0xff;
1684 1864
1685 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, 1865 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1686 sizeof(cp), &cp); 1866 sizeof(cp), &cp);
1687 } 1867 }
1688 } else { 1868 } else {
1689 /* Connection rejected */ 1869 /* Connection rejected */
@@ -1711,12 +1891,14 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
1711 if (ev->status == 0) 1891 if (ev->status == 0)
1712 conn->state = BT_CLOSED; 1892 conn->state = BT_CLOSED;
1713 1893
1714 if (conn->type == ACL_LINK || conn->type == LE_LINK) { 1894 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1895 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1715 if (ev->status != 0) 1896 if (ev->status != 0)
1716 mgmt_disconnect_failed(hdev, &conn->dst, ev->status); 1897 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1898 conn->dst_type, ev->status);
1717 else 1899 else
1718 mgmt_disconnected(hdev, &conn->dst, conn->type, 1900 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1719 conn->dst_type); 1901 conn->dst_type);
1720 } 1902 }
1721 1903
1722 if (ev->status == 0) { 1904 if (ev->status == 0) {
@@ -1742,22 +1924,23 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1742 goto unlock; 1924 goto unlock;
1743 1925
1744 if (!ev->status) { 1926 if (!ev->status) {
1745 if (!(conn->ssp_mode > 0 && hdev->ssp_mode > 0) && 1927 if (!hci_conn_ssp_enabled(conn) &&
1746 test_bit(HCI_CONN_REAUTH_PEND, &conn->pend)) { 1928 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1747 BT_INFO("re-auth of legacy device is not possible."); 1929 BT_INFO("re-auth of legacy device is not possible.");
1748 } else { 1930 } else {
1749 conn->link_mode |= HCI_LM_AUTH; 1931 conn->link_mode |= HCI_LM_AUTH;
1750 conn->sec_level = conn->pending_sec_level; 1932 conn->sec_level = conn->pending_sec_level;
1751 } 1933 }
1752 } else { 1934 } else {
1753 mgmt_auth_failed(hdev, &conn->dst, ev->status); 1935 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1936 ev->status);
1754 } 1937 }
1755 1938
1756 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend); 1939 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1757 clear_bit(HCI_CONN_REAUTH_PEND, &conn->pend); 1940 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1758 1941
1759 if (conn->state == BT_CONFIG) { 1942 if (conn->state == BT_CONFIG) {
1760 if (!ev->status && hdev->ssp_mode > 0 && conn->ssp_mode > 0) { 1943 if (!ev->status && hci_conn_ssp_enabled(conn)) {
1761 struct hci_cp_set_conn_encrypt cp; 1944 struct hci_cp_set_conn_encrypt cp;
1762 cp.handle = ev->handle; 1945 cp.handle = ev->handle;
1763 cp.encrypt = 0x01; 1946 cp.encrypt = 0x01;
@@ -1776,7 +1959,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1776 hci_conn_put(conn); 1959 hci_conn_put(conn);
1777 } 1960 }
1778 1961
1779 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) { 1962 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1780 if (!ev->status) { 1963 if (!ev->status) {
1781 struct hci_cp_set_conn_encrypt cp; 1964 struct hci_cp_set_conn_encrypt cp;
1782 cp.handle = ev->handle; 1965 cp.handle = ev->handle;
@@ -1784,7 +1967,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1784 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 1967 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1785 &cp); 1968 &cp);
1786 } else { 1969 } else {
1787 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend); 1970 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1788 hci_encrypt_cfm(conn, ev->status, 0x00); 1971 hci_encrypt_cfm(conn, ev->status, 0x00);
1789 } 1972 }
1790 } 1973 }
@@ -1804,17 +1987,25 @@ static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb
1804 1987
1805 hci_dev_lock(hdev); 1988 hci_dev_lock(hdev);
1806 1989
1807 if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
1808 mgmt_remote_name(hdev, &ev->bdaddr, ev->name);
1809
1810 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 1990 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1991
1992 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1993 goto check_auth;
1994
1995 if (ev->status == 0)
1996 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
1997 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
1998 else
1999 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2000
2001check_auth:
1811 if (!conn) 2002 if (!conn)
1812 goto unlock; 2003 goto unlock;
1813 2004
1814 if (!hci_outgoing_auth_needed(hdev, conn)) 2005 if (!hci_outgoing_auth_needed(hdev, conn))
1815 goto unlock; 2006 goto unlock;
1816 2007
1817 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { 2008 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1818 struct hci_cp_auth_requested cp; 2009 struct hci_cp_auth_requested cp;
1819 cp.handle = __cpu_to_le16(conn->handle); 2010 cp.handle = __cpu_to_le16(conn->handle);
1820 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 2011 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
@@ -1845,7 +2036,7 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *
1845 conn->link_mode &= ~HCI_LM_ENCRYPT; 2036 conn->link_mode &= ~HCI_LM_ENCRYPT;
1846 } 2037 }
1847 2038
1848 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend); 2039 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1849 2040
1850 if (conn->state == BT_CONFIG) { 2041 if (conn->state == BT_CONFIG) {
1851 if (!ev->status) 2042 if (!ev->status)
@@ -1874,7 +2065,7 @@ static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct
1874 if (!ev->status) 2065 if (!ev->status)
1875 conn->link_mode |= HCI_LM_SECURE; 2066 conn->link_mode |= HCI_LM_SECURE;
1876 2067
1877 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend); 2068 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1878 2069
1879 hci_key_change_cfm(conn, ev->status); 2070 hci_key_change_cfm(conn, ev->status);
1880 } 2071 }
@@ -1916,7 +2107,10 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff
1916 bacpy(&cp.bdaddr, &conn->dst); 2107 bacpy(&cp.bdaddr, &conn->dst);
1917 cp.pscan_rep_mode = 0x02; 2108 cp.pscan_rep_mode = 0x02;
1918 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 2109 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1919 } 2110 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2111 mgmt_device_connected(hdev, &conn->dst, conn->type,
2112 conn->dst_type, 0, NULL, 0,
2113 conn->dev_class);
1920 2114
1921 if (!hci_outgoing_auth_needed(hdev, conn)) { 2115 if (!hci_outgoing_auth_needed(hdev, conn)) {
1922 conn->state = BT_CONNECTED; 2116 conn->state = BT_CONNECTED;
@@ -2024,10 +2218,6 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
2024 hci_cc_host_buffer_size(hdev, skb); 2218 hci_cc_host_buffer_size(hdev, skb);
2025 break; 2219 break;
2026 2220
2027 case HCI_OP_READ_SSP_MODE:
2028 hci_cc_read_ssp_mode(hdev, skb);
2029 break;
2030
2031 case HCI_OP_WRITE_SSP_MODE: 2221 case HCI_OP_WRITE_SSP_MODE:
2032 hci_cc_write_ssp_mode(hdev, skb); 2222 hci_cc_write_ssp_mode(hdev, skb);
2033 break; 2223 break;
@@ -2213,8 +2403,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2213 break; 2403 break;
2214 2404
2215 case HCI_OP_DISCONNECT: 2405 case HCI_OP_DISCONNECT:
2216 if (ev->status != 0) 2406 hci_cs_disconnect(hdev, ev->status);
2217 mgmt_disconnect_failed(hdev, NULL, ev->status);
2218 break; 2407 break;
2219 2408
2220 case HCI_OP_LE_CREATE_CONN: 2409 case HCI_OP_LE_CREATE_CONN:
@@ -2258,7 +2447,7 @@ static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb
2258 conn->link_mode |= HCI_LM_MASTER; 2447 conn->link_mode |= HCI_LM_MASTER;
2259 } 2448 }
2260 2449
2261 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend); 2450 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2262 2451
2263 hci_role_switch_cfm(conn, ev->status, ev->role); 2452 hci_role_switch_cfm(conn, ev->status, ev->role);
2264 } 2453 }
@@ -2332,6 +2521,56 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
2332 queue_work(hdev->workqueue, &hdev->tx_work); 2521 queue_work(hdev->workqueue, &hdev->tx_work);
2333} 2522}
2334 2523
2524static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2525 struct sk_buff *skb)
2526{
2527 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2528 int i;
2529
2530 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2531 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2532 return;
2533 }
2534
2535 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2536 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2537 BT_DBG("%s bad parameters", hdev->name);
2538 return;
2539 }
2540
2541 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2542 ev->num_hndl);
2543
2544 for (i = 0; i < ev->num_hndl; i++) {
2545 struct hci_comp_blocks_info *info = &ev->handles[i];
2546 struct hci_conn *conn;
2547 __u16 handle, block_count;
2548
2549 handle = __le16_to_cpu(info->handle);
2550 block_count = __le16_to_cpu(info->blocks);
2551
2552 conn = hci_conn_hash_lookup_handle(hdev, handle);
2553 if (!conn)
2554 continue;
2555
2556 conn->sent -= block_count;
2557
2558 switch (conn->type) {
2559 case ACL_LINK:
2560 hdev->block_cnt += block_count;
2561 if (hdev->block_cnt > hdev->num_blocks)
2562 hdev->block_cnt = hdev->num_blocks;
2563 break;
2564
2565 default:
2566 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2567 break;
2568 }
2569 }
2570
2571 queue_work(hdev->workqueue, &hdev->tx_work);
2572}
2573
2335static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2574static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2336{ 2575{
2337 struct hci_ev_mode_change *ev = (void *) skb->data; 2576 struct hci_ev_mode_change *ev = (void *) skb->data;
@@ -2346,14 +2585,14 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb
2346 conn->mode = ev->mode; 2585 conn->mode = ev->mode;
2347 conn->interval = __le16_to_cpu(ev->interval); 2586 conn->interval = __le16_to_cpu(ev->interval);
2348 2587
2349 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { 2588 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2350 if (conn->mode == HCI_CM_ACTIVE) 2589 if (conn->mode == HCI_CM_ACTIVE)
2351 conn->power_save = 1; 2590 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2352 else 2591 else
2353 conn->power_save = 0; 2592 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2354 } 2593 }
2355 2594
2356 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend)) 2595 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2357 hci_sco_setup(conn, ev->status); 2596 hci_sco_setup(conn, ev->status);
2358 } 2597 }
2359 2598
@@ -2379,10 +2618,10 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
2379 hci_conn_put(conn); 2618 hci_conn_put(conn);
2380 } 2619 }
2381 2620
2382 if (!test_bit(HCI_PAIRABLE, &hdev->flags)) 2621 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2383 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, 2622 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2384 sizeof(ev->bdaddr), &ev->bdaddr); 2623 sizeof(ev->bdaddr), &ev->bdaddr);
2385 else if (test_bit(HCI_MGMT, &hdev->flags)) { 2624 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2386 u8 secure; 2625 u8 secure;
2387 2626
2388 if (conn->pending_sec_level == BT_SECURITY_HIGH) 2627 if (conn->pending_sec_level == BT_SECURITY_HIGH)
@@ -2406,7 +2645,7 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
2406 2645
2407 BT_DBG("%s", hdev->name); 2646 BT_DBG("%s", hdev->name);
2408 2647
2409 if (!test_bit(HCI_LINK_KEYS, &hdev->flags)) 2648 if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2410 return; 2649 return;
2411 2650
2412 hci_dev_lock(hdev); 2651 hci_dev_lock(hdev);
@@ -2421,7 +2660,7 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
2421 BT_DBG("%s found key type %u for %s", hdev->name, key->type, 2660 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2422 batostr(&ev->bdaddr)); 2661 batostr(&ev->bdaddr));
2423 2662
2424 if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) && 2663 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2425 key->type == HCI_LK_DEBUG_COMBINATION) { 2664 key->type == HCI_LK_DEBUG_COMBINATION) {
2426 BT_DBG("%s ignoring debug key", hdev->name); 2665 BT_DBG("%s ignoring debug key", hdev->name);
2427 goto not_found; 2666 goto not_found;
@@ -2483,7 +2722,7 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff
2483 hci_conn_put(conn); 2722 hci_conn_put(conn);
2484 } 2723 }
2485 2724
2486 if (test_bit(HCI_LINK_KEYS, &hdev->flags)) 2725 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2487 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key, 2726 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2488 ev->key_type, pin_len); 2727 ev->key_type, pin_len);
2489 2728
@@ -2551,6 +2790,7 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
2551{ 2790{
2552 struct inquiry_data data; 2791 struct inquiry_data data;
2553 int num_rsp = *((__u8 *) skb->data); 2792 int num_rsp = *((__u8 *) skb->data);
2793 bool name_known, ssp;
2554 2794
2555 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 2795 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2556 2796
@@ -2572,10 +2812,12 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
2572 data.clock_offset = info->clock_offset; 2812 data.clock_offset = info->clock_offset;
2573 data.rssi = info->rssi; 2813 data.rssi = info->rssi;
2574 data.ssp_mode = 0x00; 2814 data.ssp_mode = 0x00;
2575 hci_inquiry_cache_update(hdev, &data); 2815
2816 name_known = hci_inquiry_cache_update(hdev, &data,
2817 false, &ssp);
2576 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 2818 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2577 info->dev_class, info->rssi, 2819 info->dev_class, info->rssi,
2578 NULL); 2820 !name_known, ssp, NULL, 0);
2579 } 2821 }
2580 } else { 2822 } else {
2581 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); 2823 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
@@ -2589,10 +2831,11 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
2589 data.clock_offset = info->clock_offset; 2831 data.clock_offset = info->clock_offset;
2590 data.rssi = info->rssi; 2832 data.rssi = info->rssi;
2591 data.ssp_mode = 0x00; 2833 data.ssp_mode = 0x00;
2592 hci_inquiry_cache_update(hdev, &data); 2834 name_known = hci_inquiry_cache_update(hdev, &data,
2835 false, &ssp);
2593 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 2836 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2594 info->dev_class, info->rssi, 2837 info->dev_class, info->rssi,
2595 NULL); 2838 !name_known, ssp, NULL, 0);
2596 } 2839 }
2597 } 2840 }
2598 2841
@@ -2617,9 +2860,10 @@ static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_b
2617 2860
2618 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 2861 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2619 if (ie) 2862 if (ie)
2620 ie->data.ssp_mode = (ev->features[0] & 0x01); 2863 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2621 2864
2622 conn->ssp_mode = (ev->features[0] & 0x01); 2865 if (ev->features[0] & LMP_HOST_SSP)
2866 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2623 } 2867 }
2624 2868
2625 if (conn->state != BT_CONFIG) 2869 if (conn->state != BT_CONFIG)
@@ -2631,7 +2875,10 @@ static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_b
2631 bacpy(&cp.bdaddr, &conn->dst); 2875 bacpy(&cp.bdaddr, &conn->dst);
2632 cp.pscan_rep_mode = 0x02; 2876 cp.pscan_rep_mode = 0x02;
2633 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 2877 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2634 } 2878 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2879 mgmt_device_connected(hdev, &conn->dst, conn->type,
2880 conn->dst_type, 0, NULL, 0,
2881 conn->dev_class);
2635 2882
2636 if (!hci_outgoing_auth_needed(hdev, conn)) { 2883 if (!hci_outgoing_auth_needed(hdev, conn)) {
2637 conn->state = BT_CONNECTED; 2884 conn->state = BT_CONNECTED;
@@ -2724,6 +2971,8 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
2724 hci_dev_lock(hdev); 2971 hci_dev_lock(hdev);
2725 2972
2726 for (; num_rsp; num_rsp--, info++) { 2973 for (; num_rsp; num_rsp--, info++) {
2974 bool name_known, ssp;
2975
2727 bacpy(&data.bdaddr, &info->bdaddr); 2976 bacpy(&data.bdaddr, &info->bdaddr);
2728 data.pscan_rep_mode = info->pscan_rep_mode; 2977 data.pscan_rep_mode = info->pscan_rep_mode;
2729 data.pscan_period_mode = info->pscan_period_mode; 2978 data.pscan_period_mode = info->pscan_period_mode;
@@ -2732,9 +2981,19 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
2732 data.clock_offset = info->clock_offset; 2981 data.clock_offset = info->clock_offset;
2733 data.rssi = info->rssi; 2982 data.rssi = info->rssi;
2734 data.ssp_mode = 0x01; 2983 data.ssp_mode = 0x01;
2735 hci_inquiry_cache_update(hdev, &data); 2984
2985 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2986 name_known = eir_has_data_type(info->data,
2987 sizeof(info->data),
2988 EIR_NAME_COMPLETE);
2989 else
2990 name_known = true;
2991
2992 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
2993 &ssp);
2736 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 2994 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2737 info->dev_class, info->rssi, info->data); 2995 info->dev_class, info->rssi, !name_known,
2996 ssp, info->data, sizeof(info->data));
2738 } 2997 }
2739 2998
2740 hci_dev_unlock(hdev); 2999 hci_dev_unlock(hdev);
@@ -2774,19 +3033,22 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
2774 3033
2775 hci_conn_hold(conn); 3034 hci_conn_hold(conn);
2776 3035
2777 if (!test_bit(HCI_MGMT, &hdev->flags)) 3036 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2778 goto unlock; 3037 goto unlock;
2779 3038
2780 if (test_bit(HCI_PAIRABLE, &hdev->flags) || 3039 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
2781 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { 3040 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
2782 struct hci_cp_io_capability_reply cp; 3041 struct hci_cp_io_capability_reply cp;
2783 3042
2784 bacpy(&cp.bdaddr, &ev->bdaddr); 3043 bacpy(&cp.bdaddr, &ev->bdaddr);
2785 cp.capability = conn->io_capability; 3044 /* Change the IO capability from KeyboardDisplay
3045 * to DisplayYesNo as it is not supported by BT spec. */
3046 cp.capability = (conn->io_capability == 0x04) ?
3047 0x01 : conn->io_capability;
2786 conn->auth_type = hci_get_auth_req(conn); 3048 conn->auth_type = hci_get_auth_req(conn);
2787 cp.authentication = conn->auth_type; 3049 cp.authentication = conn->auth_type;
2788 3050
2789 if ((conn->out == 0x01 || conn->remote_oob == 0x01) && 3051 if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
2790 hci_find_remote_oob_data(hdev, &conn->dst)) 3052 hci_find_remote_oob_data(hdev, &conn->dst))
2791 cp.oob_data = 0x01; 3053 cp.oob_data = 0x01;
2792 else 3054 else
@@ -2822,8 +3084,9 @@ static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *s
2822 goto unlock; 3084 goto unlock;
2823 3085
2824 conn->remote_cap = ev->capability; 3086 conn->remote_cap = ev->capability;
2825 conn->remote_oob = ev->oob_data;
2826 conn->remote_auth = ev->authentication; 3087 conn->remote_auth = ev->authentication;
3088 if (ev->oob_data)
3089 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
2827 3090
2828unlock: 3091unlock:
2829 hci_dev_unlock(hdev); 3092 hci_dev_unlock(hdev);
@@ -2840,7 +3103,7 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
2840 3103
2841 hci_dev_lock(hdev); 3104 hci_dev_lock(hdev);
2842 3105
2843 if (!test_bit(HCI_MGMT, &hdev->flags)) 3106 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2844 goto unlock; 3107 goto unlock;
2845 3108
2846 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3109 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
@@ -2869,7 +3132,7 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
2869 /* If we're not the initiators request authorization to 3132 /* If we're not the initiators request authorization to
2870 * proceed from user space (mgmt_user_confirm with 3133 * proceed from user space (mgmt_user_confirm with
2871 * confirm_hint set to 1). */ 3134 * confirm_hint set to 1). */
2872 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { 3135 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2873 BT_DBG("Confirming auto-accept as acceptor"); 3136 BT_DBG("Confirming auto-accept as acceptor");
2874 confirm_hint = 1; 3137 confirm_hint = 1;
2875 goto confirm; 3138 goto confirm;
@@ -2890,8 +3153,8 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
2890 } 3153 }
2891 3154
2892confirm: 3155confirm:
2893 mgmt_user_confirm_request(hdev, &ev->bdaddr, ev->passkey, 3156 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
2894 confirm_hint); 3157 confirm_hint);
2895 3158
2896unlock: 3159unlock:
2897 hci_dev_unlock(hdev); 3160 hci_dev_unlock(hdev);
@@ -2906,8 +3169,8 @@ static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
2906 3169
2907 hci_dev_lock(hdev); 3170 hci_dev_lock(hdev);
2908 3171
2909 if (test_bit(HCI_MGMT, &hdev->flags)) 3172 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2910 mgmt_user_passkey_request(hdev, &ev->bdaddr); 3173 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
2911 3174
2912 hci_dev_unlock(hdev); 3175 hci_dev_unlock(hdev);
2913} 3176}
@@ -2930,8 +3193,9 @@ static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_
2930 * initiated the authentication. A traditional auth_complete 3193 * initiated the authentication. A traditional auth_complete
2931 * event gets always produced as initiator and is also mapped to 3194 * event gets always produced as initiator and is also mapped to
2932 * the mgmt_auth_failed event */ 3195 * the mgmt_auth_failed event */
2933 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0) 3196 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
2934 mgmt_auth_failed(hdev, &conn->dst, ev->status); 3197 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3198 ev->status);
2935 3199
2936 hci_conn_put(conn); 3200 hci_conn_put(conn);
2937 3201
@@ -2950,13 +3214,13 @@ static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_
2950 3214
2951 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 3215 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2952 if (ie) 3216 if (ie)
2953 ie->data.ssp_mode = (ev->features[0] & 0x01); 3217 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2954 3218
2955 hci_dev_unlock(hdev); 3219 hci_dev_unlock(hdev);
2956} 3220}
2957 3221
2958static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev, 3222static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
2959 struct sk_buff *skb) 3223 struct sk_buff *skb)
2960{ 3224{
2961 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data; 3225 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
2962 struct oob_data *data; 3226 struct oob_data *data;
@@ -2965,7 +3229,7 @@ static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
2965 3229
2966 hci_dev_lock(hdev); 3230 hci_dev_lock(hdev);
2967 3231
2968 if (!test_bit(HCI_MGMT, &hdev->flags)) 3232 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2969 goto unlock; 3233 goto unlock;
2970 3234
2971 data = hci_find_remote_oob_data(hdev, &ev->bdaddr); 3235 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
@@ -3020,7 +3284,9 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
3020 goto unlock; 3284 goto unlock;
3021 } 3285 }
3022 3286
3023 mgmt_connected(hdev, &ev->bdaddr, conn->type, conn->dst_type); 3287 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3288 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3289 conn->dst_type, 0, NULL, 0, NULL);
3024 3290
3025 conn->sec_level = BT_SECURITY_LOW; 3291 conn->sec_level = BT_SECURITY_LOW;
3026 conn->handle = __le16_to_cpu(ev->handle); 3292 conn->handle = __le16_to_cpu(ev->handle);
@@ -3040,6 +3306,7 @@ static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3040{ 3306{
3041 u8 num_reports = skb->data[0]; 3307 u8 num_reports = skb->data[0];
3042 void *ptr = &skb->data[1]; 3308 void *ptr = &skb->data[1];
3309 s8 rssi;
3043 3310
3044 hci_dev_lock(hdev); 3311 hci_dev_lock(hdev);
3045 3312
@@ -3048,6 +3315,10 @@ static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3048 3315
3049 hci_add_adv_entry(hdev, ev); 3316 hci_add_adv_entry(hdev, ev);
3050 3317
3318 rssi = ev->data[ev->length];
3319 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3320 NULL, rssi, 0, 1, ev->data, ev->length);
3321
3051 ptr += sizeof(*ev) + ev->length + 1; 3322 ptr += sizeof(*ev) + ev->length + 1;
3052 } 3323 }
3053 3324
@@ -3061,7 +3332,7 @@ static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3061 struct hci_cp_le_ltk_reply cp; 3332 struct hci_cp_le_ltk_reply cp;
3062 struct hci_cp_le_ltk_neg_reply neg; 3333 struct hci_cp_le_ltk_neg_reply neg;
3063 struct hci_conn *conn; 3334 struct hci_conn *conn;
3064 struct link_key *ltk; 3335 struct smp_ltk *ltk;
3065 3336
3066 BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle)); 3337 BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
3067 3338
@@ -3077,10 +3348,17 @@ static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3077 3348
3078 memcpy(cp.ltk, ltk->val, sizeof(ltk->val)); 3349 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3079 cp.handle = cpu_to_le16(conn->handle); 3350 cp.handle = cpu_to_le16(conn->handle);
3080 conn->pin_length = ltk->pin_len; 3351
3352 if (ltk->authenticated)
3353 conn->sec_level = BT_SECURITY_HIGH;
3081 3354
3082 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); 3355 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3083 3356
3357 if (ltk->type & HCI_SMP_STK) {
3358 list_del(&ltk->list);
3359 kfree(ltk);
3360 }
3361
3084 hci_dev_unlock(hdev); 3362 hci_dev_unlock(hdev);
3085 3363
3086 return; 3364 return;
@@ -3271,6 +3549,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3271 hci_remote_oob_data_request_evt(hdev, skb); 3549 hci_remote_oob_data_request_evt(hdev, skb);
3272 break; 3550 break;
3273 3551
3552 case HCI_EV_NUM_COMP_BLOCKS:
3553 hci_num_comp_blocks_evt(hdev, skb);
3554 break;
3555
3274 default: 3556 default:
3275 BT_DBG("%s event 0x%x", hdev->name, event); 3557 BT_DBG("%s event 0x%x", hdev->name, event);
3276 break; 3558 break;
@@ -3279,34 +3561,3 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3279 kfree_skb(skb); 3561 kfree_skb(skb);
3280 hdev->stat.evt_rx++; 3562 hdev->stat.evt_rx++;
3281} 3563}
3282
3283/* Generate internal stack event */
3284void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
3285{
3286 struct hci_event_hdr *hdr;
3287 struct hci_ev_stack_internal *ev;
3288 struct sk_buff *skb;
3289
3290 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
3291 if (!skb)
3292 return;
3293
3294 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
3295 hdr->evt = HCI_EV_STACK_INTERNAL;
3296 hdr->plen = sizeof(*ev) + dlen;
3297
3298 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
3299 ev->type = type;
3300 memcpy(ev->data, data, dlen);
3301
3302 bt_cb(skb)->incoming = 1;
3303 __net_timestamp(skb);
3304
3305 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3306 skb->dev = (void *) hdev;
3307 hci_send_to_sock(hdev, skb, NULL);
3308 kfree_skb(skb);
3309}
3310
3311module_param(enable_le, bool, 0644);
3312MODULE_PARM_DESC(enable_le, "Enable LE support");
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 0dcc96266779..63afd234283e 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -48,8 +48,9 @@
48 48
49#include <net/bluetooth/bluetooth.h> 49#include <net/bluetooth/bluetooth.h>
50#include <net/bluetooth/hci_core.h> 50#include <net/bluetooth/hci_core.h>
51#include <net/bluetooth/hci_mon.h>
51 52
52static bool enable_mgmt; 53static atomic_t monitor_promisc = ATOMIC_INIT(0);
53 54
54/* ----- HCI socket interface ----- */ 55/* ----- HCI socket interface ----- */
55 56
@@ -85,22 +86,20 @@ static struct bt_sock_list hci_sk_list = {
85}; 86};
86 87
87/* Send frame to RAW socket */ 88/* Send frame to RAW socket */
88void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb, 89void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
89 struct sock *skip_sk)
90{ 90{
91 struct sock *sk; 91 struct sock *sk;
92 struct hlist_node *node; 92 struct hlist_node *node;
93 struct sk_buff *skb_copy = NULL;
93 94
94 BT_DBG("hdev %p len %d", hdev, skb->len); 95 BT_DBG("hdev %p len %d", hdev, skb->len);
95 96
96 read_lock(&hci_sk_list.lock); 97 read_lock(&hci_sk_list.lock);
98
97 sk_for_each(sk, node, &hci_sk_list.head) { 99 sk_for_each(sk, node, &hci_sk_list.head) {
98 struct hci_filter *flt; 100 struct hci_filter *flt;
99 struct sk_buff *nskb; 101 struct sk_buff *nskb;
100 102
101 if (sk == skip_sk)
102 continue;
103
104 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev) 103 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
105 continue; 104 continue;
106 105
@@ -108,12 +107,9 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
108 if (skb->sk == sk) 107 if (skb->sk == sk)
109 continue; 108 continue;
110 109
111 if (bt_cb(skb)->channel != hci_pi(sk)->channel) 110 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
112 continue; 111 continue;
113 112
114 if (bt_cb(skb)->channel == HCI_CHANNEL_CONTROL)
115 goto clone;
116
117 /* Apply filter */ 113 /* Apply filter */
118 flt = &hci_pi(sk)->filter; 114 flt = &hci_pi(sk)->filter;
119 115
@@ -137,21 +133,303 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
137 continue; 133 continue;
138 } 134 }
139 135
140clone: 136 if (!skb_copy) {
137 /* Create a private copy with headroom */
138 skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
139 if (!skb_copy)
140 continue;
141
142 /* Put type byte before the data */
143 memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
144 }
145
146 nskb = skb_clone(skb_copy, GFP_ATOMIC);
147 if (!nskb)
148 continue;
149
150 if (sock_queue_rcv_skb(sk, nskb))
151 kfree_skb(nskb);
152 }
153
154 read_unlock(&hci_sk_list.lock);
155
156 kfree_skb(skb_copy);
157}
158
159/* Send frame to control socket */
160void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
161{
162 struct sock *sk;
163 struct hlist_node *node;
164
165 BT_DBG("len %d", skb->len);
166
167 read_lock(&hci_sk_list.lock);
168
169 sk_for_each(sk, node, &hci_sk_list.head) {
170 struct sk_buff *nskb;
171
172 /* Skip the original socket */
173 if (sk == skip_sk)
174 continue;
175
176 if (sk->sk_state != BT_BOUND)
177 continue;
178
179 if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
180 continue;
181
141 nskb = skb_clone(skb, GFP_ATOMIC); 182 nskb = skb_clone(skb, GFP_ATOMIC);
142 if (!nskb) 183 if (!nskb)
143 continue; 184 continue;
144 185
145 /* Put type byte before the data */ 186 if (sock_queue_rcv_skb(sk, nskb))
146 if (bt_cb(skb)->channel == HCI_CHANNEL_RAW) 187 kfree_skb(nskb);
147 memcpy(skb_push(nskb, 1), &bt_cb(nskb)->pkt_type, 1); 188 }
189
190 read_unlock(&hci_sk_list.lock);
191}
192
193/* Send frame to monitor socket */
194void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
195{
196 struct sock *sk;
197 struct hlist_node *node;
198 struct sk_buff *skb_copy = NULL;
199 __le16 opcode;
200
201 if (!atomic_read(&monitor_promisc))
202 return;
203
204 BT_DBG("hdev %p len %d", hdev, skb->len);
205
206 switch (bt_cb(skb)->pkt_type) {
207 case HCI_COMMAND_PKT:
208 opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
209 break;
210 case HCI_EVENT_PKT:
211 opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
212 break;
213 case HCI_ACLDATA_PKT:
214 if (bt_cb(skb)->incoming)
215 opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
216 else
217 opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
218 break;
219 case HCI_SCODATA_PKT:
220 if (bt_cb(skb)->incoming)
221 opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
222 else
223 opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
224 break;
225 default:
226 return;
227 }
228
229 read_lock(&hci_sk_list.lock);
230
231 sk_for_each(sk, node, &hci_sk_list.head) {
232 struct sk_buff *nskb;
233
234 if (sk->sk_state != BT_BOUND)
235 continue;
236
237 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
238 continue;
239
240 if (!skb_copy) {
241 struct hci_mon_hdr *hdr;
242
243 /* Create a private copy with headroom */
244 skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC);
245 if (!skb_copy)
246 continue;
247
248 /* Put header before the data */
249 hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
250 hdr->opcode = opcode;
251 hdr->index = cpu_to_le16(hdev->id);
252 hdr->len = cpu_to_le16(skb->len);
253 }
254
255 nskb = skb_clone(skb_copy, GFP_ATOMIC);
256 if (!nskb)
257 continue;
258
259 if (sock_queue_rcv_skb(sk, nskb))
260 kfree_skb(nskb);
261 }
262
263 read_unlock(&hci_sk_list.lock);
264
265 kfree_skb(skb_copy);
266}
267
268static void send_monitor_event(struct sk_buff *skb)
269{
270 struct sock *sk;
271 struct hlist_node *node;
272
273 BT_DBG("len %d", skb->len);
274
275 read_lock(&hci_sk_list.lock);
276
277 sk_for_each(sk, node, &hci_sk_list.head) {
278 struct sk_buff *nskb;
279
280 if (sk->sk_state != BT_BOUND)
281 continue;
282
283 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
284 continue;
285
286 nskb = skb_clone(skb, GFP_ATOMIC);
287 if (!nskb)
288 continue;
148 289
149 if (sock_queue_rcv_skb(sk, nskb)) 290 if (sock_queue_rcv_skb(sk, nskb))
150 kfree_skb(nskb); 291 kfree_skb(nskb);
151 } 292 }
293
152 read_unlock(&hci_sk_list.lock); 294 read_unlock(&hci_sk_list.lock);
153} 295}
154 296
297static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
298{
299 struct hci_mon_hdr *hdr;
300 struct hci_mon_new_index *ni;
301 struct sk_buff *skb;
302 __le16 opcode;
303
304 switch (event) {
305 case HCI_DEV_REG:
306 skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
307 if (!skb)
308 return NULL;
309
310 ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
311 ni->type = hdev->dev_type;
312 ni->bus = hdev->bus;
313 bacpy(&ni->bdaddr, &hdev->bdaddr);
314 memcpy(ni->name, hdev->name, 8);
315
316 opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
317 break;
318
319 case HCI_DEV_UNREG:
320 skb = bt_skb_alloc(0, GFP_ATOMIC);
321 if (!skb)
322 return NULL;
323
324 opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
325 break;
326
327 default:
328 return NULL;
329 }
330
331 __net_timestamp(skb);
332
333 hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
334 hdr->opcode = opcode;
335 hdr->index = cpu_to_le16(hdev->id);
336 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
337
338 return skb;
339}
340
341static void send_monitor_replay(struct sock *sk)
342{
343 struct hci_dev *hdev;
344
345 read_lock(&hci_dev_list_lock);
346
347 list_for_each_entry(hdev, &hci_dev_list, list) {
348 struct sk_buff *skb;
349
350 skb = create_monitor_event(hdev, HCI_DEV_REG);
351 if (!skb)
352 continue;
353
354 if (sock_queue_rcv_skb(sk, skb))
355 kfree_skb(skb);
356 }
357
358 read_unlock(&hci_dev_list_lock);
359}
360
361/* Generate internal stack event */
362static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
363{
364 struct hci_event_hdr *hdr;
365 struct hci_ev_stack_internal *ev;
366 struct sk_buff *skb;
367
368 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
369 if (!skb)
370 return;
371
372 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
373 hdr->evt = HCI_EV_STACK_INTERNAL;
374 hdr->plen = sizeof(*ev) + dlen;
375
376 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
377 ev->type = type;
378 memcpy(ev->data, data, dlen);
379
380 bt_cb(skb)->incoming = 1;
381 __net_timestamp(skb);
382
383 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
384 skb->dev = (void *) hdev;
385 hci_send_to_sock(hdev, skb);
386 kfree_skb(skb);
387}
388
389void hci_sock_dev_event(struct hci_dev *hdev, int event)
390{
391 struct hci_ev_si_device ev;
392
393 BT_DBG("hdev %s event %d", hdev->name, event);
394
395 /* Send event to monitor */
396 if (atomic_read(&monitor_promisc)) {
397 struct sk_buff *skb;
398
399 skb = create_monitor_event(hdev, event);
400 if (skb) {
401 send_monitor_event(skb);
402 kfree_skb(skb);
403 }
404 }
405
406 /* Send event to sockets */
407 ev.event = event;
408 ev.dev_id = hdev->id;
409 hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
410
411 if (event == HCI_DEV_UNREG) {
412 struct sock *sk;
413 struct hlist_node *node;
414
415 /* Detach sockets from device */
416 read_lock(&hci_sk_list.lock);
417 sk_for_each(sk, node, &hci_sk_list.head) {
418 bh_lock_sock_nested(sk);
419 if (hci_pi(sk)->hdev == hdev) {
420 hci_pi(sk)->hdev = NULL;
421 sk->sk_err = EPIPE;
422 sk->sk_state = BT_OPEN;
423 sk->sk_state_change(sk);
424
425 hci_dev_put(hdev);
426 }
427 bh_unlock_sock(sk);
428 }
429 read_unlock(&hci_sk_list.lock);
430 }
431}
432
155static int hci_sock_release(struct socket *sock) 433static int hci_sock_release(struct socket *sock)
156{ 434{
157 struct sock *sk = sock->sk; 435 struct sock *sk = sock->sk;
@@ -164,6 +442,9 @@ static int hci_sock_release(struct socket *sock)
164 442
165 hdev = hci_pi(sk)->hdev; 443 hdev = hci_pi(sk)->hdev;
166 444
445 if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
446 atomic_dec(&monitor_promisc);
447
167 bt_sock_unlink(&hci_sk_list, sk); 448 bt_sock_unlink(&hci_sk_list, sk);
168 449
169 if (hdev) { 450 if (hdev) {
@@ -190,7 +471,7 @@ static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
190 471
191 hci_dev_lock(hdev); 472 hci_dev_lock(hdev);
192 473
193 err = hci_blacklist_add(hdev, &bdaddr); 474 err = hci_blacklist_add(hdev, &bdaddr, 0);
194 475
195 hci_dev_unlock(hdev); 476 hci_dev_unlock(hdev);
196 477
@@ -207,7 +488,7 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
207 488
208 hci_dev_lock(hdev); 489 hci_dev_lock(hdev);
209 490
210 err = hci_blacklist_del(hdev, &bdaddr); 491 err = hci_blacklist_del(hdev, &bdaddr, 0);
211 492
212 hci_dev_unlock(hdev); 493 hci_dev_unlock(hdev);
213 494
@@ -340,34 +621,69 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
340 if (haddr.hci_family != AF_BLUETOOTH) 621 if (haddr.hci_family != AF_BLUETOOTH)
341 return -EINVAL; 622 return -EINVAL;
342 623
343 if (haddr.hci_channel > HCI_CHANNEL_CONTROL)
344 return -EINVAL;
345
346 if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
347 if (!enable_mgmt)
348 return -EINVAL;
349 set_bit(HCI_PI_MGMT_INIT, &hci_pi(sk)->flags);
350 }
351
352 lock_sock(sk); 624 lock_sock(sk);
353 625
354 if (sk->sk_state == BT_BOUND || hci_pi(sk)->hdev) { 626 if (sk->sk_state == BT_BOUND) {
355 err = -EALREADY; 627 err = -EALREADY;
356 goto done; 628 goto done;
357 } 629 }
358 630
359 if (haddr.hci_dev != HCI_DEV_NONE) { 631 switch (haddr.hci_channel) {
360 hdev = hci_dev_get(haddr.hci_dev); 632 case HCI_CHANNEL_RAW:
361 if (!hdev) { 633 if (hci_pi(sk)->hdev) {
362 err = -ENODEV; 634 err = -EALREADY;
363 goto done; 635 goto done;
364 } 636 }
365 637
366 atomic_inc(&hdev->promisc); 638 if (haddr.hci_dev != HCI_DEV_NONE) {
639 hdev = hci_dev_get(haddr.hci_dev);
640 if (!hdev) {
641 err = -ENODEV;
642 goto done;
643 }
644
645 atomic_inc(&hdev->promisc);
646 }
647
648 hci_pi(sk)->hdev = hdev;
649 break;
650
651 case HCI_CHANNEL_CONTROL:
652 if (haddr.hci_dev != HCI_DEV_NONE) {
653 err = -EINVAL;
654 goto done;
655 }
656
657 if (!capable(CAP_NET_ADMIN)) {
658 err = -EPERM;
659 goto done;
660 }
661
662 break;
663
664 case HCI_CHANNEL_MONITOR:
665 if (haddr.hci_dev != HCI_DEV_NONE) {
666 err = -EINVAL;
667 goto done;
668 }
669
670 if (!capable(CAP_NET_RAW)) {
671 err = -EPERM;
672 goto done;
673 }
674
675 send_monitor_replay(sk);
676
677 atomic_inc(&monitor_promisc);
678 break;
679
680 default:
681 err = -EINVAL;
682 goto done;
367 } 683 }
368 684
685
369 hci_pi(sk)->channel = haddr.hci_channel; 686 hci_pi(sk)->channel = haddr.hci_channel;
370 hci_pi(sk)->hdev = hdev;
371 sk->sk_state = BT_BOUND; 687 sk->sk_state = BT_BOUND;
372 688
373done: 689done:
@@ -461,7 +777,15 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
461 skb_reset_transport_header(skb); 777 skb_reset_transport_header(skb);
462 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); 778 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
463 779
464 hci_sock_cmsg(sk, msg, skb); 780 switch (hci_pi(sk)->channel) {
781 case HCI_CHANNEL_RAW:
782 hci_sock_cmsg(sk, msg, skb);
783 break;
784 case HCI_CHANNEL_CONTROL:
785 case HCI_CHANNEL_MONITOR:
786 sock_recv_timestamp(msg, sk, skb);
787 break;
788 }
465 789
466 skb_free_datagram(sk, skb); 790 skb_free_datagram(sk, skb);
467 791
@@ -495,6 +819,9 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
495 case HCI_CHANNEL_CONTROL: 819 case HCI_CHANNEL_CONTROL:
496 err = mgmt_control(sk, msg, len); 820 err = mgmt_control(sk, msg, len);
497 goto done; 821 goto done;
822 case HCI_CHANNEL_MONITOR:
823 err = -EOPNOTSUPP;
824 goto done;
498 default: 825 default:
499 err = -EINVAL; 826 err = -EINVAL;
500 goto done; 827 goto done;
@@ -574,6 +901,11 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char
574 901
575 lock_sock(sk); 902 lock_sock(sk);
576 903
904 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
905 err = -EINVAL;
906 goto done;
907 }
908
577 switch (optname) { 909 switch (optname) {
578 case HCI_DATA_DIR: 910 case HCI_DATA_DIR:
579 if (get_user(opt, (int __user *)optval)) { 911 if (get_user(opt, (int __user *)optval)) {
@@ -636,6 +968,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char
636 break; 968 break;
637 } 969 }
638 970
971done:
639 release_sock(sk); 972 release_sock(sk);
640 return err; 973 return err;
641} 974}
@@ -644,11 +977,20 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char
644{ 977{
645 struct hci_ufilter uf; 978 struct hci_ufilter uf;
646 struct sock *sk = sock->sk; 979 struct sock *sk = sock->sk;
647 int len, opt; 980 int len, opt, err = 0;
981
982 BT_DBG("sk %p, opt %d", sk, optname);
648 983
649 if (get_user(len, optlen)) 984 if (get_user(len, optlen))
650 return -EFAULT; 985 return -EFAULT;
651 986
987 lock_sock(sk);
988
989 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
990 err = -EINVAL;
991 goto done;
992 }
993
652 switch (optname) { 994 switch (optname) {
653 case HCI_DATA_DIR: 995 case HCI_DATA_DIR:
654 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR) 996 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
@@ -657,7 +999,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char
657 opt = 0; 999 opt = 0;
658 1000
659 if (put_user(opt, optval)) 1001 if (put_user(opt, optval))
660 return -EFAULT; 1002 err = -EFAULT;
661 break; 1003 break;
662 1004
663 case HCI_TIME_STAMP: 1005 case HCI_TIME_STAMP:
@@ -667,7 +1009,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char
667 opt = 0; 1009 opt = 0;
668 1010
669 if (put_user(opt, optval)) 1011 if (put_user(opt, optval))
670 return -EFAULT; 1012 err = -EFAULT;
671 break; 1013 break;
672 1014
673 case HCI_FILTER: 1015 case HCI_FILTER:
@@ -682,15 +1024,17 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char
682 1024
683 len = min_t(unsigned int, len, sizeof(uf)); 1025 len = min_t(unsigned int, len, sizeof(uf));
684 if (copy_to_user(optval, &uf, len)) 1026 if (copy_to_user(optval, &uf, len))
685 return -EFAULT; 1027 err = -EFAULT;
686 break; 1028 break;
687 1029
688 default: 1030 default:
689 return -ENOPROTOOPT; 1031 err = -ENOPROTOOPT;
690 break; 1032 break;
691 } 1033 }
692 1034
693 return 0; 1035done:
1036 release_sock(sk);
1037 return err;
694} 1038}
695 1039
696static const struct proto_ops hci_sock_ops = { 1040static const struct proto_ops hci_sock_ops = {
@@ -748,52 +1092,12 @@ static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
748 return 0; 1092 return 0;
749} 1093}
750 1094
751static int hci_sock_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
752{
753 struct hci_dev *hdev = (struct hci_dev *) ptr;
754 struct hci_ev_si_device ev;
755
756 BT_DBG("hdev %s event %ld", hdev->name, event);
757
758 /* Send event to sockets */
759 ev.event = event;
760 ev.dev_id = hdev->id;
761 hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
762
763 if (event == HCI_DEV_UNREG) {
764 struct sock *sk;
765 struct hlist_node *node;
766
767 /* Detach sockets from device */
768 read_lock(&hci_sk_list.lock);
769 sk_for_each(sk, node, &hci_sk_list.head) {
770 bh_lock_sock_nested(sk);
771 if (hci_pi(sk)->hdev == hdev) {
772 hci_pi(sk)->hdev = NULL;
773 sk->sk_err = EPIPE;
774 sk->sk_state = BT_OPEN;
775 sk->sk_state_change(sk);
776
777 hci_dev_put(hdev);
778 }
779 bh_unlock_sock(sk);
780 }
781 read_unlock(&hci_sk_list.lock);
782 }
783
784 return NOTIFY_DONE;
785}
786
787static const struct net_proto_family hci_sock_family_ops = { 1095static const struct net_proto_family hci_sock_family_ops = {
788 .family = PF_BLUETOOTH, 1096 .family = PF_BLUETOOTH,
789 .owner = THIS_MODULE, 1097 .owner = THIS_MODULE,
790 .create = hci_sock_create, 1098 .create = hci_sock_create,
791}; 1099};
792 1100
793static struct notifier_block hci_sock_nblock = {
794 .notifier_call = hci_sock_dev_event
795};
796
797int __init hci_sock_init(void) 1101int __init hci_sock_init(void)
798{ 1102{
799 int err; 1103 int err;
@@ -806,8 +1110,6 @@ int __init hci_sock_init(void)
806 if (err < 0) 1110 if (err < 0)
807 goto error; 1111 goto error;
808 1112
809 hci_register_notifier(&hci_sock_nblock);
810
811 BT_INFO("HCI socket layer initialized"); 1113 BT_INFO("HCI socket layer initialized");
812 1114
813 return 0; 1115 return 0;
@@ -823,10 +1125,5 @@ void hci_sock_cleanup(void)
823 if (bt_sock_unregister(BTPROTO_HCI) < 0) 1125 if (bt_sock_unregister(BTPROTO_HCI) < 0)
824 BT_ERR("HCI socket unregistration failed"); 1126 BT_ERR("HCI socket unregistration failed");
825 1127
826 hci_unregister_notifier(&hci_sock_nblock);
827
828 proto_unregister(&hci_sk_proto); 1128 proto_unregister(&hci_sk_proto);
829} 1129}
830
831module_param(enable_mgmt, bool, 0644);
832MODULE_PARM_DESC(enable_mgmt, "Enable Management interface");
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 521095614235..bc154298979a 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -33,19 +33,19 @@ static inline char *link_typetostr(int type)
33 33
34static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf) 34static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf)
35{ 35{
36 struct hci_conn *conn = dev_get_drvdata(dev); 36 struct hci_conn *conn = to_hci_conn(dev);
37 return sprintf(buf, "%s\n", link_typetostr(conn->type)); 37 return sprintf(buf, "%s\n", link_typetostr(conn->type));
38} 38}
39 39
40static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf) 40static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf)
41{ 41{
42 struct hci_conn *conn = dev_get_drvdata(dev); 42 struct hci_conn *conn = to_hci_conn(dev);
43 return sprintf(buf, "%s\n", batostr(&conn->dst)); 43 return sprintf(buf, "%s\n", batostr(&conn->dst));
44} 44}
45 45
46static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf) 46static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf)
47{ 47{
48 struct hci_conn *conn = dev_get_drvdata(dev); 48 struct hci_conn *conn = to_hci_conn(dev);
49 49
50 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 50 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
51 conn->features[0], conn->features[1], 51 conn->features[0], conn->features[1],
@@ -79,8 +79,8 @@ static const struct attribute_group *bt_link_groups[] = {
79 79
80static void bt_link_release(struct device *dev) 80static void bt_link_release(struct device *dev)
81{ 81{
82 void *data = dev_get_drvdata(dev); 82 struct hci_conn *conn = to_hci_conn(dev);
83 kfree(data); 83 kfree(conn);
84} 84}
85 85
86static struct device_type bt_link = { 86static struct device_type bt_link = {
@@ -120,8 +120,6 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
120 120
121 dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); 121 dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
122 122
123 dev_set_drvdata(&conn->dev, conn);
124
125 if (device_add(&conn->dev) < 0) { 123 if (device_add(&conn->dev) < 0) {
126 BT_ERR("Failed to register connection device"); 124 BT_ERR("Failed to register connection device");
127 return; 125 return;
@@ -189,19 +187,19 @@ static inline char *host_typetostr(int type)
189 187
190static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf) 188static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf)
191{ 189{
192 struct hci_dev *hdev = dev_get_drvdata(dev); 190 struct hci_dev *hdev = to_hci_dev(dev);
193 return sprintf(buf, "%s\n", host_bustostr(hdev->bus)); 191 return sprintf(buf, "%s\n", host_bustostr(hdev->bus));
194} 192}
195 193
196static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf) 194static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf)
197{ 195{
198 struct hci_dev *hdev = dev_get_drvdata(dev); 196 struct hci_dev *hdev = to_hci_dev(dev);
199 return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type)); 197 return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type));
200} 198}
201 199
202static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) 200static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
203{ 201{
204 struct hci_dev *hdev = dev_get_drvdata(dev); 202 struct hci_dev *hdev = to_hci_dev(dev);
205 char name[HCI_MAX_NAME_LENGTH + 1]; 203 char name[HCI_MAX_NAME_LENGTH + 1];
206 int i; 204 int i;
207 205
@@ -214,20 +212,20 @@ static ssize_t show_name(struct device *dev, struct device_attribute *attr, char
214 212
215static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf) 213static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf)
216{ 214{
217 struct hci_dev *hdev = dev_get_drvdata(dev); 215 struct hci_dev *hdev = to_hci_dev(dev);
218 return sprintf(buf, "0x%.2x%.2x%.2x\n", 216 return sprintf(buf, "0x%.2x%.2x%.2x\n",
219 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); 217 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
220} 218}
221 219
222static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf) 220static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf)
223{ 221{
224 struct hci_dev *hdev = dev_get_drvdata(dev); 222 struct hci_dev *hdev = to_hci_dev(dev);
225 return sprintf(buf, "%s\n", batostr(&hdev->bdaddr)); 223 return sprintf(buf, "%s\n", batostr(&hdev->bdaddr));
226} 224}
227 225
228static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf) 226static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf)
229{ 227{
230 struct hci_dev *hdev = dev_get_drvdata(dev); 228 struct hci_dev *hdev = to_hci_dev(dev);
231 229
232 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 230 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
233 hdev->features[0], hdev->features[1], 231 hdev->features[0], hdev->features[1],
@@ -238,31 +236,31 @@ static ssize_t show_features(struct device *dev, struct device_attribute *attr,
238 236
239static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf) 237static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf)
240{ 238{
241 struct hci_dev *hdev = dev_get_drvdata(dev); 239 struct hci_dev *hdev = to_hci_dev(dev);
242 return sprintf(buf, "%d\n", hdev->manufacturer); 240 return sprintf(buf, "%d\n", hdev->manufacturer);
243} 241}
244 242
245static ssize_t show_hci_version(struct device *dev, struct device_attribute *attr, char *buf) 243static ssize_t show_hci_version(struct device *dev, struct device_attribute *attr, char *buf)
246{ 244{
247 struct hci_dev *hdev = dev_get_drvdata(dev); 245 struct hci_dev *hdev = to_hci_dev(dev);
248 return sprintf(buf, "%d\n", hdev->hci_ver); 246 return sprintf(buf, "%d\n", hdev->hci_ver);
249} 247}
250 248
251static ssize_t show_hci_revision(struct device *dev, struct device_attribute *attr, char *buf) 249static ssize_t show_hci_revision(struct device *dev, struct device_attribute *attr, char *buf)
252{ 250{
253 struct hci_dev *hdev = dev_get_drvdata(dev); 251 struct hci_dev *hdev = to_hci_dev(dev);
254 return sprintf(buf, "%d\n", hdev->hci_rev); 252 return sprintf(buf, "%d\n", hdev->hci_rev);
255} 253}
256 254
257static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf) 255static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf)
258{ 256{
259 struct hci_dev *hdev = dev_get_drvdata(dev); 257 struct hci_dev *hdev = to_hci_dev(dev);
260 return sprintf(buf, "%d\n", hdev->idle_timeout); 258 return sprintf(buf, "%d\n", hdev->idle_timeout);
261} 259}
262 260
263static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 261static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
264{ 262{
265 struct hci_dev *hdev = dev_get_drvdata(dev); 263 struct hci_dev *hdev = to_hci_dev(dev);
266 unsigned int val; 264 unsigned int val;
267 int rv; 265 int rv;
268 266
@@ -280,13 +278,13 @@ static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *a
280 278
281static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf) 279static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf)
282{ 280{
283 struct hci_dev *hdev = dev_get_drvdata(dev); 281 struct hci_dev *hdev = to_hci_dev(dev);
284 return sprintf(buf, "%d\n", hdev->sniff_max_interval); 282 return sprintf(buf, "%d\n", hdev->sniff_max_interval);
285} 283}
286 284
287static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 285static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
288{ 286{
289 struct hci_dev *hdev = dev_get_drvdata(dev); 287 struct hci_dev *hdev = to_hci_dev(dev);
290 u16 val; 288 u16 val;
291 int rv; 289 int rv;
292 290
@@ -304,13 +302,13 @@ static ssize_t store_sniff_max_interval(struct device *dev, struct device_attrib
304 302
305static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf) 303static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf)
306{ 304{
307 struct hci_dev *hdev = dev_get_drvdata(dev); 305 struct hci_dev *hdev = to_hci_dev(dev);
308 return sprintf(buf, "%d\n", hdev->sniff_min_interval); 306 return sprintf(buf, "%d\n", hdev->sniff_min_interval);
309} 307}
310 308
311static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 309static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
312{ 310{
313 struct hci_dev *hdev = dev_get_drvdata(dev); 311 struct hci_dev *hdev = to_hci_dev(dev);
314 u16 val; 312 u16 val;
315 int rv; 313 int rv;
316 314
@@ -370,8 +368,9 @@ static const struct attribute_group *bt_host_groups[] = {
370 368
371static void bt_host_release(struct device *dev) 369static void bt_host_release(struct device *dev)
372{ 370{
373 void *data = dev_get_drvdata(dev); 371 struct hci_dev *hdev = to_hci_dev(dev);
374 kfree(data); 372 kfree(hdev);
373 module_put(THIS_MODULE);
375} 374}
376 375
377static struct device_type bt_host = { 376static struct device_type bt_host = {
@@ -383,12 +382,12 @@ static struct device_type bt_host = {
383static int inquiry_cache_show(struct seq_file *f, void *p) 382static int inquiry_cache_show(struct seq_file *f, void *p)
384{ 383{
385 struct hci_dev *hdev = f->private; 384 struct hci_dev *hdev = f->private;
386 struct inquiry_cache *cache = &hdev->inq_cache; 385 struct discovery_state *cache = &hdev->discovery;
387 struct inquiry_entry *e; 386 struct inquiry_entry *e;
388 387
389 hci_dev_lock(hdev); 388 hci_dev_lock(hdev);
390 389
391 for (e = cache->list; e; e = e->next) { 390 list_for_each_entry(e, &cache->all, all) {
392 struct inquiry_data *data = &e->data; 391 struct inquiry_data *data = &e->data;
393 seq_printf(f, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n", 392 seq_printf(f, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
394 batostr(&data->bdaddr), 393 batostr(&data->bdaddr),
@@ -523,7 +522,7 @@ void hci_init_sysfs(struct hci_dev *hdev)
523 dev->type = &bt_host; 522 dev->type = &bt_host;
524 dev->class = bt_class; 523 dev->class = bt_class;
525 524
526 dev_set_drvdata(dev, hdev); 525 __module_get(THIS_MODULE);
527 device_initialize(dev); 526 device_initialize(dev);
528} 527}
529 528
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 178ac7f127ad..73a32d705c1f 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -160,10 +160,10 @@ static int hidp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
160{ 160{
161 if (cmd == HIDPGETCONNLIST) { 161 if (cmd == HIDPGETCONNLIST) {
162 struct hidp_connlist_req cl; 162 struct hidp_connlist_req cl;
163 uint32_t uci; 163 u32 uci;
164 int err; 164 int err;
165 165
166 if (get_user(cl.cnum, (uint32_t __user *) arg) || 166 if (get_user(cl.cnum, (u32 __user *) arg) ||
167 get_user(uci, (u32 __user *) (arg + 4))) 167 get_user(uci, (u32 __user *) (arg + 4)))
168 return -EFAULT; 168 return -EFAULT;
169 169
@@ -174,7 +174,7 @@ static int hidp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
174 174
175 err = hidp_get_connlist(&cl); 175 err = hidp_get_connlist(&cl);
176 176
177 if (!err && put_user(cl.cnum, (uint32_t __user *) arg)) 177 if (!err && put_user(cl.cnum, (u32 __user *) arg))
178 err = -EFAULT; 178 err = -EFAULT;
179 179
180 return err; 180 return err;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 32d338c30e65..3e450f4a3125 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -73,42 +73,28 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73static void l2cap_send_disconn_req(struct l2cap_conn *conn, 73static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err); 74 struct l2cap_chan *chan, int err);
75 75
76static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77
78/* ---- L2CAP channels ---- */ 76/* ---- L2CAP channels ---- */
79 77
80static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) 78static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
81{ 79{
82 struct l2cap_chan *c, *r = NULL; 80 struct l2cap_chan *c;
83
84 rcu_read_lock();
85 81
86 list_for_each_entry_rcu(c, &conn->chan_l, list) { 82 list_for_each_entry(c, &conn->chan_l, list) {
87 if (c->dcid == cid) { 83 if (c->dcid == cid)
88 r = c; 84 return c;
89 break;
90 }
91 } 85 }
92 86 return NULL;
93 rcu_read_unlock();
94 return r;
95} 87}
96 88
97static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid) 89static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
98{ 90{
99 struct l2cap_chan *c, *r = NULL; 91 struct l2cap_chan *c;
100
101 rcu_read_lock();
102 92
103 list_for_each_entry_rcu(c, &conn->chan_l, list) { 93 list_for_each_entry(c, &conn->chan_l, list) {
104 if (c->scid == cid) { 94 if (c->scid == cid)
105 r = c; 95 return c;
106 break;
107 }
108 } 96 }
109 97 return NULL;
110 rcu_read_unlock();
111 return r;
112} 98}
113 99
114/* Find channel with given SCID. 100/* Find channel with given SCID.
@@ -117,36 +103,32 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 ci
117{ 103{
118 struct l2cap_chan *c; 104 struct l2cap_chan *c;
119 105
106 mutex_lock(&conn->chan_lock);
120 c = __l2cap_get_chan_by_scid(conn, cid); 107 c = __l2cap_get_chan_by_scid(conn, cid);
121 if (c) 108 mutex_unlock(&conn->chan_lock);
122 lock_sock(c->sk); 109
123 return c; 110 return c;
124} 111}
125 112
126static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident) 113static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
127{ 114{
128 struct l2cap_chan *c, *r = NULL; 115 struct l2cap_chan *c;
129
130 rcu_read_lock();
131 116
132 list_for_each_entry_rcu(c, &conn->chan_l, list) { 117 list_for_each_entry(c, &conn->chan_l, list) {
133 if (c->ident == ident) { 118 if (c->ident == ident)
134 r = c; 119 return c;
135 break;
136 }
137 } 120 }
138 121 return NULL;
139 rcu_read_unlock();
140 return r;
141} 122}
142 123
143static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident) 124static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
144{ 125{
145 struct l2cap_chan *c; 126 struct l2cap_chan *c;
146 127
128 mutex_lock(&conn->chan_lock);
147 c = __l2cap_get_chan_by_ident(conn, ident); 129 c = __l2cap_get_chan_by_ident(conn, ident);
148 if (c) 130 mutex_unlock(&conn->chan_lock);
149 lock_sock(c->sk); 131
150 return c; 132 return c;
151} 133}
152 134
@@ -217,51 +199,51 @@ static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
217 return 0; 199 return 0;
218} 200}
219 201
220static char *state_to_string(int state) 202static void __l2cap_state_change(struct l2cap_chan *chan, int state)
221{ 203{
222 switch(state) { 204 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
223 case BT_CONNECTED: 205 state_to_string(state));
224 return "BT_CONNECTED";
225 case BT_OPEN:
226 return "BT_OPEN";
227 case BT_BOUND:
228 return "BT_BOUND";
229 case BT_LISTEN:
230 return "BT_LISTEN";
231 case BT_CONNECT:
232 return "BT_CONNECT";
233 case BT_CONNECT2:
234 return "BT_CONNECT2";
235 case BT_CONFIG:
236 return "BT_CONFIG";
237 case BT_DISCONN:
238 return "BT_DISCONN";
239 case BT_CLOSED:
240 return "BT_CLOSED";
241 }
242 206
243 return "invalid state"; 207 chan->state = state;
208 chan->ops->state_change(chan->data, state);
244} 209}
245 210
246static void l2cap_state_change(struct l2cap_chan *chan, int state) 211static void l2cap_state_change(struct l2cap_chan *chan, int state)
247{ 212{
248 BT_DBG("%p %s -> %s", chan, state_to_string(chan->state), 213 struct sock *sk = chan->sk;
249 state_to_string(state));
250 214
251 chan->state = state; 215 lock_sock(sk);
252 chan->ops->state_change(chan->data, state); 216 __l2cap_state_change(chan, state);
217 release_sock(sk);
218}
219
220static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
221{
222 struct sock *sk = chan->sk;
223
224 sk->sk_err = err;
225}
226
227static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
228{
229 struct sock *sk = chan->sk;
230
231 lock_sock(sk);
232 __l2cap_chan_set_err(chan, err);
233 release_sock(sk);
253} 234}
254 235
255static void l2cap_chan_timeout(struct work_struct *work) 236static void l2cap_chan_timeout(struct work_struct *work)
256{ 237{
257 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 238 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
258 chan_timer.work); 239 chan_timer.work);
259 struct sock *sk = chan->sk; 240 struct l2cap_conn *conn = chan->conn;
260 int reason; 241 int reason;
261 242
262 BT_DBG("chan %p state %d", chan, chan->state); 243 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
263 244
264 lock_sock(sk); 245 mutex_lock(&conn->chan_lock);
246 l2cap_chan_lock(chan);
265 247
266 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG) 248 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
267 reason = ECONNREFUSED; 249 reason = ECONNREFUSED;
@@ -273,9 +255,11 @@ static void l2cap_chan_timeout(struct work_struct *work)
273 255
274 l2cap_chan_close(chan, reason); 256 l2cap_chan_close(chan, reason);
275 257
276 release_sock(sk); 258 l2cap_chan_unlock(chan);
277 259
278 chan->ops->close(chan->data); 260 chan->ops->close(chan->data);
261 mutex_unlock(&conn->chan_lock);
262
279 l2cap_chan_put(chan); 263 l2cap_chan_put(chan);
280} 264}
281 265
@@ -287,6 +271,8 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
287 if (!chan) 271 if (!chan)
288 return NULL; 272 return NULL;
289 273
274 mutex_init(&chan->lock);
275
290 chan->sk = sk; 276 chan->sk = sk;
291 277
292 write_lock(&chan_list_lock); 278 write_lock(&chan_list_lock);
@@ -313,7 +299,7 @@ void l2cap_chan_destroy(struct l2cap_chan *chan)
313 l2cap_chan_put(chan); 299 l2cap_chan_put(chan);
314} 300}
315 301
316static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) 302void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
317{ 303{
318 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, 304 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
319 chan->psm, chan->dcid); 305 chan->psm, chan->dcid);
@@ -322,7 +308,8 @@ static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
322 308
323 chan->conn = conn; 309 chan->conn = conn;
324 310
325 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) { 311 switch (chan->chan_type) {
312 case L2CAP_CHAN_CONN_ORIENTED:
326 if (conn->hcon->type == LE_LINK) { 313 if (conn->hcon->type == LE_LINK) {
327 /* LE connection */ 314 /* LE connection */
328 chan->omtu = L2CAP_LE_DEFAULT_MTU; 315 chan->omtu = L2CAP_LE_DEFAULT_MTU;
@@ -333,12 +320,16 @@ static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
333 chan->scid = l2cap_alloc_cid(conn); 320 chan->scid = l2cap_alloc_cid(conn);
334 chan->omtu = L2CAP_DEFAULT_MTU; 321 chan->omtu = L2CAP_DEFAULT_MTU;
335 } 322 }
336 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) { 323 break;
324
325 case L2CAP_CHAN_CONN_LESS:
337 /* Connectionless socket */ 326 /* Connectionless socket */
338 chan->scid = L2CAP_CID_CONN_LESS; 327 chan->scid = L2CAP_CID_CONN_LESS;
339 chan->dcid = L2CAP_CID_CONN_LESS; 328 chan->dcid = L2CAP_CID_CONN_LESS;
340 chan->omtu = L2CAP_DEFAULT_MTU; 329 chan->omtu = L2CAP_DEFAULT_MTU;
341 } else { 330 break;
331
332 default:
342 /* Raw socket can send/recv signalling messages only */ 333 /* Raw socket can send/recv signalling messages only */
343 chan->scid = L2CAP_CID_SIGNALING; 334 chan->scid = L2CAP_CID_SIGNALING;
344 chan->dcid = L2CAP_CID_SIGNALING; 335 chan->dcid = L2CAP_CID_SIGNALING;
@@ -354,11 +345,16 @@ static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
354 345
355 l2cap_chan_hold(chan); 346 l2cap_chan_hold(chan);
356 347
357 list_add_rcu(&chan->list, &conn->chan_l); 348 list_add(&chan->list, &conn->chan_l);
349}
350
351void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
352{
353 mutex_lock(&conn->chan_lock);
354 __l2cap_chan_add(conn, chan);
355 mutex_unlock(&conn->chan_lock);
358} 356}
359 357
360/* Delete channel.
361 * Must be called on the locked socket. */
362static void l2cap_chan_del(struct l2cap_chan *chan, int err) 358static void l2cap_chan_del(struct l2cap_chan *chan, int err)
363{ 359{
364 struct sock *sk = chan->sk; 360 struct sock *sk = chan->sk;
@@ -371,8 +367,7 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
371 367
372 if (conn) { 368 if (conn) {
373 /* Delete from channel list */ 369 /* Delete from channel list */
374 list_del_rcu(&chan->list); 370 list_del(&chan->list);
375 synchronize_rcu();
376 371
377 l2cap_chan_put(chan); 372 l2cap_chan_put(chan);
378 373
@@ -380,11 +375,13 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
380 hci_conn_put(conn->hcon); 375 hci_conn_put(conn->hcon);
381 } 376 }
382 377
383 l2cap_state_change(chan, BT_CLOSED); 378 lock_sock(sk);
379
380 __l2cap_state_change(chan, BT_CLOSED);
384 sock_set_flag(sk, SOCK_ZAPPED); 381 sock_set_flag(sk, SOCK_ZAPPED);
385 382
386 if (err) 383 if (err)
387 sk->sk_err = err; 384 __l2cap_chan_set_err(chan, err);
388 385
389 if (parent) { 386 if (parent) {
390 bt_accept_unlink(sk); 387 bt_accept_unlink(sk);
@@ -392,6 +389,8 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
392 } else 389 } else
393 sk->sk_state_change(sk); 390 sk->sk_state_change(sk);
394 391
392 release_sock(sk);
393
395 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) && 394 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
396 test_bit(CONF_INPUT_DONE, &chan->conf_state))) 395 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
397 return; 396 return;
@@ -423,10 +422,12 @@ static void l2cap_chan_cleanup_listen(struct sock *parent)
423 /* Close not yet accepted channels */ 422 /* Close not yet accepted channels */
424 while ((sk = bt_accept_dequeue(parent, NULL))) { 423 while ((sk = bt_accept_dequeue(parent, NULL))) {
425 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 424 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
425
426 l2cap_chan_lock(chan);
426 __clear_chan_timer(chan); 427 __clear_chan_timer(chan);
427 lock_sock(sk);
428 l2cap_chan_close(chan, ECONNRESET); 428 l2cap_chan_close(chan, ECONNRESET);
429 release_sock(sk); 429 l2cap_chan_unlock(chan);
430
430 chan->ops->close(chan->data); 431 chan->ops->close(chan->data);
431 } 432 }
432} 433}
@@ -436,14 +437,17 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
436 struct l2cap_conn *conn = chan->conn; 437 struct l2cap_conn *conn = chan->conn;
437 struct sock *sk = chan->sk; 438 struct sock *sk = chan->sk;
438 439
439 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket); 440 BT_DBG("chan %p state %s sk %p", chan,
441 state_to_string(chan->state), sk);
440 442
441 switch (chan->state) { 443 switch (chan->state) {
442 case BT_LISTEN: 444 case BT_LISTEN:
445 lock_sock(sk);
443 l2cap_chan_cleanup_listen(sk); 446 l2cap_chan_cleanup_listen(sk);
444 447
445 l2cap_state_change(chan, BT_CLOSED); 448 __l2cap_state_change(chan, BT_CLOSED);
446 sock_set_flag(sk, SOCK_ZAPPED); 449 sock_set_flag(sk, SOCK_ZAPPED);
450 release_sock(sk);
447 break; 451 break;
448 452
449 case BT_CONNECTED: 453 case BT_CONNECTED:
@@ -486,7 +490,9 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
486 break; 490 break;
487 491
488 default: 492 default:
493 lock_sock(sk);
489 sock_set_flag(sk, SOCK_ZAPPED); 494 sock_set_flag(sk, SOCK_ZAPPED);
495 release_sock(sk);
490 break; 496 break;
491 } 497 }
492} 498}
@@ -661,6 +667,21 @@ static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
661 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state); 667 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
662} 668}
663 669
670static void l2cap_send_conn_req(struct l2cap_chan *chan)
671{
672 struct l2cap_conn *conn = chan->conn;
673 struct l2cap_conn_req req;
674
675 req.scid = cpu_to_le16(chan->scid);
676 req.psm = chan->psm;
677
678 chan->ident = l2cap_get_ident(conn);
679
680 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
681
682 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
683}
684
664static void l2cap_do_start(struct l2cap_chan *chan) 685static void l2cap_do_start(struct l2cap_chan *chan)
665{ 686{
666 struct l2cap_conn *conn = chan->conn; 687 struct l2cap_conn *conn = chan->conn;
@@ -670,17 +691,8 @@ static void l2cap_do_start(struct l2cap_chan *chan)
670 return; 691 return;
671 692
672 if (l2cap_chan_check_security(chan) && 693 if (l2cap_chan_check_security(chan) &&
673 __l2cap_no_conn_pending(chan)) { 694 __l2cap_no_conn_pending(chan))
674 struct l2cap_conn_req req; 695 l2cap_send_conn_req(chan);
675 req.scid = cpu_to_le16(chan->scid);
676 req.psm = chan->psm;
677
678 chan->ident = l2cap_get_ident(conn);
679 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
680
681 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
682 sizeof(req), &req);
683 }
684 } else { 696 } else {
685 struct l2cap_info_req req; 697 struct l2cap_info_req req;
686 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); 698 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
@@ -688,8 +700,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
688 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; 700 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
689 conn->info_ident = l2cap_get_ident(conn); 701 conn->info_ident = l2cap_get_ident(conn);
690 702
691 schedule_delayed_work(&conn->info_timer, 703 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
692 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
693 704
694 l2cap_send_cmd(conn, conn->info_ident, 705 l2cap_send_cmd(conn, conn->info_ident,
695 L2CAP_INFO_REQ, sizeof(req), &req); 706 L2CAP_INFO_REQ, sizeof(req), &req);
@@ -714,14 +725,12 @@ static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
714 725
715static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err) 726static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
716{ 727{
717 struct sock *sk; 728 struct sock *sk = chan->sk;
718 struct l2cap_disconn_req req; 729 struct l2cap_disconn_req req;
719 730
720 if (!conn) 731 if (!conn)
721 return; 732 return;
722 733
723 sk = chan->sk;
724
725 if (chan->mode == L2CAP_MODE_ERTM) { 734 if (chan->mode == L2CAP_MODE_ERTM) {
726 __clear_retrans_timer(chan); 735 __clear_retrans_timer(chan);
727 __clear_monitor_timer(chan); 736 __clear_monitor_timer(chan);
@@ -733,56 +742,47 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c
733 l2cap_send_cmd(conn, l2cap_get_ident(conn), 742 l2cap_send_cmd(conn, l2cap_get_ident(conn),
734 L2CAP_DISCONN_REQ, sizeof(req), &req); 743 L2CAP_DISCONN_REQ, sizeof(req), &req);
735 744
736 l2cap_state_change(chan, BT_DISCONN); 745 lock_sock(sk);
737 sk->sk_err = err; 746 __l2cap_state_change(chan, BT_DISCONN);
747 __l2cap_chan_set_err(chan, err);
748 release_sock(sk);
738} 749}
739 750
740/* ---- L2CAP connections ---- */ 751/* ---- L2CAP connections ---- */
741static void l2cap_conn_start(struct l2cap_conn *conn) 752static void l2cap_conn_start(struct l2cap_conn *conn)
742{ 753{
743 struct l2cap_chan *chan; 754 struct l2cap_chan *chan, *tmp;
744 755
745 BT_DBG("conn %p", conn); 756 BT_DBG("conn %p", conn);
746 757
747 rcu_read_lock(); 758 mutex_lock(&conn->chan_lock);
748 759
749 list_for_each_entry_rcu(chan, &conn->chan_l, list) { 760 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
750 struct sock *sk = chan->sk; 761 struct sock *sk = chan->sk;
751 762
752 bh_lock_sock(sk); 763 l2cap_chan_lock(chan);
753 764
754 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { 765 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
755 bh_unlock_sock(sk); 766 l2cap_chan_unlock(chan);
756 continue; 767 continue;
757 } 768 }
758 769
759 if (chan->state == BT_CONNECT) { 770 if (chan->state == BT_CONNECT) {
760 struct l2cap_conn_req req;
761
762 if (!l2cap_chan_check_security(chan) || 771 if (!l2cap_chan_check_security(chan) ||
763 !__l2cap_no_conn_pending(chan)) { 772 !__l2cap_no_conn_pending(chan)) {
764 bh_unlock_sock(sk); 773 l2cap_chan_unlock(chan);
765 continue; 774 continue;
766 } 775 }
767 776
768 if (!l2cap_mode_supported(chan->mode, conn->feat_mask) 777 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
769 && test_bit(CONF_STATE2_DEVICE, 778 && test_bit(CONF_STATE2_DEVICE,
770 &chan->conf_state)) { 779 &chan->conf_state)) {
771 /* l2cap_chan_close() calls list_del(chan)
772 * so release the lock */
773 l2cap_chan_close(chan, ECONNRESET); 780 l2cap_chan_close(chan, ECONNRESET);
774 bh_unlock_sock(sk); 781 l2cap_chan_unlock(chan);
775 continue; 782 continue;
776 } 783 }
777 784
778 req.scid = cpu_to_le16(chan->scid); 785 l2cap_send_conn_req(chan);
779 req.psm = chan->psm;
780
781 chan->ident = l2cap_get_ident(conn);
782 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
783
784 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
785 sizeof(req), &req);
786 786
787 } else if (chan->state == BT_CONNECT2) { 787 } else if (chan->state == BT_CONNECT2) {
788 struct l2cap_conn_rsp rsp; 788 struct l2cap_conn_rsp rsp;
@@ -791,6 +791,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
791 rsp.dcid = cpu_to_le16(chan->scid); 791 rsp.dcid = cpu_to_le16(chan->scid);
792 792
793 if (l2cap_chan_check_security(chan)) { 793 if (l2cap_chan_check_security(chan)) {
794 lock_sock(sk);
794 if (bt_sk(sk)->defer_setup) { 795 if (bt_sk(sk)->defer_setup) {
795 struct sock *parent = bt_sk(sk)->parent; 796 struct sock *parent = bt_sk(sk)->parent;
796 rsp.result = cpu_to_le16(L2CAP_CR_PEND); 797 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
@@ -799,10 +800,11 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
799 parent->sk_data_ready(parent, 0); 800 parent->sk_data_ready(parent, 0);
800 801
801 } else { 802 } else {
802 l2cap_state_change(chan, BT_CONFIG); 803 __l2cap_state_change(chan, BT_CONFIG);
803 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); 804 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
804 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 805 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
805 } 806 }
807 release_sock(sk);
806 } else { 808 } else {
807 rsp.result = cpu_to_le16(L2CAP_CR_PEND); 809 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
808 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND); 810 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
@@ -813,7 +815,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
813 815
814 if (test_bit(CONF_REQ_SENT, &chan->conf_state) || 816 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
815 rsp.result != L2CAP_CR_SUCCESS) { 817 rsp.result != L2CAP_CR_SUCCESS) {
816 bh_unlock_sock(sk); 818 l2cap_chan_unlock(chan);
817 continue; 819 continue;
818 } 820 }
819 821
@@ -823,10 +825,10 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
823 chan->num_conf_req++; 825 chan->num_conf_req++;
824 } 826 }
825 827
826 bh_unlock_sock(sk); 828 l2cap_chan_unlock(chan);
827 } 829 }
828 830
829 rcu_read_unlock(); 831 mutex_unlock(&conn->chan_lock);
830} 832}
831 833
832/* Find socket with cid and source bdaddr. 834/* Find socket with cid and source bdaddr.
@@ -902,28 +904,34 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
902 904
903 __set_chan_timer(chan, sk->sk_sndtimeo); 905 __set_chan_timer(chan, sk->sk_sndtimeo);
904 906
905 l2cap_state_change(chan, BT_CONNECTED); 907 __l2cap_state_change(chan, BT_CONNECTED);
906 parent->sk_data_ready(parent, 0); 908 parent->sk_data_ready(parent, 0);
907 909
908clean: 910clean:
909 release_sock(parent); 911 release_sock(parent);
910} 912}
911 913
912static void l2cap_chan_ready(struct sock *sk) 914static void l2cap_chan_ready(struct l2cap_chan *chan)
913{ 915{
914 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 916 struct sock *sk = chan->sk;
915 struct sock *parent = bt_sk(sk)->parent; 917 struct sock *parent;
918
919 lock_sock(sk);
920
921 parent = bt_sk(sk)->parent;
916 922
917 BT_DBG("sk %p, parent %p", sk, parent); 923 BT_DBG("sk %p, parent %p", sk, parent);
918 924
919 chan->conf_state = 0; 925 chan->conf_state = 0;
920 __clear_chan_timer(chan); 926 __clear_chan_timer(chan);
921 927
922 l2cap_state_change(chan, BT_CONNECTED); 928 __l2cap_state_change(chan, BT_CONNECTED);
923 sk->sk_state_change(sk); 929 sk->sk_state_change(sk);
924 930
925 if (parent) 931 if (parent)
926 parent->sk_data_ready(parent, 0); 932 parent->sk_data_ready(parent, 0);
933
934 release_sock(sk);
927} 935}
928 936
929static void l2cap_conn_ready(struct l2cap_conn *conn) 937static void l2cap_conn_ready(struct l2cap_conn *conn)
@@ -938,29 +946,31 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
938 if (conn->hcon->out && conn->hcon->type == LE_LINK) 946 if (conn->hcon->out && conn->hcon->type == LE_LINK)
939 smp_conn_security(conn, conn->hcon->pending_sec_level); 947 smp_conn_security(conn, conn->hcon->pending_sec_level);
940 948
941 rcu_read_lock(); 949 mutex_lock(&conn->chan_lock);
942 950
943 list_for_each_entry_rcu(chan, &conn->chan_l, list) { 951 list_for_each_entry(chan, &conn->chan_l, list) {
944 struct sock *sk = chan->sk;
945 952
946 bh_lock_sock(sk); 953 l2cap_chan_lock(chan);
947 954
948 if (conn->hcon->type == LE_LINK) { 955 if (conn->hcon->type == LE_LINK) {
949 if (smp_conn_security(conn, chan->sec_level)) 956 if (smp_conn_security(conn, chan->sec_level))
950 l2cap_chan_ready(sk); 957 l2cap_chan_ready(chan);
951 958
952 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { 959 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
960 struct sock *sk = chan->sk;
953 __clear_chan_timer(chan); 961 __clear_chan_timer(chan);
954 l2cap_state_change(chan, BT_CONNECTED); 962 lock_sock(sk);
963 __l2cap_state_change(chan, BT_CONNECTED);
955 sk->sk_state_change(sk); 964 sk->sk_state_change(sk);
965 release_sock(sk);
956 966
957 } else if (chan->state == BT_CONNECT) 967 } else if (chan->state == BT_CONNECT)
958 l2cap_do_start(chan); 968 l2cap_do_start(chan);
959 969
960 bh_unlock_sock(sk); 970 l2cap_chan_unlock(chan);
961 } 971 }
962 972
963 rcu_read_unlock(); 973 mutex_unlock(&conn->chan_lock);
964} 974}
965 975
966/* Notify sockets that we cannot guaranty reliability anymore */ 976/* Notify sockets that we cannot guaranty reliability anymore */
@@ -970,16 +980,14 @@ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
970 980
971 BT_DBG("conn %p", conn); 981 BT_DBG("conn %p", conn);
972 982
973 rcu_read_lock(); 983 mutex_lock(&conn->chan_lock);
974
975 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
976 struct sock *sk = chan->sk;
977 984
985 list_for_each_entry(chan, &conn->chan_l, list) {
978 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags)) 986 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
979 sk->sk_err = err; 987 __l2cap_chan_set_err(chan, err);
980 } 988 }
981 989
982 rcu_read_unlock(); 990 mutex_unlock(&conn->chan_lock);
983} 991}
984 992
985static void l2cap_info_timeout(struct work_struct *work) 993static void l2cap_info_timeout(struct work_struct *work)
@@ -997,7 +1005,6 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
997{ 1005{
998 struct l2cap_conn *conn = hcon->l2cap_data; 1006 struct l2cap_conn *conn = hcon->l2cap_data;
999 struct l2cap_chan *chan, *l; 1007 struct l2cap_chan *chan, *l;
1000 struct sock *sk;
1001 1008
1002 if (!conn) 1009 if (!conn)
1003 return; 1010 return;
@@ -1006,21 +1013,27 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
1006 1013
1007 kfree_skb(conn->rx_skb); 1014 kfree_skb(conn->rx_skb);
1008 1015
1016 mutex_lock(&conn->chan_lock);
1017
1009 /* Kill channels */ 1018 /* Kill channels */
1010 list_for_each_entry_safe(chan, l, &conn->chan_l, list) { 1019 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1011 sk = chan->sk; 1020 l2cap_chan_lock(chan);
1012 lock_sock(sk); 1021
1013 l2cap_chan_del(chan, err); 1022 l2cap_chan_del(chan, err);
1014 release_sock(sk); 1023
1024 l2cap_chan_unlock(chan);
1025
1015 chan->ops->close(chan->data); 1026 chan->ops->close(chan->data);
1016 } 1027 }
1017 1028
1029 mutex_unlock(&conn->chan_lock);
1030
1018 hci_chan_del(conn->hchan); 1031 hci_chan_del(conn->hchan);
1019 1032
1020 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) 1033 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1021 cancel_delayed_work_sync(&conn->info_timer); 1034 cancel_delayed_work_sync(&conn->info_timer);
1022 1035
1023 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) { 1036 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1024 cancel_delayed_work_sync(&conn->security_timer); 1037 cancel_delayed_work_sync(&conn->security_timer);
1025 smp_chan_destroy(conn); 1038 smp_chan_destroy(conn);
1026 } 1039 }
@@ -1072,6 +1085,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1072 conn->feat_mask = 0; 1085 conn->feat_mask = 0;
1073 1086
1074 spin_lock_init(&conn->lock); 1087 spin_lock_init(&conn->lock);
1088 mutex_init(&conn->chan_lock);
1075 1089
1076 INIT_LIST_HEAD(&conn->chan_l); 1090 INIT_LIST_HEAD(&conn->chan_l);
1077 1091
@@ -1139,7 +1153,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
1139 1153
1140 hci_dev_lock(hdev); 1154 hci_dev_lock(hdev);
1141 1155
1142 lock_sock(sk); 1156 l2cap_chan_lock(chan);
1143 1157
1144 /* PSM must be odd and lsb of upper byte must be 0 */ 1158 /* PSM must be odd and lsb of upper byte must be 0 */
1145 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid && 1159 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
@@ -1166,17 +1180,21 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
1166 goto done; 1180 goto done;
1167 } 1181 }
1168 1182
1183 lock_sock(sk);
1184
1169 switch (sk->sk_state) { 1185 switch (sk->sk_state) {
1170 case BT_CONNECT: 1186 case BT_CONNECT:
1171 case BT_CONNECT2: 1187 case BT_CONNECT2:
1172 case BT_CONFIG: 1188 case BT_CONFIG:
1173 /* Already connecting */ 1189 /* Already connecting */
1174 err = 0; 1190 err = 0;
1191 release_sock(sk);
1175 goto done; 1192 goto done;
1176 1193
1177 case BT_CONNECTED: 1194 case BT_CONNECTED:
1178 /* Already connected */ 1195 /* Already connected */
1179 err = -EISCONN; 1196 err = -EISCONN;
1197 release_sock(sk);
1180 goto done; 1198 goto done;
1181 1199
1182 case BT_OPEN: 1200 case BT_OPEN:
@@ -1186,11 +1204,15 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
1186 1204
1187 default: 1205 default:
1188 err = -EBADFD; 1206 err = -EBADFD;
1207 release_sock(sk);
1189 goto done; 1208 goto done;
1190 } 1209 }
1191 1210
1192 /* Set destination address and psm */ 1211 /* Set destination address and psm */
1193 bacpy(&bt_sk(sk)->dst, dst); 1212 bacpy(&bt_sk(sk)->dst, dst);
1213
1214 release_sock(sk);
1215
1194 chan->psm = psm; 1216 chan->psm = psm;
1195 chan->dcid = cid; 1217 chan->dcid = cid;
1196 1218
@@ -1218,7 +1240,9 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
1218 /* Update source addr of the socket */ 1240 /* Update source addr of the socket */
1219 bacpy(src, conn->src); 1241 bacpy(src, conn->src);
1220 1242
1243 l2cap_chan_unlock(chan);
1221 l2cap_chan_add(conn, chan); 1244 l2cap_chan_add(conn, chan);
1245 l2cap_chan_lock(chan);
1222 1246
1223 l2cap_state_change(chan, BT_CONNECT); 1247 l2cap_state_change(chan, BT_CONNECT);
1224 __set_chan_timer(chan, sk->sk_sndtimeo); 1248 __set_chan_timer(chan, sk->sk_sndtimeo);
@@ -1235,6 +1259,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
1235 err = 0; 1259 err = 0;
1236 1260
1237done: 1261done:
1262 l2cap_chan_unlock(chan);
1238 hci_dev_unlock(hdev); 1263 hci_dev_unlock(hdev);
1239 hci_dev_put(hdev); 1264 hci_dev_put(hdev);
1240 return err; 1265 return err;
@@ -1276,14 +1301,14 @@ static void l2cap_monitor_timeout(struct work_struct *work)
1276{ 1301{
1277 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 1302 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1278 monitor_timer.work); 1303 monitor_timer.work);
1279 struct sock *sk = chan->sk;
1280 1304
1281 BT_DBG("chan %p", chan); 1305 BT_DBG("chan %p", chan);
1282 1306
1283 lock_sock(sk); 1307 l2cap_chan_lock(chan);
1308
1284 if (chan->retry_count >= chan->remote_max_tx) { 1309 if (chan->retry_count >= chan->remote_max_tx) {
1285 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); 1310 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1286 release_sock(sk); 1311 l2cap_chan_unlock(chan);
1287 return; 1312 return;
1288 } 1313 }
1289 1314
@@ -1291,25 +1316,26 @@ static void l2cap_monitor_timeout(struct work_struct *work)
1291 __set_monitor_timer(chan); 1316 __set_monitor_timer(chan);
1292 1317
1293 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); 1318 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1294 release_sock(sk); 1319 l2cap_chan_unlock(chan);
1295} 1320}
1296 1321
1297static void l2cap_retrans_timeout(struct work_struct *work) 1322static void l2cap_retrans_timeout(struct work_struct *work)
1298{ 1323{
1299 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 1324 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1300 retrans_timer.work); 1325 retrans_timer.work);
1301 struct sock *sk = chan->sk;
1302 1326
1303 BT_DBG("chan %p", chan); 1327 BT_DBG("chan %p", chan);
1304 1328
1305 lock_sock(sk); 1329 l2cap_chan_lock(chan);
1330
1306 chan->retry_count = 1; 1331 chan->retry_count = 1;
1307 __set_monitor_timer(chan); 1332 __set_monitor_timer(chan);
1308 1333
1309 set_bit(CONN_WAIT_F, &chan->conn_state); 1334 set_bit(CONN_WAIT_F, &chan->conn_state);
1310 1335
1311 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); 1336 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1312 release_sock(sk); 1337
1338 l2cap_chan_unlock(chan);
1313} 1339}
1314 1340
1315static void l2cap_drop_acked_frames(struct l2cap_chan *chan) 1341static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
@@ -1450,17 +1476,19 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
1450 1476
1451 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); 1477 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1452 1478
1453 if (bt_cb(skb)->retries == 1) 1479 if (bt_cb(skb)->retries == 1) {
1454 chan->unacked_frames++; 1480 chan->unacked_frames++;
1455 1481
1482 if (!nsent++)
1483 __clear_ack_timer(chan);
1484 }
1485
1456 chan->frames_sent++; 1486 chan->frames_sent++;
1457 1487
1458 if (skb_queue_is_last(&chan->tx_q, skb)) 1488 if (skb_queue_is_last(&chan->tx_q, skb))
1459 chan->tx_send_head = NULL; 1489 chan->tx_send_head = NULL;
1460 else 1490 else
1461 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb); 1491 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1462
1463 nsent++;
1464 } 1492 }
1465 1493
1466 return nsent; 1494 return nsent;
@@ -1478,7 +1506,7 @@ static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1478 return ret; 1506 return ret;
1479} 1507}
1480 1508
1481static void l2cap_send_ack(struct l2cap_chan *chan) 1509static void __l2cap_send_ack(struct l2cap_chan *chan)
1482{ 1510{
1483 u32 control = 0; 1511 u32 control = 0;
1484 1512
@@ -1498,6 +1526,12 @@ static void l2cap_send_ack(struct l2cap_chan *chan)
1498 l2cap_send_sframe(chan, control); 1526 l2cap_send_sframe(chan, control);
1499} 1527}
1500 1528
1529static void l2cap_send_ack(struct l2cap_chan *chan)
1530{
1531 __clear_ack_timer(chan);
1532 __l2cap_send_ack(chan);
1533}
1534
1501static void l2cap_send_srejtail(struct l2cap_chan *chan) 1535static void l2cap_send_srejtail(struct l2cap_chan *chan)
1502{ 1536{
1503 struct srej_list *tail; 1537 struct srej_list *tail;
@@ -1512,9 +1546,11 @@ static void l2cap_send_srejtail(struct l2cap_chan *chan)
1512 l2cap_send_sframe(chan, control); 1546 l2cap_send_sframe(chan, control);
1513} 1547}
1514 1548
1515static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb) 1549static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1550 struct msghdr *msg, int len,
1551 int count, struct sk_buff *skb)
1516{ 1552{
1517 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn; 1553 struct l2cap_conn *conn = chan->conn;
1518 struct sk_buff **frag; 1554 struct sk_buff **frag;
1519 int err, sent = 0; 1555 int err, sent = 0;
1520 1556
@@ -1529,7 +1565,10 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
1529 while (len) { 1565 while (len) {
1530 count = min_t(unsigned int, conn->mtu, len); 1566 count = min_t(unsigned int, conn->mtu, len);
1531 1567
1532 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err); 1568 *frag = chan->ops->alloc_skb(chan, count,
1569 msg->msg_flags & MSG_DONTWAIT,
1570 &err);
1571
1533 if (!*frag) 1572 if (!*frag)
1534 return err; 1573 return err;
1535 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) 1574 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
@@ -1550,17 +1589,18 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1550 struct msghdr *msg, size_t len, 1589 struct msghdr *msg, size_t len,
1551 u32 priority) 1590 u32 priority)
1552{ 1591{
1553 struct sock *sk = chan->sk;
1554 struct l2cap_conn *conn = chan->conn; 1592 struct l2cap_conn *conn = chan->conn;
1555 struct sk_buff *skb; 1593 struct sk_buff *skb;
1556 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE; 1594 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1557 struct l2cap_hdr *lh; 1595 struct l2cap_hdr *lh;
1558 1596
1559 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority); 1597 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1560 1598
1561 count = min_t(unsigned int, (conn->mtu - hlen), len); 1599 count = min_t(unsigned int, (conn->mtu - hlen), len);
1562 skb = bt_skb_send_alloc(sk, count + hlen, 1600
1563 msg->msg_flags & MSG_DONTWAIT, &err); 1601 skb = chan->ops->alloc_skb(chan, count + hlen,
1602 msg->msg_flags & MSG_DONTWAIT, &err);
1603
1564 if (!skb) 1604 if (!skb)
1565 return ERR_PTR(err); 1605 return ERR_PTR(err);
1566 1606
@@ -1572,7 +1612,7 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1572 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 1612 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1573 put_unaligned_le16(chan->psm, skb_put(skb, 2)); 1613 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1574 1614
1575 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); 1615 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1576 if (unlikely(err < 0)) { 1616 if (unlikely(err < 0)) {
1577 kfree_skb(skb); 1617 kfree_skb(skb);
1578 return ERR_PTR(err); 1618 return ERR_PTR(err);
@@ -1584,17 +1624,18 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1584 struct msghdr *msg, size_t len, 1624 struct msghdr *msg, size_t len,
1585 u32 priority) 1625 u32 priority)
1586{ 1626{
1587 struct sock *sk = chan->sk;
1588 struct l2cap_conn *conn = chan->conn; 1627 struct l2cap_conn *conn = chan->conn;
1589 struct sk_buff *skb; 1628 struct sk_buff *skb;
1590 int err, count, hlen = L2CAP_HDR_SIZE; 1629 int err, count, hlen = L2CAP_HDR_SIZE;
1591 struct l2cap_hdr *lh; 1630 struct l2cap_hdr *lh;
1592 1631
1593 BT_DBG("sk %p len %d", sk, (int)len); 1632 BT_DBG("chan %p len %d", chan, (int)len);
1594 1633
1595 count = min_t(unsigned int, (conn->mtu - hlen), len); 1634 count = min_t(unsigned int, (conn->mtu - hlen), len);
1596 skb = bt_skb_send_alloc(sk, count + hlen, 1635
1597 msg->msg_flags & MSG_DONTWAIT, &err); 1636 skb = chan->ops->alloc_skb(chan, count + hlen,
1637 msg->msg_flags & MSG_DONTWAIT, &err);
1638
1598 if (!skb) 1639 if (!skb)
1599 return ERR_PTR(err); 1640 return ERR_PTR(err);
1600 1641
@@ -1605,7 +1646,7 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1605 lh->cid = cpu_to_le16(chan->dcid); 1646 lh->cid = cpu_to_le16(chan->dcid);
1606 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 1647 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1607 1648
1608 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); 1649 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1609 if (unlikely(err < 0)) { 1650 if (unlikely(err < 0)) {
1610 kfree_skb(skb); 1651 kfree_skb(skb);
1611 return ERR_PTR(err); 1652 return ERR_PTR(err);
@@ -1617,13 +1658,12 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1617 struct msghdr *msg, size_t len, 1658 struct msghdr *msg, size_t len,
1618 u32 control, u16 sdulen) 1659 u32 control, u16 sdulen)
1619{ 1660{
1620 struct sock *sk = chan->sk;
1621 struct l2cap_conn *conn = chan->conn; 1661 struct l2cap_conn *conn = chan->conn;
1622 struct sk_buff *skb; 1662 struct sk_buff *skb;
1623 int err, count, hlen; 1663 int err, count, hlen;
1624 struct l2cap_hdr *lh; 1664 struct l2cap_hdr *lh;
1625 1665
1626 BT_DBG("sk %p len %d", sk, (int)len); 1666 BT_DBG("chan %p len %d", chan, (int)len);
1627 1667
1628 if (!conn) 1668 if (!conn)
1629 return ERR_PTR(-ENOTCONN); 1669 return ERR_PTR(-ENOTCONN);
@@ -1640,8 +1680,10 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1640 hlen += L2CAP_FCS_SIZE; 1680 hlen += L2CAP_FCS_SIZE;
1641 1681
1642 count = min_t(unsigned int, (conn->mtu - hlen), len); 1682 count = min_t(unsigned int, (conn->mtu - hlen), len);
1643 skb = bt_skb_send_alloc(sk, count + hlen, 1683
1644 msg->msg_flags & MSG_DONTWAIT, &err); 1684 skb = chan->ops->alloc_skb(chan, count + hlen,
1685 msg->msg_flags & MSG_DONTWAIT, &err);
1686
1645 if (!skb) 1687 if (!skb)
1646 return ERR_PTR(err); 1688 return ERR_PTR(err);
1647 1689
@@ -1655,7 +1697,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1655 if (sdulen) 1697 if (sdulen)
1656 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); 1698 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1657 1699
1658 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); 1700 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1659 if (unlikely(err < 0)) { 1701 if (unlikely(err < 0)) {
1660 kfree_skb(skb); 1702 kfree_skb(skb);
1661 return ERR_PTR(err); 1703 return ERR_PTR(err);
@@ -1801,9 +1843,9 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1801 1843
1802 BT_DBG("conn %p", conn); 1844 BT_DBG("conn %p", conn);
1803 1845
1804 rcu_read_lock(); 1846 mutex_lock(&conn->chan_lock);
1805 1847
1806 list_for_each_entry_rcu(chan, &conn->chan_l, list) { 1848 list_for_each_entry(chan, &conn->chan_l, list) {
1807 struct sock *sk = chan->sk; 1849 struct sock *sk = chan->sk;
1808 if (chan->chan_type != L2CAP_CHAN_RAW) 1850 if (chan->chan_type != L2CAP_CHAN_RAW)
1809 continue; 1851 continue;
@@ -1819,7 +1861,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1819 kfree_skb(nskb); 1861 kfree_skb(nskb);
1820 } 1862 }
1821 1863
1822 rcu_read_unlock(); 1864 mutex_unlock(&conn->chan_lock);
1823} 1865}
1824 1866
1825/* ---- L2CAP signalling commands ---- */ 1867/* ---- L2CAP signalling commands ---- */
@@ -1987,9 +2029,13 @@ static void l2cap_ack_timeout(struct work_struct *work)
1987 2029
1988 BT_DBG("chan %p", chan); 2030 BT_DBG("chan %p", chan);
1989 2031
1990 lock_sock(chan->sk); 2032 l2cap_chan_lock(chan);
1991 l2cap_send_ack(chan); 2033
1992 release_sock(chan->sk); 2034 __l2cap_send_ack(chan);
2035
2036 l2cap_chan_unlock(chan);
2037
2038 l2cap_chan_put(chan);
1993} 2039}
1994 2040
1995static inline void l2cap_ertm_init(struct l2cap_chan *chan) 2041static inline void l2cap_ertm_init(struct l2cap_chan *chan)
@@ -2607,6 +2653,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2607 2653
2608 parent = pchan->sk; 2654 parent = pchan->sk;
2609 2655
2656 mutex_lock(&conn->chan_lock);
2610 lock_sock(parent); 2657 lock_sock(parent);
2611 2658
2612 /* Check if the ACL is secure enough (if not SDP) */ 2659 /* Check if the ACL is secure enough (if not SDP) */
@@ -2647,7 +2694,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2647 2694
2648 bt_accept_enqueue(parent, sk); 2695 bt_accept_enqueue(parent, sk);
2649 2696
2650 l2cap_chan_add(conn, chan); 2697 __l2cap_chan_add(conn, chan);
2651 2698
2652 dcid = chan->scid; 2699 dcid = chan->scid;
2653 2700
@@ -2658,28 +2705,29 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2658 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) { 2705 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2659 if (l2cap_chan_check_security(chan)) { 2706 if (l2cap_chan_check_security(chan)) {
2660 if (bt_sk(sk)->defer_setup) { 2707 if (bt_sk(sk)->defer_setup) {
2661 l2cap_state_change(chan, BT_CONNECT2); 2708 __l2cap_state_change(chan, BT_CONNECT2);
2662 result = L2CAP_CR_PEND; 2709 result = L2CAP_CR_PEND;
2663 status = L2CAP_CS_AUTHOR_PEND; 2710 status = L2CAP_CS_AUTHOR_PEND;
2664 parent->sk_data_ready(parent, 0); 2711 parent->sk_data_ready(parent, 0);
2665 } else { 2712 } else {
2666 l2cap_state_change(chan, BT_CONFIG); 2713 __l2cap_state_change(chan, BT_CONFIG);
2667 result = L2CAP_CR_SUCCESS; 2714 result = L2CAP_CR_SUCCESS;
2668 status = L2CAP_CS_NO_INFO; 2715 status = L2CAP_CS_NO_INFO;
2669 } 2716 }
2670 } else { 2717 } else {
2671 l2cap_state_change(chan, BT_CONNECT2); 2718 __l2cap_state_change(chan, BT_CONNECT2);
2672 result = L2CAP_CR_PEND; 2719 result = L2CAP_CR_PEND;
2673 status = L2CAP_CS_AUTHEN_PEND; 2720 status = L2CAP_CS_AUTHEN_PEND;
2674 } 2721 }
2675 } else { 2722 } else {
2676 l2cap_state_change(chan, BT_CONNECT2); 2723 __l2cap_state_change(chan, BT_CONNECT2);
2677 result = L2CAP_CR_PEND; 2724 result = L2CAP_CR_PEND;
2678 status = L2CAP_CS_NO_INFO; 2725 status = L2CAP_CS_NO_INFO;
2679 } 2726 }
2680 2727
2681response: 2728response:
2682 release_sock(parent); 2729 release_sock(parent);
2730 mutex_unlock(&conn->chan_lock);
2683 2731
2684sendresp: 2732sendresp:
2685 rsp.scid = cpu_to_le16(scid); 2733 rsp.scid = cpu_to_le16(scid);
@@ -2695,8 +2743,7 @@ sendresp:
2695 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; 2743 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2696 conn->info_ident = l2cap_get_ident(conn); 2744 conn->info_ident = l2cap_get_ident(conn);
2697 2745
2698 schedule_delayed_work(&conn->info_timer, 2746 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
2699 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2700 2747
2701 l2cap_send_cmd(conn, conn->info_ident, 2748 l2cap_send_cmd(conn, conn->info_ident,
2702 L2CAP_INFO_REQ, sizeof(info), &info); 2749 L2CAP_INFO_REQ, sizeof(info), &info);
@@ -2719,27 +2766,36 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
2719 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data; 2766 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2720 u16 scid, dcid, result, status; 2767 u16 scid, dcid, result, status;
2721 struct l2cap_chan *chan; 2768 struct l2cap_chan *chan;
2722 struct sock *sk;
2723 u8 req[128]; 2769 u8 req[128];
2770 int err;
2724 2771
2725 scid = __le16_to_cpu(rsp->scid); 2772 scid = __le16_to_cpu(rsp->scid);
2726 dcid = __le16_to_cpu(rsp->dcid); 2773 dcid = __le16_to_cpu(rsp->dcid);
2727 result = __le16_to_cpu(rsp->result); 2774 result = __le16_to_cpu(rsp->result);
2728 status = __le16_to_cpu(rsp->status); 2775 status = __le16_to_cpu(rsp->status);
2729 2776
2730 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status); 2777 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
2778 dcid, scid, result, status);
2779
2780 mutex_lock(&conn->chan_lock);
2731 2781
2732 if (scid) { 2782 if (scid) {
2733 chan = l2cap_get_chan_by_scid(conn, scid); 2783 chan = __l2cap_get_chan_by_scid(conn, scid);
2734 if (!chan) 2784 if (!chan) {
2735 return -EFAULT; 2785 err = -EFAULT;
2786 goto unlock;
2787 }
2736 } else { 2788 } else {
2737 chan = l2cap_get_chan_by_ident(conn, cmd->ident); 2789 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
2738 if (!chan) 2790 if (!chan) {
2739 return -EFAULT; 2791 err = -EFAULT;
2792 goto unlock;
2793 }
2740 } 2794 }
2741 2795
2742 sk = chan->sk; 2796 err = 0;
2797
2798 l2cap_chan_lock(chan);
2743 2799
2744 switch (result) { 2800 switch (result) {
2745 case L2CAP_CR_SUCCESS: 2801 case L2CAP_CR_SUCCESS:
@@ -2765,8 +2821,12 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
2765 break; 2821 break;
2766 } 2822 }
2767 2823
2768 release_sock(sk); 2824 l2cap_chan_unlock(chan);
2769 return 0; 2825
2826unlock:
2827 mutex_unlock(&conn->chan_lock);
2828
2829 return err;
2770} 2830}
2771 2831
2772static inline void set_default_fcs(struct l2cap_chan *chan) 2832static inline void set_default_fcs(struct l2cap_chan *chan)
@@ -2786,7 +2846,6 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2786 u16 dcid, flags; 2846 u16 dcid, flags;
2787 u8 rsp[64]; 2847 u8 rsp[64];
2788 struct l2cap_chan *chan; 2848 struct l2cap_chan *chan;
2789 struct sock *sk;
2790 int len; 2849 int len;
2791 2850
2792 dcid = __le16_to_cpu(req->dcid); 2851 dcid = __le16_to_cpu(req->dcid);
@@ -2798,7 +2857,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2798 if (!chan) 2857 if (!chan)
2799 return -ENOENT; 2858 return -ENOENT;
2800 2859
2801 sk = chan->sk; 2860 l2cap_chan_lock(chan);
2802 2861
2803 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) { 2862 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2804 struct l2cap_cmd_rej_cid rej; 2863 struct l2cap_cmd_rej_cid rej;
@@ -2860,7 +2919,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2860 if (chan->mode == L2CAP_MODE_ERTM) 2919 if (chan->mode == L2CAP_MODE_ERTM)
2861 l2cap_ertm_init(chan); 2920 l2cap_ertm_init(chan);
2862 2921
2863 l2cap_chan_ready(sk); 2922 l2cap_chan_ready(chan);
2864 goto unlock; 2923 goto unlock;
2865 } 2924 }
2866 2925
@@ -2887,7 +2946,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2887 } 2946 }
2888 2947
2889unlock: 2948unlock:
2890 release_sock(sk); 2949 l2cap_chan_unlock(chan);
2891 return 0; 2950 return 0;
2892} 2951}
2893 2952
@@ -2896,7 +2955,6 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2896 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data; 2955 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2897 u16 scid, flags, result; 2956 u16 scid, flags, result;
2898 struct l2cap_chan *chan; 2957 struct l2cap_chan *chan;
2899 struct sock *sk;
2900 int len = cmd->len - sizeof(*rsp); 2958 int len = cmd->len - sizeof(*rsp);
2901 2959
2902 scid = __le16_to_cpu(rsp->scid); 2960 scid = __le16_to_cpu(rsp->scid);
@@ -2910,7 +2968,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2910 if (!chan) 2968 if (!chan)
2911 return 0; 2969 return 0;
2912 2970
2913 sk = chan->sk; 2971 l2cap_chan_lock(chan);
2914 2972
2915 switch (result) { 2973 switch (result) {
2916 case L2CAP_CONF_SUCCESS: 2974 case L2CAP_CONF_SUCCESS:
@@ -2969,9 +3027,9 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2969 } 3027 }
2970 3028
2971 default: 3029 default:
2972 sk->sk_err = ECONNRESET; 3030 l2cap_chan_set_err(chan, ECONNRESET);
2973 __set_chan_timer(chan, 3031
2974 msecs_to_jiffies(L2CAP_DISC_REJ_TIMEOUT)); 3032 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
2975 l2cap_send_disconn_req(conn, chan, ECONNRESET); 3033 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2976 goto done; 3034 goto done;
2977 } 3035 }
@@ -2991,11 +3049,11 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2991 if (chan->mode == L2CAP_MODE_ERTM) 3049 if (chan->mode == L2CAP_MODE_ERTM)
2992 l2cap_ertm_init(chan); 3050 l2cap_ertm_init(chan);
2993 3051
2994 l2cap_chan_ready(sk); 3052 l2cap_chan_ready(chan);
2995 } 3053 }
2996 3054
2997done: 3055done:
2998 release_sock(sk); 3056 l2cap_chan_unlock(chan);
2999 return 0; 3057 return 0;
3000} 3058}
3001 3059
@@ -3012,9 +3070,15 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
3012 3070
3013 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid); 3071 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3014 3072
3015 chan = l2cap_get_chan_by_scid(conn, dcid); 3073 mutex_lock(&conn->chan_lock);
3016 if (!chan) 3074
3075 chan = __l2cap_get_chan_by_scid(conn, dcid);
3076 if (!chan) {
3077 mutex_unlock(&conn->chan_lock);
3017 return 0; 3078 return 0;
3079 }
3080
3081 l2cap_chan_lock(chan);
3018 3082
3019 sk = chan->sk; 3083 sk = chan->sk;
3020 3084
@@ -3022,12 +3086,18 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
3022 rsp.scid = cpu_to_le16(chan->dcid); 3086 rsp.scid = cpu_to_le16(chan->dcid);
3023 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp); 3087 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3024 3088
3089 lock_sock(sk);
3025 sk->sk_shutdown = SHUTDOWN_MASK; 3090 sk->sk_shutdown = SHUTDOWN_MASK;
3091 release_sock(sk);
3026 3092
3027 l2cap_chan_del(chan, ECONNRESET); 3093 l2cap_chan_del(chan, ECONNRESET);
3028 release_sock(sk); 3094
3095 l2cap_chan_unlock(chan);
3029 3096
3030 chan->ops->close(chan->data); 3097 chan->ops->close(chan->data);
3098
3099 mutex_unlock(&conn->chan_lock);
3100
3031 return 0; 3101 return 0;
3032} 3102}
3033 3103
@@ -3036,23 +3106,30 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
3036 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data; 3106 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3037 u16 dcid, scid; 3107 u16 dcid, scid;
3038 struct l2cap_chan *chan; 3108 struct l2cap_chan *chan;
3039 struct sock *sk;
3040 3109
3041 scid = __le16_to_cpu(rsp->scid); 3110 scid = __le16_to_cpu(rsp->scid);
3042 dcid = __le16_to_cpu(rsp->dcid); 3111 dcid = __le16_to_cpu(rsp->dcid);
3043 3112
3044 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid); 3113 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3045 3114
3046 chan = l2cap_get_chan_by_scid(conn, scid); 3115 mutex_lock(&conn->chan_lock);
3047 if (!chan) 3116
3117 chan = __l2cap_get_chan_by_scid(conn, scid);
3118 if (!chan) {
3119 mutex_unlock(&conn->chan_lock);
3048 return 0; 3120 return 0;
3121 }
3049 3122
3050 sk = chan->sk; 3123 l2cap_chan_lock(chan);
3051 3124
3052 l2cap_chan_del(chan, 0); 3125 l2cap_chan_del(chan, 0);
3053 release_sock(sk); 3126
3127 l2cap_chan_unlock(chan);
3054 3128
3055 chan->ops->close(chan->data); 3129 chan->ops->close(chan->data);
3130
3131 mutex_unlock(&conn->chan_lock);
3132
3056 return 0; 3133 return 0;
3057} 3134}
3058 3135
@@ -3132,7 +3209,8 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
3132 return 0; 3209 return 0;
3133 } 3210 }
3134 3211
3135 if (type == L2CAP_IT_FEAT_MASK) { 3212 switch (type) {
3213 case L2CAP_IT_FEAT_MASK:
3136 conn->feat_mask = get_unaligned_le32(rsp->data); 3214 conn->feat_mask = get_unaligned_le32(rsp->data);
3137 3215
3138 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) { 3216 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
@@ -3149,11 +3227,15 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
3149 3227
3150 l2cap_conn_start(conn); 3228 l2cap_conn_start(conn);
3151 } 3229 }
3152 } else if (type == L2CAP_IT_FIXED_CHAN) { 3230 break;
3231
3232 case L2CAP_IT_FIXED_CHAN:
3233 conn->fixed_chan_mask = rsp->data[0];
3153 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; 3234 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3154 conn->info_ident = 0; 3235 conn->info_ident = 0;
3155 3236
3156 l2cap_conn_start(conn); 3237 l2cap_conn_start(conn);
3238 break;
3157 } 3239 }
3158 3240
3159 return 0; 3241 return 0;
@@ -3713,19 +3795,11 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u3
3713 3795
3714static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan) 3796static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3715{ 3797{
3716 u32 control;
3717
3718 BT_DBG("chan %p, Enter local busy", chan); 3798 BT_DBG("chan %p, Enter local busy", chan);
3719 3799
3720 set_bit(CONN_LOCAL_BUSY, &chan->conn_state); 3800 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3721 3801
3722 control = __set_reqseq(chan, chan->buffer_seq); 3802 __set_ack_timer(chan);
3723 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3724 l2cap_send_sframe(chan, control);
3725
3726 set_bit(CONN_RNR_SENT, &chan->conn_state);
3727
3728 __clear_ack_timer(chan);
3729} 3803}
3730 3804
3731static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan) 3805static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
@@ -3865,8 +3939,11 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_cont
3865 goto drop; 3939 goto drop;
3866 } 3940 }
3867 3941
3868 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) 3942 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3943 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3944 l2cap_send_ack(chan);
3869 goto drop; 3945 goto drop;
3946 }
3870 3947
3871 if (tx_seq == chan->expected_tx_seq) 3948 if (tx_seq == chan->expected_tx_seq)
3872 goto expected; 3949 goto expected;
@@ -3927,15 +4004,15 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_cont
3927 __skb_queue_head_init(&chan->srej_q); 4004 __skb_queue_head_init(&chan->srej_q);
3928 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar); 4005 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3929 4006
3930 set_bit(CONN_SEND_PBIT, &chan->conn_state); 4007 /* Set P-bit only if there are some I-frames to ack. */
4008 if (__clear_ack_timer(chan))
4009 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3931 4010
3932 err = l2cap_send_srejframe(chan, tx_seq); 4011 err = l2cap_send_srejframe(chan, tx_seq);
3933 if (err < 0) { 4012 if (err < 0) {
3934 l2cap_send_disconn_req(chan->conn, chan, -err); 4013 l2cap_send_disconn_req(chan->conn, chan, -err);
3935 return err; 4014 return err;
3936 } 4015 }
3937
3938 __clear_ack_timer(chan);
3939 } 4016 }
3940 return 0; 4017 return 0;
3941 4018
@@ -4135,9 +4212,8 @@ static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_cont
4135 return 0; 4212 return 0;
4136} 4213}
4137 4214
4138static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb) 4215static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4139{ 4216{
4140 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
4141 u32 control; 4217 u32 control;
4142 u16 req_seq; 4218 u16 req_seq;
4143 int len, next_tx_seq_offset, req_seq_offset; 4219 int len, next_tx_seq_offset, req_seq_offset;
@@ -4205,7 +4281,6 @@ drop:
4205static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb) 4281static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4206{ 4282{
4207 struct l2cap_chan *chan; 4283 struct l2cap_chan *chan;
4208 struct sock *sk = NULL;
4209 u32 control; 4284 u32 control;
4210 u16 tx_seq; 4285 u16 tx_seq;
4211 int len; 4286 int len;
@@ -4213,10 +4288,12 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
4213 chan = l2cap_get_chan_by_scid(conn, cid); 4288 chan = l2cap_get_chan_by_scid(conn, cid);
4214 if (!chan) { 4289 if (!chan) {
4215 BT_DBG("unknown cid 0x%4.4x", cid); 4290 BT_DBG("unknown cid 0x%4.4x", cid);
4216 goto drop; 4291 /* Drop packet and return */
4292 kfree_skb(skb);
4293 return 0;
4217 } 4294 }
4218 4295
4219 sk = chan->sk; 4296 l2cap_chan_lock(chan);
4220 4297
4221 BT_DBG("chan %p, len %d", chan, skb->len); 4298 BT_DBG("chan %p, len %d", chan, skb->len);
4222 4299
@@ -4238,7 +4315,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
4238 break; 4315 break;
4239 4316
4240 case L2CAP_MODE_ERTM: 4317 case L2CAP_MODE_ERTM:
4241 l2cap_ertm_data_rcv(sk, skb); 4318 l2cap_ertm_data_rcv(chan, skb);
4242 4319
4243 goto done; 4320 goto done;
4244 4321
@@ -4287,26 +4364,20 @@ drop:
4287 kfree_skb(skb); 4364 kfree_skb(skb);
4288 4365
4289done: 4366done:
4290 if (sk) 4367 l2cap_chan_unlock(chan);
4291 release_sock(sk);
4292 4368
4293 return 0; 4369 return 0;
4294} 4370}
4295 4371
4296static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb) 4372static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4297{ 4373{
4298 struct sock *sk = NULL;
4299 struct l2cap_chan *chan; 4374 struct l2cap_chan *chan;
4300 4375
4301 chan = l2cap_global_chan_by_psm(0, psm, conn->src); 4376 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4302 if (!chan) 4377 if (!chan)
4303 goto drop; 4378 goto drop;
4304 4379
4305 sk = chan->sk; 4380 BT_DBG("chan %p, len %d", chan, skb->len);
4306
4307 lock_sock(sk);
4308
4309 BT_DBG("sk %p, len %d", sk, skb->len);
4310 4381
4311 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED) 4382 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4312 goto drop; 4383 goto drop;
@@ -4315,31 +4386,23 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
4315 goto drop; 4386 goto drop;
4316 4387
4317 if (!chan->ops->recv(chan->data, skb)) 4388 if (!chan->ops->recv(chan->data, skb))
4318 goto done; 4389 return 0;
4319 4390
4320drop: 4391drop:
4321 kfree_skb(skb); 4392 kfree_skb(skb);
4322 4393
4323done:
4324 if (sk)
4325 release_sock(sk);
4326 return 0; 4394 return 0;
4327} 4395}
4328 4396
4329static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb) 4397static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4330{ 4398{
4331 struct sock *sk = NULL;
4332 struct l2cap_chan *chan; 4399 struct l2cap_chan *chan;
4333 4400
4334 chan = l2cap_global_chan_by_scid(0, cid, conn->src); 4401 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4335 if (!chan) 4402 if (!chan)
4336 goto drop; 4403 goto drop;
4337 4404
4338 sk = chan->sk; 4405 BT_DBG("chan %p, len %d", chan, skb->len);
4339
4340 lock_sock(sk);
4341
4342 BT_DBG("sk %p, len %d", sk, skb->len);
4343 4406
4344 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED) 4407 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4345 goto drop; 4408 goto drop;
@@ -4348,14 +4411,11 @@ static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct
4348 goto drop; 4411 goto drop;
4349 4412
4350 if (!chan->ops->recv(chan->data, skb)) 4413 if (!chan->ops->recv(chan->data, skb))
4351 goto done; 4414 return 0;
4352 4415
4353drop: 4416drop:
4354 kfree_skb(skb); 4417 kfree_skb(skb);
4355 4418
4356done:
4357 if (sk)
4358 release_sock(sk);
4359 return 0; 4419 return 0;
4360} 4420}
4361 4421
@@ -4479,8 +4539,7 @@ static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4479 if (encrypt == 0x00) { 4539 if (encrypt == 0x00) {
4480 if (chan->sec_level == BT_SECURITY_MEDIUM) { 4540 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4481 __clear_chan_timer(chan); 4541 __clear_chan_timer(chan);
4482 __set_chan_timer(chan, 4542 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4483 msecs_to_jiffies(L2CAP_ENC_TIMEOUT));
4484 } else if (chan->sec_level == BT_SECURITY_HIGH) 4543 } else if (chan->sec_level == BT_SECURITY_HIGH)
4485 l2cap_chan_close(chan, ECONNREFUSED); 4544 l2cap_chan_close(chan, ECONNREFUSED);
4486 } else { 4545 } else {
@@ -4504,57 +4563,49 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4504 cancel_delayed_work(&conn->security_timer); 4563 cancel_delayed_work(&conn->security_timer);
4505 } 4564 }
4506 4565
4507 rcu_read_lock(); 4566 mutex_lock(&conn->chan_lock);
4508
4509 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
4510 struct sock *sk = chan->sk;
4511 4567
4512 bh_lock_sock(sk); 4568 list_for_each_entry(chan, &conn->chan_l, list) {
4569 l2cap_chan_lock(chan);
4513 4570
4514 BT_DBG("chan->scid %d", chan->scid); 4571 BT_DBG("chan->scid %d", chan->scid);
4515 4572
4516 if (chan->scid == L2CAP_CID_LE_DATA) { 4573 if (chan->scid == L2CAP_CID_LE_DATA) {
4517 if (!status && encrypt) { 4574 if (!status && encrypt) {
4518 chan->sec_level = hcon->sec_level; 4575 chan->sec_level = hcon->sec_level;
4519 l2cap_chan_ready(sk); 4576 l2cap_chan_ready(chan);
4520 } 4577 }
4521 4578
4522 bh_unlock_sock(sk); 4579 l2cap_chan_unlock(chan);
4523 continue; 4580 continue;
4524 } 4581 }
4525 4582
4526 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) { 4583 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4527 bh_unlock_sock(sk); 4584 l2cap_chan_unlock(chan);
4528 continue; 4585 continue;
4529 } 4586 }
4530 4587
4531 if (!status && (chan->state == BT_CONNECTED || 4588 if (!status && (chan->state == BT_CONNECTED ||
4532 chan->state == BT_CONFIG)) { 4589 chan->state == BT_CONFIG)) {
4533 l2cap_check_encryption(chan, encrypt); 4590 l2cap_check_encryption(chan, encrypt);
4534 bh_unlock_sock(sk); 4591 l2cap_chan_unlock(chan);
4535 continue; 4592 continue;
4536 } 4593 }
4537 4594
4538 if (chan->state == BT_CONNECT) { 4595 if (chan->state == BT_CONNECT) {
4539 if (!status) { 4596 if (!status) {
4540 struct l2cap_conn_req req; 4597 l2cap_send_conn_req(chan);
4541 req.scid = cpu_to_le16(chan->scid);
4542 req.psm = chan->psm;
4543
4544 chan->ident = l2cap_get_ident(conn);
4545 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4546
4547 l2cap_send_cmd(conn, chan->ident,
4548 L2CAP_CONN_REQ, sizeof(req), &req);
4549 } else { 4598 } else {
4550 __clear_chan_timer(chan); 4599 __clear_chan_timer(chan);
4551 __set_chan_timer(chan, 4600 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4552 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4553 } 4601 }
4554 } else if (chan->state == BT_CONNECT2) { 4602 } else if (chan->state == BT_CONNECT2) {
4603 struct sock *sk = chan->sk;
4555 struct l2cap_conn_rsp rsp; 4604 struct l2cap_conn_rsp rsp;
4556 __u16 res, stat; 4605 __u16 res, stat;
4557 4606
4607 lock_sock(sk);
4608
4558 if (!status) { 4609 if (!status) {
4559 if (bt_sk(sk)->defer_setup) { 4610 if (bt_sk(sk)->defer_setup) {
4560 struct sock *parent = bt_sk(sk)->parent; 4611 struct sock *parent = bt_sk(sk)->parent;
@@ -4563,18 +4614,19 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4563 if (parent) 4614 if (parent)
4564 parent->sk_data_ready(parent, 0); 4615 parent->sk_data_ready(parent, 0);
4565 } else { 4616 } else {
4566 l2cap_state_change(chan, BT_CONFIG); 4617 __l2cap_state_change(chan, BT_CONFIG);
4567 res = L2CAP_CR_SUCCESS; 4618 res = L2CAP_CR_SUCCESS;
4568 stat = L2CAP_CS_NO_INFO; 4619 stat = L2CAP_CS_NO_INFO;
4569 } 4620 }
4570 } else { 4621 } else {
4571 l2cap_state_change(chan, BT_DISCONN); 4622 __l2cap_state_change(chan, BT_DISCONN);
4572 __set_chan_timer(chan, 4623 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4573 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4574 res = L2CAP_CR_SEC_BLOCK; 4624 res = L2CAP_CR_SEC_BLOCK;
4575 stat = L2CAP_CS_NO_INFO; 4625 stat = L2CAP_CS_NO_INFO;
4576 } 4626 }
4577 4627
4628 release_sock(sk);
4629
4578 rsp.scid = cpu_to_le16(chan->dcid); 4630 rsp.scid = cpu_to_le16(chan->dcid);
4579 rsp.dcid = cpu_to_le16(chan->scid); 4631 rsp.dcid = cpu_to_le16(chan->scid);
4580 rsp.result = cpu_to_le16(res); 4632 rsp.result = cpu_to_le16(res);
@@ -4583,10 +4635,10 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4583 sizeof(rsp), &rsp); 4635 sizeof(rsp), &rsp);
4584 } 4636 }
4585 4637
4586 bh_unlock_sock(sk); 4638 l2cap_chan_unlock(chan);
4587 } 4639 }
4588 4640
4589 rcu_read_unlock(); 4641 mutex_unlock(&conn->chan_lock);
4590 4642
4591 return 0; 4643 return 0;
4592} 4644}
@@ -4647,6 +4699,7 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4647 4699
4648 if (chan && chan->sk) { 4700 if (chan && chan->sk) {
4649 struct sock *sk = chan->sk; 4701 struct sock *sk = chan->sk;
4702 lock_sock(sk);
4650 4703
4651 if (chan->imtu < len - L2CAP_HDR_SIZE) { 4704 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4652 BT_ERR("Frame exceeding recv MTU (len %d, " 4705 BT_ERR("Frame exceeding recv MTU (len %d, "
@@ -4717,7 +4770,7 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)
4717 c->state, __le16_to_cpu(c->psm), 4770 c->state, __le16_to_cpu(c->psm),
4718 c->scid, c->dcid, c->imtu, c->omtu, 4771 c->scid, c->dcid, c->imtu, c->omtu,
4719 c->sec_level, c->mode); 4772 c->sec_level, c->mode);
4720} 4773 }
4721 4774
4722 read_unlock(&chan_list_lock); 4775 read_unlock(&chan_list_lock);
4723 4776
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 401d9428ae4c..c4fe583b0af6 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -125,13 +125,15 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
125 125
126 err = l2cap_chan_connect(chan, la.l2_psm, la.l2_cid, &la.l2_bdaddr); 126 err = l2cap_chan_connect(chan, la.l2_psm, la.l2_cid, &la.l2_bdaddr);
127 if (err) 127 if (err)
128 goto done; 128 return err;
129
130 lock_sock(sk);
129 131
130 err = bt_sock_wait_state(sk, BT_CONNECTED, 132 err = bt_sock_wait_state(sk, BT_CONNECTED,
131 sock_sndtimeo(sk, flags & O_NONBLOCK)); 133 sock_sndtimeo(sk, flags & O_NONBLOCK));
132done: 134
133 if (sock_owned_by_user(sk)) 135 release_sock(sk);
134 release_sock(sk); 136
135 return err; 137 return err;
136} 138}
137 139
@@ -783,7 +785,7 @@ static void l2cap_sock_kill(struct sock *sk)
783 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) 785 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
784 return; 786 return;
785 787
786 BT_DBG("sk %p state %d", sk, sk->sk_state); 788 BT_DBG("sk %p state %s", sk, state_to_string(sk->sk_state));
787 789
788 /* Kill poor orphan */ 790 /* Kill poor orphan */
789 791
@@ -795,7 +797,8 @@ static void l2cap_sock_kill(struct sock *sk)
795static int l2cap_sock_shutdown(struct socket *sock, int how) 797static int l2cap_sock_shutdown(struct socket *sock, int how)
796{ 798{
797 struct sock *sk = sock->sk; 799 struct sock *sk = sock->sk;
798 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 800 struct l2cap_chan *chan;
801 struct l2cap_conn *conn;
799 int err = 0; 802 int err = 0;
800 803
801 BT_DBG("sock %p, sk %p", sock, sk); 804 BT_DBG("sock %p, sk %p", sock, sk);
@@ -803,13 +806,24 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
803 if (!sk) 806 if (!sk)
804 return 0; 807 return 0;
805 808
809 chan = l2cap_pi(sk)->chan;
810 conn = chan->conn;
811
812 if (conn)
813 mutex_lock(&conn->chan_lock);
814
815 l2cap_chan_lock(chan);
806 lock_sock(sk); 816 lock_sock(sk);
817
807 if (!sk->sk_shutdown) { 818 if (!sk->sk_shutdown) {
808 if (chan->mode == L2CAP_MODE_ERTM) 819 if (chan->mode == L2CAP_MODE_ERTM)
809 err = __l2cap_wait_ack(sk); 820 err = __l2cap_wait_ack(sk);
810 821
811 sk->sk_shutdown = SHUTDOWN_MASK; 822 sk->sk_shutdown = SHUTDOWN_MASK;
823
824 release_sock(sk);
812 l2cap_chan_close(chan, 0); 825 l2cap_chan_close(chan, 0);
826 lock_sock(sk);
813 827
814 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) 828 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
815 err = bt_sock_wait_state(sk, BT_CLOSED, 829 err = bt_sock_wait_state(sk, BT_CLOSED,
@@ -820,6 +834,11 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
820 err = -sk->sk_err; 834 err = -sk->sk_err;
821 835
822 release_sock(sk); 836 release_sock(sk);
837 l2cap_chan_unlock(chan);
838
839 if (conn)
840 mutex_unlock(&conn->chan_lock);
841
823 return err; 842 return err;
824} 843}
825 844
@@ -862,8 +881,12 @@ static int l2cap_sock_recv_cb(void *data, struct sk_buff *skb)
862 struct sock *sk = data; 881 struct sock *sk = data;
863 struct l2cap_pinfo *pi = l2cap_pi(sk); 882 struct l2cap_pinfo *pi = l2cap_pi(sk);
864 883
865 if (pi->rx_busy_skb) 884 lock_sock(sk);
866 return -ENOMEM; 885
886 if (pi->rx_busy_skb) {
887 err = -ENOMEM;
888 goto done;
889 }
867 890
868 err = sock_queue_rcv_skb(sk, skb); 891 err = sock_queue_rcv_skb(sk, skb);
869 892
@@ -882,6 +905,9 @@ static int l2cap_sock_recv_cb(void *data, struct sk_buff *skb)
882 err = 0; 905 err = 0;
883 } 906 }
884 907
908done:
909 release_sock(sk);
910
885 return err; 911 return err;
886} 912}
887 913
@@ -899,12 +925,22 @@ static void l2cap_sock_state_change_cb(void *data, int state)
899 sk->sk_state = state; 925 sk->sk_state = state;
900} 926}
901 927
928static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
929 unsigned long len, int nb,
930 int *err)
931{
932 struct sock *sk = chan->sk;
933
934 return bt_skb_send_alloc(sk, len, nb, err);
935}
936
902static struct l2cap_ops l2cap_chan_ops = { 937static struct l2cap_ops l2cap_chan_ops = {
903 .name = "L2CAP Socket Interface", 938 .name = "L2CAP Socket Interface",
904 .new_connection = l2cap_sock_new_connection_cb, 939 .new_connection = l2cap_sock_new_connection_cb,
905 .recv = l2cap_sock_recv_cb, 940 .recv = l2cap_sock_recv_cb,
906 .close = l2cap_sock_close_cb, 941 .close = l2cap_sock_close_cb,
907 .state_change = l2cap_sock_state_change_cb, 942 .state_change = l2cap_sock_state_change_cb,
943 .alloc_skb = l2cap_sock_alloc_skb_cb,
908}; 944};
909 945
910static void l2cap_sock_destruct(struct sock *sk) 946static void l2cap_sock_destruct(struct sock *sk)
@@ -1004,7 +1040,7 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p
1004 INIT_LIST_HEAD(&bt_sk(sk)->accept_q); 1040 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
1005 1041
1006 sk->sk_destruct = l2cap_sock_destruct; 1042 sk->sk_destruct = l2cap_sock_destruct;
1007 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT); 1043 sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
1008 1044
1009 sock_reset_flag(sk, SOCK_ZAPPED); 1045 sock_reset_flag(sk, SOCK_ZAPPED);
1010 1046
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index 86a6bed229df..506628876f36 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -24,6 +24,8 @@
24 24
25/* Bluetooth kernel library. */ 25/* Bluetooth kernel library. */
26 26
27#define pr_fmt(fmt) "Bluetooth: " fmt
28
27#include <linux/module.h> 29#include <linux/module.h>
28 30
29#include <linux/kernel.h> 31#include <linux/kernel.h>
@@ -151,7 +153,26 @@ int bt_to_errno(__u16 code)
151} 153}
152EXPORT_SYMBOL(bt_to_errno); 154EXPORT_SYMBOL(bt_to_errno);
153 155
154int bt_printk(const char *level, const char *format, ...) 156int bt_info(const char *format, ...)
157{
158 struct va_format vaf;
159 va_list args;
160 int r;
161
162 va_start(args, format);
163
164 vaf.fmt = format;
165 vaf.va = &args;
166
167 r = pr_info("%pV", &vaf);
168
169 va_end(args);
170
171 return r;
172}
173EXPORT_SYMBOL(bt_info);
174
175int bt_err(const char *format, ...)
155{ 176{
156 struct va_format vaf; 177 struct va_format vaf;
157 va_list args; 178 va_list args;
@@ -162,10 +183,10 @@ int bt_printk(const char *level, const char *format, ...)
162 vaf.fmt = format; 183 vaf.fmt = format;
163 vaf.va = &args; 184 vaf.va = &args;
164 185
165 r = printk("%sBluetooth: %pV\n", level, &vaf); 186 r = pr_err("%pV", &vaf);
166 187
167 va_end(args); 188 va_end(args);
168 189
169 return r; 190 return r;
170} 191}
171EXPORT_SYMBOL(bt_printk); 192EXPORT_SYMBOL(bt_err);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index bc8e59dda78e..7fcff8887131 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -1,6 +1,8 @@
1/* 1/*
2 BlueZ - Bluetooth protocol stack for Linux 2 BlueZ - Bluetooth protocol stack for Linux
3
3 Copyright (C) 2010 Nokia Corporation 4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
4 6
5 This program is free software; you can redistribute it and/or modify 7 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License version 2 as 8 it under the terms of the GNU General Public License version 2 as
@@ -32,12 +34,92 @@
32#include <net/bluetooth/mgmt.h> 34#include <net/bluetooth/mgmt.h>
33#include <net/bluetooth/smp.h> 35#include <net/bluetooth/smp.h>
34 36
35#define MGMT_VERSION 0 37bool enable_hs;
36#define MGMT_REVISION 1 38bool enable_le;
39
40#define MGMT_VERSION 1
41#define MGMT_REVISION 0
42
43static const u16 mgmt_commands[] = {
44 MGMT_OP_READ_INDEX_LIST,
45 MGMT_OP_READ_INFO,
46 MGMT_OP_SET_POWERED,
47 MGMT_OP_SET_DISCOVERABLE,
48 MGMT_OP_SET_CONNECTABLE,
49 MGMT_OP_SET_FAST_CONNECTABLE,
50 MGMT_OP_SET_PAIRABLE,
51 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_SSP,
53 MGMT_OP_SET_HS,
54 MGMT_OP_SET_LE,
55 MGMT_OP_SET_DEV_CLASS,
56 MGMT_OP_SET_LOCAL_NAME,
57 MGMT_OP_ADD_UUID,
58 MGMT_OP_REMOVE_UUID,
59 MGMT_OP_LOAD_LINK_KEYS,
60 MGMT_OP_LOAD_LONG_TERM_KEYS,
61 MGMT_OP_DISCONNECT,
62 MGMT_OP_GET_CONNECTIONS,
63 MGMT_OP_PIN_CODE_REPLY,
64 MGMT_OP_PIN_CODE_NEG_REPLY,
65 MGMT_OP_SET_IO_CAPABILITY,
66 MGMT_OP_PAIR_DEVICE,
67 MGMT_OP_CANCEL_PAIR_DEVICE,
68 MGMT_OP_UNPAIR_DEVICE,
69 MGMT_OP_USER_CONFIRM_REPLY,
70 MGMT_OP_USER_CONFIRM_NEG_REPLY,
71 MGMT_OP_USER_PASSKEY_REPLY,
72 MGMT_OP_USER_PASSKEY_NEG_REPLY,
73 MGMT_OP_READ_LOCAL_OOB_DATA,
74 MGMT_OP_ADD_REMOTE_OOB_DATA,
75 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
76 MGMT_OP_START_DISCOVERY,
77 MGMT_OP_STOP_DISCOVERY,
78 MGMT_OP_CONFIRM_NAME,
79 MGMT_OP_BLOCK_DEVICE,
80 MGMT_OP_UNBLOCK_DEVICE,
81};
82
83static const u16 mgmt_events[] = {
84 MGMT_EV_CONTROLLER_ERROR,
85 MGMT_EV_INDEX_ADDED,
86 MGMT_EV_INDEX_REMOVED,
87 MGMT_EV_NEW_SETTINGS,
88 MGMT_EV_CLASS_OF_DEV_CHANGED,
89 MGMT_EV_LOCAL_NAME_CHANGED,
90 MGMT_EV_NEW_LINK_KEY,
91 MGMT_EV_NEW_LONG_TERM_KEY,
92 MGMT_EV_DEVICE_CONNECTED,
93 MGMT_EV_DEVICE_DISCONNECTED,
94 MGMT_EV_CONNECT_FAILED,
95 MGMT_EV_PIN_CODE_REQUEST,
96 MGMT_EV_USER_CONFIRM_REQUEST,
97 MGMT_EV_USER_PASSKEY_REQUEST,
98 MGMT_EV_AUTH_FAILED,
99 MGMT_EV_DEVICE_FOUND,
100 MGMT_EV_DISCOVERING,
101 MGMT_EV_DEVICE_BLOCKED,
102 MGMT_EV_DEVICE_UNBLOCKED,
103 MGMT_EV_DEVICE_UNPAIRED,
104};
105
106/*
107 * These LE scan and inquiry parameters were chosen according to LE General
108 * Discovery Procedure specification.
109 */
110#define LE_SCAN_TYPE 0x01
111#define LE_SCAN_WIN 0x12
112#define LE_SCAN_INT 0x12
113#define LE_SCAN_TIMEOUT_LE_ONLY 10240 /* TGAP(gen_disc_scan_min) */
114#define LE_SCAN_TIMEOUT_BREDR_LE 5120 /* TGAP(100)/2 */
115
116#define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
117#define INQUIRY_LEN_BREDR_LE 0x04 /* TGAP(100)/2 */
37 118
38#define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */ 119#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
39 120
40#define SERVICE_CACHE_TIMEOUT (5 * 1000) 121#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
122 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
41 123
42struct pending_cmd { 124struct pending_cmd {
43 struct list_head list; 125 struct list_head list;
@@ -151,8 +233,8 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
151 return err; 233 return err;
152} 234}
153 235
154static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp, 236static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
155 size_t rp_len) 237 void *rp, size_t rp_len)
156{ 238{
157 struct sk_buff *skb; 239 struct sk_buff *skb;
158 struct mgmt_hdr *hdr; 240 struct mgmt_hdr *hdr;
@@ -173,6 +255,7 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
173 255
174 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len); 256 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
175 put_unaligned_le16(cmd, &ev->opcode); 257 put_unaligned_le16(cmd, &ev->opcode);
258 ev->status = status;
176 259
177 if (rp) 260 if (rp)
178 memcpy(ev->data, rp, rp_len); 261 memcpy(ev->data, rp, rp_len);
@@ -181,10 +264,11 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
181 if (err < 0) 264 if (err < 0)
182 kfree_skb(skb); 265 kfree_skb(skb);
183 266
184 return err;; 267 return err;
185} 268}
186 269
187static int read_version(struct sock *sk) 270static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
271 u16 data_len)
188{ 272{
189 struct mgmt_rp_read_version rp; 273 struct mgmt_rp_read_version rp;
190 274
@@ -193,11 +277,46 @@ static int read_version(struct sock *sk)
193 rp.version = MGMT_VERSION; 277 rp.version = MGMT_VERSION;
194 put_unaligned_le16(MGMT_REVISION, &rp.revision); 278 put_unaligned_le16(MGMT_REVISION, &rp.revision);
195 279
196 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, &rp, 280 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
197 sizeof(rp)); 281 sizeof(rp));
282}
283
284static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
285 u16 data_len)
286{
287 struct mgmt_rp_read_commands *rp;
288 u16 num_commands = ARRAY_SIZE(mgmt_commands);
289 u16 num_events = ARRAY_SIZE(mgmt_events);
290 u16 *opcode;
291 size_t rp_size;
292 int i, err;
293
294 BT_DBG("sock %p", sk);
295
296 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
297
298 rp = kmalloc(rp_size, GFP_KERNEL);
299 if (!rp)
300 return -ENOMEM;
301
302 put_unaligned_le16(num_commands, &rp->num_commands);
303 put_unaligned_le16(num_events, &rp->num_events);
304
305 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
306 put_unaligned_le16(mgmt_commands[i], opcode);
307
308 for (i = 0; i < num_events; i++, opcode++)
309 put_unaligned_le16(mgmt_events[i], opcode);
310
311 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
312 rp_size);
313 kfree(rp);
314
315 return err;
198} 316}
199 317
200static int read_index_list(struct sock *sk) 318static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
319 u16 data_len)
201{ 320{
202 struct mgmt_rp_read_index_list *rp; 321 struct mgmt_rp_read_index_list *rp;
203 struct list_head *p; 322 struct list_head *p;
@@ -226,10 +345,7 @@ static int read_index_list(struct sock *sk)
226 345
227 i = 0; 346 i = 0;
228 list_for_each_entry(d, &hci_dev_list, list) { 347 list_for_each_entry(d, &hci_dev_list, list) {
229 if (test_and_clear_bit(HCI_AUTO_OFF, &d->flags)) 348 if (test_bit(HCI_SETUP, &d->dev_flags))
230 cancel_delayed_work(&d->power_off);
231
232 if (test_bit(HCI_SETUP, &d->flags))
233 continue; 349 continue;
234 350
235 put_unaligned_le16(d->id, &rp->index[i++]); 351 put_unaligned_le16(d->id, &rp->index[i++]);
@@ -238,8 +354,8 @@ static int read_index_list(struct sock *sk)
238 354
239 read_unlock(&hci_dev_list_lock); 355 read_unlock(&hci_dev_list_lock);
240 356
241 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, rp, 357 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
242 rp_len); 358 rp_len);
243 359
244 kfree(rp); 360 kfree(rp);
245 361
@@ -264,8 +380,13 @@ static u32 get_supported_settings(struct hci_dev *hdev)
264 settings |= MGMT_SETTING_LINK_SECURITY; 380 settings |= MGMT_SETTING_LINK_SECURITY;
265 } 381 }
266 382
267 if (hdev->features[4] & LMP_LE) 383 if (enable_hs)
268 settings |= MGMT_SETTING_LE; 384 settings |= MGMT_SETTING_HS;
385
386 if (enable_le) {
387 if (hdev->features[4] & LMP_LE)
388 settings |= MGMT_SETTING_LE;
389 }
269 390
270 return settings; 391 return settings;
271} 392}
@@ -274,47 +395,36 @@ static u32 get_current_settings(struct hci_dev *hdev)
274{ 395{
275 u32 settings = 0; 396 u32 settings = 0;
276 397
277 if (test_bit(HCI_UP, &hdev->flags)) 398 if (hdev_is_powered(hdev))
278 settings |= MGMT_SETTING_POWERED; 399 settings |= MGMT_SETTING_POWERED;
279 else
280 return settings;
281 400
282 if (test_bit(HCI_PSCAN, &hdev->flags)) 401 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
283 settings |= MGMT_SETTING_CONNECTABLE; 402 settings |= MGMT_SETTING_CONNECTABLE;
284 403
285 if (test_bit(HCI_ISCAN, &hdev->flags)) 404 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
286 settings |= MGMT_SETTING_DISCOVERABLE; 405 settings |= MGMT_SETTING_DISCOVERABLE;
287 406
288 if (test_bit(HCI_PAIRABLE, &hdev->flags)) 407 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
289 settings |= MGMT_SETTING_PAIRABLE; 408 settings |= MGMT_SETTING_PAIRABLE;
290 409
291 if (!(hdev->features[4] & LMP_NO_BREDR)) 410 if (!(hdev->features[4] & LMP_NO_BREDR))
292 settings |= MGMT_SETTING_BREDR; 411 settings |= MGMT_SETTING_BREDR;
293 412
294 if (hdev->host_features[0] & LMP_HOST_LE) 413 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
295 settings |= MGMT_SETTING_LE; 414 settings |= MGMT_SETTING_LE;
296 415
297 if (test_bit(HCI_AUTH, &hdev->flags)) 416 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
298 settings |= MGMT_SETTING_LINK_SECURITY; 417 settings |= MGMT_SETTING_LINK_SECURITY;
299 418
300 if (hdev->ssp_mode > 0) 419 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
301 settings |= MGMT_SETTING_SSP; 420 settings |= MGMT_SETTING_SSP;
302 421
422 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
423 settings |= MGMT_SETTING_HS;
424
303 return settings; 425 return settings;
304} 426}
305 427
306#define EIR_FLAGS 0x01 /* flags */
307#define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */
308#define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */
309#define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */
310#define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */
311#define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */
312#define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */
313#define EIR_NAME_SHORT 0x08 /* shortened local name */
314#define EIR_NAME_COMPLETE 0x09 /* complete local name */
315#define EIR_TX_POWER 0x0A /* transmit power level */
316#define EIR_DEVICE_ID 0x10 /* device ID */
317
318#define PNP_INFO_SVCLASS_ID 0x1200 428#define PNP_INFO_SVCLASS_ID 0x1200
319 429
320static u8 bluetooth_base_uuid[] = { 430static u8 bluetooth_base_uuid[] = {
@@ -425,13 +535,16 @@ static int update_eir(struct hci_dev *hdev)
425{ 535{
426 struct hci_cp_write_eir cp; 536 struct hci_cp_write_eir cp;
427 537
538 if (!hdev_is_powered(hdev))
539 return 0;
540
428 if (!(hdev->features[6] & LMP_EXT_INQ)) 541 if (!(hdev->features[6] & LMP_EXT_INQ))
429 return 0; 542 return 0;
430 543
431 if (hdev->ssp_mode == 0) 544 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
432 return 0; 545 return 0;
433 546
434 if (test_bit(HCI_SERVICE_CACHE, &hdev->flags)) 547 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
435 return 0; 548 return 0;
436 549
437 memset(&cp, 0, sizeof(cp)); 550 memset(&cp, 0, sizeof(cp));
@@ -460,10 +573,14 @@ static u8 get_service_classes(struct hci_dev *hdev)
460static int update_class(struct hci_dev *hdev) 573static int update_class(struct hci_dev *hdev)
461{ 574{
462 u8 cod[3]; 575 u8 cod[3];
576 int err;
463 577
464 BT_DBG("%s", hdev->name); 578 BT_DBG("%s", hdev->name);
465 579
466 if (test_bit(HCI_SERVICE_CACHE, &hdev->flags)) 580 if (!hdev_is_powered(hdev))
581 return 0;
582
583 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
467 return 0; 584 return 0;
468 585
469 cod[0] = hdev->minor_class; 586 cod[0] = hdev->minor_class;
@@ -473,15 +590,19 @@ static int update_class(struct hci_dev *hdev)
473 if (memcmp(cod, hdev->dev_class, 3) == 0) 590 if (memcmp(cod, hdev->dev_class, 3) == 0)
474 return 0; 591 return 0;
475 592
476 return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod); 593 err = hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
594 if (err == 0)
595 set_bit(HCI_PENDING_CLASS, &hdev->dev_flags);
596
597 return err;
477} 598}
478 599
479static void service_cache_off(struct work_struct *work) 600static void service_cache_off(struct work_struct *work)
480{ 601{
481 struct hci_dev *hdev = container_of(work, struct hci_dev, 602 struct hci_dev *hdev = container_of(work, struct hci_dev,
482 service_cache.work); 603 service_cache.work);
483 604
484 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags)) 605 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
485 return; 606 return;
486 607
487 hci_dev_lock(hdev); 608 hci_dev_lock(hdev);
@@ -492,36 +613,30 @@ static void service_cache_off(struct work_struct *work)
492 hci_dev_unlock(hdev); 613 hci_dev_unlock(hdev);
493} 614}
494 615
495static void mgmt_init_hdev(struct hci_dev *hdev) 616static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
496{ 617{
497 if (!test_and_set_bit(HCI_MGMT, &hdev->flags)) 618 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
498 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off); 619 return;
620
621 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
499 622
500 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->flags)) 623 /* Non-mgmt controlled devices get this bit set
501 schedule_delayed_work(&hdev->service_cache, 624 * implicitly so that pairing works for them, however
502 msecs_to_jiffies(SERVICE_CACHE_TIMEOUT)); 625 * for mgmt we require user-space to explicitly enable
626 * it
627 */
628 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
503} 629}
504 630
505static int read_controller_info(struct sock *sk, u16 index) 631static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
632 void *data, u16 data_len)
506{ 633{
507 struct mgmt_rp_read_info rp; 634 struct mgmt_rp_read_info rp;
508 struct hci_dev *hdev;
509
510 BT_DBG("sock %p hci%u", sk, index);
511
512 hdev = hci_dev_get(index);
513 if (!hdev)
514 return cmd_status(sk, index, MGMT_OP_READ_INFO,
515 MGMT_STATUS_INVALID_PARAMS);
516 635
517 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags)) 636 BT_DBG("sock %p %s", sk, hdev->name);
518 cancel_delayed_work_sync(&hdev->power_off);
519 637
520 hci_dev_lock(hdev); 638 hci_dev_lock(hdev);
521 639
522 if (test_and_clear_bit(HCI_PI_MGMT_INIT, &hci_pi(sk)->flags))
523 mgmt_init_hdev(hdev);
524
525 memset(&rp, 0, sizeof(rp)); 640 memset(&rp, 0, sizeof(rp));
526 641
527 bacpy(&rp.bdaddr, &hdev->bdaddr); 642 bacpy(&rp.bdaddr, &hdev->bdaddr);
@@ -536,11 +651,12 @@ static int read_controller_info(struct sock *sk, u16 index)
536 memcpy(rp.dev_class, hdev->dev_class, 3); 651 memcpy(rp.dev_class, hdev->dev_class, 3);
537 652
538 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name)); 653 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
654 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
539 655
540 hci_dev_unlock(hdev); 656 hci_dev_unlock(hdev);
541 hci_dev_put(hdev);
542 657
543 return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp)); 658 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
659 sizeof(rp));
544} 660}
545 661
546static void mgmt_pending_free(struct pending_cmd *cmd) 662static void mgmt_pending_free(struct pending_cmd *cmd)
@@ -551,8 +667,8 @@ static void mgmt_pending_free(struct pending_cmd *cmd)
551} 667}
552 668
553static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, 669static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
554 struct hci_dev *hdev, 670 struct hci_dev *hdev, void *data,
555 void *data, u16 len) 671 u16 len)
556{ 672{
557 struct pending_cmd *cmd; 673 struct pending_cmd *cmd;
558 674
@@ -581,8 +697,8 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
581} 697}
582 698
583static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, 699static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
584 void (*cb)(struct pending_cmd *cmd, void *data), 700 void (*cb)(struct pending_cmd *cmd, void *data),
585 void *data) 701 void *data)
586{ 702{
587 struct list_head *p, *n; 703 struct list_head *p, *n;
588 704
@@ -620,40 +736,39 @@ static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
620{ 736{
621 __le32 settings = cpu_to_le32(get_current_settings(hdev)); 737 __le32 settings = cpu_to_le32(get_current_settings(hdev));
622 738
623 return cmd_complete(sk, hdev->id, opcode, &settings, sizeof(settings)); 739 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
740 sizeof(settings));
624} 741}
625 742
626static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len) 743static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
744 u16 len)
627{ 745{
628 struct mgmt_mode *cp; 746 struct mgmt_mode *cp = data;
629 struct hci_dev *hdev;
630 struct pending_cmd *cmd; 747 struct pending_cmd *cmd;
631 int err, up; 748 int err;
632
633 cp = (void *) data;
634 749
635 BT_DBG("request for hci%u", index); 750 BT_DBG("request for %s", hdev->name);
636 751
637 if (len != sizeof(*cp)) 752 hci_dev_lock(hdev);
638 return cmd_status(sk, index, MGMT_OP_SET_POWERED,
639 MGMT_STATUS_INVALID_PARAMS);
640 753
641 hdev = hci_dev_get(index); 754 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
642 if (!hdev) 755 cancel_delayed_work(&hdev->power_off);
643 return cmd_status(sk, index, MGMT_OP_SET_POWERED,
644 MGMT_STATUS_INVALID_PARAMS);
645 756
646 hci_dev_lock(hdev); 757 if (cp->val) {
758 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
759 mgmt_powered(hdev, 1);
760 goto failed;
761 }
762 }
647 763
648 up = test_bit(HCI_UP, &hdev->flags); 764 if (!!cp->val == hdev_is_powered(hdev)) {
649 if ((cp->val && up) || (!cp->val && !up)) {
650 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev); 765 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
651 goto failed; 766 goto failed;
652 } 767 }
653 768
654 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) { 769 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
655 err = cmd_status(sk, index, MGMT_OP_SET_POWERED, 770 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
656 MGMT_STATUS_BUSY); 771 MGMT_STATUS_BUSY);
657 goto failed; 772 goto failed;
658 } 773 }
659 774
@@ -672,49 +787,115 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
672 787
673failed: 788failed:
674 hci_dev_unlock(hdev); 789 hci_dev_unlock(hdev);
675 hci_dev_put(hdev);
676 return err; 790 return err;
677} 791}
678 792
679static int set_discoverable(struct sock *sk, u16 index, unsigned char *data, 793static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
680 u16 len) 794 struct sock *skip_sk)
681{ 795{
682 struct mgmt_cp_set_discoverable *cp; 796 struct sk_buff *skb;
683 struct hci_dev *hdev; 797 struct mgmt_hdr *hdr;
798
799 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC);
800 if (!skb)
801 return -ENOMEM;
802
803 hdr = (void *) skb_put(skb, sizeof(*hdr));
804 hdr->opcode = cpu_to_le16(event);
805 if (hdev)
806 hdr->index = cpu_to_le16(hdev->id);
807 else
808 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
809 hdr->len = cpu_to_le16(data_len);
810
811 if (data)
812 memcpy(skb_put(skb, data_len), data, data_len);
813
814 /* Time stamp */
815 __net_timestamp(skb);
816
817 hci_send_to_control(skb, skip_sk);
818 kfree_skb(skb);
819
820 return 0;
821}
822
823static int new_settings(struct hci_dev *hdev, struct sock *skip)
824{
825 __le32 ev;
826
827 ev = cpu_to_le32(get_current_settings(hdev));
828
829 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
830}
831
832static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
833 u16 len)
834{
835 struct mgmt_cp_set_discoverable *cp = data;
684 struct pending_cmd *cmd; 836 struct pending_cmd *cmd;
837 u16 timeout;
685 u8 scan; 838 u8 scan;
686 int err; 839 int err;
687 840
688 cp = (void *) data; 841 BT_DBG("request for %s", hdev->name);
689
690 BT_DBG("request for hci%u", index);
691 842
692 if (len != sizeof(*cp)) 843 timeout = get_unaligned_le16(&cp->timeout);
693 return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, 844 if (!cp->val && timeout > 0)
694 MGMT_STATUS_INVALID_PARAMS); 845 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
695 846 MGMT_STATUS_INVALID_PARAMS);
696 hdev = hci_dev_get(index);
697 if (!hdev)
698 return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
699 MGMT_STATUS_INVALID_PARAMS);
700 847
701 hci_dev_lock(hdev); 848 hci_dev_lock(hdev);
702 849
703 if (!test_bit(HCI_UP, &hdev->flags)) { 850 if (!hdev_is_powered(hdev) && timeout > 0) {
704 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, 851 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
705 MGMT_STATUS_NOT_POWERED); 852 MGMT_STATUS_NOT_POWERED);
706 goto failed; 853 goto failed;
707 } 854 }
708 855
709 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || 856 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
710 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { 857 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
711 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, 858 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
712 MGMT_STATUS_BUSY); 859 MGMT_STATUS_BUSY);
713 goto failed; 860 goto failed;
714 } 861 }
715 862
716 if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) && 863 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
717 test_bit(HCI_PSCAN, &hdev->flags)) { 864 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
865 MGMT_STATUS_REJECTED);
866 goto failed;
867 }
868
869 if (!hdev_is_powered(hdev)) {
870 bool changed = false;
871
872 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
873 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
874 changed = true;
875 }
876
877 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
878 if (err < 0)
879 goto failed;
880
881 if (changed)
882 err = new_settings(hdev, sk);
883
884 goto failed;
885 }
886
887 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
888 if (hdev->discov_timeout > 0) {
889 cancel_delayed_work(&hdev->discov_off);
890 hdev->discov_timeout = 0;
891 }
892
893 if (cp->val && timeout > 0) {
894 hdev->discov_timeout = timeout;
895 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
896 msecs_to_jiffies(hdev->discov_timeout * 1000));
897 }
898
718 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev); 899 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
719 goto failed; 900 goto failed;
720 } 901 }
@@ -737,53 +918,56 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
737 mgmt_pending_remove(cmd); 918 mgmt_pending_remove(cmd);
738 919
739 if (cp->val) 920 if (cp->val)
740 hdev->discov_timeout = get_unaligned_le16(&cp->timeout); 921 hdev->discov_timeout = timeout;
741 922
742failed: 923failed:
743 hci_dev_unlock(hdev); 924 hci_dev_unlock(hdev);
744 hci_dev_put(hdev);
745
746 return err; 925 return err;
747} 926}
748 927
749static int set_connectable(struct sock *sk, u16 index, unsigned char *data, 928static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
750 u16 len) 929 u16 len)
751{ 930{
752 struct mgmt_mode *cp; 931 struct mgmt_mode *cp = data;
753 struct hci_dev *hdev;
754 struct pending_cmd *cmd; 932 struct pending_cmd *cmd;
755 u8 scan; 933 u8 scan;
756 int err; 934 int err;
757 935
758 cp = (void *) data; 936 BT_DBG("request for %s", hdev->name);
759 937
760 BT_DBG("request for hci%u", index); 938 hci_dev_lock(hdev);
761 939
762 if (len != sizeof(*cp)) 940 if (!hdev_is_powered(hdev)) {
763 return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, 941 bool changed = false;
764 MGMT_STATUS_INVALID_PARAMS);
765 942
766 hdev = hci_dev_get(index); 943 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
767 if (!hdev) 944 changed = true;
768 return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
769 MGMT_STATUS_INVALID_PARAMS);
770 945
771 hci_dev_lock(hdev); 946 if (cp->val) {
947 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
948 } else {
949 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
950 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
951 }
952
953 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
954 if (err < 0)
955 goto failed;
956
957 if (changed)
958 err = new_settings(hdev, sk);
772 959
773 if (!test_bit(HCI_UP, &hdev->flags)) {
774 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
775 MGMT_STATUS_NOT_POWERED);
776 goto failed; 960 goto failed;
777 } 961 }
778 962
779 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || 963 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
780 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { 964 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
781 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, 965 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
782 MGMT_STATUS_BUSY); 966 MGMT_STATUS_BUSY);
783 goto failed; 967 goto failed;
784 } 968 }
785 969
786 if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) { 970 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
787 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev); 971 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
788 goto failed; 972 goto failed;
789 } 973 }
@@ -794,116 +978,282 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
794 goto failed; 978 goto failed;
795 } 979 }
796 980
797 if (cp->val) 981 if (cp->val) {
798 scan = SCAN_PAGE; 982 scan = SCAN_PAGE;
799 else 983 } else {
800 scan = 0; 984 scan = 0;
801 985
986 if (test_bit(HCI_ISCAN, &hdev->flags) &&
987 hdev->discov_timeout > 0)
988 cancel_delayed_work(&hdev->discov_off);
989 }
990
802 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); 991 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
803 if (err < 0) 992 if (err < 0)
804 mgmt_pending_remove(cmd); 993 mgmt_pending_remove(cmd);
805 994
806failed: 995failed:
807 hci_dev_unlock(hdev); 996 hci_dev_unlock(hdev);
808 hci_dev_put(hdev);
809
810 return err; 997 return err;
811} 998}
812 999
813static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, 1000static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
814 u16 data_len, struct sock *skip_sk) 1001 u16 len)
815{ 1002{
816 struct sk_buff *skb; 1003 struct mgmt_mode *cp = data;
817 struct mgmt_hdr *hdr; 1004 int err;
818 1005
819 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC); 1006 BT_DBG("request for %s", hdev->name);
820 if (!skb)
821 return -ENOMEM;
822 1007
823 bt_cb(skb)->channel = HCI_CHANNEL_CONTROL; 1008 hci_dev_lock(hdev);
824 1009
825 hdr = (void *) skb_put(skb, sizeof(*hdr)); 1010 if (cp->val)
826 hdr->opcode = cpu_to_le16(event); 1011 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
827 if (hdev)
828 hdr->index = cpu_to_le16(hdev->id);
829 else 1012 else
830 hdr->index = cpu_to_le16(MGMT_INDEX_NONE); 1013 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
831 hdr->len = cpu_to_le16(data_len);
832 1014
833 if (data) 1015 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
834 memcpy(skb_put(skb, data_len), data, data_len); 1016 if (err < 0)
1017 goto failed;
835 1018
836 hci_send_to_sock(NULL, skb, skip_sk); 1019 err = new_settings(hdev, sk);
837 kfree_skb(skb);
838 1020
839 return 0; 1021failed:
1022 hci_dev_unlock(hdev);
1023 return err;
840} 1024}
841 1025
842static int set_pairable(struct sock *sk, u16 index, unsigned char *data, 1026static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
843 u16 len) 1027 u16 len)
844{ 1028{
845 struct mgmt_mode *cp; 1029 struct mgmt_mode *cp = data;
846 struct hci_dev *hdev; 1030 struct pending_cmd *cmd;
847 __le32 ev; 1031 u8 val;
848 int err; 1032 int err;
849 1033
850 cp = (void *) data; 1034 BT_DBG("request for %s", hdev->name);
851 1035
852 BT_DBG("request for hci%u", index); 1036 hci_dev_lock(hdev);
853 1037
854 if (len != sizeof(*cp)) 1038 if (!hdev_is_powered(hdev)) {
855 return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, 1039 bool changed = false;
856 MGMT_STATUS_INVALID_PARAMS);
857 1040
858 hdev = hci_dev_get(index); 1041 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
859 if (!hdev) 1042 &hdev->dev_flags)) {
860 return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, 1043 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
861 MGMT_STATUS_INVALID_PARAMS); 1044 changed = true;
1045 }
862 1046
863 hci_dev_lock(hdev); 1047 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1048 if (err < 0)
1049 goto failed;
864 1050
865 if (cp->val) 1051 if (changed)
866 set_bit(HCI_PAIRABLE, &hdev->flags); 1052 err = new_settings(hdev, sk);
867 else
868 clear_bit(HCI_PAIRABLE, &hdev->flags);
869 1053
870 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
871 if (err < 0)
872 goto failed; 1054 goto failed;
1055 }
873 1056
874 ev = cpu_to_le32(get_current_settings(hdev)); 1057 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1058 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1059 MGMT_STATUS_BUSY);
1060 goto failed;
1061 }
1062
1063 val = !!cp->val;
1064
1065 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1066 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1067 goto failed;
1068 }
1069
1070 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1071 if (!cmd) {
1072 err = -ENOMEM;
1073 goto failed;
1074 }
875 1075
876 err = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), sk); 1076 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1077 if (err < 0) {
1078 mgmt_pending_remove(cmd);
1079 goto failed;
1080 }
877 1081
878failed: 1082failed:
879 hci_dev_unlock(hdev); 1083 hci_dev_unlock(hdev);
880 hci_dev_put(hdev); 1084 return err;
1085}
1086
1087static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1088{
1089 struct mgmt_mode *cp = data;
1090 struct pending_cmd *cmd;
1091 u8 val;
1092 int err;
1093
1094 BT_DBG("request for %s", hdev->name);
1095
1096 hci_dev_lock(hdev);
1097
1098 if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
1099 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1100 MGMT_STATUS_NOT_SUPPORTED);
1101 goto failed;
1102 }
1103
1104 val = !!cp->val;
1105
1106 if (!hdev_is_powered(hdev)) {
1107 bool changed = false;
1108
1109 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1110 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1111 changed = true;
1112 }
1113
1114 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1115 if (err < 0)
1116 goto failed;
1117
1118 if (changed)
1119 err = new_settings(hdev, sk);
881 1120
1121 goto failed;
1122 }
1123
1124 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1125 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1126 MGMT_STATUS_BUSY);
1127 goto failed;
1128 }
1129
1130 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1131 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1132 goto failed;
1133 }
1134
1135 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1136 if (!cmd) {
1137 err = -ENOMEM;
1138 goto failed;
1139 }
1140
1141 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1142 if (err < 0) {
1143 mgmt_pending_remove(cmd);
1144 goto failed;
1145 }
1146
1147failed:
1148 hci_dev_unlock(hdev);
882 return err; 1149 return err;
883} 1150}
884 1151
885static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) 1152static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
886{ 1153{
887 struct mgmt_cp_add_uuid *cp; 1154 struct mgmt_mode *cp = data;
888 struct hci_dev *hdev; 1155
889 struct bt_uuid *uuid; 1156 BT_DBG("request for %s", hdev->name);
1157
1158 if (!enable_hs)
1159 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1160 MGMT_STATUS_NOT_SUPPORTED);
1161
1162 if (cp->val)
1163 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1164 else
1165 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1166
1167 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1168}
1169
1170static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1171{
1172 struct mgmt_mode *cp = data;
1173 struct hci_cp_write_le_host_supported hci_cp;
1174 struct pending_cmd *cmd;
890 int err; 1175 int err;
1176 u8 val, enabled;
1177
1178 BT_DBG("request for %s", hdev->name);
891 1179
892 cp = (void *) data; 1180 hci_dev_lock(hdev);
893 1181
894 BT_DBG("request for hci%u", index); 1182 if (!enable_le || !(hdev->features[4] & LMP_LE)) {
1183 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1184 MGMT_STATUS_NOT_SUPPORTED);
1185 goto unlock;
1186 }
895 1187
896 if (len != sizeof(*cp)) 1188 val = !!cp->val;
897 return cmd_status(sk, index, MGMT_OP_ADD_UUID, 1189 enabled = !!(hdev->host_features[0] & LMP_HOST_LE);
898 MGMT_STATUS_INVALID_PARAMS); 1190
1191 if (!hdev_is_powered(hdev) || val == enabled) {
1192 bool changed = false;
1193
1194 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1195 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1196 changed = true;
1197 }
899 1198
900 hdev = hci_dev_get(index); 1199 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
901 if (!hdev) 1200 if (err < 0)
902 return cmd_status(sk, index, MGMT_OP_ADD_UUID, 1201 goto unlock;
903 MGMT_STATUS_INVALID_PARAMS); 1202
1203 if (changed)
1204 err = new_settings(hdev, sk);
1205
1206 goto unlock;
1207 }
1208
1209 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
1210 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1211 MGMT_STATUS_BUSY);
1212 goto unlock;
1213 }
1214
1215 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1216 if (!cmd) {
1217 err = -ENOMEM;
1218 goto unlock;
1219 }
1220
1221 memset(&hci_cp, 0, sizeof(hci_cp));
1222
1223 if (val) {
1224 hci_cp.le = val;
1225 hci_cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
1226 }
1227
1228 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1229 &hci_cp);
1230 if (err < 0) {
1231 mgmt_pending_remove(cmd);
1232 goto unlock;
1233 }
1234
1235unlock:
1236 hci_dev_unlock(hdev);
1237 return err;
1238}
1239
1240static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1241{
1242 struct mgmt_cp_add_uuid *cp = data;
1243 struct pending_cmd *cmd;
1244 struct bt_uuid *uuid;
1245 int err;
1246
1247 BT_DBG("request for %s", hdev->name);
904 1248
905 hci_dev_lock(hdev); 1249 hci_dev_lock(hdev);
906 1250
1251 if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1252 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1253 MGMT_STATUS_BUSY);
1254 goto failed;
1255 }
1256
907 uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC); 1257 uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
908 if (!uuid) { 1258 if (!uuid) {
909 err = -ENOMEM; 1259 err = -ENOMEM;
@@ -923,41 +1273,65 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
923 if (err < 0) 1273 if (err < 0)
924 goto failed; 1274 goto failed;
925 1275
926 err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0); 1276 if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1277 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1278 hdev->dev_class, 3);
1279 goto failed;
1280 }
1281
1282 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1283 if (!cmd) {
1284 err = -ENOMEM;
1285 goto failed;
1286 }
927 1287
928failed: 1288failed:
929 hci_dev_unlock(hdev); 1289 hci_dev_unlock(hdev);
930 hci_dev_put(hdev);
931
932 return err; 1290 return err;
933} 1291}
934 1292
935static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) 1293static bool enable_service_cache(struct hci_dev *hdev)
936{ 1294{
937 struct list_head *p, *n; 1295 if (!hdev_is_powered(hdev))
938 struct mgmt_cp_remove_uuid *cp; 1296 return false;
939 struct hci_dev *hdev;
940 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
941 int err, found;
942 1297
943 cp = (void *) data; 1298 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1299 schedule_delayed_work(&hdev->service_cache, CACHE_TIMEOUT);
1300 return true;
1301 }
944 1302
945 BT_DBG("request for hci%u", index); 1303 return false;
1304}
946 1305
947 if (len != sizeof(*cp)) 1306static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
948 return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, 1307 u16 len)
949 MGMT_STATUS_INVALID_PARAMS); 1308{
1309 struct mgmt_cp_remove_uuid *cp = data;
1310 struct pending_cmd *cmd;
1311 struct list_head *p, *n;
1312 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1313 int err, found;
950 1314
951 hdev = hci_dev_get(index); 1315 BT_DBG("request for %s", hdev->name);
952 if (!hdev)
953 return cmd_status(sk, index, MGMT_OP_REMOVE_UUID,
954 MGMT_STATUS_INVALID_PARAMS);
955 1316
956 hci_dev_lock(hdev); 1317 hci_dev_lock(hdev);
957 1318
1319 if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1320 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1321 MGMT_STATUS_BUSY);
1322 goto unlock;
1323 }
1324
958 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) { 1325 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
959 err = hci_uuids_clear(hdev); 1326 err = hci_uuids_clear(hdev);
960 goto unlock; 1327
1328 if (enable_service_cache(hdev)) {
1329 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1330 0, hdev->dev_class, 3);
1331 goto unlock;
1332 }
1333
1334 goto update_class;
961 } 1335 }
962 1336
963 found = 0; 1337 found = 0;
@@ -973,11 +1347,12 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
973 } 1347 }
974 1348
975 if (found == 0) { 1349 if (found == 0) {
976 err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID, 1350 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
977 MGMT_STATUS_INVALID_PARAMS); 1351 MGMT_STATUS_INVALID_PARAMS);
978 goto unlock; 1352 goto unlock;
979 } 1353 }
980 1354
1355update_class:
981 err = update_class(hdev); 1356 err = update_class(hdev);
982 if (err < 0) 1357 if (err < 0)
983 goto unlock; 1358 goto unlock;
@@ -986,41 +1361,50 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
986 if (err < 0) 1361 if (err < 0)
987 goto unlock; 1362 goto unlock;
988 1363
989 err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0); 1364 if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1365 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1366 hdev->dev_class, 3);
1367 goto unlock;
1368 }
1369
1370 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1371 if (!cmd) {
1372 err = -ENOMEM;
1373 goto unlock;
1374 }
990 1375
991unlock: 1376unlock:
992 hci_dev_unlock(hdev); 1377 hci_dev_unlock(hdev);
993 hci_dev_put(hdev);
994
995 return err; 1378 return err;
996} 1379}
997 1380
998static int set_dev_class(struct sock *sk, u16 index, unsigned char *data, 1381static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
999 u16 len) 1382 u16 len)
1000{ 1383{
1001 struct hci_dev *hdev; 1384 struct mgmt_cp_set_dev_class *cp = data;
1002 struct mgmt_cp_set_dev_class *cp; 1385 struct pending_cmd *cmd;
1003 int err; 1386 int err;
1004 1387
1005 cp = (void *) data; 1388 BT_DBG("request for %s", hdev->name);
1006
1007 BT_DBG("request for hci%u", index);
1008
1009 if (len != sizeof(*cp))
1010 return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS,
1011 MGMT_STATUS_INVALID_PARAMS);
1012
1013 hdev = hci_dev_get(index);
1014 if (!hdev)
1015 return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS,
1016 MGMT_STATUS_INVALID_PARAMS);
1017 1389
1018 hci_dev_lock(hdev); 1390 hci_dev_lock(hdev);
1019 1391
1392 if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1393 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1394 MGMT_STATUS_BUSY);
1395 goto unlock;
1396 }
1397
1020 hdev->major_class = cp->major; 1398 hdev->major_class = cp->major;
1021 hdev->minor_class = cp->minor; 1399 hdev->minor_class = cp->minor;
1022 1400
1023 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags)) { 1401 if (!hdev_is_powered(hdev)) {
1402 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1403 hdev->dev_class, 3);
1404 goto unlock;
1405 }
1406
1407 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1024 hci_dev_unlock(hdev); 1408 hci_dev_unlock(hdev);
1025 cancel_delayed_work_sync(&hdev->service_cache); 1409 cancel_delayed_work_sync(&hdev->service_cache);
1026 hci_dev_lock(hdev); 1410 hci_dev_lock(hdev);
@@ -1028,30 +1412,33 @@ static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
1028 } 1412 }
1029 1413
1030 err = update_class(hdev); 1414 err = update_class(hdev);
1415 if (err < 0)
1416 goto unlock;
1031 1417
1032 if (err == 0) 1418 if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1033 err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0); 1419 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1420 hdev->dev_class, 3);
1421 goto unlock;
1422 }
1034 1423
1035 hci_dev_unlock(hdev); 1424 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1036 hci_dev_put(hdev); 1425 if (!cmd) {
1426 err = -ENOMEM;
1427 goto unlock;
1428 }
1037 1429
1430unlock:
1431 hci_dev_unlock(hdev);
1038 return err; 1432 return err;
1039} 1433}
1040 1434
1041static int load_link_keys(struct sock *sk, u16 index, unsigned char *data, 1435static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1042 u16 len) 1436 u16 len)
1043{ 1437{
1044 struct hci_dev *hdev; 1438 struct mgmt_cp_load_link_keys *cp = data;
1045 struct mgmt_cp_load_link_keys *cp;
1046 u16 key_count, expected_len; 1439 u16 key_count, expected_len;
1047 int i; 1440 int i;
1048 1441
1049 cp = (void *) data;
1050
1051 if (len < sizeof(*cp))
1052 return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS,
1053 MGMT_STATUS_INVALID_PARAMS);
1054
1055 key_count = get_unaligned_le16(&cp->key_count); 1442 key_count = get_unaligned_le16(&cp->key_count);
1056 1443
1057 expected_len = sizeof(*cp) + key_count * 1444 expected_len = sizeof(*cp) + key_count *
@@ -1059,92 +1446,103 @@ static int load_link_keys(struct sock *sk, u16 index, unsigned char *data,
1059 if (expected_len != len) { 1446 if (expected_len != len) {
1060 BT_ERR("load_link_keys: expected %u bytes, got %u bytes", 1447 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1061 len, expected_len); 1448 len, expected_len);
1062 return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS, 1449 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1063 MGMT_STATUS_INVALID_PARAMS); 1450 MGMT_STATUS_INVALID_PARAMS);
1064 } 1451 }
1065 1452
1066 hdev = hci_dev_get(index); 1453 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
1067 if (!hdev)
1068 return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS,
1069 MGMT_STATUS_INVALID_PARAMS);
1070
1071 BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
1072 key_count); 1454 key_count);
1073 1455
1074 hci_dev_lock(hdev); 1456 hci_dev_lock(hdev);
1075 1457
1076 hci_link_keys_clear(hdev); 1458 hci_link_keys_clear(hdev);
1077 1459
1078 set_bit(HCI_LINK_KEYS, &hdev->flags); 1460 set_bit(HCI_LINK_KEYS, &hdev->dev_flags);
1079 1461
1080 if (cp->debug_keys) 1462 if (cp->debug_keys)
1081 set_bit(HCI_DEBUG_KEYS, &hdev->flags); 1463 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1082 else 1464 else
1083 clear_bit(HCI_DEBUG_KEYS, &hdev->flags); 1465 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1084 1466
1085 for (i = 0; i < key_count; i++) { 1467 for (i = 0; i < key_count; i++) {
1086 struct mgmt_link_key_info *key = &cp->keys[i]; 1468 struct mgmt_link_key_info *key = &cp->keys[i];
1087 1469
1088 hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type, 1470 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1089 key->pin_len); 1471 key->type, key->pin_len);
1090 } 1472 }
1091 1473
1092 cmd_complete(sk, index, MGMT_OP_LOAD_LINK_KEYS, NULL, 0); 1474 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1093 1475
1094 hci_dev_unlock(hdev); 1476 hci_dev_unlock(hdev);
1095 hci_dev_put(hdev);
1096 1477
1097 return 0; 1478 return 0;
1098} 1479}
1099 1480
1100static int remove_keys(struct sock *sk, u16 index, unsigned char *data, 1481static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1101 u16 len) 1482 u8 addr_type, struct sock *skip_sk)
1102{ 1483{
1103 struct hci_dev *hdev; 1484 struct mgmt_ev_device_unpaired ev;
1104 struct mgmt_cp_remove_keys *cp; 1485
1105 struct mgmt_rp_remove_keys rp; 1486 bacpy(&ev.addr.bdaddr, bdaddr);
1487 ev.addr.type = addr_type;
1488
1489 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
1490 skip_sk);
1491}
1492
1493static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1494 u16 len)
1495{
1496 struct mgmt_cp_unpair_device *cp = data;
1497 struct mgmt_rp_unpair_device rp;
1106 struct hci_cp_disconnect dc; 1498 struct hci_cp_disconnect dc;
1107 struct pending_cmd *cmd; 1499 struct pending_cmd *cmd;
1108 struct hci_conn *conn; 1500 struct hci_conn *conn;
1109 int err; 1501 int err;
1110 1502
1111 cp = (void *) data;
1112
1113 if (len != sizeof(*cp))
1114 return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS,
1115 MGMT_STATUS_INVALID_PARAMS);
1116
1117 hdev = hci_dev_get(index);
1118 if (!hdev)
1119 return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS,
1120 MGMT_STATUS_INVALID_PARAMS);
1121
1122 hci_dev_lock(hdev); 1503 hci_dev_lock(hdev);
1123 1504
1124 memset(&rp, 0, sizeof(rp)); 1505 memset(&rp, 0, sizeof(rp));
1125 bacpy(&rp.bdaddr, &cp->bdaddr); 1506 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1126 rp.status = MGMT_STATUS_FAILED; 1507 rp.addr.type = cp->addr.type;
1127 1508
1128 err = hci_remove_link_key(hdev, &cp->bdaddr); 1509 if (!hdev_is_powered(hdev)) {
1129 if (err < 0) { 1510 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1130 rp.status = MGMT_STATUS_NOT_PAIRED; 1511 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
1131 goto unlock; 1512 goto unlock;
1132 } 1513 }
1133 1514
1134 if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect) { 1515 if (cp->addr.type == MGMT_ADDR_BREDR)
1135 err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp, 1516 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1136 sizeof(rp)); 1517 else
1518 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
1519
1520 if (err < 0) {
1521 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1522 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
1137 goto unlock; 1523 goto unlock;
1138 } 1524 }
1139 1525
1140 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1526 if (cp->disconnect) {
1527 if (cp->addr.type == MGMT_ADDR_BREDR)
1528 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1529 &cp->addr.bdaddr);
1530 else
1531 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
1532 &cp->addr.bdaddr);
1533 } else {
1534 conn = NULL;
1535 }
1536
1141 if (!conn) { 1537 if (!conn) {
1142 err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp, 1538 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1143 sizeof(rp)); 1539 &rp, sizeof(rp));
1540 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
1144 goto unlock; 1541 goto unlock;
1145 } 1542 }
1146 1543
1147 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_KEYS, hdev, cp, sizeof(*cp)); 1544 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1545 sizeof(*cp));
1148 if (!cmd) { 1546 if (!cmd) {
1149 err = -ENOMEM; 1547 err = -ENOMEM;
1150 goto unlock; 1548 goto unlock;
@@ -1157,19 +1555,14 @@ static int remove_keys(struct sock *sk, u16 index, unsigned char *data,
1157 mgmt_pending_remove(cmd); 1555 mgmt_pending_remove(cmd);
1158 1556
1159unlock: 1557unlock:
1160 if (err < 0)
1161 err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp,
1162 sizeof(rp));
1163 hci_dev_unlock(hdev); 1558 hci_dev_unlock(hdev);
1164 hci_dev_put(hdev);
1165
1166 return err; 1559 return err;
1167} 1560}
1168 1561
1169static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len) 1562static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1563 u16 len)
1170{ 1564{
1171 struct hci_dev *hdev; 1565 struct mgmt_cp_disconnect *cp = data;
1172 struct mgmt_cp_disconnect *cp;
1173 struct hci_cp_disconnect dc; 1566 struct hci_cp_disconnect dc;
1174 struct pending_cmd *cmd; 1567 struct pending_cmd *cmd;
1175 struct hci_conn *conn; 1568 struct hci_conn *conn;
@@ -1177,38 +1570,28 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
1177 1570
1178 BT_DBG(""); 1571 BT_DBG("");
1179 1572
1180 cp = (void *) data;
1181
1182 if (len != sizeof(*cp))
1183 return cmd_status(sk, index, MGMT_OP_DISCONNECT,
1184 MGMT_STATUS_INVALID_PARAMS);
1185
1186 hdev = hci_dev_get(index);
1187 if (!hdev)
1188 return cmd_status(sk, index, MGMT_OP_DISCONNECT,
1189 MGMT_STATUS_INVALID_PARAMS);
1190
1191 hci_dev_lock(hdev); 1573 hci_dev_lock(hdev);
1192 1574
1193 if (!test_bit(HCI_UP, &hdev->flags)) { 1575 if (!test_bit(HCI_UP, &hdev->flags)) {
1194 err = cmd_status(sk, index, MGMT_OP_DISCONNECT, 1576 err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
1195 MGMT_STATUS_NOT_POWERED); 1577 MGMT_STATUS_NOT_POWERED);
1196 goto failed; 1578 goto failed;
1197 } 1579 }
1198 1580
1199 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) { 1581 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1200 err = cmd_status(sk, index, MGMT_OP_DISCONNECT, 1582 err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
1201 MGMT_STATUS_BUSY); 1583 MGMT_STATUS_BUSY);
1202 goto failed; 1584 goto failed;
1203 } 1585 }
1204 1586
1205 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1587 if (cp->addr.type == MGMT_ADDR_BREDR)
1206 if (!conn) 1588 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
1207 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr); 1589 else
1590 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
1208 1591
1209 if (!conn) { 1592 if (!conn) {
1210 err = cmd_status(sk, index, MGMT_OP_DISCONNECT, 1593 err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
1211 MGMT_STATUS_NOT_CONNECTED); 1594 MGMT_STATUS_NOT_CONNECTED);
1212 goto failed; 1595 goto failed;
1213 } 1596 }
1214 1597
@@ -1227,8 +1610,6 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
1227 1610
1228failed: 1611failed:
1229 hci_dev_unlock(hdev); 1612 hci_dev_unlock(hdev);
1230 hci_dev_put(hdev);
1231
1232 return err; 1613 return err;
1233} 1614}
1234 1615
@@ -1251,41 +1632,42 @@ static u8 link_to_mgmt(u8 link_type, u8 addr_type)
1251 } 1632 }
1252} 1633}
1253 1634
1254static int get_connections(struct sock *sk, u16 index) 1635static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1636 u16 data_len)
1255{ 1637{
1256 struct mgmt_rp_get_connections *rp; 1638 struct mgmt_rp_get_connections *rp;
1257 struct hci_dev *hdev;
1258 struct hci_conn *c; 1639 struct hci_conn *c;
1259 struct list_head *p;
1260 size_t rp_len; 1640 size_t rp_len;
1261 u16 count; 1641 int err;
1262 int i, err; 1642 u16 i;
1263 1643
1264 BT_DBG(""); 1644 BT_DBG("");
1265 1645
1266 hdev = hci_dev_get(index);
1267 if (!hdev)
1268 return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS,
1269 MGMT_STATUS_INVALID_PARAMS);
1270
1271 hci_dev_lock(hdev); 1646 hci_dev_lock(hdev);
1272 1647
1273 count = 0; 1648 if (!hdev_is_powered(hdev)) {
1274 list_for_each(p, &hdev->conn_hash.list) { 1649 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
1275 count++; 1650 MGMT_STATUS_NOT_POWERED);
1651 goto unlock;
1276 } 1652 }
1277 1653
1278 rp_len = sizeof(*rp) + (count * sizeof(struct mgmt_addr_info)); 1654 i = 0;
1655 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1656 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1657 i++;
1658 }
1659
1660 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1279 rp = kmalloc(rp_len, GFP_ATOMIC); 1661 rp = kmalloc(rp_len, GFP_ATOMIC);
1280 if (!rp) { 1662 if (!rp) {
1281 err = -ENOMEM; 1663 err = -ENOMEM;
1282 goto unlock; 1664 goto unlock;
1283 } 1665 }
1284 1666
1285 put_unaligned_le16(count, &rp->conn_count);
1286
1287 i = 0; 1667 i = 0;
1288 list_for_each_entry(c, &hdev->conn_hash.list, list) { 1668 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1669 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1670 continue;
1289 bacpy(&rp->addr[i].bdaddr, &c->dst); 1671 bacpy(&rp->addr[i].bdaddr, &c->dst);
1290 rp->addr[i].type = link_to_mgmt(c->type, c->dst_type); 1672 rp->addr[i].type = link_to_mgmt(c->type, c->dst_type);
1291 if (rp->addr[i].type == MGMT_ADDR_INVALID) 1673 if (rp->addr[i].type == MGMT_ADDR_INVALID)
@@ -1293,85 +1675,77 @@ static int get_connections(struct sock *sk, u16 index)
1293 i++; 1675 i++;
1294 } 1676 }
1295 1677
1678 put_unaligned_le16(i, &rp->conn_count);
1679
1296 /* Recalculate length in case of filtered SCO connections, etc */ 1680 /* Recalculate length in case of filtered SCO connections, etc */
1297 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info)); 1681 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1298 1682
1299 err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len); 1683 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
1684 rp_len);
1300 1685
1301unlock:
1302 kfree(rp); 1686 kfree(rp);
1687
1688unlock:
1303 hci_dev_unlock(hdev); 1689 hci_dev_unlock(hdev);
1304 hci_dev_put(hdev);
1305 return err; 1690 return err;
1306} 1691}
1307 1692
1308static int send_pin_code_neg_reply(struct sock *sk, u16 index, 1693static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
1309 struct hci_dev *hdev, struct mgmt_cp_pin_code_neg_reply *cp) 1694 struct mgmt_cp_pin_code_neg_reply *cp)
1310{ 1695{
1311 struct pending_cmd *cmd; 1696 struct pending_cmd *cmd;
1312 int err; 1697 int err;
1313 1698
1314 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp, 1699 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
1315 sizeof(*cp)); 1700 sizeof(*cp));
1316 if (!cmd) 1701 if (!cmd)
1317 return -ENOMEM; 1702 return -ENOMEM;
1318 1703
1319 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr), 1704 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
1320 &cp->bdaddr); 1705 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
1321 if (err < 0) 1706 if (err < 0)
1322 mgmt_pending_remove(cmd); 1707 mgmt_pending_remove(cmd);
1323 1708
1324 return err; 1709 return err;
1325} 1710}
1326 1711
1327static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data, 1712static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
1328 u16 len) 1713 u16 len)
1329{ 1714{
1330 struct hci_dev *hdev;
1331 struct hci_conn *conn; 1715 struct hci_conn *conn;
1332 struct mgmt_cp_pin_code_reply *cp; 1716 struct mgmt_cp_pin_code_reply *cp = data;
1333 struct mgmt_cp_pin_code_neg_reply ncp;
1334 struct hci_cp_pin_code_reply reply; 1717 struct hci_cp_pin_code_reply reply;
1335 struct pending_cmd *cmd; 1718 struct pending_cmd *cmd;
1336 int err; 1719 int err;
1337 1720
1338 BT_DBG(""); 1721 BT_DBG("");
1339 1722
1340 cp = (void *) data;
1341
1342 if (len != sizeof(*cp))
1343 return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
1344 MGMT_STATUS_INVALID_PARAMS);
1345
1346 hdev = hci_dev_get(index);
1347 if (!hdev)
1348 return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
1349 MGMT_STATUS_INVALID_PARAMS);
1350
1351 hci_dev_lock(hdev); 1723 hci_dev_lock(hdev);
1352 1724
1353 if (!test_bit(HCI_UP, &hdev->flags)) { 1725 if (!hdev_is_powered(hdev)) {
1354 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, 1726 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1355 MGMT_STATUS_NOT_POWERED); 1727 MGMT_STATUS_NOT_POWERED);
1356 goto failed; 1728 goto failed;
1357 } 1729 }
1358 1730
1359 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1731 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
1360 if (!conn) { 1732 if (!conn) {
1361 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, 1733 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1362 MGMT_STATUS_NOT_CONNECTED); 1734 MGMT_STATUS_NOT_CONNECTED);
1363 goto failed; 1735 goto failed;
1364 } 1736 }
1365 1737
1366 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) { 1738 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
1367 bacpy(&ncp.bdaddr, &cp->bdaddr); 1739 struct mgmt_cp_pin_code_neg_reply ncp;
1740
1741 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
1368 1742
1369 BT_ERR("PIN code is not 16 bytes long"); 1743 BT_ERR("PIN code is not 16 bytes long");
1370 1744
1371 err = send_pin_code_neg_reply(sk, index, hdev, &ncp); 1745 err = send_pin_code_neg_reply(sk, hdev, &ncp);
1372 if (err >= 0) 1746 if (err >= 0)
1373 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, 1747 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1374 MGMT_STATUS_INVALID_PARAMS); 1748 MGMT_STATUS_INVALID_PARAMS);
1375 1749
1376 goto failed; 1750 goto failed;
1377 } 1751 }
@@ -1382,7 +1756,7 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
1382 goto failed; 1756 goto failed;
1383 } 1757 }
1384 1758
1385 bacpy(&reply.bdaddr, &cp->bdaddr); 1759 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
1386 reply.pin_len = cp->pin_len; 1760 reply.pin_len = cp->pin_len;
1387 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code)); 1761 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
1388 1762
@@ -1392,67 +1766,39 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
1392 1766
1393failed: 1767failed:
1394 hci_dev_unlock(hdev); 1768 hci_dev_unlock(hdev);
1395 hci_dev_put(hdev);
1396
1397 return err; 1769 return err;
1398} 1770}
1399 1771
1400static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data, 1772static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
1401 u16 len) 1773 void *data, u16 len)
1402{ 1774{
1403 struct hci_dev *hdev; 1775 struct mgmt_cp_pin_code_neg_reply *cp = data;
1404 struct mgmt_cp_pin_code_neg_reply *cp;
1405 int err; 1776 int err;
1406 1777
1407 BT_DBG(""); 1778 BT_DBG("");
1408 1779
1409 cp = (void *) data;
1410
1411 if (len != sizeof(*cp))
1412 return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
1413 MGMT_STATUS_INVALID_PARAMS);
1414
1415 hdev = hci_dev_get(index);
1416 if (!hdev)
1417 return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
1418 MGMT_STATUS_INVALID_PARAMS);
1419
1420 hci_dev_lock(hdev); 1780 hci_dev_lock(hdev);
1421 1781
1422 if (!test_bit(HCI_UP, &hdev->flags)) { 1782 if (!hdev_is_powered(hdev)) {
1423 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, 1783 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
1424 MGMT_STATUS_NOT_POWERED); 1784 MGMT_STATUS_NOT_POWERED);
1425 goto failed; 1785 goto failed;
1426 } 1786 }
1427 1787
1428 err = send_pin_code_neg_reply(sk, index, hdev, cp); 1788 err = send_pin_code_neg_reply(sk, hdev, cp);
1429 1789
1430failed: 1790failed:
1431 hci_dev_unlock(hdev); 1791 hci_dev_unlock(hdev);
1432 hci_dev_put(hdev);
1433
1434 return err; 1792 return err;
1435} 1793}
1436 1794
1437static int set_io_capability(struct sock *sk, u16 index, unsigned char *data, 1795static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
1438 u16 len) 1796 u16 len)
1439{ 1797{
1440 struct hci_dev *hdev; 1798 struct mgmt_cp_set_io_capability *cp = data;
1441 struct mgmt_cp_set_io_capability *cp;
1442 1799
1443 BT_DBG(""); 1800 BT_DBG("");
1444 1801
1445 cp = (void *) data;
1446
1447 if (len != sizeof(*cp))
1448 return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY,
1449 MGMT_STATUS_INVALID_PARAMS);
1450
1451 hdev = hci_dev_get(index);
1452 if (!hdev)
1453 return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY,
1454 MGMT_STATUS_INVALID_PARAMS);
1455
1456 hci_dev_lock(hdev); 1802 hci_dev_lock(hdev);
1457 1803
1458 hdev->io_capability = cp->io_capability; 1804 hdev->io_capability = cp->io_capability;
@@ -1461,9 +1807,9 @@ static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
1461 hdev->io_capability); 1807 hdev->io_capability);
1462 1808
1463 hci_dev_unlock(hdev); 1809 hci_dev_unlock(hdev);
1464 hci_dev_put(hdev);
1465 1810
1466 return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0); 1811 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
1812 0);
1467} 1813}
1468 1814
1469static inline struct pending_cmd *find_pairing(struct hci_conn *conn) 1815static inline struct pending_cmd *find_pairing(struct hci_conn *conn)
@@ -1491,9 +1837,9 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
1491 1837
1492 bacpy(&rp.addr.bdaddr, &conn->dst); 1838 bacpy(&rp.addr.bdaddr, &conn->dst);
1493 rp.addr.type = link_to_mgmt(conn->type, conn->dst_type); 1839 rp.addr.type = link_to_mgmt(conn->type, conn->dst_type);
1494 rp.status = status;
1495 1840
1496 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, &rp, sizeof(rp)); 1841 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
1842 &rp, sizeof(rp));
1497 1843
1498 /* So we don't get further callbacks for this connection */ 1844 /* So we don't get further callbacks for this connection */
1499 conn->connect_cfm_cb = NULL; 1845 conn->connect_cfm_cb = NULL;
@@ -1515,13 +1861,13 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status)
1515 if (!cmd) 1861 if (!cmd)
1516 BT_DBG("Unable to find a pending command"); 1862 BT_DBG("Unable to find a pending command");
1517 else 1863 else
1518 pairing_complete(cmd, status); 1864 pairing_complete(cmd, mgmt_status(status));
1519} 1865}
1520 1866
1521static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len) 1867static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1868 u16 len)
1522{ 1869{
1523 struct hci_dev *hdev; 1870 struct mgmt_cp_pair_device *cp = data;
1524 struct mgmt_cp_pair_device *cp;
1525 struct mgmt_rp_pair_device rp; 1871 struct mgmt_rp_pair_device rp;
1526 struct pending_cmd *cmd; 1872 struct pending_cmd *cmd;
1527 u8 sec_level, auth_type; 1873 u8 sec_level, auth_type;
@@ -1530,19 +1876,14 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1530 1876
1531 BT_DBG(""); 1877 BT_DBG("");
1532 1878
1533 cp = (void *) data;
1534
1535 if (len != sizeof(*cp))
1536 return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE,
1537 MGMT_STATUS_INVALID_PARAMS);
1538
1539 hdev = hci_dev_get(index);
1540 if (!hdev)
1541 return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE,
1542 MGMT_STATUS_INVALID_PARAMS);
1543
1544 hci_dev_lock(hdev); 1879 hci_dev_lock(hdev);
1545 1880
1881 if (!hdev_is_powered(hdev)) {
1882 err = cmd_status(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
1883 MGMT_STATUS_NOT_POWERED);
1884 goto unlock;
1885 }
1886
1546 sec_level = BT_SECURITY_MEDIUM; 1887 sec_level = BT_SECURITY_MEDIUM;
1547 if (cp->io_cap == 0x03) 1888 if (cp->io_cap == 0x03)
1548 auth_type = HCI_AT_DEDICATED_BONDING; 1889 auth_type = HCI_AT_DEDICATED_BONDING;
@@ -1551,27 +1892,26 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1551 1892
1552 if (cp->addr.type == MGMT_ADDR_BREDR) 1893 if (cp->addr.type == MGMT_ADDR_BREDR)
1553 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr, sec_level, 1894 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr, sec_level,
1554 auth_type); 1895 auth_type);
1555 else 1896 else
1556 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, sec_level, 1897 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, sec_level,
1557 auth_type); 1898 auth_type);
1558 1899
1559 memset(&rp, 0, sizeof(rp)); 1900 memset(&rp, 0, sizeof(rp));
1560 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); 1901 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1561 rp.addr.type = cp->addr.type; 1902 rp.addr.type = cp->addr.type;
1562 1903
1563 if (IS_ERR(conn)) { 1904 if (IS_ERR(conn)) {
1564 rp.status = -PTR_ERR(conn); 1905 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
1565 err = cmd_complete(sk, index, MGMT_OP_PAIR_DEVICE, 1906 MGMT_STATUS_CONNECT_FAILED, &rp,
1566 &rp, sizeof(rp)); 1907 sizeof(rp));
1567 goto unlock; 1908 goto unlock;
1568 } 1909 }
1569 1910
1570 if (conn->connect_cfm_cb) { 1911 if (conn->connect_cfm_cb) {
1571 hci_conn_put(conn); 1912 hci_conn_put(conn);
1572 rp.status = EBUSY; 1913 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
1573 err = cmd_complete(sk, index, MGMT_OP_PAIR_DEVICE, 1914 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1574 &rp, sizeof(rp));
1575 goto unlock; 1915 goto unlock;
1576 } 1916 }
1577 1917
@@ -1599,58 +1939,88 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1599 1939
1600unlock: 1940unlock:
1601 hci_dev_unlock(hdev); 1941 hci_dev_unlock(hdev);
1602 hci_dev_put(hdev);
1603
1604 return err; 1942 return err;
1605} 1943}
1606 1944
1607static int user_pairing_resp(struct sock *sk, u16 index, bdaddr_t *bdaddr, 1945static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1608 u16 mgmt_op, u16 hci_op, __le32 passkey) 1946 u16 len)
1609{ 1947{
1948 struct mgmt_addr_info *addr = data;
1610 struct pending_cmd *cmd; 1949 struct pending_cmd *cmd;
1611 struct hci_dev *hdev;
1612 struct hci_conn *conn; 1950 struct hci_conn *conn;
1613 int err; 1951 int err;
1614 1952
1615 hdev = hci_dev_get(index); 1953 BT_DBG("");
1616 if (!hdev)
1617 return cmd_status(sk, index, mgmt_op,
1618 MGMT_STATUS_INVALID_PARAMS);
1619 1954
1620 hci_dev_lock(hdev); 1955 hci_dev_lock(hdev);
1621 1956
1622 if (!test_bit(HCI_UP, &hdev->flags)) { 1957 if (!hdev_is_powered(hdev)) {
1623 err = cmd_status(sk, index, mgmt_op, MGMT_STATUS_NOT_POWERED); 1958 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
1959 MGMT_STATUS_NOT_POWERED);
1960 goto unlock;
1961 }
1962
1963 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
1964 if (!cmd) {
1965 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
1966 MGMT_STATUS_INVALID_PARAMS);
1967 goto unlock;
1968 }
1969
1970 conn = cmd->user_data;
1971
1972 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
1973 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
1974 MGMT_STATUS_INVALID_PARAMS);
1975 goto unlock;
1976 }
1977
1978 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
1979
1980 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
1981 addr, sizeof(*addr));
1982unlock:
1983 hci_dev_unlock(hdev);
1984 return err;
1985}
1986
1987static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
1988 bdaddr_t *bdaddr, u8 type, u16 mgmt_op,
1989 u16 hci_op, __le32 passkey)
1990{
1991 struct pending_cmd *cmd;
1992 struct hci_conn *conn;
1993 int err;
1994
1995 hci_dev_lock(hdev);
1996
1997 if (!hdev_is_powered(hdev)) {
1998 err = cmd_status(sk, hdev->id, mgmt_op,
1999 MGMT_STATUS_NOT_POWERED);
1624 goto done; 2000 goto done;
1625 } 2001 }
1626 2002
1627 /* 2003 if (type == MGMT_ADDR_BREDR)
1628 * Check for an existing ACL link, if present pair via 2004 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
1629 * HCI commands. 2005 else
1630 *
1631 * If no ACL link is present, check for an LE link and if
1632 * present, pair via the SMP engine.
1633 *
1634 * If neither ACL nor LE links are present, fail with error.
1635 */
1636 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
1637 if (!conn) {
1638 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr); 2006 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
1639 if (!conn) {
1640 err = cmd_status(sk, index, mgmt_op,
1641 MGMT_STATUS_NOT_CONNECTED);
1642 goto done;
1643 }
1644 2007
2008 if (!conn) {
2009 err = cmd_status(sk, hdev->id, mgmt_op,
2010 MGMT_STATUS_NOT_CONNECTED);
2011 goto done;
2012 }
2013
2014 if (type == MGMT_ADDR_LE_PUBLIC || type == MGMT_ADDR_LE_RANDOM) {
1645 /* Continue with pairing via SMP */ 2015 /* Continue with pairing via SMP */
1646 err = smp_user_confirm_reply(conn, mgmt_op, passkey); 2016 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
1647 2017
1648 if (!err) 2018 if (!err)
1649 err = cmd_status(sk, index, mgmt_op, 2019 err = cmd_status(sk, hdev->id, mgmt_op,
1650 MGMT_STATUS_SUCCESS); 2020 MGMT_STATUS_SUCCESS);
1651 else 2021 else
1652 err = cmd_status(sk, index, mgmt_op, 2022 err = cmd_status(sk, hdev->id, mgmt_op,
1653 MGMT_STATUS_FAILED); 2023 MGMT_STATUS_FAILED);
1654 2024
1655 goto done; 2025 goto done;
1656 } 2026 }
@@ -1676,94 +2046,96 @@ static int user_pairing_resp(struct sock *sk, u16 index, bdaddr_t *bdaddr,
1676 2046
1677done: 2047done:
1678 hci_dev_unlock(hdev); 2048 hci_dev_unlock(hdev);
1679 hci_dev_put(hdev);
1680
1681 return err; 2049 return err;
1682} 2050}
1683 2051
1684static int user_confirm_reply(struct sock *sk, u16 index, void *data, u16 len) 2052static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2053 u16 len)
1685{ 2054{
1686 struct mgmt_cp_user_confirm_reply *cp = (void *) data; 2055 struct mgmt_cp_user_confirm_reply *cp = data;
1687 2056
1688 BT_DBG(""); 2057 BT_DBG("");
1689 2058
1690 if (len != sizeof(*cp)) 2059 if (len != sizeof(*cp))
1691 return cmd_status(sk, index, MGMT_OP_USER_CONFIRM_REPLY, 2060 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
1692 MGMT_STATUS_INVALID_PARAMS); 2061 MGMT_STATUS_INVALID_PARAMS);
1693 2062
1694 return user_pairing_resp(sk, index, &cp->bdaddr, 2063 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
1695 MGMT_OP_USER_CONFIRM_REPLY, 2064 MGMT_OP_USER_CONFIRM_REPLY,
1696 HCI_OP_USER_CONFIRM_REPLY, 0); 2065 HCI_OP_USER_CONFIRM_REPLY, 0);
1697} 2066}
1698 2067
1699static int user_confirm_neg_reply(struct sock *sk, u16 index, void *data, 2068static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
1700 u16 len) 2069 void *data, u16 len)
1701{ 2070{
1702 struct mgmt_cp_user_confirm_neg_reply *cp = data; 2071 struct mgmt_cp_user_confirm_neg_reply *cp = data;
1703 2072
1704 BT_DBG(""); 2073 BT_DBG("");
1705 2074
1706 if (len != sizeof(*cp)) 2075 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
1707 return cmd_status(sk, index, MGMT_OP_USER_CONFIRM_NEG_REPLY, 2076 MGMT_OP_USER_CONFIRM_NEG_REPLY,
1708 MGMT_STATUS_INVALID_PARAMS); 2077 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
1709
1710 return user_pairing_resp(sk, index, &cp->bdaddr,
1711 MGMT_OP_USER_CONFIRM_NEG_REPLY,
1712 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
1713} 2078}
1714 2079
1715static int user_passkey_reply(struct sock *sk, u16 index, void *data, u16 len) 2080static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2081 u16 len)
1716{ 2082{
1717 struct mgmt_cp_user_passkey_reply *cp = (void *) data; 2083 struct mgmt_cp_user_passkey_reply *cp = data;
1718 2084
1719 BT_DBG(""); 2085 BT_DBG("");
1720 2086
1721 if (len != sizeof(*cp)) 2087 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
1722 return cmd_status(sk, index, MGMT_OP_USER_PASSKEY_REPLY, 2088 MGMT_OP_USER_PASSKEY_REPLY,
1723 EINVAL); 2089 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
1724
1725 return user_pairing_resp(sk, index, &cp->bdaddr,
1726 MGMT_OP_USER_PASSKEY_REPLY,
1727 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
1728} 2090}
1729 2091
1730static int user_passkey_neg_reply(struct sock *sk, u16 index, void *data, 2092static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
1731 u16 len) 2093 void *data, u16 len)
1732{ 2094{
1733 struct mgmt_cp_user_passkey_neg_reply *cp = (void *) data; 2095 struct mgmt_cp_user_passkey_neg_reply *cp = data;
1734 2096
1735 BT_DBG(""); 2097 BT_DBG("");
1736 2098
1737 if (len != sizeof(*cp)) 2099 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
1738 return cmd_status(sk, index, MGMT_OP_USER_PASSKEY_NEG_REPLY, 2100 MGMT_OP_USER_PASSKEY_NEG_REPLY,
1739 EINVAL); 2101 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
2102}
2103
2104static int update_name(struct hci_dev *hdev, const char *name)
2105{
2106 struct hci_cp_write_local_name cp;
1740 2107
1741 return user_pairing_resp(sk, index, &cp->bdaddr, 2108 memcpy(cp.name, name, sizeof(cp.name));
1742 MGMT_OP_USER_PASSKEY_NEG_REPLY, 2109
1743 HCI_OP_USER_PASSKEY_NEG_REPLY, 0); 2110 return hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
1744} 2111}
1745 2112
1746static int set_local_name(struct sock *sk, u16 index, unsigned char *data, 2113static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
1747 u16 len) 2114 u16 len)
1748{ 2115{
1749 struct mgmt_cp_set_local_name *mgmt_cp = (void *) data; 2116 struct mgmt_cp_set_local_name *cp = data;
1750 struct hci_cp_write_local_name hci_cp;
1751 struct hci_dev *hdev;
1752 struct pending_cmd *cmd; 2117 struct pending_cmd *cmd;
1753 int err; 2118 int err;
1754 2119
1755 BT_DBG(""); 2120 BT_DBG("");
1756 2121
1757 if (len != sizeof(*mgmt_cp)) 2122 hci_dev_lock(hdev);
1758 return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME,
1759 MGMT_STATUS_INVALID_PARAMS);
1760 2123
1761 hdev = hci_dev_get(index); 2124 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
1762 if (!hdev)
1763 return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME,
1764 MGMT_STATUS_INVALID_PARAMS);
1765 2125
1766 hci_dev_lock(hdev); 2126 if (!hdev_is_powered(hdev)) {
2127 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2128
2129 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2130 data, len);
2131 if (err < 0)
2132 goto failed;
2133
2134 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2135 sk);
2136
2137 goto failed;
2138 }
1767 2139
1768 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len); 2140 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
1769 if (!cmd) { 2141 if (!cmd) {
@@ -1771,49 +2143,40 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
1771 goto failed; 2143 goto failed;
1772 } 2144 }
1773 2145
1774 memcpy(hci_cp.name, mgmt_cp->name, sizeof(hci_cp.name)); 2146 err = update_name(hdev, cp->name);
1775 err = hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(hci_cp),
1776 &hci_cp);
1777 if (err < 0) 2147 if (err < 0)
1778 mgmt_pending_remove(cmd); 2148 mgmt_pending_remove(cmd);
1779 2149
1780failed: 2150failed:
1781 hci_dev_unlock(hdev); 2151 hci_dev_unlock(hdev);
1782 hci_dev_put(hdev);
1783
1784 return err; 2152 return err;
1785} 2153}
1786 2154
1787static int read_local_oob_data(struct sock *sk, u16 index) 2155static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2156 void *data, u16 data_len)
1788{ 2157{
1789 struct hci_dev *hdev;
1790 struct pending_cmd *cmd; 2158 struct pending_cmd *cmd;
1791 int err; 2159 int err;
1792 2160
1793 BT_DBG("hci%u", index); 2161 BT_DBG("%s", hdev->name);
1794
1795 hdev = hci_dev_get(index);
1796 if (!hdev)
1797 return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
1798 MGMT_STATUS_INVALID_PARAMS);
1799 2162
1800 hci_dev_lock(hdev); 2163 hci_dev_lock(hdev);
1801 2164
1802 if (!test_bit(HCI_UP, &hdev->flags)) { 2165 if (!hdev_is_powered(hdev)) {
1803 err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, 2166 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
1804 MGMT_STATUS_NOT_POWERED); 2167 MGMT_STATUS_NOT_POWERED);
1805 goto unlock; 2168 goto unlock;
1806 } 2169 }
1807 2170
1808 if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) { 2171 if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
1809 err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, 2172 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
1810 MGMT_STATUS_NOT_SUPPORTED); 2173 MGMT_STATUS_NOT_SUPPORTED);
1811 goto unlock; 2174 goto unlock;
1812 } 2175 }
1813 2176
1814 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) { 2177 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
1815 err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, 2178 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
1816 MGMT_STATUS_BUSY); 2179 MGMT_STATUS_BUSY);
1817 goto unlock; 2180 goto unlock;
1818 } 2181 }
1819 2182
@@ -1829,104 +2192,112 @@ static int read_local_oob_data(struct sock *sk, u16 index)
1829 2192
1830unlock: 2193unlock:
1831 hci_dev_unlock(hdev); 2194 hci_dev_unlock(hdev);
1832 hci_dev_put(hdev);
1833
1834 return err; 2195 return err;
1835} 2196}
1836 2197
1837static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data, 2198static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
1838 u16 len) 2199 void *data, u16 len)
1839{ 2200{
1840 struct hci_dev *hdev; 2201 struct mgmt_cp_add_remote_oob_data *cp = data;
1841 struct mgmt_cp_add_remote_oob_data *cp = (void *) data; 2202 u8 status;
1842 int err; 2203 int err;
1843 2204
1844 BT_DBG("hci%u ", index); 2205 BT_DBG("%s ", hdev->name);
1845
1846 if (len != sizeof(*cp))
1847 return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
1848 MGMT_STATUS_INVALID_PARAMS);
1849
1850 hdev = hci_dev_get(index);
1851 if (!hdev)
1852 return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
1853 MGMT_STATUS_INVALID_PARAMS);
1854 2206
1855 hci_dev_lock(hdev); 2207 hci_dev_lock(hdev);
1856 2208
1857 err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash, 2209 if (!hdev_is_powered(hdev)) {
1858 cp->randomizer); 2210 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
2211 MGMT_STATUS_NOT_POWERED, &cp->addr,
2212 sizeof(cp->addr));
2213 goto unlock;
2214 }
2215
2216 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2217 cp->randomizer);
1859 if (err < 0) 2218 if (err < 0)
1860 err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, 2219 status = MGMT_STATUS_FAILED;
1861 MGMT_STATUS_FAILED);
1862 else 2220 else
1863 err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL, 2221 status = 0;
1864 0);
1865 2222
1866 hci_dev_unlock(hdev); 2223 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
1867 hci_dev_put(hdev); 2224 &cp->addr, sizeof(cp->addr));
1868 2225
2226unlock:
2227 hci_dev_unlock(hdev);
1869 return err; 2228 return err;
1870} 2229}
1871 2230
1872static int remove_remote_oob_data(struct sock *sk, u16 index, 2231static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
1873 unsigned char *data, u16 len) 2232 void *data, u16 len)
1874{ 2233{
1875 struct hci_dev *hdev; 2234 struct mgmt_cp_remove_remote_oob_data *cp = data;
1876 struct mgmt_cp_remove_remote_oob_data *cp = (void *) data; 2235 u8 status;
1877 int err; 2236 int err;
1878 2237
1879 BT_DBG("hci%u ", index); 2238 BT_DBG("%s", hdev->name);
1880 2239
1881 if (len != sizeof(*cp)) 2240 hci_dev_lock(hdev);
1882 return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, 2241
1883 MGMT_STATUS_INVALID_PARAMS); 2242 if (!hdev_is_powered(hdev)) {
2243 err = cmd_complete(sk, hdev->id,
2244 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2245 MGMT_STATUS_NOT_POWERED, &cp->addr,
2246 sizeof(cp->addr));
2247 goto unlock;
2248 }
2249
2250 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2251 if (err < 0)
2252 status = MGMT_STATUS_INVALID_PARAMS;
2253 else
2254 status = 0;
1884 2255
1885 hdev = hci_dev_get(index); 2256 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
1886 if (!hdev) 2257 status, &cp->addr, sizeof(cp->addr));
1887 return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, 2258
1888 MGMT_STATUS_INVALID_PARAMS); 2259unlock:
2260 hci_dev_unlock(hdev);
2261 return err;
2262}
2263
2264int mgmt_interleaved_discovery(struct hci_dev *hdev)
2265{
2266 int err;
2267
2268 BT_DBG("%s", hdev->name);
1889 2269
1890 hci_dev_lock(hdev); 2270 hci_dev_lock(hdev);
1891 2271
1892 err = hci_remove_remote_oob_data(hdev, &cp->bdaddr); 2272 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
1893 if (err < 0) 2273 if (err < 0)
1894 err = cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, 2274 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1895 MGMT_STATUS_INVALID_PARAMS);
1896 else
1897 err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
1898 NULL, 0);
1899 2275
1900 hci_dev_unlock(hdev); 2276 hci_dev_unlock(hdev);
1901 hci_dev_put(hdev);
1902 2277
1903 return err; 2278 return err;
1904} 2279}
1905 2280
1906static int start_discovery(struct sock *sk, u16 index, 2281static int start_discovery(struct sock *sk, struct hci_dev *hdev,
1907 unsigned char *data, u16 len) 2282 void *data, u16 len)
1908{ 2283{
1909 struct mgmt_cp_start_discovery *cp = (void *) data; 2284 struct mgmt_cp_start_discovery *cp = data;
1910 struct pending_cmd *cmd; 2285 struct pending_cmd *cmd;
1911 struct hci_dev *hdev;
1912 int err; 2286 int err;
1913 2287
1914 BT_DBG("hci%u", index); 2288 BT_DBG("%s", hdev->name);
1915
1916 if (len != sizeof(*cp))
1917 return cmd_status(sk, index, MGMT_OP_START_DISCOVERY,
1918 MGMT_STATUS_INVALID_PARAMS);
1919
1920 hdev = hci_dev_get(index);
1921 if (!hdev)
1922 return cmd_status(sk, index, MGMT_OP_START_DISCOVERY,
1923 MGMT_STATUS_INVALID_PARAMS);
1924 2289
1925 hci_dev_lock(hdev); 2290 hci_dev_lock(hdev);
1926 2291
1927 if (!test_bit(HCI_UP, &hdev->flags)) { 2292 if (!hdev_is_powered(hdev)) {
1928 err = cmd_status(sk, index, MGMT_OP_START_DISCOVERY, 2293 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
1929 MGMT_STATUS_NOT_POWERED); 2294 MGMT_STATUS_NOT_POWERED);
2295 goto failed;
2296 }
2297
2298 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2299 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2300 MGMT_STATUS_BUSY);
1930 goto failed; 2301 goto failed;
1931 } 2302 }
1932 2303
@@ -1936,137 +2307,217 @@ static int start_discovery(struct sock *sk, u16 index,
1936 goto failed; 2307 goto failed;
1937 } 2308 }
1938 2309
1939 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR); 2310 hdev->discovery.type = cp->type;
2311
2312 switch (hdev->discovery.type) {
2313 case DISCOV_TYPE_BREDR:
2314 if (lmp_bredr_capable(hdev))
2315 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
2316 else
2317 err = -ENOTSUPP;
2318 break;
2319
2320 case DISCOV_TYPE_LE:
2321 if (lmp_host_le_capable(hdev))
2322 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
2323 LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
2324 else
2325 err = -ENOTSUPP;
2326 break;
2327
2328 case DISCOV_TYPE_INTERLEAVED:
2329 if (lmp_host_le_capable(hdev) && lmp_bredr_capable(hdev))
2330 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
2331 LE_SCAN_WIN,
2332 LE_SCAN_TIMEOUT_BREDR_LE);
2333 else
2334 err = -ENOTSUPP;
2335 break;
2336
2337 default:
2338 err = -EINVAL;
2339 }
2340
1940 if (err < 0) 2341 if (err < 0)
1941 mgmt_pending_remove(cmd); 2342 mgmt_pending_remove(cmd);
2343 else
2344 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1942 2345
1943failed: 2346failed:
1944 hci_dev_unlock(hdev); 2347 hci_dev_unlock(hdev);
1945 hci_dev_put(hdev);
1946
1947 return err; 2348 return err;
1948} 2349}
1949 2350
1950static int stop_discovery(struct sock *sk, u16 index) 2351static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2352 u16 len)
1951{ 2353{
1952 struct hci_dev *hdev; 2354 struct mgmt_cp_stop_discovery *mgmt_cp = data;
1953 struct pending_cmd *cmd; 2355 struct pending_cmd *cmd;
2356 struct hci_cp_remote_name_req_cancel cp;
2357 struct inquiry_entry *e;
1954 int err; 2358 int err;
1955 2359
1956 BT_DBG("hci%u", index); 2360 BT_DBG("%s", hdev->name);
1957
1958 hdev = hci_dev_get(index);
1959 if (!hdev)
1960 return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY,
1961 MGMT_STATUS_INVALID_PARAMS);
1962 2361
1963 hci_dev_lock(hdev); 2362 hci_dev_lock(hdev);
1964 2363
2364 if (!hci_discovery_active(hdev)) {
2365 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2366 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2367 sizeof(mgmt_cp->type));
2368 goto unlock;
2369 }
2370
2371 if (hdev->discovery.type != mgmt_cp->type) {
2372 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2373 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2374 sizeof(mgmt_cp->type));
2375 goto unlock;
2376 }
2377
1965 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0); 2378 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
1966 if (!cmd) { 2379 if (!cmd) {
1967 err = -ENOMEM; 2380 err = -ENOMEM;
1968 goto failed; 2381 goto unlock;
2382 }
2383
2384 if (hdev->discovery.state == DISCOVERY_FINDING) {
2385 err = hci_cancel_inquiry(hdev);
2386 if (err < 0)
2387 mgmt_pending_remove(cmd);
2388 else
2389 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
2390 goto unlock;
2391 }
2392
2393 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_PENDING);
2394 if (!e) {
2395 mgmt_pending_remove(cmd);
2396 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
2397 &mgmt_cp->type, sizeof(mgmt_cp->type));
2398 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2399 goto unlock;
1969 } 2400 }
1970 2401
1971 err = hci_cancel_inquiry(hdev); 2402 bacpy(&cp.bdaddr, &e->data.bdaddr);
2403 err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2404 &cp);
1972 if (err < 0) 2405 if (err < 0)
1973 mgmt_pending_remove(cmd); 2406 mgmt_pending_remove(cmd);
2407 else
2408 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1974 2409
1975failed: 2410unlock:
1976 hci_dev_unlock(hdev); 2411 hci_dev_unlock(hdev);
1977 hci_dev_put(hdev);
1978
1979 return err; 2412 return err;
1980} 2413}
1981 2414
1982static int block_device(struct sock *sk, u16 index, unsigned char *data, 2415static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
1983 u16 len) 2416 u16 len)
1984{ 2417{
1985 struct hci_dev *hdev; 2418 struct mgmt_cp_confirm_name *cp = data;
1986 struct mgmt_cp_block_device *cp = (void *) data; 2419 struct inquiry_entry *e;
1987 int err; 2420 int err;
1988 2421
1989 BT_DBG("hci%u", index); 2422 BT_DBG("%s", hdev->name);
1990 2423
1991 if (len != sizeof(*cp)) 2424 hci_dev_lock(hdev);
1992 return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, 2425
1993 MGMT_STATUS_INVALID_PARAMS); 2426 if (!hci_discovery_active(hdev)) {
2427 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2428 MGMT_STATUS_FAILED);
2429 goto failed;
2430 }
1994 2431
1995 hdev = hci_dev_get(index); 2432 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
1996 if (!hdev) 2433 if (!e) {
1997 return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, 2434 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
1998 MGMT_STATUS_INVALID_PARAMS); 2435 MGMT_STATUS_INVALID_PARAMS);
2436 goto failed;
2437 }
2438
2439 if (cp->name_known) {
2440 e->name_state = NAME_KNOWN;
2441 list_del(&e->list);
2442 } else {
2443 e->name_state = NAME_NEEDED;
2444 hci_inquiry_cache_update_resolve(hdev, e);
2445 }
2446
2447 err = 0;
2448
2449failed:
2450 hci_dev_unlock(hdev);
2451 return err;
2452}
2453
2454static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
2455 u16 len)
2456{
2457 struct mgmt_cp_block_device *cp = data;
2458 u8 status;
2459 int err;
2460
2461 BT_DBG("%s", hdev->name);
1999 2462
2000 hci_dev_lock(hdev); 2463 hci_dev_lock(hdev);
2001 2464
2002 err = hci_blacklist_add(hdev, &cp->bdaddr); 2465 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
2003 if (err < 0) 2466 if (err < 0)
2004 err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, 2467 status = MGMT_STATUS_FAILED;
2005 MGMT_STATUS_FAILED);
2006 else 2468 else
2007 err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE, 2469 status = 0;
2008 NULL, 0); 2470
2471 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
2472 &cp->addr, sizeof(cp->addr));
2009 2473
2010 hci_dev_unlock(hdev); 2474 hci_dev_unlock(hdev);
2011 hci_dev_put(hdev);
2012 2475
2013 return err; 2476 return err;
2014} 2477}
2015 2478
2016static int unblock_device(struct sock *sk, u16 index, unsigned char *data, 2479static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
2017 u16 len) 2480 u16 len)
2018{ 2481{
2019 struct hci_dev *hdev; 2482 struct mgmt_cp_unblock_device *cp = data;
2020 struct mgmt_cp_unblock_device *cp = (void *) data; 2483 u8 status;
2021 int err; 2484 int err;
2022 2485
2023 BT_DBG("hci%u", index); 2486 BT_DBG("%s", hdev->name);
2024
2025 if (len != sizeof(*cp))
2026 return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
2027 MGMT_STATUS_INVALID_PARAMS);
2028
2029 hdev = hci_dev_get(index);
2030 if (!hdev)
2031 return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
2032 MGMT_STATUS_INVALID_PARAMS);
2033 2487
2034 hci_dev_lock(hdev); 2488 hci_dev_lock(hdev);
2035 2489
2036 err = hci_blacklist_del(hdev, &cp->bdaddr); 2490 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
2037
2038 if (err < 0) 2491 if (err < 0)
2039 err = cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, 2492 status = MGMT_STATUS_INVALID_PARAMS;
2040 MGMT_STATUS_INVALID_PARAMS);
2041 else 2493 else
2042 err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE, 2494 status = 0;
2043 NULL, 0); 2495
2496 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
2497 &cp->addr, sizeof(cp->addr));
2044 2498
2045 hci_dev_unlock(hdev); 2499 hci_dev_unlock(hdev);
2046 hci_dev_put(hdev);
2047 2500
2048 return err; 2501 return err;
2049} 2502}
2050 2503
2051static int set_fast_connectable(struct sock *sk, u16 index, 2504static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2052 unsigned char *data, u16 len) 2505 void *data, u16 len)
2053{ 2506{
2054 struct hci_dev *hdev; 2507 struct mgmt_mode *cp = data;
2055 struct mgmt_mode *cp = (void *) data;
2056 struct hci_cp_write_page_scan_activity acp; 2508 struct hci_cp_write_page_scan_activity acp;
2057 u8 type; 2509 u8 type;
2058 int err; 2510 int err;
2059 2511
2060 BT_DBG("hci%u", index); 2512 BT_DBG("%s", hdev->name);
2061 2513
2062 if (len != sizeof(*cp)) 2514 if (!hdev_is_powered(hdev))
2063 return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE, 2515 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2064 MGMT_STATUS_INVALID_PARAMS); 2516 MGMT_STATUS_NOT_POWERED);
2065 2517
2066 hdev = hci_dev_get(index); 2518 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2067 if (!hdev) 2519 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2068 return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE, 2520 MGMT_STATUS_REJECTED);
2069 MGMT_STATUS_INVALID_PARAMS);
2070 2521
2071 hci_dev_lock(hdev); 2522 hci_dev_lock(hdev);
2072 2523
@@ -2080,35 +2531,128 @@ static int set_fast_connectable(struct sock *sk, u16 index,
2080 2531
2081 acp.window = 0x0012; /* default 11.25 msec page scan window */ 2532 acp.window = 0x0012; /* default 11.25 msec page scan window */
2082 2533
2083 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, 2534 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp),
2084 sizeof(acp), &acp); 2535 &acp);
2085 if (err < 0) { 2536 if (err < 0) {
2086 err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE, 2537 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2087 MGMT_STATUS_FAILED); 2538 MGMT_STATUS_FAILED);
2088 goto done; 2539 goto done;
2089 } 2540 }
2090 2541
2091 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type); 2542 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
2092 if (err < 0) { 2543 if (err < 0) {
2093 err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE, 2544 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2094 MGMT_STATUS_FAILED); 2545 MGMT_STATUS_FAILED);
2095 goto done; 2546 goto done;
2096 } 2547 }
2097 2548
2098 err = cmd_complete(sk, index, MGMT_OP_SET_FAST_CONNECTABLE, 2549 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 0,
2099 NULL, 0); 2550 NULL, 0);
2100done: 2551done:
2101 hci_dev_unlock(hdev); 2552 hci_dev_unlock(hdev);
2102 hci_dev_put(hdev);
2103
2104 return err; 2553 return err;
2105} 2554}
2106 2555
2556static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
2557 void *cp_data, u16 len)
2558{
2559 struct mgmt_cp_load_long_term_keys *cp = cp_data;
2560 u16 key_count, expected_len;
2561 int i;
2562
2563 key_count = get_unaligned_le16(&cp->key_count);
2564
2565 expected_len = sizeof(*cp) + key_count *
2566 sizeof(struct mgmt_ltk_info);
2567 if (expected_len != len) {
2568 BT_ERR("load_keys: expected %u bytes, got %u bytes",
2569 len, expected_len);
2570 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
2571 EINVAL);
2572 }
2573
2574 BT_DBG("%s key_count %u", hdev->name, key_count);
2575
2576 hci_dev_lock(hdev);
2577
2578 hci_smp_ltks_clear(hdev);
2579
2580 for (i = 0; i < key_count; i++) {
2581 struct mgmt_ltk_info *key = &cp->keys[i];
2582 u8 type;
2583
2584 if (key->master)
2585 type = HCI_SMP_LTK;
2586 else
2587 type = HCI_SMP_LTK_SLAVE;
2588
2589 hci_add_ltk(hdev, &key->addr.bdaddr, key->addr.type,
2590 type, 0, key->authenticated, key->val,
2591 key->enc_size, key->ediv, key->rand);
2592 }
2593
2594 hci_dev_unlock(hdev);
2595
2596 return 0;
2597}
2598
2599struct mgmt_handler {
2600 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
2601 u16 data_len);
2602 bool var_len;
2603 size_t data_len;
2604} mgmt_handlers[] = {
2605 { NULL }, /* 0x0000 (no command) */
2606 { read_version, false, MGMT_READ_VERSION_SIZE },
2607 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
2608 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
2609 { read_controller_info, false, MGMT_READ_INFO_SIZE },
2610 { set_powered, false, MGMT_SETTING_SIZE },
2611 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
2612 { set_connectable, false, MGMT_SETTING_SIZE },
2613 { set_fast_connectable, false, MGMT_SETTING_SIZE },
2614 { set_pairable, false, MGMT_SETTING_SIZE },
2615 { set_link_security, false, MGMT_SETTING_SIZE },
2616 { set_ssp, false, MGMT_SETTING_SIZE },
2617 { set_hs, false, MGMT_SETTING_SIZE },
2618 { set_le, false, MGMT_SETTING_SIZE },
2619 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
2620 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
2621 { add_uuid, false, MGMT_ADD_UUID_SIZE },
2622 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
2623 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
2624 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
2625 { disconnect, false, MGMT_DISCONNECT_SIZE },
2626 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
2627 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
2628 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
2629 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
2630 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
2631 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
2632 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
2633 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
2634 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
2635 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
2636 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
2637 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
2638 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
2639 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
2640 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
2641 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
2642 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
2643 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
2644 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
2645};
2646
2647
2107int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) 2648int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2108{ 2649{
2109 unsigned char *buf; 2650 void *buf;
2651 u8 *cp;
2110 struct mgmt_hdr *hdr; 2652 struct mgmt_hdr *hdr;
2111 u16 opcode, index, len; 2653 u16 opcode, index, len;
2654 struct hci_dev *hdev = NULL;
2655 struct mgmt_handler *handler;
2112 int err; 2656 int err;
2113 2657
2114 BT_DBG("got %zu bytes", msglen); 2658 BT_DBG("got %zu bytes", msglen);
@@ -2125,7 +2669,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2125 goto done; 2669 goto done;
2126 } 2670 }
2127 2671
2128 hdr = (struct mgmt_hdr *) buf; 2672 hdr = buf;
2129 opcode = get_unaligned_le16(&hdr->opcode); 2673 opcode = get_unaligned_le16(&hdr->opcode);
2130 index = get_unaligned_le16(&hdr->index); 2674 index = get_unaligned_le16(&hdr->index);
2131 len = get_unaligned_le16(&hdr->len); 2675 len = get_unaligned_le16(&hdr->len);
@@ -2135,117 +2679,54 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2135 goto done; 2679 goto done;
2136 } 2680 }
2137 2681
2138 switch (opcode) { 2682 if (index != MGMT_INDEX_NONE) {
2139 case MGMT_OP_READ_VERSION: 2683 hdev = hci_dev_get(index);
2140 err = read_version(sk); 2684 if (!hdev) {
2141 break; 2685 err = cmd_status(sk, index, opcode,
2142 case MGMT_OP_READ_INDEX_LIST: 2686 MGMT_STATUS_INVALID_INDEX);
2143 err = read_index_list(sk); 2687 goto done;
2144 break; 2688 }
2145 case MGMT_OP_READ_INFO: 2689 }
2146 err = read_controller_info(sk, index); 2690
2147 break; 2691 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
2148 case MGMT_OP_SET_POWERED: 2692 mgmt_handlers[opcode].func == NULL) {
2149 err = set_powered(sk, index, buf + sizeof(*hdr), len);
2150 break;
2151 case MGMT_OP_SET_DISCOVERABLE:
2152 err = set_discoverable(sk, index, buf + sizeof(*hdr), len);
2153 break;
2154 case MGMT_OP_SET_CONNECTABLE:
2155 err = set_connectable(sk, index, buf + sizeof(*hdr), len);
2156 break;
2157 case MGMT_OP_SET_FAST_CONNECTABLE:
2158 err = set_fast_connectable(sk, index, buf + sizeof(*hdr),
2159 len);
2160 break;
2161 case MGMT_OP_SET_PAIRABLE:
2162 err = set_pairable(sk, index, buf + sizeof(*hdr), len);
2163 break;
2164 case MGMT_OP_ADD_UUID:
2165 err = add_uuid(sk, index, buf + sizeof(*hdr), len);
2166 break;
2167 case MGMT_OP_REMOVE_UUID:
2168 err = remove_uuid(sk, index, buf + sizeof(*hdr), len);
2169 break;
2170 case MGMT_OP_SET_DEV_CLASS:
2171 err = set_dev_class(sk, index, buf + sizeof(*hdr), len);
2172 break;
2173 case MGMT_OP_LOAD_LINK_KEYS:
2174 err = load_link_keys(sk, index, buf + sizeof(*hdr), len);
2175 break;
2176 case MGMT_OP_REMOVE_KEYS:
2177 err = remove_keys(sk, index, buf + sizeof(*hdr), len);
2178 break;
2179 case MGMT_OP_DISCONNECT:
2180 err = disconnect(sk, index, buf + sizeof(*hdr), len);
2181 break;
2182 case MGMT_OP_GET_CONNECTIONS:
2183 err = get_connections(sk, index);
2184 break;
2185 case MGMT_OP_PIN_CODE_REPLY:
2186 err = pin_code_reply(sk, index, buf + sizeof(*hdr), len);
2187 break;
2188 case MGMT_OP_PIN_CODE_NEG_REPLY:
2189 err = pin_code_neg_reply(sk, index, buf + sizeof(*hdr), len);
2190 break;
2191 case MGMT_OP_SET_IO_CAPABILITY:
2192 err = set_io_capability(sk, index, buf + sizeof(*hdr), len);
2193 break;
2194 case MGMT_OP_PAIR_DEVICE:
2195 err = pair_device(sk, index, buf + sizeof(*hdr), len);
2196 break;
2197 case MGMT_OP_USER_CONFIRM_REPLY:
2198 err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len);
2199 break;
2200 case MGMT_OP_USER_CONFIRM_NEG_REPLY:
2201 err = user_confirm_neg_reply(sk, index, buf + sizeof(*hdr),
2202 len);
2203 break;
2204 case MGMT_OP_USER_PASSKEY_REPLY:
2205 err = user_passkey_reply(sk, index, buf + sizeof(*hdr), len);
2206 break;
2207 case MGMT_OP_USER_PASSKEY_NEG_REPLY:
2208 err = user_passkey_neg_reply(sk, index, buf + sizeof(*hdr),
2209 len);
2210 break;
2211 case MGMT_OP_SET_LOCAL_NAME:
2212 err = set_local_name(sk, index, buf + sizeof(*hdr), len);
2213 break;
2214 case MGMT_OP_READ_LOCAL_OOB_DATA:
2215 err = read_local_oob_data(sk, index);
2216 break;
2217 case MGMT_OP_ADD_REMOTE_OOB_DATA:
2218 err = add_remote_oob_data(sk, index, buf + sizeof(*hdr), len);
2219 break;
2220 case MGMT_OP_REMOVE_REMOTE_OOB_DATA:
2221 err = remove_remote_oob_data(sk, index, buf + sizeof(*hdr),
2222 len);
2223 break;
2224 case MGMT_OP_START_DISCOVERY:
2225 err = start_discovery(sk, index, buf + sizeof(*hdr), len);
2226 break;
2227 case MGMT_OP_STOP_DISCOVERY:
2228 err = stop_discovery(sk, index);
2229 break;
2230 case MGMT_OP_BLOCK_DEVICE:
2231 err = block_device(sk, index, buf + sizeof(*hdr), len);
2232 break;
2233 case MGMT_OP_UNBLOCK_DEVICE:
2234 err = unblock_device(sk, index, buf + sizeof(*hdr), len);
2235 break;
2236 default:
2237 BT_DBG("Unknown op %u", opcode); 2693 BT_DBG("Unknown op %u", opcode);
2238 err = cmd_status(sk, index, opcode, 2694 err = cmd_status(sk, index, opcode,
2239 MGMT_STATUS_UNKNOWN_COMMAND); 2695 MGMT_STATUS_UNKNOWN_COMMAND);
2240 break; 2696 goto done;
2697 }
2698
2699 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
2700 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
2701 err = cmd_status(sk, index, opcode,
2702 MGMT_STATUS_INVALID_INDEX);
2703 goto done;
2241 } 2704 }
2242 2705
2706 handler = &mgmt_handlers[opcode];
2707
2708 if ((handler->var_len && len < handler->data_len) ||
2709 (!handler->var_len && len != handler->data_len)) {
2710 err = cmd_status(sk, index, opcode,
2711 MGMT_STATUS_INVALID_PARAMS);
2712 goto done;
2713 }
2714
2715 if (hdev)
2716 mgmt_init_hdev(sk, hdev);
2717
2718 cp = buf + sizeof(*hdr);
2719
2720 err = handler->func(sk, hdev, cp, len);
2243 if (err < 0) 2721 if (err < 0)
2244 goto done; 2722 goto done;
2245 2723
2246 err = msglen; 2724 err = msglen;
2247 2725
2248done: 2726done:
2727 if (hdev)
2728 hci_dev_put(hdev);
2729
2249 kfree(buf); 2730 kfree(buf);
2250 return err; 2731 return err;
2251} 2732}
@@ -2265,7 +2746,7 @@ int mgmt_index_added(struct hci_dev *hdev)
2265 2746
2266int mgmt_index_removed(struct hci_dev *hdev) 2747int mgmt_index_removed(struct hci_dev *hdev)
2267{ 2748{
2268 u8 status = ENODEV; 2749 u8 status = MGMT_STATUS_INVALID_INDEX;
2269 2750
2270 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status); 2751 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
2271 2752
@@ -2273,9 +2754,9 @@ int mgmt_index_removed(struct hci_dev *hdev)
2273} 2754}
2274 2755
2275struct cmd_lookup { 2756struct cmd_lookup {
2276 u8 val;
2277 struct sock *sk; 2757 struct sock *sk;
2278 struct hci_dev *hdev; 2758 struct hci_dev *hdev;
2759 u8 mgmt_status;
2279}; 2760};
2280 2761
2281static void settings_rsp(struct pending_cmd *cmd, void *data) 2762static void settings_rsp(struct pending_cmd *cmd, void *data)
@@ -2296,63 +2777,91 @@ static void settings_rsp(struct pending_cmd *cmd, void *data)
2296 2777
2297int mgmt_powered(struct hci_dev *hdev, u8 powered) 2778int mgmt_powered(struct hci_dev *hdev, u8 powered)
2298{ 2779{
2299 struct cmd_lookup match = { powered, NULL, hdev }; 2780 struct cmd_lookup match = { NULL, hdev };
2300 __le32 ev; 2781 int err;
2301 int ret; 2782
2783 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2784 return 0;
2302 2785
2303 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); 2786 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
2304 2787
2305 if (!powered) { 2788 if (powered) {
2306 u8 status = ENETDOWN; 2789 u8 scan = 0;
2790
2791 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2792 scan |= SCAN_PAGE;
2793 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
2794 scan |= SCAN_INQUIRY;
2795
2796 if (scan)
2797 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2798
2799 update_class(hdev);
2800 update_name(hdev, hdev->dev_name);
2801 update_eir(hdev);
2802 } else {
2803 u8 status = MGMT_STATUS_NOT_POWERED;
2307 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status); 2804 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
2308 } 2805 }
2309 2806
2310 ev = cpu_to_le32(get_current_settings(hdev)); 2807 err = new_settings(hdev, match.sk);
2311
2312 ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev),
2313 match.sk);
2314 2808
2315 if (match.sk) 2809 if (match.sk)
2316 sock_put(match.sk); 2810 sock_put(match.sk);
2317 2811
2318 return ret; 2812 return err;
2319} 2813}
2320 2814
2321int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable) 2815int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
2322{ 2816{
2323 struct cmd_lookup match = { discoverable, NULL, hdev }; 2817 struct cmd_lookup match = { NULL, hdev };
2324 __le32 ev; 2818 bool changed = false;
2325 int ret; 2819 int err = 0;
2326 2820
2327 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp, &match); 2821 if (discoverable) {
2822 if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
2823 changed = true;
2824 } else {
2825 if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
2826 changed = true;
2827 }
2328 2828
2329 ev = cpu_to_le32(get_current_settings(hdev)); 2829 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
2830 &match);
2831
2832 if (changed)
2833 err = new_settings(hdev, match.sk);
2330 2834
2331 ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev),
2332 match.sk);
2333 if (match.sk) 2835 if (match.sk)
2334 sock_put(match.sk); 2836 sock_put(match.sk);
2335 2837
2336 return ret; 2838 return err;
2337} 2839}
2338 2840
2339int mgmt_connectable(struct hci_dev *hdev, u8 connectable) 2841int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
2340{ 2842{
2341 __le32 ev; 2843 struct cmd_lookup match = { NULL, hdev };
2342 struct cmd_lookup match = { connectable, NULL, hdev }; 2844 bool changed = false;
2343 int ret; 2845 int err = 0;
2344 2846
2345 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, settings_rsp, 2847 if (connectable) {
2346 &match); 2848 if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2849 changed = true;
2850 } else {
2851 if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2852 changed = true;
2853 }
2347 2854
2348 ev = cpu_to_le32(get_current_settings(hdev)); 2855 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, settings_rsp,
2856 &match);
2349 2857
2350 ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), match.sk); 2858 if (changed)
2859 err = new_settings(hdev, match.sk);
2351 2860
2352 if (match.sk) 2861 if (match.sk)
2353 sock_put(match.sk); 2862 sock_put(match.sk);
2354 2863
2355 return ret; 2864 return err;
2356} 2865}
2357 2866
2358int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status) 2867int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
@@ -2361,24 +2870,24 @@ int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
2361 2870
2362 if (scan & SCAN_PAGE) 2871 if (scan & SCAN_PAGE)
2363 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, 2872 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
2364 cmd_status_rsp, &mgmt_err); 2873 cmd_status_rsp, &mgmt_err);
2365 2874
2366 if (scan & SCAN_INQUIRY) 2875 if (scan & SCAN_INQUIRY)
2367 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, 2876 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
2368 cmd_status_rsp, &mgmt_err); 2877 cmd_status_rsp, &mgmt_err);
2369 2878
2370 return 0; 2879 return 0;
2371} 2880}
2372 2881
2373int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, 2882int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, u8 persistent)
2374 u8 persistent)
2375{ 2883{
2376 struct mgmt_ev_new_link_key ev; 2884 struct mgmt_ev_new_link_key ev;
2377 2885
2378 memset(&ev, 0, sizeof(ev)); 2886 memset(&ev, 0, sizeof(ev));
2379 2887
2380 ev.store_hint = persistent; 2888 ev.store_hint = persistent;
2381 bacpy(&ev.key.bdaddr, &key->bdaddr); 2889 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
2890 ev.key.addr.type = MGMT_ADDR_BREDR;
2382 ev.key.type = key->type; 2891 ev.key.type = key->type;
2383 memcpy(ev.key.val, key->val, 16); 2892 memcpy(ev.key.val, key->val, 16);
2384 ev.key.pin_len = key->pin_len; 2893 ev.key.pin_len = key->pin_len;
@@ -2386,15 +2895,54 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
2386 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL); 2895 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
2387} 2896}
2388 2897
2389int mgmt_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 2898int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
2390 u8 addr_type)
2391{ 2899{
2392 struct mgmt_addr_info ev; 2900 struct mgmt_ev_new_long_term_key ev;
2393 2901
2394 bacpy(&ev.bdaddr, bdaddr); 2902 memset(&ev, 0, sizeof(ev));
2395 ev.type = link_to_mgmt(link_type, addr_type);
2396 2903
2397 return mgmt_event(MGMT_EV_CONNECTED, hdev, &ev, sizeof(ev), NULL); 2904 ev.store_hint = persistent;
2905 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
2906 ev.key.addr.type = key->bdaddr_type;
2907 ev.key.authenticated = key->authenticated;
2908 ev.key.enc_size = key->enc_size;
2909 ev.key.ediv = key->ediv;
2910
2911 if (key->type == HCI_SMP_LTK)
2912 ev.key.master = 1;
2913
2914 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
2915 memcpy(ev.key.val, key->val, sizeof(key->val));
2916
2917 return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
2918 NULL);
2919}
2920
2921int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
2922 u8 addr_type, u32 flags, u8 *name, u8 name_len,
2923 u8 *dev_class)
2924{
2925 char buf[512];
2926 struct mgmt_ev_device_connected *ev = (void *) buf;
2927 u16 eir_len = 0;
2928
2929 bacpy(&ev->addr.bdaddr, bdaddr);
2930 ev->addr.type = link_to_mgmt(link_type, addr_type);
2931
2932 ev->flags = __cpu_to_le32(flags);
2933
2934 if (name_len > 0)
2935 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
2936 name, name_len);
2937
2938 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
2939 eir_len = eir_append_data(&ev->eir[eir_len], eir_len,
2940 EIR_CLASS_OF_DEV, dev_class, 3);
2941
2942 put_unaligned_le16(eir_len, &ev->eir_len);
2943
2944 return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
2945 sizeof(*ev) + eir_len, NULL);
2398} 2946}
2399 2947
2400static void disconnect_rsp(struct pending_cmd *cmd, void *data) 2948static void disconnect_rsp(struct pending_cmd *cmd, void *data)
@@ -2403,10 +2951,11 @@ static void disconnect_rsp(struct pending_cmd *cmd, void *data)
2403 struct sock **sk = data; 2951 struct sock **sk = data;
2404 struct mgmt_rp_disconnect rp; 2952 struct mgmt_rp_disconnect rp;
2405 2953
2406 bacpy(&rp.bdaddr, &cp->bdaddr); 2954 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2407 rp.status = 0; 2955 rp.addr.type = cp->addr.type;
2408 2956
2409 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, &rp, sizeof(rp)); 2957 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
2958 sizeof(rp));
2410 2959
2411 *sk = cmd->sk; 2960 *sk = cmd->sk;
2412 sock_hold(*sk); 2961 sock_hold(*sk);
@@ -2414,25 +2963,25 @@ static void disconnect_rsp(struct pending_cmd *cmd, void *data)
2414 mgmt_pending_remove(cmd); 2963 mgmt_pending_remove(cmd);
2415} 2964}
2416 2965
2417static void remove_keys_rsp(struct pending_cmd *cmd, void *data) 2966static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
2418{ 2967{
2419 u8 *status = data; 2968 struct hci_dev *hdev = data;
2420 struct mgmt_cp_remove_keys *cp = cmd->param; 2969 struct mgmt_cp_unpair_device *cp = cmd->param;
2421 struct mgmt_rp_remove_keys rp; 2970 struct mgmt_rp_unpair_device rp;
2422 2971
2423 memset(&rp, 0, sizeof(rp)); 2972 memset(&rp, 0, sizeof(rp));
2424 bacpy(&rp.bdaddr, &cp->bdaddr); 2973 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2425 if (status != NULL) 2974 rp.addr.type = cp->addr.type;
2426 rp.status = *status;
2427 2975
2428 cmd_complete(cmd->sk, cmd->index, MGMT_OP_REMOVE_KEYS, &rp, 2976 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2429 sizeof(rp)); 2977
2978 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
2430 2979
2431 mgmt_pending_remove(cmd); 2980 mgmt_pending_remove(cmd);
2432} 2981}
2433 2982
2434int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 2983int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
2435 u8 addr_type) 2984 u8 link_type, u8 addr_type)
2436{ 2985{
2437 struct mgmt_addr_info ev; 2986 struct mgmt_addr_info ev;
2438 struct sock *sk = NULL; 2987 struct sock *sk = NULL;
@@ -2443,45 +2992,44 @@ int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
2443 bacpy(&ev.bdaddr, bdaddr); 2992 bacpy(&ev.bdaddr, bdaddr);
2444 ev.type = link_to_mgmt(link_type, addr_type); 2993 ev.type = link_to_mgmt(link_type, addr_type);
2445 2994
2446 err = mgmt_event(MGMT_EV_DISCONNECTED, hdev, &ev, sizeof(ev), sk); 2995 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
2996 sk);
2447 2997
2448 if (sk) 2998 if (sk)
2449 sock_put(sk); 2999 sock_put(sk);
2450 3000
2451 mgmt_pending_foreach(MGMT_OP_REMOVE_KEYS, hdev, remove_keys_rsp, NULL); 3001 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3002 hdev);
2452 3003
2453 return err; 3004 return err;
2454} 3005}
2455 3006
2456int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status) 3007int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3008 u8 link_type, u8 addr_type, u8 status)
2457{ 3009{
3010 struct mgmt_rp_disconnect rp;
2458 struct pending_cmd *cmd; 3011 struct pending_cmd *cmd;
2459 u8 mgmt_err = mgmt_status(status);
2460 int err; 3012 int err;
2461 3013
2462 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev); 3014 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
2463 if (!cmd) 3015 if (!cmd)
2464 return -ENOENT; 3016 return -ENOENT;
2465 3017
2466 if (bdaddr) { 3018 bacpy(&rp.addr.bdaddr, bdaddr);
2467 struct mgmt_rp_disconnect rp; 3019 rp.addr.type = link_to_mgmt(link_type, addr_type);
2468 3020
2469 bacpy(&rp.bdaddr, bdaddr); 3021 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
2470 rp.status = status; 3022 mgmt_status(status), &rp, sizeof(rp));
2471
2472 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
2473 &rp, sizeof(rp));
2474 } else
2475 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_DISCONNECT,
2476 mgmt_err);
2477 3023
2478 mgmt_pending_remove(cmd); 3024 mgmt_pending_remove(cmd);
2479 3025
3026 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3027 hdev);
2480 return err; 3028 return err;
2481} 3029}
2482 3030
2483int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 3031int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
2484 u8 addr_type, u8 status) 3032 u8 addr_type, u8 status)
2485{ 3033{
2486 struct mgmt_ev_connect_failed ev; 3034 struct mgmt_ev_connect_failed ev;
2487 3035
@@ -2496,15 +3044,16 @@ int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
2496{ 3044{
2497 struct mgmt_ev_pin_code_request ev; 3045 struct mgmt_ev_pin_code_request ev;
2498 3046
2499 bacpy(&ev.bdaddr, bdaddr); 3047 bacpy(&ev.addr.bdaddr, bdaddr);
3048 ev.addr.type = MGMT_ADDR_BREDR;
2500 ev.secure = secure; 3049 ev.secure = secure;
2501 3050
2502 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), 3051 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
2503 NULL); 3052 NULL);
2504} 3053}
2505 3054
2506int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 3055int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2507 u8 status) 3056 u8 status)
2508{ 3057{
2509 struct pending_cmd *cmd; 3058 struct pending_cmd *cmd;
2510 struct mgmt_rp_pin_code_reply rp; 3059 struct mgmt_rp_pin_code_reply rp;
@@ -2514,11 +3063,11 @@ int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2514 if (!cmd) 3063 if (!cmd)
2515 return -ENOENT; 3064 return -ENOENT;
2516 3065
2517 bacpy(&rp.bdaddr, bdaddr); 3066 bacpy(&rp.addr.bdaddr, bdaddr);
2518 rp.status = mgmt_status(status); 3067 rp.addr.type = MGMT_ADDR_BREDR;
2519 3068
2520 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, &rp, 3069 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2521 sizeof(rp)); 3070 mgmt_status(status), &rp, sizeof(rp));
2522 3071
2523 mgmt_pending_remove(cmd); 3072 mgmt_pending_remove(cmd);
2524 3073
@@ -2526,7 +3075,7 @@ int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2526} 3075}
2527 3076
2528int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 3077int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2529 u8 status) 3078 u8 status)
2530{ 3079{
2531 struct pending_cmd *cmd; 3080 struct pending_cmd *cmd;
2532 struct mgmt_rp_pin_code_reply rp; 3081 struct mgmt_rp_pin_code_reply rp;
@@ -2536,11 +3085,11 @@ int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2536 if (!cmd) 3085 if (!cmd)
2537 return -ENOENT; 3086 return -ENOENT;
2538 3087
2539 bacpy(&rp.bdaddr, bdaddr); 3088 bacpy(&rp.addr.bdaddr, bdaddr);
2540 rp.status = mgmt_status(status); 3089 rp.addr.type = MGMT_ADDR_BREDR;
2541 3090
2542 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY, &rp, 3091 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
2543 sizeof(rp)); 3092 mgmt_status(status), &rp, sizeof(rp));
2544 3093
2545 mgmt_pending_remove(cmd); 3094 mgmt_pending_remove(cmd);
2546 3095
@@ -2548,34 +3097,39 @@ int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2548} 3097}
2549 3098
2550int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr, 3099int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
2551 __le32 value, u8 confirm_hint) 3100 u8 link_type, u8 addr_type, __le32 value,
3101 u8 confirm_hint)
2552{ 3102{
2553 struct mgmt_ev_user_confirm_request ev; 3103 struct mgmt_ev_user_confirm_request ev;
2554 3104
2555 BT_DBG("%s", hdev->name); 3105 BT_DBG("%s", hdev->name);
2556 3106
2557 bacpy(&ev.bdaddr, bdaddr); 3107 bacpy(&ev.addr.bdaddr, bdaddr);
3108 ev.addr.type = link_to_mgmt(link_type, addr_type);
2558 ev.confirm_hint = confirm_hint; 3109 ev.confirm_hint = confirm_hint;
2559 put_unaligned_le32(value, &ev.value); 3110 put_unaligned_le32(value, &ev.value);
2560 3111
2561 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev), 3112 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
2562 NULL); 3113 NULL);
2563} 3114}
2564 3115
2565int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr) 3116int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3117 u8 link_type, u8 addr_type)
2566{ 3118{
2567 struct mgmt_ev_user_passkey_request ev; 3119 struct mgmt_ev_user_passkey_request ev;
2568 3120
2569 BT_DBG("%s", hdev->name); 3121 BT_DBG("%s", hdev->name);
2570 3122
2571 bacpy(&ev.bdaddr, bdaddr); 3123 bacpy(&ev.addr.bdaddr, bdaddr);
3124 ev.addr.type = link_to_mgmt(link_type, addr_type);
2572 3125
2573 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev), 3126 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
2574 NULL); 3127 NULL);
2575} 3128}
2576 3129
2577static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 3130static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2578 u8 status, u8 opcode) 3131 u8 link_type, u8 addr_type, u8 status,
3132 u8 opcode)
2579{ 3133{
2580 struct pending_cmd *cmd; 3134 struct pending_cmd *cmd;
2581 struct mgmt_rp_user_confirm_reply rp; 3135 struct mgmt_rp_user_confirm_reply rp;
@@ -2585,9 +3139,10 @@ static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2585 if (!cmd) 3139 if (!cmd)
2586 return -ENOENT; 3140 return -ENOENT;
2587 3141
2588 bacpy(&rp.bdaddr, bdaddr); 3142 bacpy(&rp.addr.bdaddr, bdaddr);
2589 rp.status = mgmt_status(status); 3143 rp.addr.type = link_to_mgmt(link_type, addr_type);
2590 err = cmd_complete(cmd->sk, hdev->id, opcode, &rp, sizeof(rp)); 3144 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3145 &rp, sizeof(rp));
2591 3146
2592 mgmt_pending_remove(cmd); 3147 mgmt_pending_remove(cmd);
2593 3148
@@ -2595,72 +3150,215 @@ static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2595} 3150}
2596 3151
2597int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 3152int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2598 u8 status) 3153 u8 link_type, u8 addr_type, u8 status)
2599{ 3154{
2600 return user_pairing_resp_complete(hdev, bdaddr, status, 3155 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
2601 MGMT_OP_USER_CONFIRM_REPLY); 3156 status, MGMT_OP_USER_CONFIRM_REPLY);
2602} 3157}
2603 3158
2604int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, 3159int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2605 bdaddr_t *bdaddr, u8 status) 3160 u8 link_type, u8 addr_type, u8 status)
2606{ 3161{
2607 return user_pairing_resp_complete(hdev, bdaddr, status, 3162 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
2608 MGMT_OP_USER_CONFIRM_NEG_REPLY); 3163 status, MGMT_OP_USER_CONFIRM_NEG_REPLY);
2609} 3164}
2610 3165
2611int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 3166int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2612 u8 status) 3167 u8 link_type, u8 addr_type, u8 status)
2613{ 3168{
2614 return user_pairing_resp_complete(hdev, bdaddr, status, 3169 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
2615 MGMT_OP_USER_PASSKEY_REPLY); 3170 status, MGMT_OP_USER_PASSKEY_REPLY);
2616} 3171}
2617 3172
2618int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, 3173int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
2619 bdaddr_t *bdaddr, u8 status) 3174 u8 link_type, u8 addr_type, u8 status)
2620{ 3175{
2621 return user_pairing_resp_complete(hdev, bdaddr, status, 3176 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
2622 MGMT_OP_USER_PASSKEY_NEG_REPLY); 3177 status, MGMT_OP_USER_PASSKEY_NEG_REPLY);
2623} 3178}
2624 3179
2625int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status) 3180int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3181 u8 addr_type, u8 status)
2626{ 3182{
2627 struct mgmt_ev_auth_failed ev; 3183 struct mgmt_ev_auth_failed ev;
2628 3184
2629 bacpy(&ev.bdaddr, bdaddr); 3185 bacpy(&ev.addr.bdaddr, bdaddr);
3186 ev.addr.type = link_to_mgmt(link_type, addr_type);
2630 ev.status = mgmt_status(status); 3187 ev.status = mgmt_status(status);
2631 3188
2632 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL); 3189 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
2633} 3190}
2634 3191
3192int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
3193{
3194 struct cmd_lookup match = { NULL, hdev };
3195 bool changed = false;
3196 int err = 0;
3197
3198 if (status) {
3199 u8 mgmt_err = mgmt_status(status);
3200 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
3201 cmd_status_rsp, &mgmt_err);
3202 return 0;
3203 }
3204
3205 if (test_bit(HCI_AUTH, &hdev->flags)) {
3206 if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3207 changed = true;
3208 } else {
3209 if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3210 changed = true;
3211 }
3212
3213 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
3214 &match);
3215
3216 if (changed)
3217 err = new_settings(hdev, match.sk);
3218
3219 if (match.sk)
3220 sock_put(match.sk);
3221
3222 return err;
3223}
3224
3225static int clear_eir(struct hci_dev *hdev)
3226{
3227 struct hci_cp_write_eir cp;
3228
3229 if (!(hdev->features[6] & LMP_EXT_INQ))
3230 return 0;
3231
3232 memset(hdev->eir, 0, sizeof(hdev->eir));
3233
3234 memset(&cp, 0, sizeof(cp));
3235
3236 return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
3237}
3238
3239int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3240{
3241 struct cmd_lookup match = { NULL, hdev };
3242 bool changed = false;
3243 int err = 0;
3244
3245 if (status) {
3246 u8 mgmt_err = mgmt_status(status);
3247
3248 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
3249 &hdev->dev_flags))
3250 err = new_settings(hdev, NULL);
3251
3252 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
3253 &mgmt_err);
3254
3255 return err;
3256 }
3257
3258 if (enable) {
3259 if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3260 changed = true;
3261 } else {
3262 if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3263 changed = true;
3264 }
3265
3266 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
3267
3268 if (changed)
3269 err = new_settings(hdev, match.sk);
3270
3271 if (match.sk)
3272 sock_put(match.sk);
3273
3274 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3275 update_eir(hdev);
3276 else
3277 clear_eir(hdev);
3278
3279 return err;
3280}
3281
3282static void class_rsp(struct pending_cmd *cmd, void *data)
3283{
3284 struct cmd_lookup *match = data;
3285
3286 cmd_complete(cmd->sk, cmd->index, cmd->opcode, match->mgmt_status,
3287 match->hdev->dev_class, 3);
3288
3289 list_del(&cmd->list);
3290
3291 if (match->sk == NULL) {
3292 match->sk = cmd->sk;
3293 sock_hold(match->sk);
3294 }
3295
3296 mgmt_pending_free(cmd);
3297}
3298
3299int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
3300 u8 status)
3301{
3302 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
3303 int err = 0;
3304
3305 clear_bit(HCI_PENDING_CLASS, &hdev->dev_flags);
3306
3307 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, class_rsp, &match);
3308 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, class_rsp, &match);
3309 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, class_rsp, &match);
3310
3311 if (!status)
3312 err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
3313 3, NULL);
3314
3315 if (match.sk)
3316 sock_put(match.sk);
3317
3318 return err;
3319}
3320
2635int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status) 3321int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
2636{ 3322{
2637 struct pending_cmd *cmd; 3323 struct pending_cmd *cmd;
2638 struct mgmt_cp_set_local_name ev; 3324 struct mgmt_cp_set_local_name ev;
2639 int err; 3325 bool changed = false;
3326 int err = 0;
3327
3328 if (memcmp(name, hdev->dev_name, sizeof(hdev->dev_name)) != 0) {
3329 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
3330 changed = true;
3331 }
2640 3332
2641 memset(&ev, 0, sizeof(ev)); 3333 memset(&ev, 0, sizeof(ev));
2642 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); 3334 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
3335 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
2643 3336
2644 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev); 3337 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2645 if (!cmd) 3338 if (!cmd)
2646 goto send_event; 3339 goto send_event;
2647 3340
3341 /* Always assume that either the short or the complete name has
3342 * changed if there was a pending mgmt command */
3343 changed = true;
3344
2648 if (status) { 3345 if (status) {
2649 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 3346 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2650 mgmt_status(status)); 3347 mgmt_status(status));
2651 goto failed; 3348 goto failed;
2652 } 3349 }
2653 3350
2654 update_eir(hdev); 3351 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, &ev,
2655 3352 sizeof(ev));
2656 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, &ev,
2657 sizeof(ev));
2658 if (err < 0) 3353 if (err < 0)
2659 goto failed; 3354 goto failed;
2660 3355
2661send_event: 3356send_event:
2662 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev), 3357 if (changed)
2663 cmd ? cmd->sk : NULL); 3358 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev,
3359 sizeof(ev), cmd ? cmd->sk : NULL);
3360
3361 update_eir(hdev);
2664 3362
2665failed: 3363failed:
2666 if (cmd) 3364 if (cmd)
@@ -2669,7 +3367,7 @@ failed:
2669} 3367}
2670 3368
2671int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash, 3369int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
2672 u8 *randomizer, u8 status) 3370 u8 *randomizer, u8 status)
2673{ 3371{
2674 struct pending_cmd *cmd; 3372 struct pending_cmd *cmd;
2675 int err; 3373 int err;
@@ -2681,9 +3379,8 @@ int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
2681 return -ENOENT; 3379 return -ENOENT;
2682 3380
2683 if (status) { 3381 if (status) {
2684 err = cmd_status(cmd->sk, hdev->id, 3382 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2685 MGMT_OP_READ_LOCAL_OOB_DATA, 3383 mgmt_status(status));
2686 mgmt_status(status));
2687 } else { 3384 } else {
2688 struct mgmt_rp_read_local_oob_data rp; 3385 struct mgmt_rp_read_local_oob_data rp;
2689 3386
@@ -2691,8 +3388,8 @@ int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
2691 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer)); 3388 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
2692 3389
2693 err = cmd_complete(cmd->sk, hdev->id, 3390 err = cmd_complete(cmd->sk, hdev->id,
2694 MGMT_OP_READ_LOCAL_OOB_DATA, 3391 MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
2695 &rp, sizeof(rp)); 3392 sizeof(rp));
2696 } 3393 }
2697 3394
2698 mgmt_pending_remove(cmd); 3395 mgmt_pending_remove(cmd);
@@ -2700,48 +3397,120 @@ int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
2700 return err; 3397 return err;
2701} 3398}
2702 3399
3400int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3401{
3402 struct cmd_lookup match = { NULL, hdev };
3403 bool changed = false;
3404 int err = 0;
3405
3406 if (status) {
3407 u8 mgmt_err = mgmt_status(status);
3408
3409 if (enable && test_and_clear_bit(HCI_LE_ENABLED,
3410 &hdev->dev_flags))
3411 err = new_settings(hdev, NULL);
3412
3413 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev,
3414 cmd_status_rsp, &mgmt_err);
3415
3416 return err;
3417 }
3418
3419 if (enable) {
3420 if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3421 changed = true;
3422 } else {
3423 if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3424 changed = true;
3425 }
3426
3427 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
3428
3429 if (changed)
3430 err = new_settings(hdev, match.sk);
3431
3432 if (match.sk)
3433 sock_put(match.sk);
3434
3435 return err;
3436}
3437
2703int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 3438int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
2704 u8 addr_type, u8 *dev_class, s8 rssi, u8 *eir) 3439 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
3440 ssp, u8 *eir, u16 eir_len)
2705{ 3441{
2706 struct mgmt_ev_device_found ev; 3442 char buf[512];
3443 struct mgmt_ev_device_found *ev = (void *) buf;
3444 size_t ev_size;
2707 3445
2708 memset(&ev, 0, sizeof(ev)); 3446 /* Leave 5 bytes for a potential CoD field */
3447 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
3448 return -EINVAL;
2709 3449
2710 bacpy(&ev.addr.bdaddr, bdaddr); 3450 memset(buf, 0, sizeof(buf));
2711 ev.addr.type = link_to_mgmt(link_type, addr_type); 3451
2712 ev.rssi = rssi; 3452 bacpy(&ev->addr.bdaddr, bdaddr);
3453 ev->addr.type = link_to_mgmt(link_type, addr_type);
3454 ev->rssi = rssi;
3455 if (cfm_name)
3456 ev->flags[0] |= MGMT_DEV_FOUND_CONFIRM_NAME;
3457 if (!ssp)
3458 ev->flags[0] |= MGMT_DEV_FOUND_LEGACY_PAIRING;
3459
3460 if (eir_len > 0)
3461 memcpy(ev->eir, eir, eir_len);
2713 3462
2714 if (eir) 3463 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
2715 memcpy(ev.eir, eir, sizeof(ev.eir)); 3464 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
3465 dev_class, 3);
2716 3466
2717 if (dev_class) 3467 put_unaligned_le16(eir_len, &ev->eir_len);
2718 memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class));
2719 3468
2720 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, &ev, sizeof(ev), NULL); 3469 ev_size = sizeof(*ev) + eir_len;
3470
3471 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
2721} 3472}
2722 3473
2723int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name) 3474int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3475 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
2724{ 3476{
2725 struct mgmt_ev_remote_name ev; 3477 struct mgmt_ev_device_found *ev;
3478 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
3479 u16 eir_len;
2726 3480
2727 memset(&ev, 0, sizeof(ev)); 3481 ev = (struct mgmt_ev_device_found *) buf;
2728 3482
2729 bacpy(&ev.bdaddr, bdaddr); 3483 memset(buf, 0, sizeof(buf));
2730 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); 3484
3485 bacpy(&ev->addr.bdaddr, bdaddr);
3486 ev->addr.type = link_to_mgmt(link_type, addr_type);
3487 ev->rssi = rssi;
3488
3489 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
3490 name_len);
3491
3492 put_unaligned_le16(eir_len, &ev->eir_len);
2731 3493
2732 return mgmt_event(MGMT_EV_REMOTE_NAME, hdev, &ev, sizeof(ev), NULL); 3494 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
3495 sizeof(*ev) + eir_len, NULL);
2733} 3496}
2734 3497
2735int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status) 3498int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
2736{ 3499{
2737 struct pending_cmd *cmd; 3500 struct pending_cmd *cmd;
3501 u8 type;
2738 int err; 3502 int err;
2739 3503
3504 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3505
2740 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev); 3506 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
2741 if (!cmd) 3507 if (!cmd)
2742 return -ENOENT; 3508 return -ENOENT;
2743 3509
2744 err = cmd_status(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status)); 3510 type = hdev->discovery.type;
3511
3512 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3513 &type, sizeof(type));
2745 mgmt_pending_remove(cmd); 3514 mgmt_pending_remove(cmd);
2746 3515
2747 return err; 3516 return err;
@@ -2756,7 +3525,8 @@ int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
2756 if (!cmd) 3525 if (!cmd)
2757 return -ENOENT; 3526 return -ENOENT;
2758 3527
2759 err = cmd_status(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status)); 3528 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3529 &hdev->discovery.type, sizeof(hdev->discovery.type));
2760 mgmt_pending_remove(cmd); 3530 mgmt_pending_remove(cmd);
2761 3531
2762 return err; 3532 return err;
@@ -2764,44 +3534,61 @@ int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
2764 3534
2765int mgmt_discovering(struct hci_dev *hdev, u8 discovering) 3535int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
2766{ 3536{
3537 struct mgmt_ev_discovering ev;
2767 struct pending_cmd *cmd; 3538 struct pending_cmd *cmd;
2768 3539
3540 BT_DBG("%s discovering %u", hdev->name, discovering);
3541
2769 if (discovering) 3542 if (discovering)
2770 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev); 3543 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
2771 else 3544 else
2772 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev); 3545 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
2773 3546
2774 if (cmd != NULL) { 3547 if (cmd != NULL) {
2775 cmd_complete(cmd->sk, hdev->id, cmd->opcode, NULL, 0); 3548 u8 type = hdev->discovery.type;
3549
3550 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
3551 sizeof(type));
2776 mgmt_pending_remove(cmd); 3552 mgmt_pending_remove(cmd);
2777 } 3553 }
2778 3554
2779 return mgmt_event(MGMT_EV_DISCOVERING, hdev, &discovering, 3555 memset(&ev, 0, sizeof(ev));
2780 sizeof(discovering), NULL); 3556 ev.type = hdev->discovery.type;
3557 ev.discovering = discovering;
3558
3559 return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
2781} 3560}
2782 3561
2783int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr) 3562int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2784{ 3563{
2785 struct pending_cmd *cmd; 3564 struct pending_cmd *cmd;
2786 struct mgmt_ev_device_blocked ev; 3565 struct mgmt_ev_device_blocked ev;
2787 3566
2788 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev); 3567 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
2789 3568
2790 bacpy(&ev.bdaddr, bdaddr); 3569 bacpy(&ev.addr.bdaddr, bdaddr);
3570 ev.addr.type = type;
2791 3571
2792 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev), 3572 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
2793 cmd ? cmd->sk : NULL); 3573 cmd ? cmd->sk : NULL);
2794} 3574}
2795 3575
2796int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr) 3576int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2797{ 3577{
2798 struct pending_cmd *cmd; 3578 struct pending_cmd *cmd;
2799 struct mgmt_ev_device_unblocked ev; 3579 struct mgmt_ev_device_unblocked ev;
2800 3580
2801 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev); 3581 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
2802 3582
2803 bacpy(&ev.bdaddr, bdaddr); 3583 bacpy(&ev.addr.bdaddr, bdaddr);
3584 ev.addr.type = type;
2804 3585
2805 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev), 3586 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
2806 cmd ? cmd->sk : NULL); 3587 cmd ? cmd->sk : NULL);
2807} 3588}
3589
3590module_param(enable_hs, bool, 0644);
3591MODULE_PARM_DESC(enable_hs, "Enable High Speed support");
3592
3593module_param(enable_le, bool, 0644);
3594MODULE_PARM_DESC(enable_le, "Enable Low Energy support");
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 7adb03ca51c2..4bf54b377255 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -196,7 +196,7 @@ static DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
196static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) 196static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
197{ 197{
198 struct rfcomm_dev *dev, *entry; 198 struct rfcomm_dev *dev, *entry;
199 struct list_head *head = &rfcomm_dev_list, *p; 199 struct list_head *head = &rfcomm_dev_list;
200 int err = 0; 200 int err = 0;
201 201
202 BT_DBG("id %d channel %d", req->dev_id, req->channel); 202 BT_DBG("id %d channel %d", req->dev_id, req->channel);
@@ -215,7 +215,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
215 break; 215 break;
216 216
217 dev->id++; 217 dev->id++;
218 head = p; 218 head = &entry->list;
219 } 219 }
220 } else { 220 } else {
221 dev->id = req->dev_id; 221 dev->id = req->dev_id;
@@ -229,7 +229,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
229 if (entry->id > dev->id - 1) 229 if (entry->id > dev->id - 1)
230 break; 230 break;
231 231
232 head = p; 232 head = &entry->list;
233 } 233 }
234 } 234 }
235 235
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 32c47de30344..deb119875fd9 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -29,7 +29,7 @@
29#include <linux/scatterlist.h> 29#include <linux/scatterlist.h>
30#include <crypto/b128ops.h> 30#include <crypto/b128ops.h>
31 31
32#define SMP_TIMEOUT 30000 /* 30 seconds */ 32#define SMP_TIMEOUT msecs_to_jiffies(30000)
33 33
34static inline void swap128(u8 src[16], u8 dst[16]) 34static inline void swap128(u8 src[16], u8 dst[16])
35{ 35{
@@ -186,8 +186,7 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
186 hci_send_acl(conn->hchan, skb, 0); 186 hci_send_acl(conn->hchan, skb, 0);
187 187
188 cancel_delayed_work_sync(&conn->security_timer); 188 cancel_delayed_work_sync(&conn->security_timer);
189 schedule_delayed_work(&conn->security_timer, 189 schedule_delayed_work(&conn->security_timer, SMP_TIMEOUT);
190 msecs_to_jiffies(SMP_TIMEOUT));
191} 190}
192 191
193static __u8 authreq_to_seclevel(__u8 authreq) 192static __u8 authreq_to_seclevel(__u8 authreq)
@@ -217,7 +216,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
217{ 216{
218 u8 dist_keys = 0; 217 u8 dist_keys = 0;
219 218
220 if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->flags)) { 219 if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->dev_flags)) {
221 dist_keys = SMP_DIST_ENC_KEY; 220 dist_keys = SMP_DIST_ENC_KEY;
222 authreq |= SMP_AUTH_BONDING; 221 authreq |= SMP_AUTH_BONDING;
223 } else { 222 } else {
@@ -250,21 +249,27 @@ static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
250 (max_key_size < SMP_MIN_ENC_KEY_SIZE)) 249 (max_key_size < SMP_MIN_ENC_KEY_SIZE))
251 return SMP_ENC_KEY_SIZE; 250 return SMP_ENC_KEY_SIZE;
252 251
253 smp->smp_key_size = max_key_size; 252 smp->enc_key_size = max_key_size;
254 253
255 return 0; 254 return 0;
256} 255}
257 256
258static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send) 257static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send)
259{ 258{
259 struct hci_conn *hcon = conn->hcon;
260
260 if (send) 261 if (send)
261 smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), 262 smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
262 &reason); 263 &reason);
263 264
264 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->pend); 265 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->flags);
265 mgmt_auth_failed(conn->hcon->hdev, conn->dst, reason); 266 mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type,
266 cancel_delayed_work_sync(&conn->security_timer); 267 hcon->dst_type, reason);
267 smp_chan_destroy(conn); 268
269 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
270 cancel_delayed_work_sync(&conn->security_timer);
271 smp_chan_destroy(conn);
272 }
268} 273}
269 274
270#define JUST_WORKS 0x00 275#define JUST_WORKS 0x00
@@ -305,7 +310,7 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
305 remote_io > SMP_IO_KEYBOARD_DISPLAY) 310 remote_io > SMP_IO_KEYBOARD_DISPLAY)
306 method = JUST_WORKS; 311 method = JUST_WORKS;
307 else 312 else
308 method = gen_method[local_io][remote_io]; 313 method = gen_method[remote_io][local_io];
309 314
310 /* If not bonding, don't ask user to confirm a Zero TK */ 315 /* If not bonding, don't ask user to confirm a Zero TK */
311 if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM) 316 if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM)
@@ -346,9 +351,11 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
346 hci_dev_lock(hcon->hdev); 351 hci_dev_lock(hcon->hdev);
347 352
348 if (method == REQ_PASSKEY) 353 if (method == REQ_PASSKEY)
349 ret = mgmt_user_passkey_request(hcon->hdev, conn->dst); 354 ret = mgmt_user_passkey_request(hcon->hdev, conn->dst,
355 hcon->type, hcon->dst_type);
350 else 356 else
351 ret = mgmt_user_confirm_request(hcon->hdev, conn->dst, 357 ret = mgmt_user_confirm_request(hcon->hdev, conn->dst,
358 hcon->type, hcon->dst_type,
352 cpu_to_le32(passkey), 0); 359 cpu_to_le32(passkey), 0);
353 360
354 hci_dev_unlock(hcon->hdev); 361 hci_dev_unlock(hcon->hdev);
@@ -377,12 +384,11 @@ static void confirm_work(struct work_struct *work)
377 384
378 if (conn->hcon->out) 385 if (conn->hcon->out)
379 ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp, 0, 386 ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp, 0,
380 conn->src, conn->hcon->dst_type, conn->dst, 387 conn->src, conn->hcon->dst_type, conn->dst, res);
381 res);
382 else 388 else
383 ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp, 389 ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp,
384 conn->hcon->dst_type, conn->dst, 0, conn->src, 390 conn->hcon->dst_type, conn->dst, 0, conn->src,
385 res); 391 res);
386 if (ret) { 392 if (ret) {
387 reason = SMP_UNSPECIFIED; 393 reason = SMP_UNSPECIFIED;
388 goto error; 394 goto error;
@@ -417,12 +423,10 @@ static void random_work(struct work_struct *work)
417 423
418 if (hcon->out) 424 if (hcon->out)
419 ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp, 0, 425 ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp, 0,
420 conn->src, hcon->dst_type, conn->dst, 426 conn->src, hcon->dst_type, conn->dst, res);
421 res);
422 else 427 else
423 ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp, 428 ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp,
424 hcon->dst_type, conn->dst, 0, conn->src, 429 hcon->dst_type, conn->dst, 0, conn->src, res);
425 res);
426 if (ret) { 430 if (ret) {
427 reason = SMP_UNSPECIFIED; 431 reason = SMP_UNSPECIFIED;
428 goto error; 432 goto error;
@@ -446,16 +450,16 @@ static void random_work(struct work_struct *work)
446 smp_s1(tfm, smp->tk, smp->rrnd, smp->prnd, key); 450 smp_s1(tfm, smp->tk, smp->rrnd, smp->prnd, key);
447 swap128(key, stk); 451 swap128(key, stk);
448 452
449 memset(stk + smp->smp_key_size, 0, 453 memset(stk + smp->enc_key_size, 0,
450 SMP_MAX_ENC_KEY_SIZE - smp->smp_key_size); 454 SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
451 455
452 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend)) { 456 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags)) {
453 reason = SMP_UNSPECIFIED; 457 reason = SMP_UNSPECIFIED;
454 goto error; 458 goto error;
455 } 459 }
456 460
457 hci_le_start_enc(hcon, ediv, rand, stk); 461 hci_le_start_enc(hcon, ediv, rand, stk);
458 hcon->enc_key_size = smp->smp_key_size; 462 hcon->enc_key_size = smp->enc_key_size;
459 } else { 463 } else {
460 u8 stk[16], r[16], rand[8]; 464 u8 stk[16], r[16], rand[8];
461 __le16 ediv; 465 __le16 ediv;
@@ -469,11 +473,12 @@ static void random_work(struct work_struct *work)
469 smp_s1(tfm, smp->tk, smp->prnd, smp->rrnd, key); 473 smp_s1(tfm, smp->tk, smp->prnd, smp->rrnd, key);
470 swap128(key, stk); 474 swap128(key, stk);
471 475
472 memset(stk + smp->smp_key_size, 0, 476 memset(stk + smp->enc_key_size, 0,
473 SMP_MAX_ENC_KEY_SIZE - smp->smp_key_size); 477 SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
474 478
475 hci_add_ltk(hcon->hdev, 0, conn->dst, smp->smp_key_size, 479 hci_add_ltk(hcon->hdev, conn->dst, hcon->dst_type,
476 ediv, rand, stk); 480 HCI_SMP_STK_SLAVE, 0, 0, stk, smp->enc_key_size,
481 ediv, rand);
477 } 482 }
478 483
479 return; 484 return;
@@ -506,7 +511,7 @@ void smp_chan_destroy(struct l2cap_conn *conn)
506{ 511{
507 struct smp_chan *smp = conn->smp_chan; 512 struct smp_chan *smp = conn->smp_chan;
508 513
509 clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend); 514 BUG_ON(!smp);
510 515
511 if (smp->tfm) 516 if (smp->tfm)
512 crypto_free_blkcipher(smp->tfm); 517 crypto_free_blkcipher(smp->tfm);
@@ -571,7 +576,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
571 if (conn->hcon->link_mode & HCI_LM_MASTER) 576 if (conn->hcon->link_mode & HCI_LM_MASTER)
572 return SMP_CMD_NOTSUPP; 577 return SMP_CMD_NOTSUPP;
573 578
574 if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend)) 579 if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
575 smp = smp_chan_create(conn); 580 smp = smp_chan_create(conn);
576 581
577 smp = conn->smp_chan; 582 smp = conn->smp_chan;
@@ -584,6 +589,8 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
584 if (req->auth_req & SMP_AUTH_BONDING) 589 if (req->auth_req & SMP_AUTH_BONDING)
585 auth = req->auth_req; 590 auth = req->auth_req;
586 591
592 conn->hcon->pending_sec_level = authreq_to_seclevel(auth);
593
587 build_pairing_cmd(conn, req, &rsp, auth); 594 build_pairing_cmd(conn, req, &rsp, auth);
588 595
589 key_size = min(req->max_key_size, rsp.max_key_size); 596 key_size = min(req->max_key_size, rsp.max_key_size);
@@ -698,23 +705,18 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
698 705
699static u8 smp_ltk_encrypt(struct l2cap_conn *conn) 706static u8 smp_ltk_encrypt(struct l2cap_conn *conn)
700{ 707{
701 struct link_key *key; 708 struct smp_ltk *key;
702 struct key_master_id *master;
703 struct hci_conn *hcon = conn->hcon; 709 struct hci_conn *hcon = conn->hcon;
704 710
705 key = hci_find_link_key_type(hcon->hdev, conn->dst, 711 key = hci_find_ltk_by_addr(hcon->hdev, conn->dst, hcon->dst_type);
706 HCI_LK_SMP_LTK);
707 if (!key) 712 if (!key)
708 return 0; 713 return 0;
709 714
710 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, 715 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
711 &hcon->pend))
712 return 1; 716 return 1;
713 717
714 master = (void *) key->data; 718 hci_le_start_enc(hcon, key->ediv, key->rand, key->val);
715 hci_le_start_enc(hcon, master->ediv, master->rand, 719 hcon->enc_key_size = key->enc_size;
716 key->val);
717 hcon->enc_key_size = key->pin_len;
718 720
719 return 1; 721 return 1;
720 722
@@ -733,7 +735,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
733 if (smp_ltk_encrypt(conn)) 735 if (smp_ltk_encrypt(conn))
734 return 0; 736 return 0;
735 737
736 if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) 738 if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
737 return 0; 739 return 0;
738 740
739 smp = smp_chan_create(conn); 741 smp = smp_chan_create(conn);
@@ -772,7 +774,7 @@ int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
772 if (smp_ltk_encrypt(conn)) 774 if (smp_ltk_encrypt(conn))
773 goto done; 775 goto done;
774 776
775 if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) 777 if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
776 return 0; 778 return 0;
777 779
778 smp = smp_chan_create(conn); 780 smp = smp_chan_create(conn);
@@ -817,13 +819,19 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
817{ 819{
818 struct smp_cmd_master_ident *rp = (void *) skb->data; 820 struct smp_cmd_master_ident *rp = (void *) skb->data;
819 struct smp_chan *smp = conn->smp_chan; 821 struct smp_chan *smp = conn->smp_chan;
822 struct hci_dev *hdev = conn->hcon->hdev;
823 struct hci_conn *hcon = conn->hcon;
824 u8 authenticated;
820 825
821 skb_pull(skb, sizeof(*rp)); 826 skb_pull(skb, sizeof(*rp));
822 827
823 hci_add_ltk(conn->hcon->hdev, 1, conn->dst, smp->smp_key_size, 828 hci_dev_lock(hdev);
824 rp->ediv, rp->rand, smp->tk); 829 authenticated = (conn->hcon->sec_level == BT_SECURITY_HIGH);
825 830 hci_add_ltk(conn->hcon->hdev, conn->dst, hcon->dst_type,
831 HCI_SMP_LTK, 1, authenticated, smp->tk, smp->enc_key_size,
832 rp->ediv, rp->rand);
826 smp_distribute_keys(conn, 1); 833 smp_distribute_keys(conn, 1);
834 hci_dev_unlock(hdev);
827 835
828 return 0; 836 return 0;
829} 837}
@@ -908,7 +916,7 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
908 916
909 BT_DBG("conn %p force %d", conn, force); 917 BT_DBG("conn %p force %d", conn, force);
910 918
911 if (!test_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend)) 919 if (!test_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
912 return 0; 920 return 0;
913 921
914 rsp = (void *) &smp->prsp[1]; 922 rsp = (void *) &smp->prsp[1];
@@ -933,6 +941,8 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
933 if (*keydist & SMP_DIST_ENC_KEY) { 941 if (*keydist & SMP_DIST_ENC_KEY) {
934 struct smp_cmd_encrypt_info enc; 942 struct smp_cmd_encrypt_info enc;
935 struct smp_cmd_master_ident ident; 943 struct smp_cmd_master_ident ident;
944 struct hci_conn *hcon = conn->hcon;
945 u8 authenticated;
936 __le16 ediv; 946 __le16 ediv;
937 947
938 get_random_bytes(enc.ltk, sizeof(enc.ltk)); 948 get_random_bytes(enc.ltk, sizeof(enc.ltk));
@@ -941,8 +951,10 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
941 951
942 smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc); 952 smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc);
943 953
944 hci_add_ltk(conn->hcon->hdev, 1, conn->dst, smp->smp_key_size, 954 authenticated = hcon->sec_level == BT_SECURITY_HIGH;
945 ediv, ident.rand, enc.ltk); 955 hci_add_ltk(conn->hcon->hdev, conn->dst, hcon->dst_type,
956 HCI_SMP_LTK_SLAVE, 1, authenticated,
957 enc.ltk, smp->enc_key_size, ediv, ident.rand);
946 958
947 ident.ediv = cpu_to_le16(ediv); 959 ident.ediv = cpu_to_le16(ediv);
948 960
@@ -982,7 +994,7 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
982 } 994 }
983 995
984 if (conn->hcon->out || force) { 996 if (conn->hcon->out || force) {
985 clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend); 997 clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags);
986 cancel_delayed_work_sync(&conn->security_timer); 998 cancel_delayed_work_sync(&conn->security_timer);
987 smp_chan_destroy(conn); 999 smp_chan_destroy(conn);
988 } 1000 }
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 71773b014e0c..ba829de84423 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -167,10 +167,11 @@ static int br_set_mac_address(struct net_device *dev, void *p)
167 struct sockaddr *addr = p; 167 struct sockaddr *addr = p;
168 168
169 if (!is_valid_ether_addr(addr->sa_data)) 169 if (!is_valid_ether_addr(addr->sa_data))
170 return -EINVAL; 170 return -EADDRNOTAVAIL;
171 171
172 spin_lock_bh(&br->lock); 172 spin_lock_bh(&br->lock);
173 if (compare_ether_addr(dev->dev_addr, addr->sa_data)) { 173 if (compare_ether_addr(dev->dev_addr, addr->sa_data)) {
174 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
174 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 175 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
175 br_fdb_change_mac_address(br, addr->sa_data); 176 br_fdb_change_mac_address(br, addr->sa_data);
176 br_stp_change_bridge_id(br, addr->sa_data); 177 br_stp_change_bridge_id(br, addr->sa_data);
@@ -334,7 +335,7 @@ void br_dev_setup(struct net_device *dev)
334{ 335{
335 struct net_bridge *br = netdev_priv(dev); 336 struct net_bridge *br = netdev_priv(dev);
336 337
337 random_ether_addr(dev->dev_addr); 338 eth_hw_addr_random(dev);
338 ether_setup(dev); 339 ether_setup(dev);
339 340
340 dev->netdev_ops = &br_netdev_ops; 341 dev->netdev_ops = &br_netdev_ops;
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 82c57069415f..aa6f716524fd 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -162,7 +162,6 @@ void caif_flow_cb(struct sk_buff *skb)
162static int transmit(struct cflayer *layer, struct cfpkt *pkt) 162static int transmit(struct cflayer *layer, struct cfpkt *pkt)
163{ 163{
164 int err, high = 0, qlen = 0; 164 int err, high = 0, qlen = 0;
165 struct caif_dev_common *caifdev;
166 struct caif_device_entry *caifd = 165 struct caif_device_entry *caifd =
167 container_of(layer, struct caif_device_entry, layer); 166 container_of(layer, struct caif_device_entry, layer);
168 struct sk_buff *skb; 167 struct sk_buff *skb;
@@ -174,7 +173,6 @@ static int transmit(struct cflayer *layer, struct cfpkt *pkt)
174 skb->dev = caifd->netdev; 173 skb->dev = caifd->netdev;
175 skb_reset_network_header(skb); 174 skb_reset_network_header(skb);
176 skb->protocol = htons(ETH_P_CAIF); 175 skb->protocol = htons(ETH_P_CAIF);
177 caifdev = netdev_priv(caifd->netdev);
178 176
179 /* Check if we need to handle xoff */ 177 /* Check if we need to handle xoff */
180 if (likely(caifd->netdev->tx_queue_len == 0)) 178 if (likely(caifd->netdev->tx_queue_len == 0))
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index a97d97a3a512..5016fa57b623 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -43,34 +43,9 @@ enum caif_states {
43#define TX_FLOW_ON_BIT 1 43#define TX_FLOW_ON_BIT 1
44#define RX_FLOW_ON_BIT 2 44#define RX_FLOW_ON_BIT 2
45 45
46static struct dentry *debugfsdir;
47
48#ifdef CONFIG_DEBUG_FS
49struct debug_fs_counter {
50 atomic_t caif_nr_socks;
51 atomic_t caif_sock_create;
52 atomic_t num_connect_req;
53 atomic_t num_connect_resp;
54 atomic_t num_connect_fail_resp;
55 atomic_t num_disconnect;
56 atomic_t num_remote_shutdown_ind;
57 atomic_t num_tx_flow_off_ind;
58 atomic_t num_tx_flow_on_ind;
59 atomic_t num_rx_flow_off;
60 atomic_t num_rx_flow_on;
61};
62static struct debug_fs_counter cnt;
63#define dbfs_atomic_inc(v) atomic_inc_return(v)
64#define dbfs_atomic_dec(v) atomic_dec_return(v)
65#else
66#define dbfs_atomic_inc(v) 0
67#define dbfs_atomic_dec(v) 0
68#endif
69
70struct caifsock { 46struct caifsock {
71 struct sock sk; /* must be first member */ 47 struct sock sk; /* must be first member */
72 struct cflayer layer; 48 struct cflayer layer;
73 char name[CAIF_LAYER_NAME_SZ]; /* Used for debugging */
74 u32 flow_state; 49 u32 flow_state;
75 struct caif_connect_request conn_req; 50 struct caif_connect_request conn_req;
76 struct mutex readlock; 51 struct mutex readlock;
@@ -161,7 +136,6 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
161 atomic_read(&cf_sk->sk.sk_rmem_alloc), 136 atomic_read(&cf_sk->sk.sk_rmem_alloc),
162 sk_rcvbuf_lowwater(cf_sk)); 137 sk_rcvbuf_lowwater(cf_sk));
163 set_rx_flow_off(cf_sk); 138 set_rx_flow_off(cf_sk);
164 dbfs_atomic_inc(&cnt.num_rx_flow_off);
165 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); 139 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
166 } 140 }
167 141
@@ -172,7 +146,6 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
172 set_rx_flow_off(cf_sk); 146 set_rx_flow_off(cf_sk);
173 if (net_ratelimit()) 147 if (net_ratelimit())
174 pr_debug("sending flow OFF due to rmem_schedule\n"); 148 pr_debug("sending flow OFF due to rmem_schedule\n");
175 dbfs_atomic_inc(&cnt.num_rx_flow_off);
176 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); 149 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
177 } 150 }
178 skb->dev = NULL; 151 skb->dev = NULL;
@@ -233,14 +206,12 @@ static void caif_ctrl_cb(struct cflayer *layr,
233 switch (flow) { 206 switch (flow) {
234 case CAIF_CTRLCMD_FLOW_ON_IND: 207 case CAIF_CTRLCMD_FLOW_ON_IND:
235 /* OK from modem to start sending again */ 208 /* OK from modem to start sending again */
236 dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
237 set_tx_flow_on(cf_sk); 209 set_tx_flow_on(cf_sk);
238 cf_sk->sk.sk_state_change(&cf_sk->sk); 210 cf_sk->sk.sk_state_change(&cf_sk->sk);
239 break; 211 break;
240 212
241 case CAIF_CTRLCMD_FLOW_OFF_IND: 213 case CAIF_CTRLCMD_FLOW_OFF_IND:
242 /* Modem asks us to shut up */ 214 /* Modem asks us to shut up */
243 dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
244 set_tx_flow_off(cf_sk); 215 set_tx_flow_off(cf_sk);
245 cf_sk->sk.sk_state_change(&cf_sk->sk); 216 cf_sk->sk.sk_state_change(&cf_sk->sk);
246 break; 217 break;
@@ -249,7 +220,6 @@ static void caif_ctrl_cb(struct cflayer *layr,
249 /* We're now connected */ 220 /* We're now connected */
250 caif_client_register_refcnt(&cf_sk->layer, 221 caif_client_register_refcnt(&cf_sk->layer,
251 cfsk_hold, cfsk_put); 222 cfsk_hold, cfsk_put);
252 dbfs_atomic_inc(&cnt.num_connect_resp);
253 cf_sk->sk.sk_state = CAIF_CONNECTED; 223 cf_sk->sk.sk_state = CAIF_CONNECTED;
254 set_tx_flow_on(cf_sk); 224 set_tx_flow_on(cf_sk);
255 cf_sk->sk.sk_state_change(&cf_sk->sk); 225 cf_sk->sk.sk_state_change(&cf_sk->sk);
@@ -263,7 +233,6 @@ static void caif_ctrl_cb(struct cflayer *layr,
263 233
264 case CAIF_CTRLCMD_INIT_FAIL_RSP: 234 case CAIF_CTRLCMD_INIT_FAIL_RSP:
265 /* Connect request failed */ 235 /* Connect request failed */
266 dbfs_atomic_inc(&cnt.num_connect_fail_resp);
267 cf_sk->sk.sk_err = ECONNREFUSED; 236 cf_sk->sk.sk_err = ECONNREFUSED;
268 cf_sk->sk.sk_state = CAIF_DISCONNECTED; 237 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
269 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK; 238 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
@@ -277,7 +246,6 @@ static void caif_ctrl_cb(struct cflayer *layr,
277 246
278 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND: 247 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
279 /* Modem has closed this connection, or device is down. */ 248 /* Modem has closed this connection, or device is down. */
280 dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
281 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK; 249 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
282 cf_sk->sk.sk_err = ECONNRESET; 250 cf_sk->sk.sk_err = ECONNRESET;
283 set_rx_flow_on(cf_sk); 251 set_rx_flow_on(cf_sk);
@@ -297,7 +265,6 @@ static void caif_check_flow_release(struct sock *sk)
297 return; 265 return;
298 266
299 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) { 267 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
300 dbfs_atomic_inc(&cnt.num_rx_flow_on);
301 set_rx_flow_on(cf_sk); 268 set_rx_flow_on(cf_sk);
302 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ); 269 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
303 } 270 }
@@ -856,7 +823,6 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
856 /*ifindex = id of the interface.*/ 823 /*ifindex = id of the interface.*/
857 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if; 824 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
858 825
859 dbfs_atomic_inc(&cnt.num_connect_req);
860 cf_sk->layer.receive = caif_sktrecv_cb; 826 cf_sk->layer.receive = caif_sktrecv_cb;
861 827
862 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req, 828 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
@@ -945,8 +911,6 @@ static int caif_release(struct socket *sock)
945 spin_unlock_bh(&sk->sk_receive_queue.lock); 911 spin_unlock_bh(&sk->sk_receive_queue.lock);
946 sock->sk = NULL; 912 sock->sk = NULL;
947 913
948 dbfs_atomic_inc(&cnt.num_disconnect);
949
950 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir)); 914 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
951 if (cf_sk->debugfs_socket_dir != NULL) 915 if (cf_sk->debugfs_socket_dir != NULL)
952 debugfs_remove_recursive(cf_sk->debugfs_socket_dir); 916 debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
@@ -1054,14 +1018,12 @@ static void caif_sock_destructor(struct sock *sk)
1054 return; 1018 return;
1055 } 1019 }
1056 sk_stream_kill_queues(&cf_sk->sk); 1020 sk_stream_kill_queues(&cf_sk->sk);
1057 dbfs_atomic_dec(&cnt.caif_nr_socks);
1058 caif_free_client(&cf_sk->layer); 1021 caif_free_client(&cf_sk->layer);
1059} 1022}
1060 1023
1061static int caif_create(struct net *net, struct socket *sock, int protocol, 1024static int caif_create(struct net *net, struct socket *sock, int protocol,
1062 int kern) 1025 int kern)
1063{ 1026{
1064 int num;
1065 struct sock *sk = NULL; 1027 struct sock *sk = NULL;
1066 struct caifsock *cf_sk = NULL; 1028 struct caifsock *cf_sk = NULL;
1067 static struct proto prot = {.name = "PF_CAIF", 1029 static struct proto prot = {.name = "PF_CAIF",
@@ -1122,34 +1084,6 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
1122 cf_sk->sk.sk_priority = CAIF_PRIO_NORMAL; 1084 cf_sk->sk.sk_priority = CAIF_PRIO_NORMAL;
1123 cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY; 1085 cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
1124 cf_sk->conn_req.protocol = protocol; 1086 cf_sk->conn_req.protocol = protocol;
1125 /* Increase the number of sockets created. */
1126 dbfs_atomic_inc(&cnt.caif_nr_socks);
1127 num = dbfs_atomic_inc(&cnt.caif_sock_create);
1128#ifdef CONFIG_DEBUG_FS
1129 if (!IS_ERR(debugfsdir)) {
1130
1131 /* Fill in some information concerning the misc socket. */
1132 snprintf(cf_sk->name, sizeof(cf_sk->name), "cfsk%d", num);
1133
1134 cf_sk->debugfs_socket_dir =
1135 debugfs_create_dir(cf_sk->name, debugfsdir);
1136
1137 debugfs_create_u32("sk_state", S_IRUSR | S_IWUSR,
1138 cf_sk->debugfs_socket_dir,
1139 (u32 *) &cf_sk->sk.sk_state);
1140 debugfs_create_u32("flow_state", S_IRUSR | S_IWUSR,
1141 cf_sk->debugfs_socket_dir, &cf_sk->flow_state);
1142 debugfs_create_u32("sk_rmem_alloc", S_IRUSR | S_IWUSR,
1143 cf_sk->debugfs_socket_dir,
1144 (u32 *) &cf_sk->sk.sk_rmem_alloc);
1145 debugfs_create_u32("sk_wmem_alloc", S_IRUSR | S_IWUSR,
1146 cf_sk->debugfs_socket_dir,
1147 (u32 *) &cf_sk->sk.sk_wmem_alloc);
1148 debugfs_create_u32("identity", S_IRUSR | S_IWUSR,
1149 cf_sk->debugfs_socket_dir,
1150 (u32 *) &cf_sk->layer.id);
1151 }
1152#endif
1153 release_sock(&cf_sk->sk); 1087 release_sock(&cf_sk->sk);
1154 return 0; 1088 return 0;
1155} 1089}
@@ -1161,7 +1095,7 @@ static struct net_proto_family caif_family_ops = {
1161 .owner = THIS_MODULE, 1095 .owner = THIS_MODULE,
1162}; 1096};
1163 1097
1164static int af_caif_init(void) 1098static int __init caif_sktinit_module(void)
1165{ 1099{
1166 int err = sock_register(&caif_family_ops); 1100 int err = sock_register(&caif_family_ops);
1167 if (!err) 1101 if (!err)
@@ -1169,54 +1103,9 @@ static int af_caif_init(void)
1169 return 0; 1103 return 0;
1170} 1104}
1171 1105
1172static int __init caif_sktinit_module(void)
1173{
1174#ifdef CONFIG_DEBUG_FS
1175 debugfsdir = debugfs_create_dir("caif_sk", NULL);
1176 if (!IS_ERR(debugfsdir)) {
1177 debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR,
1178 debugfsdir,
1179 (u32 *) &cnt.caif_nr_socks);
1180 debugfs_create_u32("num_create", S_IRUSR | S_IWUSR,
1181 debugfsdir,
1182 (u32 *) &cnt.caif_sock_create);
1183 debugfs_create_u32("num_connect_req", S_IRUSR | S_IWUSR,
1184 debugfsdir,
1185 (u32 *) &cnt.num_connect_req);
1186 debugfs_create_u32("num_connect_resp", S_IRUSR | S_IWUSR,
1187 debugfsdir,
1188 (u32 *) &cnt.num_connect_resp);
1189 debugfs_create_u32("num_connect_fail_resp", S_IRUSR | S_IWUSR,
1190 debugfsdir,
1191 (u32 *) &cnt.num_connect_fail_resp);
1192 debugfs_create_u32("num_disconnect", S_IRUSR | S_IWUSR,
1193 debugfsdir,
1194 (u32 *) &cnt.num_disconnect);
1195 debugfs_create_u32("num_remote_shutdown_ind",
1196 S_IRUSR | S_IWUSR, debugfsdir,
1197 (u32 *) &cnt.num_remote_shutdown_ind);
1198 debugfs_create_u32("num_tx_flow_off_ind", S_IRUSR | S_IWUSR,
1199 debugfsdir,
1200 (u32 *) &cnt.num_tx_flow_off_ind);
1201 debugfs_create_u32("num_tx_flow_on_ind", S_IRUSR | S_IWUSR,
1202 debugfsdir,
1203 (u32 *) &cnt.num_tx_flow_on_ind);
1204 debugfs_create_u32("num_rx_flow_off", S_IRUSR | S_IWUSR,
1205 debugfsdir,
1206 (u32 *) &cnt.num_rx_flow_off);
1207 debugfs_create_u32("num_rx_flow_on", S_IRUSR | S_IWUSR,
1208 debugfsdir,
1209 (u32 *) &cnt.num_rx_flow_on);
1210 }
1211#endif
1212 return af_caif_init();
1213}
1214
1215static void __exit caif_sktexit_module(void) 1106static void __exit caif_sktexit_module(void)
1216{ 1107{
1217 sock_unregister(PF_CAIF); 1108 sock_unregister(PF_CAIF);
1218 if (debugfsdir != NULL)
1219 debugfs_remove_recursive(debugfsdir);
1220} 1109}
1221module_init(caif_sktinit_module); 1110module_init(caif_sktinit_module);
1222module_exit(caif_sktexit_module); 1111module_exit(caif_sktexit_module);
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
index 65d6ef3cf9aa..2914659eb9b2 100644
--- a/net/caif/cfdbgl.c
+++ b/net/caif/cfdbgl.c
@@ -41,8 +41,10 @@ static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt)
41 struct caif_payload_info *info; 41 struct caif_payload_info *info;
42 int ret; 42 int ret;
43 43
44 if (!cfsrvl_ready(service, &ret)) 44 if (!cfsrvl_ready(service, &ret)) {
45 cfpkt_destroy(pkt);
45 return ret; 46 return ret;
47 }
46 48
47 /* Add info for MUX-layer to route the packet out */ 49 /* Add info for MUX-layer to route the packet out */
48 info = cfpkt_info(pkt); 50 info = cfpkt_info(pkt);
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index 0f5ff27aa41c..a63f4a5f5aff 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -86,12 +86,17 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
86 struct caif_payload_info *info; 86 struct caif_payload_info *info;
87 struct cfsrvl *service = container_obj(layr); 87 struct cfsrvl *service = container_obj(layr);
88 int ret; 88 int ret;
89 if (!cfsrvl_ready(service, &ret)) 89
90 if (!cfsrvl_ready(service, &ret)) {
91 cfpkt_destroy(pkt);
90 return ret; 92 return ret;
93 }
91 94
92 /* STE Modem cannot handle more than 1500 bytes datagrams */ 95 /* STE Modem cannot handle more than 1500 bytes datagrams */
93 if (cfpkt_getlen(pkt) > DGM_MTU) 96 if (cfpkt_getlen(pkt) > DGM_MTU) {
97 cfpkt_destroy(pkt);
94 return -EMSGSIZE; 98 return -EMSGSIZE;
99 }
95 100
96 cfpkt_add_head(pkt, &zero, 3); 101 cfpkt_add_head(pkt, &zero, 3);
97 packet_type = 0x08; /* B9 set - UNCLASSIFIED */ 102 packet_type = 0x08; /* B9 set - UNCLASSIFIED */
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index 6dc75d4f8d94..2b563ad04597 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -184,6 +184,11 @@ out:
184 rfml->serv.dev_info.id); 184 rfml->serv.dev_info.id);
185 } 185 }
186 spin_unlock(&rfml->sync); 186 spin_unlock(&rfml->sync);
187
188 if (unlikely(err == -EAGAIN))
189 /* It is not possible to recover after drop of a fragment */
190 err = -EIO;
191
187 return err; 192 return err;
188} 193}
189 194
@@ -218,7 +223,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
218 caif_assert(layr->dn->transmit != NULL); 223 caif_assert(layr->dn->transmit != NULL);
219 224
220 if (!cfsrvl_ready(&rfml->serv, &err)) 225 if (!cfsrvl_ready(&rfml->serv, &err))
221 return err; 226 goto out;
222 227
223 err = -EPROTO; 228 err = -EPROTO;
224 if (cfpkt_getlen(pkt) <= RFM_HEAD_SIZE-1) 229 if (cfpkt_getlen(pkt) <= RFM_HEAD_SIZE-1)
@@ -251,8 +256,11 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
251 256
252 err = cfrfml_transmit_segment(rfml, frontpkt); 257 err = cfrfml_transmit_segment(rfml, frontpkt);
253 258
254 if (err != 0) 259 if (err != 0) {
260 frontpkt = NULL;
255 goto out; 261 goto out;
262 }
263
256 frontpkt = rearpkt; 264 frontpkt = rearpkt;
257 rearpkt = NULL; 265 rearpkt = NULL;
258 266
@@ -286,19 +294,8 @@ out:
286 if (rearpkt) 294 if (rearpkt)
287 cfpkt_destroy(rearpkt); 295 cfpkt_destroy(rearpkt);
288 296
289 if (frontpkt && frontpkt != pkt) { 297 if (frontpkt)
290
291 cfpkt_destroy(frontpkt); 298 cfpkt_destroy(frontpkt);
292 /*
293 * Socket layer will free the original packet,
294 * but this packet may already be sent and
295 * freed. So we have to return 0 in this case
296 * to avoid socket layer to re-free this packet.
297 * The return of shutdown indication will
298 * cause connection to be invalidated anyhow.
299 */
300 err = 0;
301 }
302 } 299 }
303 300
304 return err; 301 return err;
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index b99f5b22689d..4aa33d4496b6 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -174,15 +174,11 @@ void cfsrvl_init(struct cfsrvl *service,
174 174
175bool cfsrvl_ready(struct cfsrvl *service, int *err) 175bool cfsrvl_ready(struct cfsrvl *service, int *err)
176{ 176{
177 if (service->open && service->modem_flow_on && service->phy_flow_on)
178 return true;
179 if (!service->open) { 177 if (!service->open) {
180 *err = -ENOTCONN; 178 *err = -ENOTCONN;
181 return false; 179 return false;
182 } 180 }
183 caif_assert(!(service->modem_flow_on && service->phy_flow_on)); 181 return true;
184 *err = -EAGAIN;
185 return false;
186} 182}
187 183
188u8 cfsrvl_getphyid(struct cflayer *layer) 184u8 cfsrvl_getphyid(struct cflayer *layer)
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
index 53e49f3e3af3..86d2dadb4b73 100644
--- a/net/caif/cfutill.c
+++ b/net/caif/cfutill.c
@@ -84,8 +84,11 @@ static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt)
84 caif_assert(layr != NULL); 84 caif_assert(layr != NULL);
85 caif_assert(layr->dn != NULL); 85 caif_assert(layr->dn != NULL);
86 caif_assert(layr->dn->transmit != NULL); 86 caif_assert(layr->dn->transmit != NULL);
87 if (!cfsrvl_ready(service, &ret)) 87
88 if (!cfsrvl_ready(service, &ret)) {
89 cfpkt_destroy(pkt);
88 return ret; 90 return ret;
91 }
89 92
90 cfpkt_add_head(pkt, &zero, 1); 93 cfpkt_add_head(pkt, &zero, 1);
91 /* Add info for MUX-layer to route the packet out. */ 94 /* Add info for MUX-layer to route the packet out. */
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c
index e3f37db40ac3..a8e2a2d758a5 100644
--- a/net/caif/cfvidl.c
+++ b/net/caif/cfvidl.c
@@ -50,8 +50,12 @@ static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt)
50 struct caif_payload_info *info; 50 struct caif_payload_info *info;
51 u32 videoheader = 0; 51 u32 videoheader = 0;
52 int ret; 52 int ret;
53 if (!cfsrvl_ready(service, &ret)) 53
54 if (!cfsrvl_ready(service, &ret)) {
55 cfpkt_destroy(pkt);
54 return ret; 56 return ret;
57 }
58
55 cfpkt_add_head(pkt, &videoheader, 4); 59 cfpkt_add_head(pkt, &videoheader, 4);
56 /* Add info for MUX-layer to route the packet out */ 60 /* Add info for MUX-layer to route the packet out */
57 info = cfpkt_info(pkt); 61 info = cfpkt_info(pkt);
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 865690948bbc..20618dd3088b 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -28,6 +28,7 @@
28/* 5 sec. connect timeout */ 28/* 5 sec. connect timeout */
29#define CONNECT_TIMEOUT (5 * HZ) 29#define CONNECT_TIMEOUT (5 * HZ)
30#define CAIF_NET_DEFAULT_QUEUE_LEN 500 30#define CAIF_NET_DEFAULT_QUEUE_LEN 500
31#define UNDEF_CONNID 0xffffffff
31 32
32/*This list is protected by the rtnl lock. */ 33/*This list is protected by the rtnl lock. */
33static LIST_HEAD(chnl_net_list); 34static LIST_HEAD(chnl_net_list);
@@ -72,14 +73,12 @@ static void robust_list_del(struct list_head *delete_node)
72static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt) 73static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
73{ 74{
74 struct sk_buff *skb; 75 struct sk_buff *skb;
75 struct chnl_net *priv = container_of(layr, struct chnl_net, chnl); 76 struct chnl_net *priv;
76 int pktlen; 77 int pktlen;
77 int err = 0;
78 const u8 *ip_version; 78 const u8 *ip_version;
79 u8 buf; 79 u8 buf;
80 80
81 priv = container_of(layr, struct chnl_net, chnl); 81 priv = container_of(layr, struct chnl_net, chnl);
82
83 if (!priv) 82 if (!priv)
84 return -EINVAL; 83 return -EINVAL;
85 84
@@ -95,8 +94,7 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
95 94
96 /* check the version of IP */ 95 /* check the version of IP */
97 ip_version = skb_header_pointer(skb, 0, 1, &buf); 96 ip_version = skb_header_pointer(skb, 0, 1, &buf);
98 if (!ip_version) 97
99 return -EINVAL;
100 switch (*ip_version >> 4) { 98 switch (*ip_version >> 4) {
101 case 4: 99 case 4:
102 skb->protocol = htons(ETH_P_IP); 100 skb->protocol = htons(ETH_P_IP);
@@ -105,6 +103,7 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
105 skb->protocol = htons(ETH_P_IPV6); 103 skb->protocol = htons(ETH_P_IPV6);
106 break; 104 break;
107 default: 105 default:
106 priv->netdev->stats.rx_errors++;
108 return -EINVAL; 107 return -EINVAL;
109 } 108 }
110 109
@@ -123,7 +122,7 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
123 priv->netdev->stats.rx_packets++; 122 priv->netdev->stats.rx_packets++;
124 priv->netdev->stats.rx_bytes += pktlen; 123 priv->netdev->stats.rx_bytes += pktlen;
125 124
126 return err; 125 return 0;
127} 126}
128 127
129static int delete_device(struct chnl_net *dev) 128static int delete_device(struct chnl_net *dev)
@@ -221,11 +220,13 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
221 220
222 if (skb->len > priv->netdev->mtu) { 221 if (skb->len > priv->netdev->mtu) {
223 pr_warn("Size of skb exceeded MTU\n"); 222 pr_warn("Size of skb exceeded MTU\n");
223 dev->stats.tx_errors++;
224 return -ENOSPC; 224 return -ENOSPC;
225 } 225 }
226 226
227 if (!priv->flowenabled) { 227 if (!priv->flowenabled) {
228 pr_debug("dropping packets flow off\n"); 228 pr_debug("dropping packets flow off\n");
229 dev->stats.tx_dropped++;
229 return NETDEV_TX_BUSY; 230 return NETDEV_TX_BUSY;
230 } 231 }
231 232
@@ -240,8 +241,7 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
240 /* Send the packet down the stack. */ 241 /* Send the packet down the stack. */
241 result = priv->chnl.dn->transmit(priv->chnl.dn, pkt); 242 result = priv->chnl.dn->transmit(priv->chnl.dn, pkt);
242 if (result) { 243 if (result) {
243 if (result == -EAGAIN) 244 dev->stats.tx_dropped++;
244 result = NETDEV_TX_BUSY;
245 return result; 245 return result;
246 } 246 }
247 247
@@ -409,7 +409,7 @@ static void ipcaif_net_setup(struct net_device *dev)
409 priv->conn_req.link_selector = CAIF_LINK_HIGH_BANDW; 409 priv->conn_req.link_selector = CAIF_LINK_HIGH_BANDW;
410 priv->conn_req.priority = CAIF_PRIO_LOW; 410 priv->conn_req.priority = CAIF_PRIO_LOW;
411 /* Insert illegal value */ 411 /* Insert illegal value */
412 priv->conn_req.sockaddr.u.dgm.connection_id = 0; 412 priv->conn_req.sockaddr.u.dgm.connection_id = UNDEF_CONNID;
413 priv->flowenabled = false; 413 priv->flowenabled = false;
414 414
415 init_waitqueue_head(&priv->netmgmt_wq); 415 init_waitqueue_head(&priv->netmgmt_wq);
@@ -472,9 +472,11 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
472 else 472 else
473 list_add(&caifdev->list_field, &chnl_net_list); 473 list_add(&caifdev->list_field, &chnl_net_list);
474 474
475 /* Take ifindex as connection-id if null */ 475 /* Use ifindex as connection id, and use loopback channel default. */
476 if (caifdev->conn_req.sockaddr.u.dgm.connection_id == 0) 476 if (caifdev->conn_req.sockaddr.u.dgm.connection_id == UNDEF_CONNID) {
477 caifdev->conn_req.sockaddr.u.dgm.connection_id = dev->ifindex; 477 caifdev->conn_req.sockaddr.u.dgm.connection_id = dev->ifindex;
478 caifdev->conn_req.protocol = CAIFPROTO_DATAGRAM_LOOP;
479 }
478 return ret; 480 return ret;
479} 481}
480 482
diff --git a/net/compat.c b/net/compat.c
index 6def90e0a112..64b4515a64e6 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -79,7 +79,7 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
79 79
80/* I've named the args so it is easy to tell whose space the pointers are in. */ 80/* I've named the args so it is easy to tell whose space the pointers are in. */
81int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, 81int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
82 struct sockaddr *kern_address, int mode) 82 struct sockaddr_storage *kern_address, int mode)
83{ 83{
84 int tot_len; 84 int tot_len;
85 85
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 68bbf9f65cb0..d3cf12f62c8f 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -132,6 +132,8 @@ out_noerr:
132 * __skb_recv_datagram - Receive a datagram skbuff 132 * __skb_recv_datagram - Receive a datagram skbuff
133 * @sk: socket 133 * @sk: socket
134 * @flags: MSG_ flags 134 * @flags: MSG_ flags
135 * @off: an offset in bytes to peek skb from. Returns an offset
136 * within an skb where data actually starts
135 * @peeked: returns non-zero if this packet has been seen before 137 * @peeked: returns non-zero if this packet has been seen before
136 * @err: error code returned 138 * @err: error code returned
137 * 139 *
@@ -158,7 +160,7 @@ out_noerr:
158 * the standard around please. 160 * the standard around please.
159 */ 161 */
160struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, 162struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
161 int *peeked, int *err) 163 int *peeked, int *off, int *err)
162{ 164{
163 struct sk_buff *skb; 165 struct sk_buff *skb;
164 long timeo; 166 long timeo;
@@ -180,21 +182,25 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
180 * However, this function was correct in any case. 8) 182 * However, this function was correct in any case. 8)
181 */ 183 */
182 unsigned long cpu_flags; 184 unsigned long cpu_flags;
185 struct sk_buff_head *queue = &sk->sk_receive_queue;
183 186
184 spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); 187 spin_lock_irqsave(&queue->lock, cpu_flags);
185 skb = skb_peek(&sk->sk_receive_queue); 188 skb_queue_walk(queue, skb) {
186 if (skb) {
187 *peeked = skb->peeked; 189 *peeked = skb->peeked;
188 if (flags & MSG_PEEK) { 190 if (flags & MSG_PEEK) {
191 if (*off >= skb->len) {
192 *off -= skb->len;
193 continue;
194 }
189 skb->peeked = 1; 195 skb->peeked = 1;
190 atomic_inc(&skb->users); 196 atomic_inc(&skb->users);
191 } else 197 } else
192 __skb_unlink(skb, &sk->sk_receive_queue); 198 __skb_unlink(skb, queue);
193 }
194 spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
195 199
196 if (skb) 200 spin_unlock_irqrestore(&queue->lock, cpu_flags);
197 return skb; 201 return skb;
202 }
203 spin_unlock_irqrestore(&queue->lock, cpu_flags);
198 204
199 /* User doesn't want to wait */ 205 /* User doesn't want to wait */
200 error = -EAGAIN; 206 error = -EAGAIN;
@@ -214,10 +220,10 @@ EXPORT_SYMBOL(__skb_recv_datagram);
214struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, 220struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
215 int noblock, int *err) 221 int noblock, int *err)
216{ 222{
217 int peeked; 223 int peeked, off = 0;
218 224
219 return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), 225 return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
220 &peeked, err); 226 &peeked, &off, err);
221} 227}
222EXPORT_SYMBOL(skb_recv_datagram); 228EXPORT_SYMBOL(skb_recv_datagram);
223 229
diff --git a/net/core/dev.c b/net/core/dev.c
index 6982bfd6a781..0f3eb7d79a2d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -446,7 +446,7 @@ void __dev_remove_pack(struct packet_type *pt)
446 } 446 }
447 } 447 }
448 448
449 printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt); 449 pr_warn("dev_remove_pack: %p not found\n", pt);
450out: 450out:
451 spin_unlock(&ptype_lock); 451 spin_unlock(&ptype_lock);
452} 452}
@@ -848,21 +848,21 @@ EXPORT_SYMBOL(dev_get_by_flags_rcu);
848 * to allow sysfs to work. We also disallow any kind of 848 * to allow sysfs to work. We also disallow any kind of
849 * whitespace. 849 * whitespace.
850 */ 850 */
851int dev_valid_name(const char *name) 851bool dev_valid_name(const char *name)
852{ 852{
853 if (*name == '\0') 853 if (*name == '\0')
854 return 0; 854 return false;
855 if (strlen(name) >= IFNAMSIZ) 855 if (strlen(name) >= IFNAMSIZ)
856 return 0; 856 return false;
857 if (!strcmp(name, ".") || !strcmp(name, "..")) 857 if (!strcmp(name, ".") || !strcmp(name, ".."))
858 return 0; 858 return false;
859 859
860 while (*name) { 860 while (*name) {
861 if (*name == '/' || isspace(*name)) 861 if (*name == '/' || isspace(*name))
862 return 0; 862 return false;
863 name++; 863 name++;
864 } 864 }
865 return 1; 865 return true;
866} 866}
867EXPORT_SYMBOL(dev_valid_name); 867EXPORT_SYMBOL(dev_valid_name);
868 868
@@ -1039,8 +1039,7 @@ rollback:
1039 memcpy(dev->name, oldname, IFNAMSIZ); 1039 memcpy(dev->name, oldname, IFNAMSIZ);
1040 goto rollback; 1040 goto rollback;
1041 } else { 1041 } else {
1042 printk(KERN_ERR 1042 pr_err("%s: name change rollback failed: %d\n",
1043 "%s: name change rollback failed: %d.\n",
1044 dev->name, ret); 1043 dev->name, ret);
1045 } 1044 }
1046 } 1045 }
@@ -1139,9 +1138,8 @@ void dev_load(struct net *net, const char *name)
1139 no_module = request_module("netdev-%s", name); 1138 no_module = request_module("netdev-%s", name);
1140 if (no_module && capable(CAP_SYS_MODULE)) { 1139 if (no_module && capable(CAP_SYS_MODULE)) {
1141 if (!request_module("%s", name)) 1140 if (!request_module("%s", name))
1142 pr_err("Loading kernel module for a network device " 1141 pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
1143"with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s " 1142 name);
1144"instead\n", name);
1145 } 1143 }
1146} 1144}
1147EXPORT_SYMBOL(dev_load); 1145EXPORT_SYMBOL(dev_load);
@@ -1655,10 +1653,9 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1655 if (skb_network_header(skb2) < skb2->data || 1653 if (skb_network_header(skb2) < skb2->data ||
1656 skb2->network_header > skb2->tail) { 1654 skb2->network_header > skb2->tail) {
1657 if (net_ratelimit()) 1655 if (net_ratelimit())
1658 printk(KERN_CRIT "protocol %04x is " 1656 pr_crit("protocol %04x is buggy, dev %s\n",
1659 "buggy, dev %s\n", 1657 ntohs(skb2->protocol),
1660 ntohs(skb2->protocol), 1658 dev->name);
1661 dev->name);
1662 skb_reset_network_header(skb2); 1659 skb_reset_network_header(skb2);
1663 } 1660 }
1664 1661
@@ -1691,9 +1688,7 @@ static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1691 1688
1692 /* If TC0 is invalidated disable TC mapping */ 1689 /* If TC0 is invalidated disable TC mapping */
1693 if (tc->offset + tc->count > txq) { 1690 if (tc->offset + tc->count > txq) {
1694 pr_warning("Number of in use tx queues changed " 1691 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1695 "invalidating tc mappings. Priority "
1696 "traffic classification disabled!\n");
1697 dev->num_tc = 0; 1692 dev->num_tc = 0;
1698 return; 1693 return;
1699 } 1694 }
@@ -1704,11 +1699,8 @@ static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1704 1699
1705 tc = &dev->tc_to_txq[q]; 1700 tc = &dev->tc_to_txq[q];
1706 if (tc->offset + tc->count > txq) { 1701 if (tc->offset + tc->count > txq) {
1707 pr_warning("Number of in use tx queues " 1702 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1708 "changed. Priority %i to tc " 1703 i, q);
1709 "mapping %i is no longer valid "
1710 "setting map to 0\n",
1711 i, q);
1712 netdev_set_prio_tc_map(dev, i, 0); 1704 netdev_set_prio_tc_map(dev, i, 0);
1713 } 1705 }
1714 } 1706 }
@@ -2014,8 +2006,7 @@ EXPORT_SYMBOL(skb_gso_segment);
2014void netdev_rx_csum_fault(struct net_device *dev) 2006void netdev_rx_csum_fault(struct net_device *dev)
2015{ 2007{
2016 if (net_ratelimit()) { 2008 if (net_ratelimit()) {
2017 printk(KERN_ERR "%s: hw csum failure.\n", 2009 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2018 dev ? dev->name : "<unknown>");
2019 dump_stack(); 2010 dump_stack();
2020 } 2011 }
2021} 2012}
@@ -2332,9 +2323,9 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
2332{ 2323{
2333 if (unlikely(queue_index >= dev->real_num_tx_queues)) { 2324 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
2334 if (net_ratelimit()) { 2325 if (net_ratelimit()) {
2335 pr_warning("%s selects TX queue %d, but " 2326 pr_warn("%s selects TX queue %d, but real number of TX queues is %d\n",
2336 "real number of TX queues is %d\n", 2327 dev->name, queue_index,
2337 dev->name, queue_index, dev->real_num_tx_queues); 2328 dev->real_num_tx_queues);
2338 } 2329 }
2339 return 0; 2330 return 0;
2340 } 2331 }
@@ -2578,16 +2569,16 @@ int dev_queue_xmit(struct sk_buff *skb)
2578 } 2569 }
2579 HARD_TX_UNLOCK(dev, txq); 2570 HARD_TX_UNLOCK(dev, txq);
2580 if (net_ratelimit()) 2571 if (net_ratelimit())
2581 printk(KERN_CRIT "Virtual device %s asks to " 2572 pr_crit("Virtual device %s asks to queue packet!\n",
2582 "queue packet!\n", dev->name); 2573 dev->name);
2583 } else { 2574 } else {
2584 /* Recursion is detected! It is possible, 2575 /* Recursion is detected! It is possible,
2585 * unfortunately 2576 * unfortunately
2586 */ 2577 */
2587recursion_alert: 2578recursion_alert:
2588 if (net_ratelimit()) 2579 if (net_ratelimit())
2589 printk(KERN_CRIT "Dead loop on virtual device " 2580 pr_crit("Dead loop on virtual device %s, fix it urgently!\n",
2590 "%s, fix it urgently!\n", dev->name); 2581 dev->name);
2591 } 2582 }
2592 } 2583 }
2593 2584
@@ -3069,8 +3060,8 @@ static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
3069 3060
3070 if (unlikely(MAX_RED_LOOP < ttl++)) { 3061 if (unlikely(MAX_RED_LOOP < ttl++)) {
3071 if (net_ratelimit()) 3062 if (net_ratelimit())
3072 pr_warning( "Redir loop detected Dropping packet (%d->%d)\n", 3063 pr_warn("Redir loop detected Dropping packet (%d->%d)\n",
3073 skb->skb_iif, dev->ifindex); 3064 skb->skb_iif, dev->ifindex);
3074 return TC_ACT_SHOT; 3065 return TC_ACT_SHOT;
3075 } 3066 }
3076 3067
@@ -4497,16 +4488,15 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
4497 dev->flags &= ~IFF_PROMISC; 4488 dev->flags &= ~IFF_PROMISC;
4498 else { 4489 else {
4499 dev->promiscuity -= inc; 4490 dev->promiscuity -= inc;
4500 printk(KERN_WARNING "%s: promiscuity touches roof, " 4491 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
4501 "set promiscuity failed, promiscuity feature " 4492 dev->name);
4502 "of device might be broken.\n", dev->name);
4503 return -EOVERFLOW; 4493 return -EOVERFLOW;
4504 } 4494 }
4505 } 4495 }
4506 if (dev->flags != old_flags) { 4496 if (dev->flags != old_flags) {
4507 printk(KERN_INFO "device %s %s promiscuous mode\n", 4497 pr_info("device %s %s promiscuous mode\n",
4508 dev->name, (dev->flags & IFF_PROMISC) ? "entered" : 4498 dev->name,
4509 "left"); 4499 dev->flags & IFF_PROMISC ? "entered" : "left");
4510 if (audit_enabled) { 4500 if (audit_enabled) {
4511 current_uid_gid(&uid, &gid); 4501 current_uid_gid(&uid, &gid);
4512 audit_log(current->audit_context, GFP_ATOMIC, 4502 audit_log(current->audit_context, GFP_ATOMIC,
@@ -4579,9 +4569,8 @@ int dev_set_allmulti(struct net_device *dev, int inc)
4579 dev->flags &= ~IFF_ALLMULTI; 4569 dev->flags &= ~IFF_ALLMULTI;
4580 else { 4570 else {
4581 dev->allmulti -= inc; 4571 dev->allmulti -= inc;
4582 printk(KERN_WARNING "%s: allmulti touches roof, " 4572 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
4583 "set allmulti failed, allmulti feature of " 4573 dev->name);
4584 "device might be broken.\n", dev->name);
4585 return -EOVERFLOW; 4574 return -EOVERFLOW;
4586 } 4575 }
4587 } 4576 }
@@ -5238,8 +5227,8 @@ static void rollback_registered_many(struct list_head *head)
5238 * devices and proceed with the remaining. 5227 * devices and proceed with the remaining.
5239 */ 5228 */
5240 if (dev->reg_state == NETREG_UNINITIALIZED) { 5229 if (dev->reg_state == NETREG_UNINITIALIZED) {
5241 pr_debug("unregister_netdevice: device %s/%p never " 5230 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5242 "was registered\n", dev->name, dev); 5231 dev->name, dev);
5243 5232
5244 WARN_ON(1); 5233 WARN_ON(1);
5245 list_del(&dev->unreg_list); 5234 list_del(&dev->unreg_list);
@@ -5471,7 +5460,7 @@ static int netif_alloc_rx_queues(struct net_device *dev)
5471 5460
5472 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL); 5461 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5473 if (!rx) { 5462 if (!rx) {
5474 pr_err("netdev: Unable to allocate %u rx queues.\n", count); 5463 pr_err("netdev: Unable to allocate %u rx queues\n", count);
5475 return -ENOMEM; 5464 return -ENOMEM;
5476 } 5465 }
5477 dev->_rx = rx; 5466 dev->_rx = rx;
@@ -5505,8 +5494,7 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
5505 5494
5506 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL); 5495 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
5507 if (!tx) { 5496 if (!tx) {
5508 pr_err("netdev: Unable to allocate %u tx queues.\n", 5497 pr_err("netdev: Unable to allocate %u tx queues\n", count);
5509 count);
5510 return -ENOMEM; 5498 return -ENOMEM;
5511 } 5499 }
5512 dev->_tx = tx; 5500 dev->_tx = tx;
@@ -5765,10 +5753,8 @@ static void netdev_wait_allrefs(struct net_device *dev)
5765 refcnt = netdev_refcnt_read(dev); 5753 refcnt = netdev_refcnt_read(dev);
5766 5754
5767 if (time_after(jiffies, warning_time + 10 * HZ)) { 5755 if (time_after(jiffies, warning_time + 10 * HZ)) {
5768 printk(KERN_EMERG "unregister_netdevice: " 5756 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
5769 "waiting for %s to become free. Usage " 5757 dev->name, refcnt);
5770 "count = %d\n",
5771 dev->name, refcnt);
5772 warning_time = jiffies; 5758 warning_time = jiffies;
5773 } 5759 }
5774 } 5760 }
@@ -5819,7 +5805,7 @@ void netdev_run_todo(void)
5819 list_del(&dev->todo_list); 5805 list_del(&dev->todo_list);
5820 5806
5821 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 5807 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5822 printk(KERN_ERR "network todo '%s' but state %d\n", 5808 pr_err("network todo '%s' but state %d\n",
5823 dev->name, dev->reg_state); 5809 dev->name, dev->reg_state);
5824 dump_stack(); 5810 dump_stack();
5825 continue; 5811 continue;
@@ -5848,12 +5834,12 @@ void netdev_run_todo(void)
5848/* Convert net_device_stats to rtnl_link_stats64. They have the same 5834/* Convert net_device_stats to rtnl_link_stats64. They have the same
5849 * fields in the same order, with only the type differing. 5835 * fields in the same order, with only the type differing.
5850 */ 5836 */
5851static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 5837void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5852 const struct net_device_stats *netdev_stats) 5838 const struct net_device_stats *netdev_stats)
5853{ 5839{
5854#if BITS_PER_LONG == 64 5840#if BITS_PER_LONG == 64
5855 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats)); 5841 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
5856 memcpy(stats64, netdev_stats, sizeof(*stats64)); 5842 memcpy(stats64, netdev_stats, sizeof(*stats64));
5857#else 5843#else
5858 size_t i, n = sizeof(*stats64) / sizeof(u64); 5844 size_t i, n = sizeof(*stats64) / sizeof(u64);
5859 const unsigned long *src = (const unsigned long *)netdev_stats; 5845 const unsigned long *src = (const unsigned long *)netdev_stats;
@@ -5865,6 +5851,7 @@ static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5865 dst[i] = src[i]; 5851 dst[i] = src[i];
5866#endif 5852#endif
5867} 5853}
5854EXPORT_SYMBOL(netdev_stats_to_stats64);
5868 5855
5869/** 5856/**
5870 * dev_get_stats - get network device statistics 5857 * dev_get_stats - get network device statistics
@@ -5935,15 +5922,13 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
5935 BUG_ON(strlen(name) >= sizeof(dev->name)); 5922 BUG_ON(strlen(name) >= sizeof(dev->name));
5936 5923
5937 if (txqs < 1) { 5924 if (txqs < 1) {
5938 pr_err("alloc_netdev: Unable to allocate device " 5925 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
5939 "with zero queues.\n");
5940 return NULL; 5926 return NULL;
5941 } 5927 }
5942 5928
5943#ifdef CONFIG_RPS 5929#ifdef CONFIG_RPS
5944 if (rxqs < 1) { 5930 if (rxqs < 1) {
5945 pr_err("alloc_netdev: Unable to allocate device " 5931 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
5946 "with zero RX queues.\n");
5947 return NULL; 5932 return NULL;
5948 } 5933 }
5949#endif 5934#endif
@@ -5959,7 +5944,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
5959 5944
5960 p = kzalloc(alloc_size, GFP_KERNEL); 5945 p = kzalloc(alloc_size, GFP_KERNEL);
5961 if (!p) { 5946 if (!p) {
5962 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n"); 5947 pr_err("alloc_netdev: Unable to allocate device\n");
5963 return NULL; 5948 return NULL;
5964 } 5949 }
5965 5950
@@ -6492,8 +6477,8 @@ static void __net_exit default_device_exit(struct net *net)
6492 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); 6477 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6493 err = dev_change_net_namespace(dev, &init_net, fb_name); 6478 err = dev_change_net_namespace(dev, &init_net, fb_name);
6494 if (err) { 6479 if (err) {
6495 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n", 6480 pr_emerg("%s: failed to move %s to init_net: %d\n",
6496 __func__, dev->name, err); 6481 __func__, dev->name, err);
6497 BUG(); 6482 BUG();
6498 } 6483 }
6499 } 6484 }
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 3f79db1b612a..6d6d7d25caaa 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -73,6 +73,8 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
73 [NETIF_F_RXCSUM_BIT] = "rx-checksum", 73 [NETIF_F_RXCSUM_BIT] = "rx-checksum",
74 [NETIF_F_NOCACHE_COPY_BIT] = "tx-nocache-copy", 74 [NETIF_F_NOCACHE_COPY_BIT] = "tx-nocache-copy",
75 [NETIF_F_LOOPBACK_BIT] = "loopback", 75 [NETIF_F_LOOPBACK_BIT] = "loopback",
76 [NETIF_F_RXFCS_BIT] = "rx-fcs",
77 [NETIF_F_RXALL_BIT] = "rx-all",
76}; 78};
77 79
78static int ethtool_get_features(struct net_device *dev, void __user *useraddr) 80static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
diff --git a/net/core/iovec.c b/net/core/iovec.c
index c40f27e7d208..7e7aeb01de45 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -35,7 +35,7 @@
35 * in any case. 35 * in any case.
36 */ 36 */
37 37
38int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode) 38int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode)
39{ 39{
40 int size, ct, err; 40 int size, ct, err;
41 41
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 2a83914b0277..0a68045782d1 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2167,6 +2167,35 @@ nla_put_failure:
2167 return -EMSGSIZE; 2167 return -EMSGSIZE;
2168} 2168}
2169 2169
2170static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2171 u32 pid, u32 seq, int type, unsigned int flags,
2172 struct neigh_table *tbl)
2173{
2174 struct nlmsghdr *nlh;
2175 struct ndmsg *ndm;
2176
2177 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2178 if (nlh == NULL)
2179 return -EMSGSIZE;
2180
2181 ndm = nlmsg_data(nlh);
2182 ndm->ndm_family = tbl->family;
2183 ndm->ndm_pad1 = 0;
2184 ndm->ndm_pad2 = 0;
2185 ndm->ndm_flags = pn->flags | NTF_PROXY;
2186 ndm->ndm_type = NDA_DST;
2187 ndm->ndm_ifindex = pn->dev->ifindex;
2188 ndm->ndm_state = NUD_NONE;
2189
2190 NLA_PUT(skb, NDA_DST, tbl->key_len, pn->key);
2191
2192 return nlmsg_end(skb, nlh);
2193
2194nla_put_failure:
2195 nlmsg_cancel(skb, nlh);
2196 return -EMSGSIZE;
2197}
2198
2170static void neigh_update_notify(struct neighbour *neigh) 2199static void neigh_update_notify(struct neighbour *neigh)
2171{ 2200{
2172 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh); 2201 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
@@ -2216,23 +2245,78 @@ out:
2216 return rc; 2245 return rc;
2217} 2246}
2218 2247
2248static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2249 struct netlink_callback *cb)
2250{
2251 struct pneigh_entry *n;
2252 struct net *net = sock_net(skb->sk);
2253 int rc, h, s_h = cb->args[3];
2254 int idx, s_idx = idx = cb->args[4];
2255
2256 read_lock_bh(&tbl->lock);
2257
2258 for (h = 0; h <= PNEIGH_HASHMASK; h++) {
2259 if (h < s_h)
2260 continue;
2261 if (h > s_h)
2262 s_idx = 0;
2263 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2264 if (dev_net(n->dev) != net)
2265 continue;
2266 if (idx < s_idx)
2267 goto next;
2268 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2269 cb->nlh->nlmsg_seq,
2270 RTM_NEWNEIGH,
2271 NLM_F_MULTI, tbl) <= 0) {
2272 read_unlock_bh(&tbl->lock);
2273 rc = -1;
2274 goto out;
2275 }
2276 next:
2277 idx++;
2278 }
2279 }
2280
2281 read_unlock_bh(&tbl->lock);
2282 rc = skb->len;
2283out:
2284 cb->args[3] = h;
2285 cb->args[4] = idx;
2286 return rc;
2287
2288}
2289
2219static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) 2290static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2220{ 2291{
2221 struct neigh_table *tbl; 2292 struct neigh_table *tbl;
2222 int t, family, s_t; 2293 int t, family, s_t;
2294 int proxy = 0;
2295 int err = 0;
2223 2296
2224 read_lock(&neigh_tbl_lock); 2297 read_lock(&neigh_tbl_lock);
2225 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family; 2298 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2299
2300 /* check for full ndmsg structure presence, family member is
2301 * the same for both structures
2302 */
2303 if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
2304 ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
2305 proxy = 1;
2306
2226 s_t = cb->args[0]; 2307 s_t = cb->args[0];
2227 2308
2228 for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) { 2309 for (tbl = neigh_tables, t = 0; tbl && (err >= 0);
2310 tbl = tbl->next, t++) {
2229 if (t < s_t || (family && tbl->family != family)) 2311 if (t < s_t || (family && tbl->family != family))
2230 continue; 2312 continue;
2231 if (t > s_t) 2313 if (t > s_t)
2232 memset(&cb->args[1], 0, sizeof(cb->args) - 2314 memset(&cb->args[1], 0, sizeof(cb->args) -
2233 sizeof(cb->args[0])); 2315 sizeof(cb->args[0]));
2234 if (neigh_dump_table(tbl, skb, cb) < 0) 2316 if (proxy)
2235 break; 2317 err = pneigh_dump_table(tbl, skb, cb);
2318 else
2319 err = neigh_dump_table(tbl, skb, cb);
2236 } 2320 }
2237 read_unlock(&neigh_tbl_lock); 2321 read_unlock(&neigh_tbl_lock);
2238 2322
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index ddefc513b44a..3d84fb9d8873 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -9,6 +9,8 @@
9 * Copyright (C) 2002 Red Hat, Inc. 9 * Copyright (C) 2002 Red Hat, Inc.
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/moduleparam.h> 14#include <linux/moduleparam.h>
13#include <linux/netdevice.h> 15#include <linux/netdevice.h>
14#include <linux/etherdevice.h> 16#include <linux/etherdevice.h>
@@ -45,9 +47,11 @@ static atomic_t trapped;
45#define NETPOLL_RX_ENABLED 1 47#define NETPOLL_RX_ENABLED 1
46#define NETPOLL_RX_DROP 2 48#define NETPOLL_RX_DROP 2
47 49
48#define MAX_SKB_SIZE \ 50#define MAX_SKB_SIZE \
49 (MAX_UDP_CHUNK + sizeof(struct udphdr) + \ 51 (sizeof(struct ethhdr) + \
50 sizeof(struct iphdr) + sizeof(struct ethhdr)) 52 sizeof(struct iphdr) + \
53 sizeof(struct udphdr) + \
54 MAX_UDP_CHUNK)
51 55
52static void zap_completion_queue(void); 56static void zap_completion_queue(void);
53static void arp_reply(struct sk_buff *skb); 57static void arp_reply(struct sk_buff *skb);
@@ -55,6 +59,13 @@ static void arp_reply(struct sk_buff *skb);
55static unsigned int carrier_timeout = 4; 59static unsigned int carrier_timeout = 4;
56module_param(carrier_timeout, uint, 0644); 60module_param(carrier_timeout, uint, 0644);
57 61
62#define np_info(np, fmt, ...) \
63 pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
64#define np_err(np, fmt, ...) \
65 pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
66#define np_notice(np, fmt, ...) \
67 pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
68
58static void queue_process(struct work_struct *work) 69static void queue_process(struct work_struct *work)
59{ 70{
60 struct netpoll_info *npinfo = 71 struct netpoll_info *npinfo =
@@ -627,18 +638,12 @@ out:
627 638
628void netpoll_print_options(struct netpoll *np) 639void netpoll_print_options(struct netpoll *np)
629{ 640{
630 printk(KERN_INFO "%s: local port %d\n", 641 np_info(np, "local port %d\n", np->local_port);
631 np->name, np->local_port); 642 np_info(np, "local IP %pI4\n", &np->local_ip);
632 printk(KERN_INFO "%s: local IP %pI4\n", 643 np_info(np, "interface '%s'\n", np->dev_name);
633 np->name, &np->local_ip); 644 np_info(np, "remote port %d\n", np->remote_port);
634 printk(KERN_INFO "%s: interface '%s'\n", 645 np_info(np, "remote IP %pI4\n", &np->remote_ip);
635 np->name, np->dev_name); 646 np_info(np, "remote ethernet address %pM\n", np->remote_mac);
636 printk(KERN_INFO "%s: remote port %d\n",
637 np->name, np->remote_port);
638 printk(KERN_INFO "%s: remote IP %pI4\n",
639 np->name, &np->remote_ip);
640 printk(KERN_INFO "%s: remote ethernet address %pM\n",
641 np->name, np->remote_mac);
642} 647}
643EXPORT_SYMBOL(netpoll_print_options); 648EXPORT_SYMBOL(netpoll_print_options);
644 649
@@ -680,8 +685,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
680 goto parse_failed; 685 goto parse_failed;
681 *delim = 0; 686 *delim = 0;
682 if (*cur == ' ' || *cur == '\t') 687 if (*cur == ' ' || *cur == '\t')
683 printk(KERN_INFO "%s: warning: whitespace" 688 np_info(np, "warning: whitespace is not allowed\n");
684 "is not allowed\n", np->name);
685 np->remote_port = simple_strtol(cur, NULL, 10); 689 np->remote_port = simple_strtol(cur, NULL, 10);
686 cur = delim; 690 cur = delim;
687 } 691 }
@@ -705,8 +709,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
705 return 0; 709 return 0;
706 710
707 parse_failed: 711 parse_failed:
708 printk(KERN_INFO "%s: couldn't parse config at '%s'!\n", 712 np_info(np, "couldn't parse config at '%s'!\n", cur);
709 np->name, cur);
710 return -1; 713 return -1;
711} 714}
712EXPORT_SYMBOL(netpoll_parse_options); 715EXPORT_SYMBOL(netpoll_parse_options);
@@ -721,8 +724,8 @@ int __netpoll_setup(struct netpoll *np)
721 724
722 if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) || 725 if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
723 !ndev->netdev_ops->ndo_poll_controller) { 726 !ndev->netdev_ops->ndo_poll_controller) {
724 printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n", 727 np_err(np, "%s doesn't support polling, aborting\n",
725 np->name, np->dev_name); 728 np->dev_name);
726 err = -ENOTSUPP; 729 err = -ENOTSUPP;
727 goto out; 730 goto out;
728 } 731 }
@@ -785,14 +788,12 @@ int netpoll_setup(struct netpoll *np)
785 if (np->dev_name) 788 if (np->dev_name)
786 ndev = dev_get_by_name(&init_net, np->dev_name); 789 ndev = dev_get_by_name(&init_net, np->dev_name);
787 if (!ndev) { 790 if (!ndev) {
788 printk(KERN_ERR "%s: %s doesn't exist, aborting.\n", 791 np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
789 np->name, np->dev_name);
790 return -ENODEV; 792 return -ENODEV;
791 } 793 }
792 794
793 if (ndev->master) { 795 if (ndev->master) {
794 printk(KERN_ERR "%s: %s is a slave device, aborting.\n", 796 np_err(np, "%s is a slave device, aborting\n", np->dev_name);
795 np->name, np->dev_name);
796 err = -EBUSY; 797 err = -EBUSY;
797 goto put; 798 goto put;
798 } 799 }
@@ -800,16 +801,14 @@ int netpoll_setup(struct netpoll *np)
800 if (!netif_running(ndev)) { 801 if (!netif_running(ndev)) {
801 unsigned long atmost, atleast; 802 unsigned long atmost, atleast;
802 803
803 printk(KERN_INFO "%s: device %s not up yet, forcing it\n", 804 np_info(np, "device %s not up yet, forcing it\n", np->dev_name);
804 np->name, np->dev_name);
805 805
806 rtnl_lock(); 806 rtnl_lock();
807 err = dev_open(ndev); 807 err = dev_open(ndev);
808 rtnl_unlock(); 808 rtnl_unlock();
809 809
810 if (err) { 810 if (err) {
811 printk(KERN_ERR "%s: failed to open %s\n", 811 np_err(np, "failed to open %s\n", ndev->name);
812 np->name, ndev->name);
813 goto put; 812 goto put;
814 } 813 }
815 814
@@ -817,9 +816,7 @@ int netpoll_setup(struct netpoll *np)
817 atmost = jiffies + carrier_timeout * HZ; 816 atmost = jiffies + carrier_timeout * HZ;
818 while (!netif_carrier_ok(ndev)) { 817 while (!netif_carrier_ok(ndev)) {
819 if (time_after(jiffies, atmost)) { 818 if (time_after(jiffies, atmost)) {
820 printk(KERN_NOTICE 819 np_notice(np, "timeout waiting for carrier\n");
821 "%s: timeout waiting for carrier\n",
822 np->name);
823 break; 820 break;
824 } 821 }
825 msleep(1); 822 msleep(1);
@@ -831,9 +828,7 @@ int netpoll_setup(struct netpoll *np)
831 */ 828 */
832 829
833 if (time_before(jiffies, atleast)) { 830 if (time_before(jiffies, atleast)) {
834 printk(KERN_NOTICE "%s: carrier detect appears" 831 np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
835 " untrustworthy, waiting 4 seconds\n",
836 np->name);
837 msleep(4000); 832 msleep(4000);
838 } 833 }
839 } 834 }
@@ -844,15 +839,15 @@ int netpoll_setup(struct netpoll *np)
844 839
845 if (!in_dev || !in_dev->ifa_list) { 840 if (!in_dev || !in_dev->ifa_list) {
846 rcu_read_unlock(); 841 rcu_read_unlock();
847 printk(KERN_ERR "%s: no IP address for %s, aborting\n", 842 np_err(np, "no IP address for %s, aborting\n",
848 np->name, np->dev_name); 843 np->dev_name);
849 err = -EDESTADDRREQ; 844 err = -EDESTADDRREQ;
850 goto put; 845 goto put;
851 } 846 }
852 847
853 np->local_ip = in_dev->ifa_list->ifa_local; 848 np->local_ip = in_dev->ifa_list->ifa_local;
854 rcu_read_unlock(); 849 rcu_read_unlock();
855 printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip); 850 np_info(np, "local IP %pI4\n", &np->local_ip);
856 } 851 }
857 852
858 np->dev = ndev; 853 np->dev = ndev;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f965dce6f20f..1a63c6efd2ea 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1133,6 +1133,8 @@ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
1133 .len = sizeof(struct ifla_vf_vlan) }, 1133 .len = sizeof(struct ifla_vf_vlan) },
1134 [IFLA_VF_TX_RATE] = { .type = NLA_BINARY, 1134 [IFLA_VF_TX_RATE] = { .type = NLA_BINARY,
1135 .len = sizeof(struct ifla_vf_tx_rate) }, 1135 .len = sizeof(struct ifla_vf_tx_rate) },
1136 [IFLA_VF_SPOOFCHK] = { .type = NLA_BINARY,
1137 .len = sizeof(struct ifla_vf_spoofchk) },
1136}; 1138};
1137 1139
1138static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = { 1140static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
@@ -2019,8 +2021,13 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2019 2021
2020 __rtnl_unlock(); 2022 __rtnl_unlock();
2021 rtnl = net->rtnl; 2023 rtnl = net->rtnl;
2022 err = netlink_dump_start(rtnl, skb, nlh, dumpit, 2024 {
2023 NULL, min_dump_alloc); 2025 struct netlink_dump_control c = {
2026 .dump = dumpit,
2027 .min_dump_alloc = min_dump_alloc,
2028 };
2029 err = netlink_dump_start(rtnl, skb, nlh, &c);
2030 }
2024 rtnl_lock(); 2031 rtnl_lock();
2025 return err; 2032 return err;
2026 } 2033 }
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index da0c97f2fab4..6eb656acdfe5 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -592,6 +592,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
592 new->rxhash = old->rxhash; 592 new->rxhash = old->rxhash;
593 new->ooo_okay = old->ooo_okay; 593 new->ooo_okay = old->ooo_okay;
594 new->l4_rxhash = old->l4_rxhash; 594 new->l4_rxhash = old->l4_rxhash;
595 new->no_fcs = old->no_fcs;
595#ifdef CONFIG_XFRM 596#ifdef CONFIG_XFRM
596 new->sp = secpath_get(old->sp); 597 new->sp = secpath_get(old->sp);
597#endif 598#endif
@@ -2906,7 +2907,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2906 nskb->prev = p; 2907 nskb->prev = p;
2907 2908
2908 nskb->data_len += p->len; 2909 nskb->data_len += p->len;
2909 nskb->truesize += p->len; 2910 nskb->truesize += p->truesize;
2910 nskb->len += p->len; 2911 nskb->len += p->len;
2911 2912
2912 *head = nskb; 2913 *head = nskb;
@@ -2916,6 +2917,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2916 p = nskb; 2917 p = nskb;
2917 2918
2918merge: 2919merge:
2920 p->truesize += skb->truesize - len;
2919 if (offset > headlen) { 2921 if (offset > headlen) {
2920 unsigned int eat = offset - headlen; 2922 unsigned int eat = offset - headlen;
2921 2923
diff --git a/net/core/sock.c b/net/core/sock.c
index 1fb21b51593b..9be6d0d6c533 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -793,6 +793,17 @@ set_rcvbuf:
793 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool); 793 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
794 break; 794 break;
795 795
796 case SO_PEEK_OFF:
797 if (sock->ops->set_peek_off)
798 sock->ops->set_peek_off(sk, val);
799 else
800 ret = -EOPNOTSUPP;
801 break;
802
803 case SO_NOFCS:
804 sock_valbool_flag(sk, SOCK_NOFCS, valbool);
805 break;
806
796 default: 807 default:
797 ret = -ENOPROTOOPT; 808 ret = -ENOPROTOOPT;
798 break; 809 break;
@@ -1018,6 +1029,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
1018 v.val = !!sock_flag(sk, SOCK_WIFI_STATUS); 1029 v.val = !!sock_flag(sk, SOCK_WIFI_STATUS);
1019 break; 1030 break;
1020 1031
1032 case SO_PEEK_OFF:
1033 if (!sock->ops->set_peek_off)
1034 return -EOPNOTSUPP;
1035
1036 v.val = sk->sk_peek_off;
1037 break;
1038 case SO_NOFCS:
1039 v.val = !!sock_flag(sk, SOCK_NOFCS);
1040 break;
1021 default: 1041 default:
1022 return -ENOPROTOOPT; 1042 return -ENOPROTOOPT;
1023 } 1043 }
@@ -2092,6 +2112,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
2092 2112
2093 sk->sk_sndmsg_page = NULL; 2113 sk->sk_sndmsg_page = NULL;
2094 sk->sk_sndmsg_off = 0; 2114 sk->sk_sndmsg_off = 0;
2115 sk->sk_peek_off = -1;
2095 2116
2096 sk->sk_peer_pid = NULL; 2117 sk->sk_peer_pid = NULL;
2097 sk->sk_peer_cred = NULL; 2118 sk->sk_peer_cred = NULL;
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 560627307200..70bfaf2d1965 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -98,6 +98,7 @@ static void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hc)
98{ 98{
99 hc->tx_t_ipi = scaled_div32(((u64)hc->tx_s) << 6, hc->tx_x); 99 hc->tx_t_ipi = scaled_div32(((u64)hc->tx_s) << 6, hc->tx_x);
100 100
101 DCCP_BUG_ON(hc->tx_t_ipi == 0);
101 ccid3_pr_debug("t_ipi=%u, s=%u, X=%u\n", hc->tx_t_ipi, 102 ccid3_pr_debug("t_ipi=%u, s=%u, X=%u\n", hc->tx_t_ipi,
102 hc->tx_s, (unsigned)(hc->tx_x >> 6)); 103 hc->tx_s, (unsigned)(hc->tx_x >> 6));
103} 104}
@@ -236,8 +237,6 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
236 * 237 *
237 * Note that X_recv is scaled by 2^6 while X_calc is not 238 * Note that X_recv is scaled by 2^6 while X_calc is not
238 */ 239 */
239 BUG_ON(hc->tx_p && !hc->tx_x_calc);
240
241 if (hc->tx_x_calc > (hc->tx_x_recv >> 5)) 240 if (hc->tx_x_calc > (hc->tx_x_recv >> 5))
242 hc->tx_x_recv = 241 hc->tx_x_recv =
243 max(hc->tx_x_recv / 2, 242 max(hc->tx_x_recv / 2,
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 1c67fe8ff90d..caf6e1734b62 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -300,7 +300,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
300 */ 300 */
301 WARN_ON(req->sk); 301 WARN_ON(req->sk);
302 302
303 if (seq != dccp_rsk(req)->dreq_iss) { 303 if (!between48(seq, dccp_rsk(req)->dreq_iss,
304 dccp_rsk(req)->dreq_gss)) {
304 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); 305 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
305 goto out; 306 goto out;
306 } 307 }
@@ -639,11 +640,12 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
639 * 640 *
640 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie 641 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
641 * 642 *
642 * In fact we defer setting S.GSR, S.SWL, S.SWH to 643 * Setting S.SWL/S.SWH to is deferred to dccp_create_openreq_child().
643 * dccp_create_openreq_child.
644 */ 644 */
645 dreq->dreq_isr = dcb->dccpd_seq; 645 dreq->dreq_isr = dcb->dccpd_seq;
646 dreq->dreq_gsr = dreq->dreq_isr;
646 dreq->dreq_iss = dccp_v4_init_sequence(skb); 647 dreq->dreq_iss = dccp_v4_init_sequence(skb);
648 dreq->dreq_gss = dreq->dreq_iss;
647 dreq->dreq_service = service; 649 dreq->dreq_service = service;
648 650
649 if (dccp_v4_send_response(sk, req, NULL)) 651 if (dccp_v4_send_response(sk, req, NULL))
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index ce903f747e64..4dc588f520e0 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -193,7 +193,8 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
193 */ 193 */
194 WARN_ON(req->sk != NULL); 194 WARN_ON(req->sk != NULL);
195 195
196 if (seq != dccp_rsk(req)->dreq_iss) { 196 if (!between48(seq, dccp_rsk(req)->dreq_iss,
197 dccp_rsk(req)->dreq_gss)) {
197 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); 198 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
198 goto out; 199 goto out;
199 } 200 }
@@ -440,11 +441,12 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
440 * 441 *
441 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie 442 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
442 * 443 *
443 * In fact we defer setting S.GSR, S.SWL, S.SWH to 444 * Setting S.SWL/S.SWH to is deferred to dccp_create_openreq_child().
444 * dccp_create_openreq_child.
445 */ 445 */
446 dreq->dreq_isr = dcb->dccpd_seq; 446 dreq->dreq_isr = dcb->dccpd_seq;
447 dreq->dreq_gsr = dreq->dreq_isr;
447 dreq->dreq_iss = dccp_v6_init_sequence(skb); 448 dreq->dreq_iss = dccp_v6_init_sequence(skb);
449 dreq->dreq_gss = dreq->dreq_iss;
448 dreq->dreq_service = service; 450 dreq->dreq_service = service;
449 451
450 if (dccp_v6_send_response(sk, req, NULL)) 452 if (dccp_v6_send_response(sk, req, NULL))
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 5a7f90bbffac..ea850ce35d4a 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -127,9 +127,11 @@ struct sock *dccp_create_openreq_child(struct sock *sk,
127 * activation below, as these windows all depend on the local 127 * activation below, as these windows all depend on the local
128 * and remote Sequence Window feature values (7.5.2). 128 * and remote Sequence Window feature values (7.5.2).
129 */ 129 */
130 newdp->dccps_gss = newdp->dccps_iss = dreq->dreq_iss; 130 newdp->dccps_iss = dreq->dreq_iss;
131 newdp->dccps_gss = dreq->dreq_gss;
131 newdp->dccps_gar = newdp->dccps_iss; 132 newdp->dccps_gar = newdp->dccps_iss;
132 newdp->dccps_gsr = newdp->dccps_isr = dreq->dreq_isr; 133 newdp->dccps_isr = dreq->dreq_isr;
134 newdp->dccps_gsr = dreq->dreq_gsr;
133 135
134 /* 136 /*
135 * Activate features: initialise CCIDs, sequence windows etc. 137 * Activate features: initialise CCIDs, sequence windows etc.
@@ -164,9 +166,9 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
164 /* Check for retransmitted REQUEST */ 166 /* Check for retransmitted REQUEST */
165 if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) { 167 if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
166 168
167 if (after48(DCCP_SKB_CB(skb)->dccpd_seq, dreq->dreq_isr)) { 169 if (after48(DCCP_SKB_CB(skb)->dccpd_seq, dreq->dreq_gsr)) {
168 dccp_pr_debug("Retransmitted REQUEST\n"); 170 dccp_pr_debug("Retransmitted REQUEST\n");
169 dreq->dreq_isr = DCCP_SKB_CB(skb)->dccpd_seq; 171 dreq->dreq_gsr = DCCP_SKB_CB(skb)->dccpd_seq;
170 /* 172 /*
171 * Send another RESPONSE packet 173 * Send another RESPONSE packet
172 * To protect against Request floods, increment retrans 174 * To protect against Request floods, increment retrans
@@ -186,12 +188,14 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
186 goto drop; 188 goto drop;
187 189
188 /* Invalid ACK */ 190 /* Invalid ACK */
189 if (DCCP_SKB_CB(skb)->dccpd_ack_seq != dreq->dreq_iss) { 191 if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
192 dreq->dreq_iss, dreq->dreq_gss)) {
190 dccp_pr_debug("Invalid ACK number: ack_seq=%llu, " 193 dccp_pr_debug("Invalid ACK number: ack_seq=%llu, "
191 "dreq_iss=%llu\n", 194 "dreq_iss=%llu, dreq_gss=%llu\n",
192 (unsigned long long) 195 (unsigned long long)
193 DCCP_SKB_CB(skb)->dccpd_ack_seq, 196 DCCP_SKB_CB(skb)->dccpd_ack_seq,
194 (unsigned long long) dreq->dreq_iss); 197 (unsigned long long) dreq->dreq_iss,
198 (unsigned long long) dreq->dreq_gss);
195 goto drop; 199 goto drop;
196 } 200 }
197 201
diff --git a/net/dccp/output.c b/net/dccp/output.c
index dede3edb8849..787367308797 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -408,10 +408,10 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
408 skb_dst_set(skb, dst_clone(dst)); 408 skb_dst_set(skb, dst_clone(dst));
409 409
410 dreq = dccp_rsk(req); 410 dreq = dccp_rsk(req);
411 if (inet_rsk(req)->acked) /* increase ISS upon retransmission */ 411 if (inet_rsk(req)->acked) /* increase GSS upon retransmission */
412 dccp_inc_seqno(&dreq->dreq_iss); 412 dccp_inc_seqno(&dreq->dreq_gss);
413 DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE; 413 DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
414 DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_iss; 414 DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_gss;
415 415
416 /* Resolve feature dependencies resulting from choice of CCID */ 416 /* Resolve feature dependencies resulting from choice of CCID */
417 if (dccp_feat_server_ccid_dependencies(dreq)) 417 if (dccp_feat_server_ccid_dependencies(dreq))
@@ -429,8 +429,8 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
429 DCCP_SKB_CB(skb)->dccpd_opt_len) / 4; 429 DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
430 dh->dccph_type = DCCP_PKT_RESPONSE; 430 dh->dccph_type = DCCP_PKT_RESPONSE;
431 dh->dccph_x = 1; 431 dh->dccph_x = 1;
432 dccp_hdr_set_seq(dh, dreq->dreq_iss); 432 dccp_hdr_set_seq(dh, dreq->dreq_gss);
433 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr); 433 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_gsr);
434 dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service; 434 dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;
435 435
436 dccp_csum_outgoing(skb); 436 dccp_csum_outgoing(skb);
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index befe426491ba..ee7013f24fca 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -205,17 +205,23 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
205 struct neighbour *neigh = dst_get_neighbour_noref(dst); 205 struct neighbour *neigh = dst_get_neighbour_noref(dst);
206 struct net_device *dev = neigh->dev; 206 struct net_device *dev = neigh->dev;
207 char mac_addr[ETH_ALEN]; 207 char mac_addr[ETH_ALEN];
208 unsigned int seq;
209 int err;
208 210
209 dn_dn2eth(mac_addr, rt->rt_local_src); 211 dn_dn2eth(mac_addr, rt->rt_local_src);
210 if (dev_hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, 212 do {
211 mac_addr, skb->len) >= 0) 213 seq = read_seqbegin(&neigh->ha_lock);
212 return dev_queue_xmit(skb); 214 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
213 215 neigh->ha, mac_addr, skb->len);
214 if (net_ratelimit()) 216 } while (read_seqretry(&neigh->ha_lock, seq));
215 printk(KERN_DEBUG "dn_neigh_output_packet: oops, can't send packet\n"); 217
216 218 if (err >= 0)
217 kfree_skb(skb); 219 err = dev_queue_xmit(skb);
218 return -EINVAL; 220 else {
221 kfree_skb(skb);
222 err = -EINVAL;
223 }
224 return err;
219} 225}
220 226
221static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb) 227static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb)
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index f31ce72dca65..80a3de4906d3 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -724,11 +724,10 @@ static int dn_output(struct sk_buff *skb)
724 struct dn_route *rt = (struct dn_route *)dst; 724 struct dn_route *rt = (struct dn_route *)dst;
725 struct net_device *dev = dst->dev; 725 struct net_device *dev = dst->dev;
726 struct dn_skb_cb *cb = DN_SKB_CB(skb); 726 struct dn_skb_cb *cb = DN_SKB_CB(skb);
727 struct neighbour *neigh;
728 727
729 int err = -EINVAL; 728 int err = -EINVAL;
730 729
731 if ((neigh = dst_get_neighbour_noref(dst)) == NULL) 730 if (dst_get_neighbour_noref(dst) == NULL)
732 goto error; 731 goto error;
733 732
734 skb->dev = dev; 733 skb->dev = dev;
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index a2468363978e..a93af86b8474 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -288,6 +288,8 @@ int eth_mac_addr(struct net_device *dev, void *p)
288 if (!is_valid_ether_addr(addr->sa_data)) 288 if (!is_valid_ether_addr(addr->sa_data))
289 return -EADDRNOTAVAIL; 289 return -EADDRNOTAVAIL;
290 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 290 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
291 /* if device marked as NET_ADDR_RANDOM, reset it */
292 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
291 return 0; 293 return 0;
292} 294}
293EXPORT_SYMBOL(eth_mac_addr); 295EXPORT_SYMBOL(eth_mac_addr);
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index e4ecc1eef98c..368515885368 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -55,6 +55,7 @@
55#include <linux/module.h> 55#include <linux/module.h>
56#include <linux/moduleparam.h> 56#include <linux/moduleparam.h>
57#include <linux/netdevice.h> 57#include <linux/netdevice.h>
58#include <linux/etherdevice.h>
58#include <net/af_ieee802154.h> 59#include <net/af_ieee802154.h>
59#include <net/ieee802154.h> 60#include <net/ieee802154.h>
60#include <net/ieee802154_netdev.h> 61#include <net/ieee802154_netdev.h>
@@ -924,19 +925,6 @@ drop:
924 return -EINVAL; 925 return -EINVAL;
925} 926}
926 927
927static int lowpan_set_address(struct net_device *dev, void *p)
928{
929 struct sockaddr *sa = p;
930
931 if (netif_running(dev))
932 return -EBUSY;
933
934 /* TODO: validate addr */
935 memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
936
937 return 0;
938}
939
940static int lowpan_get_mac_header_length(struct sk_buff *skb) 928static int lowpan_get_mac_header_length(struct sk_buff *skb)
941{ 929{
942 /* 930 /*
@@ -1062,7 +1050,7 @@ static struct header_ops lowpan_header_ops = {
1062 1050
1063static const struct net_device_ops lowpan_netdev_ops = { 1051static const struct net_device_ops lowpan_netdev_ops = {
1064 .ndo_start_xmit = lowpan_xmit, 1052 .ndo_start_xmit = lowpan_xmit,
1065 .ndo_set_mac_address = lowpan_set_address, 1053 .ndo_set_mac_address = eth_mac_addr,
1066}; 1054};
1067 1055
1068static void lowpan_setup(struct net_device *dev) 1056static void lowpan_setup(struct net_device *dev)
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f7b5670744f0..fdf49fd44bb4 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -65,6 +65,8 @@
65 * 2 of the License, or (at your option) any later version. 65 * 2 of the License, or (at your option) any later version.
66 */ 66 */
67 67
68#define pr_fmt(fmt) "IPv4: " fmt
69
68#include <linux/err.h> 70#include <linux/err.h>
69#include <linux/errno.h> 71#include <linux/errno.h>
70#include <linux/types.h> 72#include <linux/types.h>
@@ -381,6 +383,7 @@ lookup_protocol:
381 inet->mc_all = 1; 383 inet->mc_all = 1;
382 inet->mc_index = 0; 384 inet->mc_index = 0;
383 inet->mc_list = NULL; 385 inet->mc_list = NULL;
386 inet->rcv_tos = 0;
384 387
385 sk_refcnt_debug_inc(sk); 388 sk_refcnt_debug_inc(sk);
386 389
@@ -1084,13 +1087,11 @@ out:
1084 return; 1087 return;
1085 1088
1086out_permanent: 1089out_permanent:
1087 printk(KERN_ERR "Attempt to override permanent protocol %d.\n", 1090 pr_err("Attempt to override permanent protocol %d\n", protocol);
1088 protocol);
1089 goto out; 1091 goto out;
1090 1092
1091out_illegal: 1093out_illegal:
1092 printk(KERN_ERR 1094 pr_err("Ignoring attempt to register invalid socket type %d\n",
1093 "Ignoring attempt to register invalid socket type %d.\n",
1094 p->type); 1095 p->type);
1095 goto out; 1096 goto out;
1096} 1097}
@@ -1099,8 +1100,7 @@ EXPORT_SYMBOL(inet_register_protosw);
1099void inet_unregister_protosw(struct inet_protosw *p) 1100void inet_unregister_protosw(struct inet_protosw *p)
1100{ 1101{
1101 if (INET_PROTOSW_PERMANENT & p->flags) { 1102 if (INET_PROTOSW_PERMANENT & p->flags) {
1102 printk(KERN_ERR 1103 pr_err("Attempt to unregister permanent protocol %d\n",
1103 "Attempt to unregister permanent protocol %d.\n",
1104 p->protocol); 1104 p->protocol);
1105 } else { 1105 } else {
1106 spin_lock_bh(&inetsw_lock); 1106 spin_lock_bh(&inetsw_lock);
@@ -1149,8 +1149,8 @@ static int inet_sk_reselect_saddr(struct sock *sk)
1149 return 0; 1149 return 0;
1150 1150
1151 if (sysctl_ip_dynaddr > 1) { 1151 if (sysctl_ip_dynaddr > 1) {
1152 printk(KERN_INFO "%s(): shifting inet->saddr from %pI4 to %pI4\n", 1152 pr_info("%s(): shifting inet->saddr from %pI4 to %pI4\n",
1153 __func__, &old_saddr, &new_saddr); 1153 __func__, &old_saddr, &new_saddr);
1154 } 1154 }
1155 1155
1156 inet->inet_saddr = inet->inet_rcv_saddr = new_saddr; 1156 inet->inet_saddr = inet->inet_rcv_saddr = new_saddr;
@@ -1679,14 +1679,14 @@ static int __init inet_init(void)
1679 */ 1679 */
1680 1680
1681 if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0) 1681 if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0)
1682 printk(KERN_CRIT "inet_init: Cannot add ICMP protocol\n"); 1682 pr_crit("%s: Cannot add ICMP protocol\n", __func__);
1683 if (inet_add_protocol(&udp_protocol, IPPROTO_UDP) < 0) 1683 if (inet_add_protocol(&udp_protocol, IPPROTO_UDP) < 0)
1684 printk(KERN_CRIT "inet_init: Cannot add UDP protocol\n"); 1684 pr_crit("%s: Cannot add UDP protocol\n", __func__);
1685 if (inet_add_protocol(&tcp_protocol, IPPROTO_TCP) < 0) 1685 if (inet_add_protocol(&tcp_protocol, IPPROTO_TCP) < 0)
1686 printk(KERN_CRIT "inet_init: Cannot add TCP protocol\n"); 1686 pr_crit("%s: Cannot add TCP protocol\n", __func__);
1687#ifdef CONFIG_IP_MULTICAST 1687#ifdef CONFIG_IP_MULTICAST
1688 if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0) 1688 if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0)
1689 printk(KERN_CRIT "inet_init: Cannot add IGMP protocol\n"); 1689 pr_crit("%s: Cannot add IGMP protocol\n", __func__);
1690#endif 1690#endif
1691 1691
1692 /* Register the socket-side information for inet_create. */ 1692 /* Register the socket-side information for inet_create. */
@@ -1733,14 +1733,14 @@ static int __init inet_init(void)
1733 */ 1733 */
1734#if defined(CONFIG_IP_MROUTE) 1734#if defined(CONFIG_IP_MROUTE)
1735 if (ip_mr_init()) 1735 if (ip_mr_init())
1736 printk(KERN_CRIT "inet_init: Cannot init ipv4 mroute\n"); 1736 pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
1737#endif 1737#endif
1738 /* 1738 /*
1739 * Initialise per-cpu ipv4 mibs 1739 * Initialise per-cpu ipv4 mibs
1740 */ 1740 */
1741 1741
1742 if (init_ipv4_mibs()) 1742 if (init_ipv4_mibs())
1743 printk(KERN_CRIT "inet_init: Cannot init ipv4 mibs\n"); 1743 pr_crit("%s: Cannot init ipv4 mibs\n", __func__);
1744 1744
1745 ipv4_proc_init(); 1745 ipv4_proc_init();
1746 1746
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 36d14406261e..fd508b526014 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -1,3 +1,5 @@
1#define pr_fmt(fmt) "IPsec: " fmt
2
1#include <crypto/hash.h> 3#include <crypto/hash.h>
2#include <linux/err.h> 4#include <linux/err.h>
3#include <linux/module.h> 5#include <linux/module.h>
@@ -445,9 +447,10 @@ static int ah_init_state(struct xfrm_state *x)
445 447
446 if (aalg_desc->uinfo.auth.icv_fullbits/8 != 448 if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
447 crypto_ahash_digestsize(ahash)) { 449 crypto_ahash_digestsize(ahash)) {
448 printk(KERN_INFO "AH: %s digestsize %u != %hu\n", 450 pr_info("%s: %s digestsize %u != %hu\n",
449 x->aalg->alg_name, crypto_ahash_digestsize(ahash), 451 __func__, x->aalg->alg_name,
450 aalg_desc->uinfo.auth.icv_fullbits/8); 452 crypto_ahash_digestsize(ahash),
453 aalg_desc->uinfo.auth.icv_fullbits / 8);
451 goto error; 454 goto error;
452 } 455 }
453 456
@@ -510,11 +513,11 @@ static const struct net_protocol ah4_protocol = {
510static int __init ah4_init(void) 513static int __init ah4_init(void)
511{ 514{
512 if (xfrm_register_type(&ah_type, AF_INET) < 0) { 515 if (xfrm_register_type(&ah_type, AF_INET) < 0) {
513 printk(KERN_INFO "ip ah init: can't add xfrm type\n"); 516 pr_info("%s: can't add xfrm type\n", __func__);
514 return -EAGAIN; 517 return -EAGAIN;
515 } 518 }
516 if (inet_add_protocol(&ah4_protocol, IPPROTO_AH) < 0) { 519 if (inet_add_protocol(&ah4_protocol, IPPROTO_AH) < 0) {
517 printk(KERN_INFO "ip ah init: can't add protocol\n"); 520 pr_info("%s: can't add protocol\n", __func__);
518 xfrm_unregister_type(&ah_type, AF_INET); 521 xfrm_unregister_type(&ah_type, AF_INET);
519 return -EAGAIN; 522 return -EAGAIN;
520 } 523 }
@@ -524,9 +527,9 @@ static int __init ah4_init(void)
524static void __exit ah4_fini(void) 527static void __exit ah4_fini(void)
525{ 528{
526 if (inet_del_protocol(&ah4_protocol, IPPROTO_AH) < 0) 529 if (inet_del_protocol(&ah4_protocol, IPPROTO_AH) < 0)
527 printk(KERN_INFO "ip ah close: can't remove protocol\n"); 530 pr_info("%s: can't remove protocol\n", __func__);
528 if (xfrm_unregister_type(&ah_type, AF_INET) < 0) 531 if (xfrm_unregister_type(&ah_type, AF_INET) < 0)
529 printk(KERN_INFO "ip ah close: can't remove xfrm type\n"); 532 pr_info("%s: can't remove xfrm type\n", __func__);
530} 533}
531 534
532module_init(ah4_init); 535module_init(ah4_init);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 63e49890ad31..73f46d691abc 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -889,7 +889,7 @@ static int arp_process(struct sk_buff *skb)
889 889
890 n = __neigh_lookup(&arp_tbl, &sip, dev, 0); 890 n = __neigh_lookup(&arp_tbl, &sip, dev, 0);
891 891
892 if (IPV4_DEVCONF_ALL(dev_net(dev), ARP_ACCEPT)) { 892 if (IN_DEV_ARP_ACCEPT(in_dev)) {
893 /* Unsolicited ARP is not accepted by default. 893 /* Unsolicited ARP is not accepted by default.
894 It is possible, that this option should be enabled for some 894 It is possible, that this option should be enabled for some
895 devices (strip is candidate) 895 devices (strip is candidate)
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index a5b413416da3..89a47b35905d 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -1,3 +1,5 @@
1#define pr_fmt(fmt) "IPsec: " fmt
2
1#include <crypto/aead.h> 3#include <crypto/aead.h>
2#include <crypto/authenc.h> 4#include <crypto/authenc.h>
3#include <linux/err.h> 5#include <linux/err.h>
@@ -706,11 +708,11 @@ static const struct net_protocol esp4_protocol = {
706static int __init esp4_init(void) 708static int __init esp4_init(void)
707{ 709{
708 if (xfrm_register_type(&esp_type, AF_INET) < 0) { 710 if (xfrm_register_type(&esp_type, AF_INET) < 0) {
709 printk(KERN_INFO "ip esp init: can't add xfrm type\n"); 711 pr_info("%s: can't add xfrm type\n", __func__);
710 return -EAGAIN; 712 return -EAGAIN;
711 } 713 }
712 if (inet_add_protocol(&esp4_protocol, IPPROTO_ESP) < 0) { 714 if (inet_add_protocol(&esp4_protocol, IPPROTO_ESP) < 0) {
713 printk(KERN_INFO "ip esp init: can't add protocol\n"); 715 pr_info("%s: can't add protocol\n", __func__);
714 xfrm_unregister_type(&esp_type, AF_INET); 716 xfrm_unregister_type(&esp_type, AF_INET);
715 return -EAGAIN; 717 return -EAGAIN;
716 } 718 }
@@ -720,9 +722,9 @@ static int __init esp4_init(void)
720static void __exit esp4_fini(void) 722static void __exit esp4_fini(void)
721{ 723{
722 if (inet_del_protocol(&esp4_protocol, IPPROTO_ESP) < 0) 724 if (inet_del_protocol(&esp4_protocol, IPPROTO_ESP) < 0)
723 printk(KERN_INFO "ip esp close: can't remove protocol\n"); 725 pr_info("%s: can't remove protocol\n", __func__);
724 if (xfrm_unregister_type(&esp_type, AF_INET) < 0) 726 if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
725 printk(KERN_INFO "ip esp close: can't remove xfrm type\n"); 727 pr_info("%s: can't remove xfrm type\n", __func__);
726} 728}
727 729
728module_init(esp4_init); 730module_init(esp4_init);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 92fc5f69f5da..76e72bacc217 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -695,7 +695,7 @@ void fib_add_ifaddr(struct in_ifaddr *ifa)
695 if (ifa->ifa_flags & IFA_F_SECONDARY) { 695 if (ifa->ifa_flags & IFA_F_SECONDARY) {
696 prim = inet_ifa_byprefix(in_dev, prefix, mask); 696 prim = inet_ifa_byprefix(in_dev, prefix, mask);
697 if (prim == NULL) { 697 if (prim == NULL) {
698 printk(KERN_WARNING "fib_add_ifaddr: bug: prim == NULL\n"); 698 pr_warn("%s: bug: prim == NULL\n", __func__);
699 return; 699 return;
700 } 700 }
701 } 701 }
@@ -749,11 +749,11 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
749 if (ifa->ifa_flags & IFA_F_SECONDARY) { 749 if (ifa->ifa_flags & IFA_F_SECONDARY) {
750 prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask); 750 prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
751 if (prim == NULL) { 751 if (prim == NULL) {
752 printk(KERN_WARNING "fib_del_ifaddr: bug: prim == NULL\n"); 752 pr_warn("%s: bug: prim == NULL\n", __func__);
753 return; 753 return;
754 } 754 }
755 if (iprim && iprim != prim) { 755 if (iprim && iprim != prim) {
756 printk(KERN_WARNING "fib_del_ifaddr: bug: iprim != prim\n"); 756 pr_warn("%s: bug: iprim != prim\n", __func__);
757 return; 757 return;
758 } 758 }
759 } else if (!ipv4_is_zeronet(any) && 759 } else if (!ipv4_is_zeronet(any) &&
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 80106d89d548..a8c5c1d6715b 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -154,7 +154,7 @@ static void free_fib_info_rcu(struct rcu_head *head)
154void free_fib_info(struct fib_info *fi) 154void free_fib_info(struct fib_info *fi)
155{ 155{
156 if (fi->fib_dead == 0) { 156 if (fi->fib_dead == 0) {
157 pr_warning("Freeing alive fib_info %p\n", fi); 157 pr_warn("Freeing alive fib_info %p\n", fi);
158 return; 158 return;
159 } 159 }
160 change_nexthops(fi) { 160 change_nexthops(fi) {
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 2b555a5521e0..da9b9cb2282d 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1170,9 +1170,8 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
1170 } 1170 }
1171 1171
1172 if (tp && tp->pos + tp->bits > 32) 1172 if (tp && tp->pos + tp->bits > 32)
1173 pr_warning("fib_trie" 1173 pr_warn("fib_trie tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
1174 " tp=%p pos=%d, bits=%d, key=%0x plen=%d\n", 1174 tp, tp->pos, tp->bits, key, plen);
1175 tp, tp->pos, tp->bits, key, plen);
1176 1175
1177 /* Rebalance the trie */ 1176 /* Rebalance the trie */
1178 1177
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
index 8cb1ebb7cd74..42a491055c76 100644
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -10,6 +10,8 @@
10 * 10 *
11 */ 11 */
12 12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
13#include <linux/module.h> 15#include <linux/module.h>
14#include <linux/kernel.h> 16#include <linux/kernel.h>
15#include <linux/kmod.h> 17#include <linux/kmod.h>
@@ -118,10 +120,10 @@ static const struct net_protocol net_gre_protocol = {
118 120
119static int __init gre_init(void) 121static int __init gre_init(void)
120{ 122{
121 pr_info("GRE over IPv4 demultiplexor driver"); 123 pr_info("GRE over IPv4 demultiplexor driver\n");
122 124
123 if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) { 125 if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) {
124 pr_err("gre: can't add protocol\n"); 126 pr_err("can't add protocol\n");
125 return -EAGAIN; 127 return -EAGAIN;
126 } 128 }
127 129
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index ab188ae12fd9..9664d353ccd8 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -62,6 +62,8 @@
62 * 62 *
63 */ 63 */
64 64
65#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
66
65#include <linux/module.h> 67#include <linux/module.h>
66#include <linux/types.h> 68#include <linux/types.h>
67#include <linux/jiffies.h> 69#include <linux/jiffies.h>
@@ -670,7 +672,7 @@ static void icmp_unreach(struct sk_buff *skb)
670 break; 672 break;
671 case ICMP_FRAG_NEEDED: 673 case ICMP_FRAG_NEEDED:
672 if (ipv4_config.no_pmtu_disc) { 674 if (ipv4_config.no_pmtu_disc) {
673 LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: fragmentation needed and DF set.\n", 675 LIMIT_NETDEBUG(KERN_INFO pr_fmt("%pI4: fragmentation needed and DF set\n"),
674 &iph->daddr); 676 &iph->daddr);
675 } else { 677 } else {
676 info = ip_rt_frag_needed(net, iph, 678 info = ip_rt_frag_needed(net, iph,
@@ -681,7 +683,7 @@ static void icmp_unreach(struct sk_buff *skb)
681 } 683 }
682 break; 684 break;
683 case ICMP_SR_FAILED: 685 case ICMP_SR_FAILED:
684 LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: Source Route Failed.\n", 686 LIMIT_NETDEBUG(KERN_INFO pr_fmt("%pI4: Source Route Failed\n"),
685 &iph->daddr); 687 &iph->daddr);
686 break; 688 break;
687 default: 689 default:
@@ -713,13 +715,10 @@ static void icmp_unreach(struct sk_buff *skb)
713 if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses && 715 if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses &&
714 inet_addr_type(net, iph->daddr) == RTN_BROADCAST) { 716 inet_addr_type(net, iph->daddr) == RTN_BROADCAST) {
715 if (net_ratelimit()) 717 if (net_ratelimit())
716 printk(KERN_WARNING "%pI4 sent an invalid ICMP " 718 pr_warn("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n",
717 "type %u, code %u " 719 &ip_hdr(skb)->saddr,
718 "error to a broadcast: %pI4 on %s\n", 720 icmph->type, icmph->code,
719 &ip_hdr(skb)->saddr, 721 &iph->daddr, skb->dev->name);
720 icmph->type, icmph->code,
721 &iph->daddr,
722 skb->dev->name);
723 goto out; 722 goto out;
724 } 723 }
725 724
@@ -946,8 +945,8 @@ static void icmp_address_reply(struct sk_buff *skb)
946 break; 945 break;
947 } 946 }
948 if (!ifa && net_ratelimit()) { 947 if (!ifa && net_ratelimit()) {
949 printk(KERN_INFO "Wrong address mask %pI4 from %s/%pI4\n", 948 pr_info("Wrong address mask %pI4 from %s/%pI4\n",
950 mp, dev->name, &ip_hdr(skb)->saddr); 949 mp, dev->name, &ip_hdr(skb)->saddr);
951 } 950 }
952 } 951 }
953} 952}
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index fcf281819cd4..8d25a1c557eb 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -960,9 +960,12 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
960 inet_diag_bc_audit(nla_data(attr), nla_len(attr))) 960 inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
961 return -EINVAL; 961 return -EINVAL;
962 } 962 }
963 963 {
964 return netlink_dump_start(sock_diag_nlsk, skb, nlh, 964 struct netlink_dump_control c = {
965 inet_diag_dump_compat, NULL, 0); 965 .dump = inet_diag_dump_compat,
966 };
967 return netlink_dump_start(sock_diag_nlsk, skb, nlh, &c);
968 }
966 } 969 }
967 970
968 return inet_diag_get_exact_compat(skb, nlh); 971 return inet_diag_get_exact_compat(skb, nlh);
@@ -985,9 +988,12 @@ static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
985 inet_diag_bc_audit(nla_data(attr), nla_len(attr))) 988 inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
986 return -EINVAL; 989 return -EINVAL;
987 } 990 }
988 991 {
989 return netlink_dump_start(sock_diag_nlsk, skb, h, 992 struct netlink_dump_control c = {
990 inet_diag_dump, NULL, 0); 993 .dump = inet_diag_dump,
994 };
995 return netlink_dump_start(sock_diag_nlsk, skb, h, &c);
996 }
991 } 997 }
992 998
993 return inet_diag_get_exact(skb, h, (struct inet_diag_req_v2 *)NLMSG_DATA(h)); 999 return inet_diag_get_exact(skb, h, (struct inet_diag_req_v2 *)NLMSG_DATA(h));
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 1f23a57aa9e6..3727e234c884 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -20,6 +20,8 @@
20 * Patrick McHardy : LRU queue of frag heads for evictor. 20 * Patrick McHardy : LRU queue of frag heads for evictor.
21 */ 21 */
22 22
23#define pr_fmt(fmt) "IPv4: " fmt
24
23#include <linux/compiler.h> 25#include <linux/compiler.h>
24#include <linux/module.h> 26#include <linux/module.h>
25#include <linux/types.h> 27#include <linux/types.h>
@@ -299,7 +301,7 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
299 return container_of(q, struct ipq, q); 301 return container_of(q, struct ipq, q);
300 302
301out_nomem: 303out_nomem:
302 LIMIT_NETDEBUG(KERN_ERR "ip_frag_create: no memory left !\n"); 304 LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n"));
303 return NULL; 305 return NULL;
304} 306}
305 307
@@ -637,14 +639,13 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
637 return 0; 639 return 0;
638 640
639out_nomem: 641out_nomem:
640 LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing " 642 LIMIT_NETDEBUG(KERN_ERR pr_fmt("queue_glue: no memory for gluing queue %p\n"),
641 "queue %p\n", qp); 643 qp);
642 err = -ENOMEM; 644 err = -ENOMEM;
643 goto out_fail; 645 goto out_fail;
644out_oversize: 646out_oversize:
645 if (net_ratelimit()) 647 if (net_ratelimit())
646 printk(KERN_INFO "Oversized IP packet from %pI4.\n", 648 pr_info("Oversized IP packet from %pI4\n", &qp->saddr);
647 &qp->saddr);
648out_fail: 649out_fail:
649 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); 650 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
650 return err; 651 return err;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 38673d2860e2..b57532d4742c 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -10,6 +10,8 @@
10 * 10 *
11 */ 11 */
12 12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
13#include <linux/capability.h> 15#include <linux/capability.h>
14#include <linux/module.h> 16#include <linux/module.h>
15#include <linux/types.h> 17#include <linux/types.h>
@@ -730,15 +732,16 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
730 732
731 if (skb->protocol == htons(ETH_P_IP)) { 733 if (skb->protocol == htons(ETH_P_IP)) {
732 rt = skb_rtable(skb); 734 rt = skb_rtable(skb);
733 if ((dst = rt->rt_gateway) == 0) 735 dst = rt->rt_gateway;
734 goto tx_error_icmp;
735 } 736 }
736#if IS_ENABLED(CONFIG_IPV6) 737#if IS_ENABLED(CONFIG_IPV6)
737 else if (skb->protocol == htons(ETH_P_IPV6)) { 738 else if (skb->protocol == htons(ETH_P_IPV6)) {
738 struct neighbour *neigh = dst_get_neighbour_noref(skb_dst(skb));
739 const struct in6_addr *addr6; 739 const struct in6_addr *addr6;
740 struct neighbour *neigh;
741 bool do_tx_error_icmp;
740 int addr_type; 742 int addr_type;
741 743
744 neigh = dst_neigh_lookup(skb_dst(skb), &ipv6_hdr(skb)->daddr);
742 if (neigh == NULL) 745 if (neigh == NULL)
743 goto tx_error; 746 goto tx_error;
744 747
@@ -751,9 +754,14 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
751 } 754 }
752 755
753 if ((addr_type & IPV6_ADDR_COMPATv4) == 0) 756 if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
757 do_tx_error_icmp = true;
758 else {
759 do_tx_error_icmp = false;
760 dst = addr6->s6_addr32[3];
761 }
762 neigh_release(neigh);
763 if (do_tx_error_icmp)
754 goto tx_error_icmp; 764 goto tx_error_icmp;
755
756 dst = addr6->s6_addr32[3];
757 } 765 }
758#endif 766#endif
759 else 767 else
@@ -914,9 +922,10 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
914 __IPTUNNEL_XMIT(tstats, &dev->stats); 922 __IPTUNNEL_XMIT(tstats, &dev->stats);
915 return NETDEV_TX_OK; 923 return NETDEV_TX_OK;
916 924
925#if IS_ENABLED(CONFIG_IPV6)
917tx_error_icmp: 926tx_error_icmp:
918 dst_link_failure(skb); 927 dst_link_failure(skb);
919 928#endif
920tx_error: 929tx_error:
921 dev->stats.tx_errors++; 930 dev->stats.tx_errors++;
922 dev_kfree_skb(skb); 931 dev_kfree_skb(skb);
@@ -1529,7 +1538,7 @@ static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nla
1529 return -EEXIST; 1538 return -EEXIST;
1530 1539
1531 if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS]) 1540 if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
1532 random_ether_addr(dev->dev_addr); 1541 eth_hw_addr_random(dev);
1533 1542
1534 mtu = ipgre_tunnel_bind_dev(dev); 1543 mtu = ipgre_tunnel_bind_dev(dev);
1535 if (!tb[IFLA_MTU]) 1544 if (!tb[IFLA_MTU])
@@ -1709,7 +1718,7 @@ static int __init ipgre_init(void)
1709{ 1718{
1710 int err; 1719 int err;
1711 1720
1712 printk(KERN_INFO "GRE over IPv4 tunneling driver\n"); 1721 pr_info("GRE over IPv4 tunneling driver\n");
1713 1722
1714 err = register_pernet_device(&ipgre_net_ops); 1723 err = register_pernet_device(&ipgre_net_ops);
1715 if (err < 0) 1724 if (err < 0)
@@ -1717,7 +1726,7 @@ static int __init ipgre_init(void)
1717 1726
1718 err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO); 1727 err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1719 if (err < 0) { 1728 if (err < 0) {
1720 printk(KERN_INFO "ipgre init: can't add protocol\n"); 1729 pr_info("%s: can't add protocol\n", __func__);
1721 goto add_proto_failed; 1730 goto add_proto_failed;
1722 } 1731 }
1723 1732
@@ -1746,7 +1755,7 @@ static void __exit ipgre_fini(void)
1746 rtnl_link_unregister(&ipgre_tap_ops); 1755 rtnl_link_unregister(&ipgre_tap_ops);
1747 rtnl_link_unregister(&ipgre_link_ops); 1756 rtnl_link_unregister(&ipgre_link_ops);
1748 if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0) 1757 if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
1749 printk(KERN_INFO "ipgre close: can't remove protocol\n"); 1758 pr_info("%s: can't remove protocol\n", __func__);
1750 unregister_pernet_device(&ipgre_net_ops); 1759 unregister_pernet_device(&ipgre_net_ops);
1751} 1760}
1752 1761
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 073a9b01c40c..f3f1108940f5 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -113,6 +113,8 @@
113 * 2 of the License, or (at your option) any later version. 113 * 2 of the License, or (at your option) any later version.
114 */ 114 */
115 115
116#define pr_fmt(fmt) "IPv4: " fmt
117
116#include <asm/system.h> 118#include <asm/system.h>
117#include <linux/module.h> 119#include <linux/module.h>
118#include <linux/types.h> 120#include <linux/types.h>
@@ -148,7 +150,7 @@
148/* 150/*
149 * Process Router Attention IP option (RFC 2113) 151 * Process Router Attention IP option (RFC 2113)
150 */ 152 */
151int ip_call_ra_chain(struct sk_buff *skb) 153bool ip_call_ra_chain(struct sk_buff *skb)
152{ 154{
153 struct ip_ra_chain *ra; 155 struct ip_ra_chain *ra;
154 u8 protocol = ip_hdr(skb)->protocol; 156 u8 protocol = ip_hdr(skb)->protocol;
@@ -167,7 +169,7 @@ int ip_call_ra_chain(struct sk_buff *skb)
167 net_eq(sock_net(sk), dev_net(dev))) { 169 net_eq(sock_net(sk), dev_net(dev))) {
168 if (ip_is_fragment(ip_hdr(skb))) { 170 if (ip_is_fragment(ip_hdr(skb))) {
169 if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) 171 if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN))
170 return 1; 172 return true;
171 } 173 }
172 if (last) { 174 if (last) {
173 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 175 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
@@ -180,9 +182,9 @@ int ip_call_ra_chain(struct sk_buff *skb)
180 182
181 if (last) { 183 if (last) {
182 raw_rcv(last, skb); 184 raw_rcv(last, skb);
183 return 1; 185 return true;
184 } 186 }
185 return 0; 187 return false;
186} 188}
187 189
188static int ip_local_deliver_finish(struct sk_buff *skb) 190static int ip_local_deliver_finish(struct sk_buff *skb)
@@ -265,7 +267,7 @@ int ip_local_deliver(struct sk_buff *skb)
265 ip_local_deliver_finish); 267 ip_local_deliver_finish);
266} 268}
267 269
268static inline int ip_rcv_options(struct sk_buff *skb) 270static inline bool ip_rcv_options(struct sk_buff *skb)
269{ 271{
270 struct ip_options *opt; 272 struct ip_options *opt;
271 const struct iphdr *iph; 273 const struct iphdr *iph;
@@ -299,8 +301,8 @@ static inline int ip_rcv_options(struct sk_buff *skb)
299 if (!IN_DEV_SOURCE_ROUTE(in_dev)) { 301 if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
300 if (IN_DEV_LOG_MARTIANS(in_dev) && 302 if (IN_DEV_LOG_MARTIANS(in_dev) &&
301 net_ratelimit()) 303 net_ratelimit())
302 printk(KERN_INFO "source route option %pI4 -> %pI4\n", 304 pr_info("source route option %pI4 -> %pI4\n",
303 &iph->saddr, &iph->daddr); 305 &iph->saddr, &iph->daddr);
304 goto drop; 306 goto drop;
305 } 307 }
306 } 308 }
@@ -309,9 +311,9 @@ static inline int ip_rcv_options(struct sk_buff *skb)
309 goto drop; 311 goto drop;
310 } 312 }
311 313
312 return 0; 314 return false;
313drop: 315drop:
314 return -1; 316 return true;
315} 317}
316 318
317static int ip_rcv_finish(struct sk_buff *skb) 319static int ip_rcv_finish(struct sk_buff *skb)
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 42dd1a90edea..a0d0d9d9b870 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -9,6 +9,8 @@
9 * 9 *
10 */ 10 */
11 11
12#define pr_fmt(fmt) "IPv4: " fmt
13
12#include <linux/capability.h> 14#include <linux/capability.h>
13#include <linux/module.h> 15#include <linux/module.h>
14#include <linux/slab.h> 16#include <linux/slab.h>
@@ -577,7 +579,7 @@ void ip_forward_options(struct sk_buff *skb)
577 ip_rt_get_source(&optptr[srrptr-1], skb, rt); 579 ip_rt_get_source(&optptr[srrptr-1], skb, rt);
578 optptr[2] = srrptr+4; 580 optptr[2] = srrptr+4;
579 } else if (net_ratelimit()) 581 } else if (net_ratelimit())
580 printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n"); 582 pr_crit("%s(): Argh! Destination lost!\n", __func__);
581 if (opt->ts_needaddr) { 583 if (opt->ts_needaddr) {
582 optptr = raw + opt->ts; 584 optptr = raw + opt->ts;
583 ip_rt_get_source(&optptr[optptr[2]-9], skb, rt); 585 ip_rt_get_source(&optptr[optptr[2]-9], skb, rt);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 5343d9ac510b..2fd0fba77124 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -464,6 +464,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
464 (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) | 464 (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
465 (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) | 465 (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
466 (1<<IP_MINTTL) | (1<<IP_NODEFRAG))) || 466 (1<<IP_MINTTL) | (1<<IP_NODEFRAG))) ||
467 optname == IP_UNICAST_IF ||
467 optname == IP_MULTICAST_TTL || 468 optname == IP_MULTICAST_TTL ||
468 optname == IP_MULTICAST_ALL || 469 optname == IP_MULTICAST_ALL ||
469 optname == IP_MULTICAST_LOOP || 470 optname == IP_MULTICAST_LOOP ||
@@ -623,6 +624,35 @@ static int do_ip_setsockopt(struct sock *sk, int level,
623 goto e_inval; 624 goto e_inval;
624 inet->mc_loop = !!val; 625 inet->mc_loop = !!val;
625 break; 626 break;
627 case IP_UNICAST_IF:
628 {
629 struct net_device *dev = NULL;
630 int ifindex;
631
632 if (optlen != sizeof(int))
633 goto e_inval;
634
635 ifindex = (__force int)ntohl((__force __be32)val);
636 if (ifindex == 0) {
637 inet->uc_index = 0;
638 err = 0;
639 break;
640 }
641
642 dev = dev_get_by_index(sock_net(sk), ifindex);
643 err = -EADDRNOTAVAIL;
644 if (!dev)
645 break;
646 dev_put(dev);
647
648 err = -EINVAL;
649 if (sk->sk_bound_dev_if)
650 break;
651
652 inet->uc_index = ifindex;
653 err = 0;
654 break;
655 }
626 case IP_MULTICAST_IF: 656 case IP_MULTICAST_IF:
627 { 657 {
628 struct ip_mreqn mreq; 658 struct ip_mreqn mreq;
@@ -1173,6 +1203,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1173 case IP_MULTICAST_LOOP: 1203 case IP_MULTICAST_LOOP:
1174 val = inet->mc_loop; 1204 val = inet->mc_loop;
1175 break; 1205 break;
1206 case IP_UNICAST_IF:
1207 val = (__force int)htonl((__u32) inet->uc_index);
1208 break;
1176 case IP_MULTICAST_IF: 1209 case IP_MULTICAST_IF:
1177 { 1210 {
1178 struct in_addr addr; 1211 struct in_addr addr;
@@ -1251,6 +1284,10 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1251 int hlim = inet->mc_ttl; 1284 int hlim = inet->mc_ttl;
1252 put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim); 1285 put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
1253 } 1286 }
1287 if (inet->cmsg_flags & IP_CMSG_TOS) {
1288 int tos = inet->rcv_tos;
1289 put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
1290 }
1254 len -= msg.msg_controllen; 1291 len -= msg.msg_controllen;
1255 return put_user(len, optlen); 1292 return put_user(len, optlen);
1256 } 1293 }
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index c857f6f49b03..63b64c45a826 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -156,11 +156,11 @@ static const struct net_protocol ipcomp4_protocol = {
156static int __init ipcomp4_init(void) 156static int __init ipcomp4_init(void)
157{ 157{
158 if (xfrm_register_type(&ipcomp_type, AF_INET) < 0) { 158 if (xfrm_register_type(&ipcomp_type, AF_INET) < 0) {
159 printk(KERN_INFO "ipcomp init: can't add xfrm type\n"); 159 pr_info("%s: can't add xfrm type\n", __func__);
160 return -EAGAIN; 160 return -EAGAIN;
161 } 161 }
162 if (inet_add_protocol(&ipcomp4_protocol, IPPROTO_COMP) < 0) { 162 if (inet_add_protocol(&ipcomp4_protocol, IPPROTO_COMP) < 0) {
163 printk(KERN_INFO "ipcomp init: can't add protocol\n"); 163 pr_info("%s: can't add protocol\n", __func__);
164 xfrm_unregister_type(&ipcomp_type, AF_INET); 164 xfrm_unregister_type(&ipcomp_type, AF_INET);
165 return -EAGAIN; 165 return -EAGAIN;
166 } 166 }
@@ -170,9 +170,9 @@ static int __init ipcomp4_init(void)
170static void __exit ipcomp4_fini(void) 170static void __exit ipcomp4_fini(void)
171{ 171{
172 if (inet_del_protocol(&ipcomp4_protocol, IPPROTO_COMP) < 0) 172 if (inet_del_protocol(&ipcomp4_protocol, IPPROTO_COMP) < 0)
173 printk(KERN_INFO "ip ipcomp close: can't remove protocol\n"); 173 pr_info("%s: can't remove protocol\n", __func__);
174 if (xfrm_unregister_type(&ipcomp_type, AF_INET) < 0) 174 if (xfrm_unregister_type(&ipcomp_type, AF_INET) < 0)
175 printk(KERN_INFO "ip ipcomp close: can't remove xfrm type\n"); 175 pr_info("%s: can't remove xfrm type\n", __func__);
176} 176}
177 177
178module_init(ipcomp4_init); 178module_init(ipcomp4_init);
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 6e412a60a91f..92ac7e7363a0 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -214,7 +214,7 @@ static int __init ic_open_devs(void)
214 if (!(dev->flags & IFF_LOOPBACK)) 214 if (!(dev->flags & IFF_LOOPBACK))
215 continue; 215 continue;
216 if (dev_change_flags(dev, dev->flags | IFF_UP) < 0) 216 if (dev_change_flags(dev, dev->flags | IFF_UP) < 0)
217 printk(KERN_ERR "IP-Config: Failed to open %s\n", dev->name); 217 pr_err("IP-Config: Failed to open %s\n", dev->name);
218 } 218 }
219 219
220 for_each_netdev(&init_net, dev) { 220 for_each_netdev(&init_net, dev) {
@@ -223,7 +223,8 @@ static int __init ic_open_devs(void)
223 if (dev->mtu >= 364) 223 if (dev->mtu >= 364)
224 able |= IC_BOOTP; 224 able |= IC_BOOTP;
225 else 225 else
226 printk(KERN_WARNING "DHCP/BOOTP: Ignoring device %s, MTU %d too small", dev->name, dev->mtu); 226 pr_warn("DHCP/BOOTP: Ignoring device %s, MTU %d too small",
227 dev->name, dev->mtu);
227 if (!(dev->flags & IFF_NOARP)) 228 if (!(dev->flags & IFF_NOARP))
228 able |= IC_RARP; 229 able |= IC_RARP;
229 able &= ic_proto_enabled; 230 able &= ic_proto_enabled;
@@ -231,7 +232,8 @@ static int __init ic_open_devs(void)
231 continue; 232 continue;
232 oflags = dev->flags; 233 oflags = dev->flags;
233 if (dev_change_flags(dev, oflags | IFF_UP) < 0) { 234 if (dev_change_flags(dev, oflags | IFF_UP) < 0) {
234 printk(KERN_ERR "IP-Config: Failed to open %s\n", dev->name); 235 pr_err("IP-Config: Failed to open %s\n",
236 dev->name);
235 continue; 237 continue;
236 } 238 }
237 if (!(d = kmalloc(sizeof(struct ic_device), GFP_KERNEL))) { 239 if (!(d = kmalloc(sizeof(struct ic_device), GFP_KERNEL))) {
@@ -273,9 +275,10 @@ have_carrier:
273 275
274 if (!ic_first_dev) { 276 if (!ic_first_dev) {
275 if (user_dev_name[0]) 277 if (user_dev_name[0])
276 printk(KERN_ERR "IP-Config: Device `%s' not found.\n", user_dev_name); 278 pr_err("IP-Config: Device `%s' not found\n",
279 user_dev_name);
277 else 280 else
278 printk(KERN_ERR "IP-Config: No network devices available.\n"); 281 pr_err("IP-Config: No network devices available\n");
279 return -ENODEV; 282 return -ENODEV;
280 } 283 }
281 return 0; 284 return 0;
@@ -359,17 +362,20 @@ static int __init ic_setup_if(void)
359 strcpy(ir.ifr_ifrn.ifrn_name, ic_dev->name); 362 strcpy(ir.ifr_ifrn.ifrn_name, ic_dev->name);
360 set_sockaddr(sin, ic_myaddr, 0); 363 set_sockaddr(sin, ic_myaddr, 0);
361 if ((err = ic_devinet_ioctl(SIOCSIFADDR, &ir)) < 0) { 364 if ((err = ic_devinet_ioctl(SIOCSIFADDR, &ir)) < 0) {
362 printk(KERN_ERR "IP-Config: Unable to set interface address (%d).\n", err); 365 pr_err("IP-Config: Unable to set interface address (%d)\n",
366 err);
363 return -1; 367 return -1;
364 } 368 }
365 set_sockaddr(sin, ic_netmask, 0); 369 set_sockaddr(sin, ic_netmask, 0);
366 if ((err = ic_devinet_ioctl(SIOCSIFNETMASK, &ir)) < 0) { 370 if ((err = ic_devinet_ioctl(SIOCSIFNETMASK, &ir)) < 0) {
367 printk(KERN_ERR "IP-Config: Unable to set interface netmask (%d).\n", err); 371 pr_err("IP-Config: Unable to set interface netmask (%d)\n",
372 err);
368 return -1; 373 return -1;
369 } 374 }
370 set_sockaddr(sin, ic_myaddr | ~ic_netmask, 0); 375 set_sockaddr(sin, ic_myaddr | ~ic_netmask, 0);
371 if ((err = ic_devinet_ioctl(SIOCSIFBRDADDR, &ir)) < 0) { 376 if ((err = ic_devinet_ioctl(SIOCSIFBRDADDR, &ir)) < 0) {
372 printk(KERN_ERR "IP-Config: Unable to set interface broadcast address (%d).\n", err); 377 pr_err("IP-Config: Unable to set interface broadcast address (%d)\n",
378 err);
373 return -1; 379 return -1;
374 } 380 }
375 /* Handle the case where we need non-standard MTU on the boot link (a network 381 /* Handle the case where we need non-standard MTU on the boot link (a network
@@ -380,8 +386,8 @@ static int __init ic_setup_if(void)
380 strcpy(ir.ifr_name, ic_dev->name); 386 strcpy(ir.ifr_name, ic_dev->name);
381 ir.ifr_mtu = ic_dev_mtu; 387 ir.ifr_mtu = ic_dev_mtu;
382 if ((err = ic_dev_ioctl(SIOCSIFMTU, &ir)) < 0) 388 if ((err = ic_dev_ioctl(SIOCSIFMTU, &ir)) < 0)
383 printk(KERN_ERR "IP-Config: Unable to set interface mtu to %d (%d).\n", 389 pr_err("IP-Config: Unable to set interface mtu to %d (%d)\n",
384 ic_dev_mtu, err); 390 ic_dev_mtu, err);
385 } 391 }
386 return 0; 392 return 0;
387} 393}
@@ -396,7 +402,7 @@ static int __init ic_setup_routes(void)
396 402
397 memset(&rm, 0, sizeof(rm)); 403 memset(&rm, 0, sizeof(rm));
398 if ((ic_gateway ^ ic_myaddr) & ic_netmask) { 404 if ((ic_gateway ^ ic_myaddr) & ic_netmask) {
399 printk(KERN_ERR "IP-Config: Gateway not on directly connected network.\n"); 405 pr_err("IP-Config: Gateway not on directly connected network\n");
400 return -1; 406 return -1;
401 } 407 }
402 set_sockaddr((struct sockaddr_in *) &rm.rt_dst, 0, 0); 408 set_sockaddr((struct sockaddr_in *) &rm.rt_dst, 0, 0);
@@ -404,7 +410,8 @@ static int __init ic_setup_routes(void)
404 set_sockaddr((struct sockaddr_in *) &rm.rt_gateway, ic_gateway, 0); 410 set_sockaddr((struct sockaddr_in *) &rm.rt_gateway, ic_gateway, 0);
405 rm.rt_flags = RTF_UP | RTF_GATEWAY; 411 rm.rt_flags = RTF_UP | RTF_GATEWAY;
406 if ((err = ic_route_ioctl(SIOCADDRT, &rm)) < 0) { 412 if ((err = ic_route_ioctl(SIOCADDRT, &rm)) < 0) {
407 printk(KERN_ERR "IP-Config: Cannot add default route (%d).\n", err); 413 pr_err("IP-Config: Cannot add default route (%d)\n",
414 err);
408 return -1; 415 return -1;
409 } 416 }
410 } 417 }
@@ -437,8 +444,8 @@ static int __init ic_defaults(void)
437 else if (IN_CLASSC(ntohl(ic_myaddr))) 444 else if (IN_CLASSC(ntohl(ic_myaddr)))
438 ic_netmask = htonl(IN_CLASSC_NET); 445 ic_netmask = htonl(IN_CLASSC_NET);
439 else { 446 else {
440 printk(KERN_ERR "IP-Config: Unable to guess netmask for address %pI4\n", 447 pr_err("IP-Config: Unable to guess netmask for address %pI4\n",
441 &ic_myaddr); 448 &ic_myaddr);
442 return -1; 449 return -1;
443 } 450 }
444 printk("IP-Config: Guessing netmask %pI4\n", &ic_netmask); 451 printk("IP-Config: Guessing netmask %pI4\n", &ic_netmask);
@@ -688,8 +695,8 @@ ic_dhcp_init_options(u8 *options)
688 e += len; 695 e += len;
689 } 696 }
690 if (*vendor_class_identifier) { 697 if (*vendor_class_identifier) {
691 printk(KERN_INFO "DHCP: sending class identifier \"%s\"\n", 698 pr_info("DHCP: sending class identifier \"%s\"\n",
692 vendor_class_identifier); 699 vendor_class_identifier);
693 *e++ = 60; /* Class-identifier */ 700 *e++ = 60; /* Class-identifier */
694 len = strlen(vendor_class_identifier); 701 len = strlen(vendor_class_identifier);
695 *e++ = len; 702 *e++ = len;
@@ -949,8 +956,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
949 /* Fragments are not supported */ 956 /* Fragments are not supported */
950 if (ip_is_fragment(h)) { 957 if (ip_is_fragment(h)) {
951 if (net_ratelimit()) 958 if (net_ratelimit())
952 printk(KERN_ERR "DHCP/BOOTP: Ignoring fragmented " 959 pr_err("DHCP/BOOTP: Ignoring fragmented reply\n");
953 "reply.\n");
954 goto drop; 960 goto drop;
955 } 961 }
956 962
@@ -999,8 +1005,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
999 if (b->op != BOOTP_REPLY || 1005 if (b->op != BOOTP_REPLY ||
1000 b->xid != d->xid) { 1006 b->xid != d->xid) {
1001 if (net_ratelimit()) 1007 if (net_ratelimit())
1002 printk(KERN_ERR "DHCP/BOOTP: Reply not for us, " 1008 pr_err("DHCP/BOOTP: Reply not for us, op[%x] xid[%x]\n",
1003 "op[%x] xid[%x]\n",
1004 b->op, b->xid); 1009 b->op, b->xid);
1005 goto drop_unlock; 1010 goto drop_unlock;
1006 } 1011 }
@@ -1008,7 +1013,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
1008 /* Is it a reply for the device we are configuring? */ 1013 /* Is it a reply for the device we are configuring? */
1009 if (b->xid != ic_dev_xid) { 1014 if (b->xid != ic_dev_xid) {
1010 if (net_ratelimit()) 1015 if (net_ratelimit())
1011 printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet\n"); 1016 pr_err("DHCP/BOOTP: Ignoring delayed packet\n");
1012 goto drop_unlock; 1017 goto drop_unlock;
1013 } 1018 }
1014 1019
@@ -1146,17 +1151,17 @@ static int __init ic_dynamic(void)
1146 * are missing, and without DHCP/BOOTP/RARP we are unable to get it. 1151 * are missing, and without DHCP/BOOTP/RARP we are unable to get it.
1147 */ 1152 */
1148 if (!ic_proto_enabled) { 1153 if (!ic_proto_enabled) {
1149 printk(KERN_ERR "IP-Config: Incomplete network configuration information.\n"); 1154 pr_err("IP-Config: Incomplete network configuration information\n");
1150 return -1; 1155 return -1;
1151 } 1156 }
1152 1157
1153#ifdef IPCONFIG_BOOTP 1158#ifdef IPCONFIG_BOOTP
1154 if ((ic_proto_enabled ^ ic_proto_have_if) & IC_BOOTP) 1159 if ((ic_proto_enabled ^ ic_proto_have_if) & IC_BOOTP)
1155 printk(KERN_ERR "DHCP/BOOTP: No suitable device found.\n"); 1160 pr_err("DHCP/BOOTP: No suitable device found\n");
1156#endif 1161#endif
1157#ifdef IPCONFIG_RARP 1162#ifdef IPCONFIG_RARP
1158 if ((ic_proto_enabled ^ ic_proto_have_if) & IC_RARP) 1163 if ((ic_proto_enabled ^ ic_proto_have_if) & IC_RARP)
1159 printk(KERN_ERR "RARP: No suitable device found.\n"); 1164 pr_err("RARP: No suitable device found\n");
1160#endif 1165#endif
1161 1166
1162 if (!ic_proto_have_if) 1167 if (!ic_proto_have_if)
@@ -1183,11 +1188,11 @@ static int __init ic_dynamic(void)
1183 * [Actually we could now, but the nothing else running note still 1188 * [Actually we could now, but the nothing else running note still
1184 * applies.. - AC] 1189 * applies.. - AC]
1185 */ 1190 */
1186 printk(KERN_NOTICE "Sending %s%s%s requests .", 1191 pr_notice("Sending %s%s%s requests .",
1187 do_bootp 1192 do_bootp
1188 ? ((ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP") : "", 1193 ? ((ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP") : "",
1189 (do_bootp && do_rarp) ? " and " : "", 1194 (do_bootp && do_rarp) ? " and " : "",
1190 do_rarp ? "RARP" : ""); 1195 do_rarp ? "RARP" : "");
1191 1196
1192 start_jiffies = jiffies; 1197 start_jiffies = jiffies;
1193 d = ic_first_dev; 1198 d = ic_first_dev;
@@ -1216,13 +1221,13 @@ static int __init ic_dynamic(void)
1216 (ic_proto_enabled & IC_USE_DHCP) && 1221 (ic_proto_enabled & IC_USE_DHCP) &&
1217 ic_dhcp_msgtype != DHCPACK) { 1222 ic_dhcp_msgtype != DHCPACK) {
1218 ic_got_reply = 0; 1223 ic_got_reply = 0;
1219 printk(KERN_CONT ","); 1224 pr_cont(",");
1220 continue; 1225 continue;
1221 } 1226 }
1222#endif /* IPCONFIG_DHCP */ 1227#endif /* IPCONFIG_DHCP */
1223 1228
1224 if (ic_got_reply) { 1229 if (ic_got_reply) {
1225 printk(KERN_CONT " OK\n"); 1230 pr_cont(" OK\n");
1226 break; 1231 break;
1227 } 1232 }
1228 1233
@@ -1230,7 +1235,7 @@ static int __init ic_dynamic(void)
1230 continue; 1235 continue;
1231 1236
1232 if (! --retries) { 1237 if (! --retries) {
1233 printk(KERN_CONT " timed out!\n"); 1238 pr_cont(" timed out!\n");
1234 break; 1239 break;
1235 } 1240 }
1236 1241
@@ -1240,7 +1245,7 @@ static int __init ic_dynamic(void)
1240 if (timeout > CONF_TIMEOUT_MAX) 1245 if (timeout > CONF_TIMEOUT_MAX)
1241 timeout = CONF_TIMEOUT_MAX; 1246 timeout = CONF_TIMEOUT_MAX;
1242 1247
1243 printk(KERN_CONT "."); 1248 pr_cont(".");
1244 } 1249 }
1245 1250
1246#ifdef IPCONFIG_BOOTP 1251#ifdef IPCONFIG_BOOTP
@@ -1260,8 +1265,8 @@ static int __init ic_dynamic(void)
1260 printk("IP-Config: Got %s answer from %pI4, ", 1265 printk("IP-Config: Got %s answer from %pI4, ",
1261 ((ic_got_reply & IC_RARP) ? "RARP" 1266 ((ic_got_reply & IC_RARP) ? "RARP"
1262 : (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"), 1267 : (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"),
1263 &ic_servaddr); 1268 &ic_servaddr);
1264 printk(KERN_CONT "my address is %pI4\n", &ic_myaddr); 1269 pr_cont("my address is %pI4\n", &ic_myaddr);
1265 1270
1266 return 0; 1271 return 0;
1267} 1272}
@@ -1437,24 +1442,22 @@ static int __init ip_auto_config(void)
1437 */ 1442 */
1438#ifdef CONFIG_ROOT_NFS 1443#ifdef CONFIG_ROOT_NFS
1439 if (ROOT_DEV == Root_NFS) { 1444 if (ROOT_DEV == Root_NFS) {
1440 printk(KERN_ERR 1445 pr_err("IP-Config: Retrying forever (NFS root)...\n");
1441 "IP-Config: Retrying forever (NFS root)...\n");
1442 goto try_try_again; 1446 goto try_try_again;
1443 } 1447 }
1444#endif 1448#endif
1445 1449
1446 if (--retries) { 1450 if (--retries) {
1447 printk(KERN_ERR 1451 pr_err("IP-Config: Reopening network devices...\n");
1448 "IP-Config: Reopening network devices...\n");
1449 goto try_try_again; 1452 goto try_try_again;
1450 } 1453 }
1451 1454
1452 /* Oh, well. At least we tried. */ 1455 /* Oh, well. At least we tried. */
1453 printk(KERN_ERR "IP-Config: Auto-configuration of network failed.\n"); 1456 pr_err("IP-Config: Auto-configuration of network failed\n");
1454 return -1; 1457 return -1;
1455 } 1458 }
1456#else /* !DYNAMIC */ 1459#else /* !DYNAMIC */
1457 printk(KERN_ERR "IP-Config: Incomplete network configuration information.\n"); 1460 pr_err("IP-Config: Incomplete network configuration information\n");
1458 ic_close_devs(); 1461 ic_close_devs();
1459 return -1; 1462 return -1;
1460#endif /* IPCONFIG_DYNAMIC */ 1463#endif /* IPCONFIG_DYNAMIC */
@@ -1492,19 +1495,16 @@ static int __init ip_auto_config(void)
1492 /* 1495 /*
1493 * Clue in the operator. 1496 * Clue in the operator.
1494 */ 1497 */
1495 printk("IP-Config: Complete:\n"); 1498 pr_info("IP-Config: Complete:\n");
1496 printk(" device=%s", ic_dev->name); 1499 pr_info(" device=%s, addr=%pI4, mask=%pI4, gw=%pI4\n",
1497 printk(KERN_CONT ", addr=%pI4", &ic_myaddr); 1500 ic_dev->name, &ic_myaddr, &ic_netmask, &ic_gateway);
1498 printk(KERN_CONT ", mask=%pI4", &ic_netmask); 1501 pr_info(" host=%s, domain=%s, nis-domain=%s\n",
1499 printk(KERN_CONT ", gw=%pI4", &ic_gateway); 1502 utsname()->nodename, ic_domain, utsname()->domainname);
1500 printk(KERN_CONT ",\n host=%s, domain=%s, nis-domain=%s", 1503 pr_info(" bootserver=%pI4, rootserver=%pI4, rootpath=%s",
1501 utsname()->nodename, ic_domain, utsname()->domainname); 1504 &ic_servaddr, &root_server_addr, root_server_path);
1502 printk(KERN_CONT ",\n bootserver=%pI4", &ic_servaddr);
1503 printk(KERN_CONT ", rootserver=%pI4", &root_server_addr);
1504 printk(KERN_CONT ", rootpath=%s", root_server_path);
1505 if (ic_dev_mtu) 1505 if (ic_dev_mtu)
1506 printk(KERN_CONT ", mtu=%d", ic_dev_mtu); 1506 pr_cont(", mtu=%d", ic_dev_mtu);
1507 printk(KERN_CONT "\n"); 1507 pr_cont("\n");
1508#endif /* !SILENT */ 1508#endif /* !SILENT */
1509 1509
1510 return 0; 1510 return 0;
@@ -1637,8 +1637,8 @@ static int __init vendor_class_identifier_setup(char *addrs)
1637 if (strlcpy(vendor_class_identifier, addrs, 1637 if (strlcpy(vendor_class_identifier, addrs,
1638 sizeof(vendor_class_identifier)) 1638 sizeof(vendor_class_identifier))
1639 >= sizeof(vendor_class_identifier)) 1639 >= sizeof(vendor_class_identifier))
1640 printk(KERN_WARNING "DHCP: vendorclass too long, truncated to \"%s\"", 1640 pr_warn("DHCP: vendorclass too long, truncated to \"%s\"",
1641 vendor_class_identifier); 1641 vendor_class_identifier);
1642 return 1; 1642 return 1;
1643} 1643}
1644 1644
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 22a199315309..ae1413e3f2f8 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -454,8 +454,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
454 dev->stats.tx_fifo_errors++; 454 dev->stats.tx_fifo_errors++;
455 goto tx_error; 455 goto tx_error;
456 } 456 }
457 if ((dst = rt->rt_gateway) == 0) 457 dst = rt->rt_gateway;
458 goto tx_error_icmp;
459 } 458 }
460 459
461 rt = ip_route_output_ports(dev_net(dev), &fl4, NULL, 460 rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
@@ -893,7 +892,7 @@ static int __init ipip_init(void)
893 err = xfrm4_tunnel_register(&ipip_handler, AF_INET); 892 err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
894 if (err < 0) { 893 if (err < 0) {
895 unregister_pernet_device(&ipip_net_ops); 894 unregister_pernet_device(&ipip_net_ops);
896 printk(KERN_INFO "ipip init: can't register tunnel\n"); 895 pr_info("%s: can't register tunnel\n", __func__);
897 } 896 }
898 return err; 897 return err;
899} 898}
@@ -901,7 +900,7 @@ static int __init ipip_init(void)
901static void __exit ipip_fini(void) 900static void __exit ipip_fini(void)
902{ 901{
903 if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET)) 902 if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET))
904 printk(KERN_INFO "ipip close: can't deregister tunnel\n"); 903 pr_info("%s: can't deregister tunnel\n", __func__);
905 904
906 unregister_pernet_device(&ipip_net_ops); 905 unregister_pernet_device(&ipip_net_ops);
907} 906}
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 7bc2db6db8d4..0518a4fb177b 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -951,7 +951,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
951 rcu_read_unlock(); 951 rcu_read_unlock();
952 if (ret < 0) { 952 if (ret < 0) {
953 if (net_ratelimit()) 953 if (net_ratelimit())
954 printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n"); 954 pr_warn("mroute: pending queue full, dropping entries\n");
955 kfree_skb(skb); 955 kfree_skb(skb);
956 } 956 }
957 957
@@ -2538,7 +2538,7 @@ int __init ip_mr_init(void)
2538 goto reg_notif_fail; 2538 goto reg_notif_fail;
2539#ifdef CONFIG_IP_PIMSM_V2 2539#ifdef CONFIG_IP_PIMSM_V2
2540 if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) { 2540 if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
2541 printk(KERN_ERR "ip_mr_init: can't add PIM protocol\n"); 2541 pr_err("%s: can't add PIM protocol\n", __func__);
2542 err = -EAGAIN; 2542 err = -EAGAIN;
2543 goto add_proto_fail; 2543 goto add_proto_fail;
2544 } 2544 }
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 74dfc9e5211f..fcc543cd987a 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -123,15 +123,6 @@ config IP_NF_TARGET_REJECT
123 123
124 To compile it as a module, choose M here. If unsure, say N. 124 To compile it as a module, choose M here. If unsure, say N.
125 125
126config IP_NF_TARGET_LOG
127 tristate "LOG target support"
128 default m if NETFILTER_ADVANCED=n
129 help
130 This option adds a `LOG' target, which allows you to create rules in
131 any iptables table which records the packet header to the syslog.
132
133 To compile it as a module, choose M here. If unsure, say N.
134
135config IP_NF_TARGET_ULOG 126config IP_NF_TARGET_ULOG
136 tristate "ULOG target support" 127 tristate "ULOG target support"
137 default m if NETFILTER_ADVANCED=n 128 default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 213a462b739b..240b68469a7a 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -54,7 +54,6 @@ obj-$(CONFIG_IP_NF_MATCH_RPFILTER) += ipt_rpfilter.o
54# targets 54# targets
55obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o 55obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
56obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o 56obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
57obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o
58obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o 57obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
59obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o 58obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o
60obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o 59obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
deleted file mode 100644
index d76d6c9ed946..000000000000
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ /dev/null
@@ -1,516 +0,0 @@
1/*
2 * This is a module which is used for logging packets.
3 */
4
5/* (C) 1999-2001 Paul `Rusty' Russell
6 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13#include <linux/module.h>
14#include <linux/spinlock.h>
15#include <linux/skbuff.h>
16#include <linux/if_arp.h>
17#include <linux/ip.h>
18#include <net/icmp.h>
19#include <net/udp.h>
20#include <net/tcp.h>
21#include <net/route.h>
22
23#include <linux/netfilter.h>
24#include <linux/netfilter/x_tables.h>
25#include <linux/netfilter_ipv4/ipt_LOG.h>
26#include <net/netfilter/nf_log.h>
27#include <net/netfilter/xt_log.h>
28
29MODULE_LICENSE("GPL");
30MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
31MODULE_DESCRIPTION("Xtables: IPv4 packet logging to syslog");
32
33/* One level of recursion won't kill us */
34static void dump_packet(struct sbuff *m,
35 const struct nf_loginfo *info,
36 const struct sk_buff *skb,
37 unsigned int iphoff)
38{
39 struct iphdr _iph;
40 const struct iphdr *ih;
41 unsigned int logflags;
42
43 if (info->type == NF_LOG_TYPE_LOG)
44 logflags = info->u.log.logflags;
45 else
46 logflags = NF_LOG_MASK;
47
48 ih = skb_header_pointer(skb, iphoff, sizeof(_iph), &_iph);
49 if (ih == NULL) {
50 sb_add(m, "TRUNCATED");
51 return;
52 }
53
54 /* Important fields:
55 * TOS, len, DF/MF, fragment offset, TTL, src, dst, options. */
56 /* Max length: 40 "SRC=255.255.255.255 DST=255.255.255.255 " */
57 sb_add(m, "SRC=%pI4 DST=%pI4 ",
58 &ih->saddr, &ih->daddr);
59
60 /* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */
61 sb_add(m, "LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ",
62 ntohs(ih->tot_len), ih->tos & IPTOS_TOS_MASK,
63 ih->tos & IPTOS_PREC_MASK, ih->ttl, ntohs(ih->id));
64
65 /* Max length: 6 "CE DF MF " */
66 if (ntohs(ih->frag_off) & IP_CE)
67 sb_add(m, "CE ");
68 if (ntohs(ih->frag_off) & IP_DF)
69 sb_add(m, "DF ");
70 if (ntohs(ih->frag_off) & IP_MF)
71 sb_add(m, "MF ");
72
73 /* Max length: 11 "FRAG:65535 " */
74 if (ntohs(ih->frag_off) & IP_OFFSET)
75 sb_add(m, "FRAG:%u ", ntohs(ih->frag_off) & IP_OFFSET);
76
77 if ((logflags & IPT_LOG_IPOPT) &&
78 ih->ihl * 4 > sizeof(struct iphdr)) {
79 const unsigned char *op;
80 unsigned char _opt[4 * 15 - sizeof(struct iphdr)];
81 unsigned int i, optsize;
82
83 optsize = ih->ihl * 4 - sizeof(struct iphdr);
84 op = skb_header_pointer(skb, iphoff+sizeof(_iph),
85 optsize, _opt);
86 if (op == NULL) {
87 sb_add(m, "TRUNCATED");
88 return;
89 }
90
91 /* Max length: 127 "OPT (" 15*4*2chars ") " */
92 sb_add(m, "OPT (");
93 for (i = 0; i < optsize; i++)
94 sb_add(m, "%02X", op[i]);
95 sb_add(m, ") ");
96 }
97
98 switch (ih->protocol) {
99 case IPPROTO_TCP: {
100 struct tcphdr _tcph;
101 const struct tcphdr *th;
102
103 /* Max length: 10 "PROTO=TCP " */
104 sb_add(m, "PROTO=TCP ");
105
106 if (ntohs(ih->frag_off) & IP_OFFSET)
107 break;
108
109 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
110 th = skb_header_pointer(skb, iphoff + ih->ihl * 4,
111 sizeof(_tcph), &_tcph);
112 if (th == NULL) {
113 sb_add(m, "INCOMPLETE [%u bytes] ",
114 skb->len - iphoff - ih->ihl*4);
115 break;
116 }
117
118 /* Max length: 20 "SPT=65535 DPT=65535 " */
119 sb_add(m, "SPT=%u DPT=%u ",
120 ntohs(th->source), ntohs(th->dest));
121 /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
122 if (logflags & IPT_LOG_TCPSEQ)
123 sb_add(m, "SEQ=%u ACK=%u ",
124 ntohl(th->seq), ntohl(th->ack_seq));
125 /* Max length: 13 "WINDOW=65535 " */
126 sb_add(m, "WINDOW=%u ", ntohs(th->window));
127 /* Max length: 9 "RES=0x3F " */
128 sb_add(m, "RES=0x%02x ", (u8)(ntohl(tcp_flag_word(th) & TCP_RESERVED_BITS) >> 22));
129 /* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */
130 if (th->cwr)
131 sb_add(m, "CWR ");
132 if (th->ece)
133 sb_add(m, "ECE ");
134 if (th->urg)
135 sb_add(m, "URG ");
136 if (th->ack)
137 sb_add(m, "ACK ");
138 if (th->psh)
139 sb_add(m, "PSH ");
140 if (th->rst)
141 sb_add(m, "RST ");
142 if (th->syn)
143 sb_add(m, "SYN ");
144 if (th->fin)
145 sb_add(m, "FIN ");
146 /* Max length: 11 "URGP=65535 " */
147 sb_add(m, "URGP=%u ", ntohs(th->urg_ptr));
148
149 if ((logflags & IPT_LOG_TCPOPT) &&
150 th->doff * 4 > sizeof(struct tcphdr)) {
151 unsigned char _opt[4 * 15 - sizeof(struct tcphdr)];
152 const unsigned char *op;
153 unsigned int i, optsize;
154
155 optsize = th->doff * 4 - sizeof(struct tcphdr);
156 op = skb_header_pointer(skb,
157 iphoff+ih->ihl*4+sizeof(_tcph),
158 optsize, _opt);
159 if (op == NULL) {
160 sb_add(m, "TRUNCATED");
161 return;
162 }
163
164 /* Max length: 127 "OPT (" 15*4*2chars ") " */
165 sb_add(m, "OPT (");
166 for (i = 0; i < optsize; i++)
167 sb_add(m, "%02X", op[i]);
168 sb_add(m, ") ");
169 }
170 break;
171 }
172 case IPPROTO_UDP:
173 case IPPROTO_UDPLITE: {
174 struct udphdr _udph;
175 const struct udphdr *uh;
176
177 if (ih->protocol == IPPROTO_UDP)
178 /* Max length: 10 "PROTO=UDP " */
179 sb_add(m, "PROTO=UDP " );
180 else /* Max length: 14 "PROTO=UDPLITE " */
181 sb_add(m, "PROTO=UDPLITE ");
182
183 if (ntohs(ih->frag_off) & IP_OFFSET)
184 break;
185
186 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
187 uh = skb_header_pointer(skb, iphoff+ih->ihl*4,
188 sizeof(_udph), &_udph);
189 if (uh == NULL) {
190 sb_add(m, "INCOMPLETE [%u bytes] ",
191 skb->len - iphoff - ih->ihl*4);
192 break;
193 }
194
195 /* Max length: 20 "SPT=65535 DPT=65535 " */
196 sb_add(m, "SPT=%u DPT=%u LEN=%u ",
197 ntohs(uh->source), ntohs(uh->dest),
198 ntohs(uh->len));
199 break;
200 }
201 case IPPROTO_ICMP: {
202 struct icmphdr _icmph;
203 const struct icmphdr *ich;
204 static const size_t required_len[NR_ICMP_TYPES+1]
205 = { [ICMP_ECHOREPLY] = 4,
206 [ICMP_DEST_UNREACH]
207 = 8 + sizeof(struct iphdr),
208 [ICMP_SOURCE_QUENCH]
209 = 8 + sizeof(struct iphdr),
210 [ICMP_REDIRECT]
211 = 8 + sizeof(struct iphdr),
212 [ICMP_ECHO] = 4,
213 [ICMP_TIME_EXCEEDED]
214 = 8 + sizeof(struct iphdr),
215 [ICMP_PARAMETERPROB]
216 = 8 + sizeof(struct iphdr),
217 [ICMP_TIMESTAMP] = 20,
218 [ICMP_TIMESTAMPREPLY] = 20,
219 [ICMP_ADDRESS] = 12,
220 [ICMP_ADDRESSREPLY] = 12 };
221
222 /* Max length: 11 "PROTO=ICMP " */
223 sb_add(m, "PROTO=ICMP ");
224
225 if (ntohs(ih->frag_off) & IP_OFFSET)
226 break;
227
228 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
229 ich = skb_header_pointer(skb, iphoff + ih->ihl * 4,
230 sizeof(_icmph), &_icmph);
231 if (ich == NULL) {
232 sb_add(m, "INCOMPLETE [%u bytes] ",
233 skb->len - iphoff - ih->ihl*4);
234 break;
235 }
236
237 /* Max length: 18 "TYPE=255 CODE=255 " */
238 sb_add(m, "TYPE=%u CODE=%u ", ich->type, ich->code);
239
240 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
241 if (ich->type <= NR_ICMP_TYPES &&
242 required_len[ich->type] &&
243 skb->len-iphoff-ih->ihl*4 < required_len[ich->type]) {
244 sb_add(m, "INCOMPLETE [%u bytes] ",
245 skb->len - iphoff - ih->ihl*4);
246 break;
247 }
248
249 switch (ich->type) {
250 case ICMP_ECHOREPLY:
251 case ICMP_ECHO:
252 /* Max length: 19 "ID=65535 SEQ=65535 " */
253 sb_add(m, "ID=%u SEQ=%u ",
254 ntohs(ich->un.echo.id),
255 ntohs(ich->un.echo.sequence));
256 break;
257
258 case ICMP_PARAMETERPROB:
259 /* Max length: 14 "PARAMETER=255 " */
260 sb_add(m, "PARAMETER=%u ",
261 ntohl(ich->un.gateway) >> 24);
262 break;
263 case ICMP_REDIRECT:
264 /* Max length: 24 "GATEWAY=255.255.255.255 " */
265 sb_add(m, "GATEWAY=%pI4 ", &ich->un.gateway);
266 /* Fall through */
267 case ICMP_DEST_UNREACH:
268 case ICMP_SOURCE_QUENCH:
269 case ICMP_TIME_EXCEEDED:
270 /* Max length: 3+maxlen */
271 if (!iphoff) { /* Only recurse once. */
272 sb_add(m, "[");
273 dump_packet(m, info, skb,
274 iphoff + ih->ihl*4+sizeof(_icmph));
275 sb_add(m, "] ");
276 }
277
278 /* Max length: 10 "MTU=65535 " */
279 if (ich->type == ICMP_DEST_UNREACH &&
280 ich->code == ICMP_FRAG_NEEDED)
281 sb_add(m, "MTU=%u ", ntohs(ich->un.frag.mtu));
282 }
283 break;
284 }
285 /* Max Length */
286 case IPPROTO_AH: {
287 struct ip_auth_hdr _ahdr;
288 const struct ip_auth_hdr *ah;
289
290 if (ntohs(ih->frag_off) & IP_OFFSET)
291 break;
292
293 /* Max length: 9 "PROTO=AH " */
294 sb_add(m, "PROTO=AH ");
295
296 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
297 ah = skb_header_pointer(skb, iphoff+ih->ihl*4,
298 sizeof(_ahdr), &_ahdr);
299 if (ah == NULL) {
300 sb_add(m, "INCOMPLETE [%u bytes] ",
301 skb->len - iphoff - ih->ihl*4);
302 break;
303 }
304
305 /* Length: 15 "SPI=0xF1234567 " */
306 sb_add(m, "SPI=0x%x ", ntohl(ah->spi));
307 break;
308 }
309 case IPPROTO_ESP: {
310 struct ip_esp_hdr _esph;
311 const struct ip_esp_hdr *eh;
312
313 /* Max length: 10 "PROTO=ESP " */
314 sb_add(m, "PROTO=ESP ");
315
316 if (ntohs(ih->frag_off) & IP_OFFSET)
317 break;
318
319 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
320 eh = skb_header_pointer(skb, iphoff+ih->ihl*4,
321 sizeof(_esph), &_esph);
322 if (eh == NULL) {
323 sb_add(m, "INCOMPLETE [%u bytes] ",
324 skb->len - iphoff - ih->ihl*4);
325 break;
326 }
327
328 /* Length: 15 "SPI=0xF1234567 " */
329 sb_add(m, "SPI=0x%x ", ntohl(eh->spi));
330 break;
331 }
332 /* Max length: 10 "PROTO 255 " */
333 default:
334 sb_add(m, "PROTO=%u ", ih->protocol);
335 }
336
337 /* Max length: 15 "UID=4294967295 " */
338 if ((logflags & IPT_LOG_UID) && !iphoff && skb->sk) {
339 read_lock_bh(&skb->sk->sk_callback_lock);
340 if (skb->sk->sk_socket && skb->sk->sk_socket->file)
341 sb_add(m, "UID=%u GID=%u ",
342 skb->sk->sk_socket->file->f_cred->fsuid,
343 skb->sk->sk_socket->file->f_cred->fsgid);
344 read_unlock_bh(&skb->sk->sk_callback_lock);
345 }
346
347 /* Max length: 16 "MARK=0xFFFFFFFF " */
348 if (!iphoff && skb->mark)
349 sb_add(m, "MARK=0x%x ", skb->mark);
350
351 /* Proto Max log string length */
352 /* IP: 40+46+6+11+127 = 230 */
353 /* TCP: 10+max(25,20+30+13+9+32+11+127) = 252 */
354 /* UDP: 10+max(25,20) = 35 */
355 /* UDPLITE: 14+max(25,20) = 39 */
356 /* ICMP: 11+max(25, 18+25+max(19,14,24+3+n+10,3+n+10)) = 91+n */
357 /* ESP: 10+max(25)+15 = 50 */
358 /* AH: 9+max(25)+15 = 49 */
359 /* unknown: 10 */
360
361 /* (ICMP allows recursion one level deep) */
362 /* maxlen = IP + ICMP + IP + max(TCP,UDP,ICMP,unknown) */
363 /* maxlen = 230+ 91 + 230 + 252 = 803 */
364}
365
366static void dump_mac_header(struct sbuff *m,
367 const struct nf_loginfo *info,
368 const struct sk_buff *skb)
369{
370 struct net_device *dev = skb->dev;
371 unsigned int logflags = 0;
372
373 if (info->type == NF_LOG_TYPE_LOG)
374 logflags = info->u.log.logflags;
375
376 if (!(logflags & IPT_LOG_MACDECODE))
377 goto fallback;
378
379 switch (dev->type) {
380 case ARPHRD_ETHER:
381 sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
382 eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
383 ntohs(eth_hdr(skb)->h_proto));
384 return;
385 default:
386 break;
387 }
388
389fallback:
390 sb_add(m, "MAC=");
391 if (dev->hard_header_len &&
392 skb->mac_header != skb->network_header) {
393 const unsigned char *p = skb_mac_header(skb);
394 unsigned int i;
395
396 sb_add(m, "%02x", *p++);
397 for (i = 1; i < dev->hard_header_len; i++, p++)
398 sb_add(m, ":%02x", *p);
399 }
400 sb_add(m, " ");
401}
402
403static struct nf_loginfo default_loginfo = {
404 .type = NF_LOG_TYPE_LOG,
405 .u = {
406 .log = {
407 .level = 5,
408 .logflags = NF_LOG_MASK,
409 },
410 },
411};
412
413static void
414ipt_log_packet(u_int8_t pf,
415 unsigned int hooknum,
416 const struct sk_buff *skb,
417 const struct net_device *in,
418 const struct net_device *out,
419 const struct nf_loginfo *loginfo,
420 const char *prefix)
421{
422 struct sbuff *m = sb_open();
423
424 if (!loginfo)
425 loginfo = &default_loginfo;
426
427 sb_add(m, "<%d>%sIN=%s OUT=%s ", loginfo->u.log.level,
428 prefix,
429 in ? in->name : "",
430 out ? out->name : "");
431#ifdef CONFIG_BRIDGE_NETFILTER
432 if (skb->nf_bridge) {
433 const struct net_device *physindev;
434 const struct net_device *physoutdev;
435
436 physindev = skb->nf_bridge->physindev;
437 if (physindev && in != physindev)
438 sb_add(m, "PHYSIN=%s ", physindev->name);
439 physoutdev = skb->nf_bridge->physoutdev;
440 if (physoutdev && out != physoutdev)
441 sb_add(m, "PHYSOUT=%s ", physoutdev->name);
442 }
443#endif
444
445 if (in != NULL)
446 dump_mac_header(m, loginfo, skb);
447
448 dump_packet(m, loginfo, skb, 0);
449
450 sb_close(m);
451}
452
453static unsigned int
454log_tg(struct sk_buff *skb, const struct xt_action_param *par)
455{
456 const struct ipt_log_info *loginfo = par->targinfo;
457 struct nf_loginfo li;
458
459 li.type = NF_LOG_TYPE_LOG;
460 li.u.log.level = loginfo->level;
461 li.u.log.logflags = loginfo->logflags;
462
463 ipt_log_packet(NFPROTO_IPV4, par->hooknum, skb, par->in, par->out, &li,
464 loginfo->prefix);
465 return XT_CONTINUE;
466}
467
468static int log_tg_check(const struct xt_tgchk_param *par)
469{
470 const struct ipt_log_info *loginfo = par->targinfo;
471
472 if (loginfo->level >= 8) {
473 pr_debug("level %u >= 8\n", loginfo->level);
474 return -EINVAL;
475 }
476 if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') {
477 pr_debug("prefix is not null-terminated\n");
478 return -EINVAL;
479 }
480 return 0;
481}
482
483static struct xt_target log_tg_reg __read_mostly = {
484 .name = "LOG",
485 .family = NFPROTO_IPV4,
486 .target = log_tg,
487 .targetsize = sizeof(struct ipt_log_info),
488 .checkentry = log_tg_check,
489 .me = THIS_MODULE,
490};
491
492static struct nf_logger ipt_log_logger __read_mostly = {
493 .name = "ipt_LOG",
494 .logfn = &ipt_log_packet,
495 .me = THIS_MODULE,
496};
497
498static int __init log_tg_init(void)
499{
500 int ret;
501
502 ret = xt_register_target(&log_tg_reg);
503 if (ret < 0)
504 return ret;
505 nf_log_register(NFPROTO_IPV4, &ipt_log_logger);
506 return 0;
507}
508
509static void __exit log_tg_exit(void)
510{
511 nf_log_unregister(&ipt_log_logger);
512 xt_unregister_target(&log_tg_reg);
513}
514
515module_init(log_tg_init);
516module_exit(log_tg_exit);
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index ab5b27a2916f..7cbe9cb261c2 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -75,25 +75,31 @@ static int icmp_print_tuple(struct seq_file *s,
75 ntohs(tuple->src.u.icmp.id)); 75 ntohs(tuple->src.u.icmp.id));
76} 76}
77 77
78static unsigned int *icmp_get_timeouts(struct net *net)
79{
80 return &nf_ct_icmp_timeout;
81}
82
78/* Returns verdict for packet, or -1 for invalid. */ 83/* Returns verdict for packet, or -1 for invalid. */
79static int icmp_packet(struct nf_conn *ct, 84static int icmp_packet(struct nf_conn *ct,
80 const struct sk_buff *skb, 85 const struct sk_buff *skb,
81 unsigned int dataoff, 86 unsigned int dataoff,
82 enum ip_conntrack_info ctinfo, 87 enum ip_conntrack_info ctinfo,
83 u_int8_t pf, 88 u_int8_t pf,
84 unsigned int hooknum) 89 unsigned int hooknum,
90 unsigned int *timeout)
85{ 91{
86 /* Do not immediately delete the connection after the first 92 /* Do not immediately delete the connection after the first
87 successful reply to avoid excessive conntrackd traffic 93 successful reply to avoid excessive conntrackd traffic
88 and also to handle correctly ICMP echo reply duplicates. */ 94 and also to handle correctly ICMP echo reply duplicates. */
89 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_icmp_timeout); 95 nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
90 96
91 return NF_ACCEPT; 97 return NF_ACCEPT;
92} 98}
93 99
94/* Called when a new connection for this protocol found. */ 100/* Called when a new connection for this protocol found. */
95static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb, 101static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb,
96 unsigned int dataoff) 102 unsigned int dataoff, unsigned int *timeouts)
97{ 103{
98 static const u_int8_t valid_new[] = { 104 static const u_int8_t valid_new[] = {
99 [ICMP_ECHO] = 1, 105 [ICMP_ECHO] = 1,
@@ -263,6 +269,44 @@ static int icmp_nlattr_tuple_size(void)
263} 269}
264#endif 270#endif
265 271
272#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
273
274#include <linux/netfilter/nfnetlink.h>
275#include <linux/netfilter/nfnetlink_cttimeout.h>
276
277static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
278{
279 unsigned int *timeout = data;
280
281 if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) {
282 *timeout =
283 ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMP_TIMEOUT])) * HZ;
284 } else {
285 /* Set default ICMP timeout. */
286 *timeout = nf_ct_icmp_timeout;
287 }
288 return 0;
289}
290
291static int
292icmp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
293{
294 const unsigned int *timeout = data;
295
296 NLA_PUT_BE32(skb, CTA_TIMEOUT_ICMP_TIMEOUT, htonl(*timeout / HZ));
297
298 return 0;
299
300nla_put_failure:
301 return -ENOSPC;
302}
303
304static const struct nla_policy
305icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = {
306 [CTA_TIMEOUT_ICMP_TIMEOUT] = { .type = NLA_U32 },
307};
308#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
309
266#ifdef CONFIG_SYSCTL 310#ifdef CONFIG_SYSCTL
267static struct ctl_table_header *icmp_sysctl_header; 311static struct ctl_table_header *icmp_sysctl_header;
268static struct ctl_table icmp_sysctl_table[] = { 312static struct ctl_table icmp_sysctl_table[] = {
@@ -298,6 +342,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly =
298 .invert_tuple = icmp_invert_tuple, 342 .invert_tuple = icmp_invert_tuple,
299 .print_tuple = icmp_print_tuple, 343 .print_tuple = icmp_print_tuple,
300 .packet = icmp_packet, 344 .packet = icmp_packet,
345 .get_timeouts = icmp_get_timeouts,
301 .new = icmp_new, 346 .new = icmp_new,
302 .error = icmp_error, 347 .error = icmp_error,
303 .destroy = NULL, 348 .destroy = NULL,
@@ -308,6 +353,15 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly =
308 .nlattr_to_tuple = icmp_nlattr_to_tuple, 353 .nlattr_to_tuple = icmp_nlattr_to_tuple,
309 .nla_policy = icmp_nla_policy, 354 .nla_policy = icmp_nla_policy,
310#endif 355#endif
356#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
357 .ctnl_timeout = {
358 .nlattr_to_obj = icmp_timeout_nlattr_to_obj,
359 .obj_to_nlattr = icmp_timeout_obj_to_nlattr,
360 .nlattr_max = CTA_TIMEOUT_ICMP_MAX,
361 .obj_size = sizeof(unsigned int),
362 .nla_policy = icmp_timeout_nla_policy,
363 },
364#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
311#ifdef CONFIG_SYSCTL 365#ifdef CONFIG_SYSCTL
312 .ctl_table_header = &icmp_sysctl_header, 366 .ctl_table_header = &icmp_sysctl_header,
313 .ctl_table = icmp_sysctl_table, 367 .ctl_table = icmp_sysctl_table,
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index a708933dc230..abb52adf5acd 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -686,6 +686,11 @@ static struct pernet_operations nf_nat_net_ops = {
686 .exit = nf_nat_net_exit, 686 .exit = nf_nat_net_exit,
687}; 687};
688 688
689static struct nf_ct_helper_expectfn follow_master_nat = {
690 .name = "nat-follow-master",
691 .expectfn = nf_nat_follow_master,
692};
693
689static int __init nf_nat_init(void) 694static int __init nf_nat_init(void)
690{ 695{
691 size_t i; 696 size_t i;
@@ -717,6 +722,8 @@ static int __init nf_nat_init(void)
717 722
718 l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET); 723 l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
719 724
725 nf_ct_helper_expectfn_register(&follow_master_nat);
726
720 BUG_ON(nf_nat_seq_adjust_hook != NULL); 727 BUG_ON(nf_nat_seq_adjust_hook != NULL);
721 RCU_INIT_POINTER(nf_nat_seq_adjust_hook, nf_nat_seq_adjust); 728 RCU_INIT_POINTER(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
722 BUG_ON(nfnetlink_parse_nat_setup_hook != NULL); 729 BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
@@ -736,6 +743,7 @@ static void __exit nf_nat_cleanup(void)
736 unregister_pernet_subsys(&nf_nat_net_ops); 743 unregister_pernet_subsys(&nf_nat_net_ops);
737 nf_ct_l3proto_put(l3proto); 744 nf_ct_l3proto_put(l3proto);
738 nf_ct_extend_unregister(&nat_extend); 745 nf_ct_extend_unregister(&nat_extend);
746 nf_ct_helper_expectfn_unregister(&follow_master_nat);
739 RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL); 747 RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL);
740 RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL); 748 RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
741 RCU_INIT_POINTER(nf_ct_nat_offset, NULL); 749 RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index dc1dd912baf4..82536701e3a3 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -568,6 +568,16 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
568 return 0; 568 return 0;
569} 569}
570 570
571static struct nf_ct_helper_expectfn q931_nat = {
572 .name = "Q.931",
573 .expectfn = ip_nat_q931_expect,
574};
575
576static struct nf_ct_helper_expectfn callforwarding_nat = {
577 .name = "callforwarding",
578 .expectfn = ip_nat_callforwarding_expect,
579};
580
571/****************************************************************************/ 581/****************************************************************************/
572static int __init init(void) 582static int __init init(void)
573{ 583{
@@ -590,6 +600,8 @@ static int __init init(void)
590 RCU_INIT_POINTER(nat_h245_hook, nat_h245); 600 RCU_INIT_POINTER(nat_h245_hook, nat_h245);
591 RCU_INIT_POINTER(nat_callforwarding_hook, nat_callforwarding); 601 RCU_INIT_POINTER(nat_callforwarding_hook, nat_callforwarding);
592 RCU_INIT_POINTER(nat_q931_hook, nat_q931); 602 RCU_INIT_POINTER(nat_q931_hook, nat_q931);
603 nf_ct_helper_expectfn_register(&q931_nat);
604 nf_ct_helper_expectfn_register(&callforwarding_nat);
593 return 0; 605 return 0;
594} 606}
595 607
@@ -605,6 +617,8 @@ static void __exit fini(void)
605 RCU_INIT_POINTER(nat_h245_hook, NULL); 617 RCU_INIT_POINTER(nat_h245_hook, NULL);
606 RCU_INIT_POINTER(nat_callforwarding_hook, NULL); 618 RCU_INIT_POINTER(nat_callforwarding_hook, NULL);
607 RCU_INIT_POINTER(nat_q931_hook, NULL); 619 RCU_INIT_POINTER(nat_q931_hook, NULL);
620 nf_ct_helper_expectfn_unregister(&q931_nat);
621 nf_ct_helper_expectfn_unregister(&callforwarding_nat);
608 synchronize_rcu(); 622 synchronize_rcu();
609} 623}
610 624
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index d0319f96269f..57932c43960e 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -526,6 +526,11 @@ err1:
526 return NF_DROP; 526 return NF_DROP;
527} 527}
528 528
529static struct nf_ct_helper_expectfn sip_nat = {
530 .name = "sip",
531 .expectfn = ip_nat_sip_expected,
532};
533
529static void __exit nf_nat_sip_fini(void) 534static void __exit nf_nat_sip_fini(void)
530{ 535{
531 RCU_INIT_POINTER(nf_nat_sip_hook, NULL); 536 RCU_INIT_POINTER(nf_nat_sip_hook, NULL);
@@ -535,6 +540,7 @@ static void __exit nf_nat_sip_fini(void)
535 RCU_INIT_POINTER(nf_nat_sdp_port_hook, NULL); 540 RCU_INIT_POINTER(nf_nat_sdp_port_hook, NULL);
536 RCU_INIT_POINTER(nf_nat_sdp_session_hook, NULL); 541 RCU_INIT_POINTER(nf_nat_sdp_session_hook, NULL);
537 RCU_INIT_POINTER(nf_nat_sdp_media_hook, NULL); 542 RCU_INIT_POINTER(nf_nat_sdp_media_hook, NULL);
543 nf_ct_helper_expectfn_unregister(&sip_nat);
538 synchronize_rcu(); 544 synchronize_rcu();
539} 545}
540 546
@@ -554,6 +560,7 @@ static int __init nf_nat_sip_init(void)
554 RCU_INIT_POINTER(nf_nat_sdp_port_hook, ip_nat_sdp_port); 560 RCU_INIT_POINTER(nf_nat_sdp_port_hook, ip_nat_sdp_port);
555 RCU_INIT_POINTER(nf_nat_sdp_session_hook, ip_nat_sdp_session); 561 RCU_INIT_POINTER(nf_nat_sdp_session_hook, ip_nat_sdp_session);
556 RCU_INIT_POINTER(nf_nat_sdp_media_hook, ip_nat_sdp_media); 562 RCU_INIT_POINTER(nf_nat_sdp_media_hook, ip_nat_sdp_media);
563 nf_ct_helper_expectfn_register(&sip_nat);
557 return 0; 564 return 0;
558} 565}
559 566
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index b072386cee21..ab6b36e6da15 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -156,7 +156,7 @@ static struct sock *ping_v4_lookup(struct net *net, __be32 saddr, __be32 daddr,
156 struct hlist_nulls_node *hnode; 156 struct hlist_nulls_node *hnode;
157 157
158 pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n", 158 pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n",
159 (int)ident, &daddr, dif); 159 (int)ident, &daddr, dif);
160 read_lock_bh(&ping_table.lock); 160 read_lock_bh(&ping_table.lock);
161 161
162 ping_portaddr_for_each_entry(sk, hnode, hslot) { 162 ping_portaddr_for_each_entry(sk, hnode, hslot) {
@@ -229,7 +229,7 @@ static int ping_init_sock(struct sock *sk)
229static void ping_close(struct sock *sk, long timeout) 229static void ping_close(struct sock *sk, long timeout)
230{ 230{
231 pr_debug("ping_close(sk=%p,sk->num=%u)\n", 231 pr_debug("ping_close(sk=%p,sk->num=%u)\n",
232 inet_sk(sk), inet_sk(sk)->inet_num); 232 inet_sk(sk), inet_sk(sk)->inet_num);
233 pr_debug("isk->refcnt = %d\n", sk->sk_refcnt.counter); 233 pr_debug("isk->refcnt = %d\n", sk->sk_refcnt.counter);
234 234
235 sk_common_release(sk); 235 sk_common_release(sk);
@@ -252,7 +252,7 @@ static int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
252 return -EINVAL; 252 return -EINVAL;
253 253
254 pr_debug("ping_v4_bind(sk=%p,sa_addr=%08x,sa_port=%d)\n", 254 pr_debug("ping_v4_bind(sk=%p,sa_addr=%08x,sa_port=%d)\n",
255 sk, addr->sin_addr.s_addr, ntohs(addr->sin_port)); 255 sk, addr->sin_addr.s_addr, ntohs(addr->sin_port));
256 256
257 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr); 257 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
258 if (addr->sin_addr.s_addr == htonl(INADDR_ANY)) 258 if (addr->sin_addr.s_addr == htonl(INADDR_ANY))
@@ -280,9 +280,9 @@ static int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
280 } 280 }
281 281
282 pr_debug("after bind(): num = %d, daddr = %pI4, dif = %d\n", 282 pr_debug("after bind(): num = %d, daddr = %pI4, dif = %d\n",
283 (int)isk->inet_num, 283 (int)isk->inet_num,
284 &isk->inet_rcv_saddr, 284 &isk->inet_rcv_saddr,
285 (int)sk->sk_bound_dev_if); 285 (int)sk->sk_bound_dev_if);
286 286
287 err = 0; 287 err = 0;
288 if (isk->inet_rcv_saddr) 288 if (isk->inet_rcv_saddr)
@@ -335,7 +335,7 @@ void ping_err(struct sk_buff *skb, u32 info)
335 return; 335 return;
336 336
337 pr_debug("ping_err(type=%04x,code=%04x,id=%04x,seq=%04x)\n", type, 337 pr_debug("ping_err(type=%04x,code=%04x,id=%04x,seq=%04x)\n", type,
338 code, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence)); 338 code, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence));
339 339
340 sk = ping_v4_lookup(net, iph->daddr, iph->saddr, 340 sk = ping_v4_lookup(net, iph->daddr, iph->saddr,
341 ntohs(icmph->un.echo.id), skb->dev->ifindex); 341 ntohs(icmph->un.echo.id), skb->dev->ifindex);
@@ -556,7 +556,8 @@ static int ping_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
556 ipc.oif = inet->mc_index; 556 ipc.oif = inet->mc_index;
557 if (!saddr) 557 if (!saddr)
558 saddr = inet->mc_addr; 558 saddr = inet->mc_addr;
559 } 559 } else if (!ipc.oif)
560 ipc.oif = inet->uc_index;
560 561
561 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, 562 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
562 RT_SCOPE_UNIVERSE, sk->sk_protocol, 563 RT_SCOPE_UNIVERSE, sk->sk_protocol,
@@ -678,7 +679,7 @@ out:
678static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 679static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
679{ 680{
680 pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n", 681 pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n",
681 inet_sk(sk), inet_sk(sk)->inet_num, skb); 682 inet_sk(sk), inet_sk(sk)->inet_num, skb);
682 if (sock_queue_rcv_skb(sk, skb) < 0) { 683 if (sock_queue_rcv_skb(sk, skb) < 0) {
683 kfree_skb(skb); 684 kfree_skb(skb);
684 pr_debug("ping_queue_rcv_skb -> failed\n"); 685 pr_debug("ping_queue_rcv_skb -> failed\n");
@@ -704,7 +705,7 @@ void ping_rcv(struct sk_buff *skb)
704 /* We assume the packet has already been checked by icmp_rcv */ 705 /* We assume the packet has already been checked by icmp_rcv */
705 706
706 pr_debug("ping_rcv(skb=%p,id=%04x,seq=%04x)\n", 707 pr_debug("ping_rcv(skb=%p,id=%04x,seq=%04x)\n",
707 skb, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence)); 708 skb, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence));
708 709
709 /* Push ICMP header back */ 710 /* Push ICMP header back */
710 skb_push(skb, skb->data - (u8 *)icmph); 711 skb_push(skb, skb->data - (u8 *)icmph);
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 6afc807ee2ad..8af0d44e4e22 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -256,6 +256,8 @@ static const struct snmp_mib snmp4_net_list[] = {
256 SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW), 256 SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW),
257 SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES), 257 SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES),
258 SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP), 258 SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP),
259 SNMP_MIB_ITEM("TCPRetransFail", LINUX_MIB_TCPRETRANSFAIL),
260 SNMP_MIB_ITEM("TCPRcvCoalesce", LINUX_MIB_TCPRCVCOALESCE),
259 SNMP_MIB_SENTINEL 261 SNMP_MIB_SENTINEL
260}; 262};
261 263
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 3ccda5ae8a27..bbd604c68e68 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -491,11 +491,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
491 if (msg->msg_namelen < sizeof(*usin)) 491 if (msg->msg_namelen < sizeof(*usin))
492 goto out; 492 goto out;
493 if (usin->sin_family != AF_INET) { 493 if (usin->sin_family != AF_INET) {
494 static int complained; 494 pr_info_once("%s: %s forgot to set AF_INET. Fix it!\n",
495 if (!complained++) 495 __func__, current->comm);
496 printk(KERN_INFO "%s forgot to set AF_INET in "
497 "raw sendmsg. Fix it!\n",
498 current->comm);
499 err = -EAFNOSUPPORT; 496 err = -EAFNOSUPPORT;
500 if (usin->sin_family) 497 if (usin->sin_family)
501 goto out; 498 goto out;
@@ -563,7 +560,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
563 ipc.oif = inet->mc_index; 560 ipc.oif = inet->mc_index;
564 if (!saddr) 561 if (!saddr)
565 saddr = inet->mc_addr; 562 saddr = inet->mc_addr;
566 } 563 } else if (!ipc.oif)
564 ipc.oif = inet->uc_index;
567 565
568 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, 566 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
569 RT_SCOPE_UNIVERSE, 567 RT_SCOPE_UNIVERSE,
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 019774796174..12ccf880eb88 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -62,6 +62,8 @@
62 * 2 of the License, or (at your option) any later version. 62 * 2 of the License, or (at your option) any later version.
63 */ 63 */
64 64
65#define pr_fmt(fmt) "IPv4: " fmt
66
65#include <linux/module.h> 67#include <linux/module.h>
66#include <asm/uaccess.h> 68#include <asm/uaccess.h>
67#include <asm/system.h> 69#include <asm/system.h>
@@ -959,7 +961,7 @@ void rt_cache_flush_batch(struct net *net)
959static void rt_emergency_hash_rebuild(struct net *net) 961static void rt_emergency_hash_rebuild(struct net *net)
960{ 962{
961 if (net_ratelimit()) 963 if (net_ratelimit())
962 printk(KERN_WARNING "Route hash chain too long!\n"); 964 pr_warn("Route hash chain too long!\n");
963 rt_cache_invalidate(net); 965 rt_cache_invalidate(net);
964} 966}
965 967
@@ -1083,7 +1085,7 @@ static int rt_garbage_collect(struct dst_ops *ops)
1083 if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size) 1085 if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
1084 goto out; 1086 goto out;
1085 if (net_ratelimit()) 1087 if (net_ratelimit())
1086 printk(KERN_WARNING "dst cache overflow\n"); 1088 pr_warn("dst cache overflow\n");
1087 RT_CACHE_STAT_INC(gc_dst_overflow); 1089 RT_CACHE_STAT_INC(gc_dst_overflow);
1088 return 1; 1090 return 1;
1089 1091
@@ -1116,12 +1118,17 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const vo
1116 static const __be32 inaddr_any = 0; 1118 static const __be32 inaddr_any = 0;
1117 struct net_device *dev = dst->dev; 1119 struct net_device *dev = dst->dev;
1118 const __be32 *pkey = daddr; 1120 const __be32 *pkey = daddr;
1121 const struct rtable *rt;
1119 struct neighbour *n; 1122 struct neighbour *n;
1120 1123
1124 rt = (const struct rtable *) dst;
1125
1121 if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT)) 1126 if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
1122 pkey = &inaddr_any; 1127 pkey = &inaddr_any;
1128 else if (rt->rt_gateway)
1129 pkey = (const __be32 *) &rt->rt_gateway;
1123 1130
1124 n = __ipv4_neigh_lookup(&arp_tbl, dev, *(__force u32 *)pkey); 1131 n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
1125 if (n) 1132 if (n)
1126 return n; 1133 return n;
1127 return neigh_create(&arp_tbl, pkey, dev); 1134 return neigh_create(&arp_tbl, pkey, dev);
@@ -1176,8 +1183,7 @@ restart:
1176 int err = rt_bind_neighbour(rt); 1183 int err = rt_bind_neighbour(rt);
1177 if (err) { 1184 if (err) {
1178 if (net_ratelimit()) 1185 if (net_ratelimit())
1179 printk(KERN_WARNING 1186 pr_warn("Neighbour table failure & not caching routes\n");
1180 "Neighbour table failure & not caching routes.\n");
1181 ip_rt_put(rt); 1187 ip_rt_put(rt);
1182 return ERR_PTR(err); 1188 return ERR_PTR(err);
1183 } 1189 }
@@ -1253,7 +1259,7 @@ restart:
1253 struct net *net = dev_net(rt->dst.dev); 1259 struct net *net = dev_net(rt->dst.dev);
1254 int num = ++net->ipv4.current_rt_cache_rebuild_count; 1260 int num = ++net->ipv4.current_rt_cache_rebuild_count;
1255 if (!rt_caching(net)) { 1261 if (!rt_caching(net)) {
1256 printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n", 1262 pr_warn("%s: %d rebuilds is over limit, route caching disabled\n",
1257 rt->dst.dev->name, num); 1263 rt->dst.dev->name, num);
1258 } 1264 }
1259 rt_emergency_hash_rebuild(net); 1265 rt_emergency_hash_rebuild(net);
@@ -1294,7 +1300,7 @@ restart:
1294 } 1300 }
1295 1301
1296 if (net_ratelimit()) 1302 if (net_ratelimit())
1297 printk(KERN_WARNING "ipv4: Neighbour table overflow.\n"); 1303 pr_warn("Neighbour table overflow\n");
1298 rt_drop(rt); 1304 rt_drop(rt);
1299 return ERR_PTR(-ENOBUFS); 1305 return ERR_PTR(-ENOBUFS);
1300 } 1306 }
@@ -1498,10 +1504,10 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1498reject_redirect: 1504reject_redirect:
1499#ifdef CONFIG_IP_ROUTE_VERBOSE 1505#ifdef CONFIG_IP_ROUTE_VERBOSE
1500 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) 1506 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
1501 printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n" 1507 pr_info("Redirect from %pI4 on %s about %pI4 ignored\n"
1502 " Advised path = %pI4 -> %pI4\n", 1508 " Advised path = %pI4 -> %pI4\n",
1503 &old_gw, dev->name, &new_gw, 1509 &old_gw, dev->name, &new_gw,
1504 &saddr, &daddr); 1510 &saddr, &daddr);
1505#endif 1511#endif
1506 ; 1512 ;
1507} 1513}
@@ -1613,8 +1619,8 @@ void ip_rt_send_redirect(struct sk_buff *skb)
1613 if (log_martians && 1619 if (log_martians &&
1614 peer->rate_tokens == ip_rt_redirect_number && 1620 peer->rate_tokens == ip_rt_redirect_number &&
1615 net_ratelimit()) 1621 net_ratelimit())
1616 printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n", 1622 pr_warn("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
1617 &ip_hdr(skb)->saddr, rt->rt_iif, 1623 &ip_hdr(skb)->saddr, rt->rt_iif,
1618 &rt->rt_dst, &rt->rt_gateway); 1624 &rt->rt_dst, &rt->rt_gateway);
1619#endif 1625#endif
1620 } 1626 }
@@ -2100,18 +2106,13 @@ static void ip_handle_martian_source(struct net_device *dev,
2100 * RFC1812 recommendation, if source is martian, 2106 * RFC1812 recommendation, if source is martian,
2101 * the only hint is MAC header. 2107 * the only hint is MAC header.
2102 */ 2108 */
2103 printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n", 2109 pr_warn("martian source %pI4 from %pI4, on dev %s\n",
2104 &daddr, &saddr, dev->name); 2110 &daddr, &saddr, dev->name);
2105 if (dev->hard_header_len && skb_mac_header_was_set(skb)) { 2111 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
2106 int i; 2112 print_hex_dump(KERN_WARNING, "ll header: ",
2107 const unsigned char *p = skb_mac_header(skb); 2113 DUMP_PREFIX_OFFSET, 16, 1,
2108 printk(KERN_WARNING "ll header: "); 2114 skb_mac_header(skb),
2109 for (i = 0; i < dev->hard_header_len; i++, p++) { 2115 dev->hard_header_len, true);
2110 printk("%02x", *p);
2111 if (i < (dev->hard_header_len - 1))
2112 printk(":");
2113 }
2114 printk("\n");
2115 } 2116 }
2116 } 2117 }
2117#endif 2118#endif
@@ -2135,8 +2136,7 @@ static int __mkroute_input(struct sk_buff *skb,
2135 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res)); 2136 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
2136 if (out_dev == NULL) { 2137 if (out_dev == NULL) {
2137 if (net_ratelimit()) 2138 if (net_ratelimit())
2138 printk(KERN_CRIT "Bug in ip_route_input" \ 2139 pr_crit("Bug in ip_route_input_slow(). Please report.\n");
2139 "_slow(). Please, report\n");
2140 return -EINVAL; 2140 return -EINVAL;
2141 } 2141 }
2142 2142
@@ -2408,7 +2408,7 @@ martian_destination:
2408 RT_CACHE_STAT_INC(in_martian_dst); 2408 RT_CACHE_STAT_INC(in_martian_dst);
2409#ifdef CONFIG_IP_ROUTE_VERBOSE 2409#ifdef CONFIG_IP_ROUTE_VERBOSE
2410 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) 2410 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
2411 printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n", 2411 pr_warn("martian destination %pI4 from %pI4, dev %s\n",
2412 &daddr, &saddr, dev->name); 2412 &daddr, &saddr, dev->name);
2413#endif 2413#endif
2414 2414
@@ -3485,7 +3485,7 @@ int __init ip_rt_init(void)
3485 net_random() % ip_rt_gc_interval + ip_rt_gc_interval); 3485 net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
3486 3486
3487 if (ip_rt_proc_init()) 3487 if (ip_rt_proc_init())
3488 printk(KERN_ERR "Unable to create route proc files\n"); 3488 pr_err("Unable to create route proc files\n");
3489#ifdef CONFIG_XFRM 3489#ifdef CONFIG_XFRM
3490 xfrm_init(); 3490 xfrm_init();
3491 xfrm4_init(ip_rt_max_size); 3491 xfrm4_init(ip_rt_max_size);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 22ef5f9fd2ff..cfd7edda0a8e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -245,6 +245,8 @@
245 * TCP_CLOSE socket is finished 245 * TCP_CLOSE socket is finished
246 */ 246 */
247 247
248#define pr_fmt(fmt) "TCP: " fmt
249
248#include <linux/kernel.h> 250#include <linux/kernel.h>
249#include <linux/module.h> 251#include <linux/module.h>
250#include <linux/types.h> 252#include <linux/types.h>
@@ -1675,7 +1677,8 @@ do_prequeue:
1675 1677
1676 if (tp->ucopy.dma_cookie < 0) { 1678 if (tp->ucopy.dma_cookie < 0) {
1677 1679
1678 printk(KERN_ALERT "dma_cookie < 0\n"); 1680 pr_alert("%s: dma_cookie < 0\n",
1681 __func__);
1679 1682
1680 /* Exception. Bailout! */ 1683 /* Exception. Bailout! */
1681 if (!copied) 1684 if (!copied)
@@ -1884,9 +1887,9 @@ bool tcp_check_oom(struct sock *sk, int shift)
1884 out_of_socket_memory = tcp_out_of_memory(sk); 1887 out_of_socket_memory = tcp_out_of_memory(sk);
1885 1888
1886 if (too_many_orphans && net_ratelimit()) 1889 if (too_many_orphans && net_ratelimit())
1887 pr_info("TCP: too many orphaned sockets\n"); 1890 pr_info("too many orphaned sockets\n");
1888 if (out_of_socket_memory && net_ratelimit()) 1891 if (out_of_socket_memory && net_ratelimit())
1889 pr_info("TCP: out of memory -- consider tuning tcp_mem\n"); 1892 pr_info("out of memory -- consider tuning tcp_mem\n");
1890 return too_many_orphans || out_of_socket_memory; 1893 return too_many_orphans || out_of_socket_memory;
1891} 1894}
1892 1895
@@ -3311,9 +3314,8 @@ void __init tcp_init(void)
3311 sysctl_tcp_rmem[1] = 87380; 3314 sysctl_tcp_rmem[1] = 87380;
3312 sysctl_tcp_rmem[2] = max(87380, max_share); 3315 sysctl_tcp_rmem[2] = max(87380, max_share);
3313 3316
3314 printk(KERN_INFO "TCP: Hash tables configured " 3317 pr_info("Hash tables configured (established %u bind %u)\n",
3315 "(established %u bind %u)\n", 3318 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
3316 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
3317 3319
3318 tcp_register_congestion_control(&tcp_reno); 3320 tcp_register_congestion_control(&tcp_reno);
3319 3321
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index fc6d475f488f..272a84593c85 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -6,6 +6,8 @@
6 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org> 6 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
7 */ 7 */
8 8
9#define pr_fmt(fmt) "TCP: " fmt
10
9#include <linux/module.h> 11#include <linux/module.h>
10#include <linux/mm.h> 12#include <linux/mm.h>
11#include <linux/types.h> 13#include <linux/types.h>
@@ -41,18 +43,17 @@ int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
41 43
42 /* all algorithms must implement ssthresh and cong_avoid ops */ 44 /* all algorithms must implement ssthresh and cong_avoid ops */
43 if (!ca->ssthresh || !ca->cong_avoid) { 45 if (!ca->ssthresh || !ca->cong_avoid) {
44 printk(KERN_ERR "TCP %s does not implement required ops\n", 46 pr_err("%s does not implement required ops\n", ca->name);
45 ca->name);
46 return -EINVAL; 47 return -EINVAL;
47 } 48 }
48 49
49 spin_lock(&tcp_cong_list_lock); 50 spin_lock(&tcp_cong_list_lock);
50 if (tcp_ca_find(ca->name)) { 51 if (tcp_ca_find(ca->name)) {
51 printk(KERN_NOTICE "TCP %s already registered\n", ca->name); 52 pr_notice("%s already registered\n", ca->name);
52 ret = -EEXIST; 53 ret = -EEXIST;
53 } else { 54 } else {
54 list_add_tail_rcu(&ca->list, &tcp_cong_list); 55 list_add_tail_rcu(&ca->list, &tcp_cong_list);
55 printk(KERN_INFO "TCP %s registered\n", ca->name); 56 pr_info("%s registered\n", ca->name);
56 } 57 }
57 spin_unlock(&tcp_cong_list_lock); 58 spin_unlock(&tcp_cong_list_lock);
58 59
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b5e315f13641..e886e2f7fa8d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -61,6 +61,8 @@
61 * Pasi Sarolahti: F-RTO for dealing with spurious RTOs 61 * Pasi Sarolahti: F-RTO for dealing with spurious RTOs
62 */ 62 */
63 63
64#define pr_fmt(fmt) "TCP: " fmt
65
64#include <linux/mm.h> 66#include <linux/mm.h>
65#include <linux/slab.h> 67#include <linux/slab.h>
66#include <linux/module.h> 68#include <linux/module.h>
@@ -3867,9 +3869,9 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o
3867 opt_rx->wscale_ok = 1; 3869 opt_rx->wscale_ok = 1;
3868 if (snd_wscale > 14) { 3870 if (snd_wscale > 14) {
3869 if (net_ratelimit()) 3871 if (net_ratelimit())
3870 printk(KERN_INFO "tcp_parse_options: Illegal window " 3872 pr_info("%s: Illegal window scaling value %d >14 received\n",
3871 "scaling value %d >14 received.\n", 3873 __func__,
3872 snd_wscale); 3874 snd_wscale);
3873 snd_wscale = 14; 3875 snd_wscale = 14;
3874 } 3876 }
3875 opt_rx->snd_wscale = snd_wscale; 3877 opt_rx->snd_wscale = snd_wscale;
@@ -4191,7 +4193,7 @@ static void tcp_fin(struct sock *sk)
4191 /* Only TCP_LISTEN and TCP_CLOSE are left, in these 4193 /* Only TCP_LISTEN and TCP_CLOSE are left, in these
4192 * cases we should never reach this piece of code. 4194 * cases we should never reach this piece of code.
4193 */ 4195 */
4194 printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n", 4196 pr_err("%s: Impossible, sk->sk_state=%d\n",
4195 __func__, sk->sk_state); 4197 __func__, sk->sk_state);
4196 break; 4198 break;
4197 } 4199 }
@@ -4444,6 +4446,137 @@ static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
4444 return 0; 4446 return 0;
4445} 4447}
4446 4448
4449static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
4450{
4451 struct tcp_sock *tp = tcp_sk(sk);
4452 struct sk_buff *skb1;
4453 u32 seq, end_seq;
4454
4455 TCP_ECN_check_ce(tp, skb);
4456
4457 if (tcp_try_rmem_schedule(sk, skb->truesize)) {
4458 /* TODO: should increment a counter */
4459 __kfree_skb(skb);
4460 return;
4461 }
4462
4463 /* Disable header prediction. */
4464 tp->pred_flags = 0;
4465 inet_csk_schedule_ack(sk);
4466
4467 SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
4468 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
4469
4470 skb1 = skb_peek_tail(&tp->out_of_order_queue);
4471 if (!skb1) {
4472 /* Initial out of order segment, build 1 SACK. */
4473 if (tcp_is_sack(tp)) {
4474 tp->rx_opt.num_sacks = 1;
4475 tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
4476 tp->selective_acks[0].end_seq =
4477 TCP_SKB_CB(skb)->end_seq;
4478 }
4479 __skb_queue_head(&tp->out_of_order_queue, skb);
4480 goto end;
4481 }
4482
4483 seq = TCP_SKB_CB(skb)->seq;
4484 end_seq = TCP_SKB_CB(skb)->end_seq;
4485
4486 if (seq == TCP_SKB_CB(skb1)->end_seq) {
4487 /* Packets in ofo can stay in queue a long time.
4488 * Better try to coalesce them right now
4489 * to avoid future tcp_collapse_ofo_queue(),
4490 * probably the most expensive function in tcp stack.
4491 */
4492 if (skb->len <= skb_tailroom(skb1) && !tcp_hdr(skb)->fin) {
4493 NET_INC_STATS_BH(sock_net(sk),
4494 LINUX_MIB_TCPRCVCOALESCE);
4495 BUG_ON(skb_copy_bits(skb, 0,
4496 skb_put(skb1, skb->len),
4497 skb->len));
4498 TCP_SKB_CB(skb1)->end_seq = end_seq;
4499 TCP_SKB_CB(skb1)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
4500 __kfree_skb(skb);
4501 skb = NULL;
4502 } else {
4503 __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
4504 }
4505
4506 if (!tp->rx_opt.num_sacks ||
4507 tp->selective_acks[0].end_seq != seq)
4508 goto add_sack;
4509
4510 /* Common case: data arrive in order after hole. */
4511 tp->selective_acks[0].end_seq = end_seq;
4512 goto end;
4513 }
4514
4515 /* Find place to insert this segment. */
4516 while (1) {
4517 if (!after(TCP_SKB_CB(skb1)->seq, seq))
4518 break;
4519 if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) {
4520 skb1 = NULL;
4521 break;
4522 }
4523 skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
4524 }
4525
4526 /* Do skb overlap to previous one? */
4527 if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
4528 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4529 /* All the bits are present. Drop. */
4530 __kfree_skb(skb);
4531 skb = NULL;
4532 tcp_dsack_set(sk, seq, end_seq);
4533 goto add_sack;
4534 }
4535 if (after(seq, TCP_SKB_CB(skb1)->seq)) {
4536 /* Partial overlap. */
4537 tcp_dsack_set(sk, seq,
4538 TCP_SKB_CB(skb1)->end_seq);
4539 } else {
4540 if (skb_queue_is_first(&tp->out_of_order_queue,
4541 skb1))
4542 skb1 = NULL;
4543 else
4544 skb1 = skb_queue_prev(
4545 &tp->out_of_order_queue,
4546 skb1);
4547 }
4548 }
4549 if (!skb1)
4550 __skb_queue_head(&tp->out_of_order_queue, skb);
4551 else
4552 __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
4553
4554 /* And clean segments covered by new one as whole. */
4555 while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) {
4556 skb1 = skb_queue_next(&tp->out_of_order_queue, skb);
4557
4558 if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
4559 break;
4560 if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4561 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4562 end_seq);
4563 break;
4564 }
4565 __skb_unlink(skb1, &tp->out_of_order_queue);
4566 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4567 TCP_SKB_CB(skb1)->end_seq);
4568 __kfree_skb(skb1);
4569 }
4570
4571add_sack:
4572 if (tcp_is_sack(tp))
4573 tcp_sack_new_ofo_skb(sk, seq, end_seq);
4574end:
4575 if (skb)
4576 skb_set_owner_r(skb, sk);
4577}
4578
4579
4447static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) 4580static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
4448{ 4581{
4449 const struct tcphdr *th = tcp_hdr(skb); 4582 const struct tcphdr *th = tcp_hdr(skb);
@@ -4559,105 +4692,7 @@ drop:
4559 goto queue_and_out; 4692 goto queue_and_out;
4560 } 4693 }
4561 4694
4562 TCP_ECN_check_ce(tp, skb); 4695 tcp_data_queue_ofo(sk, skb);
4563
4564 if (tcp_try_rmem_schedule(sk, skb->truesize))
4565 goto drop;
4566
4567 /* Disable header prediction. */
4568 tp->pred_flags = 0;
4569 inet_csk_schedule_ack(sk);
4570
4571 SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
4572 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
4573
4574 skb_set_owner_r(skb, sk);
4575
4576 if (!skb_peek(&tp->out_of_order_queue)) {
4577 /* Initial out of order segment, build 1 SACK. */
4578 if (tcp_is_sack(tp)) {
4579 tp->rx_opt.num_sacks = 1;
4580 tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
4581 tp->selective_acks[0].end_seq =
4582 TCP_SKB_CB(skb)->end_seq;
4583 }
4584 __skb_queue_head(&tp->out_of_order_queue, skb);
4585 } else {
4586 struct sk_buff *skb1 = skb_peek_tail(&tp->out_of_order_queue);
4587 u32 seq = TCP_SKB_CB(skb)->seq;
4588 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
4589
4590 if (seq == TCP_SKB_CB(skb1)->end_seq) {
4591 __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
4592
4593 if (!tp->rx_opt.num_sacks ||
4594 tp->selective_acks[0].end_seq != seq)
4595 goto add_sack;
4596
4597 /* Common case: data arrive in order after hole. */
4598 tp->selective_acks[0].end_seq = end_seq;
4599 return;
4600 }
4601
4602 /* Find place to insert this segment. */
4603 while (1) {
4604 if (!after(TCP_SKB_CB(skb1)->seq, seq))
4605 break;
4606 if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) {
4607 skb1 = NULL;
4608 break;
4609 }
4610 skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
4611 }
4612
4613 /* Do skb overlap to previous one? */
4614 if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
4615 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4616 /* All the bits are present. Drop. */
4617 __kfree_skb(skb);
4618 tcp_dsack_set(sk, seq, end_seq);
4619 goto add_sack;
4620 }
4621 if (after(seq, TCP_SKB_CB(skb1)->seq)) {
4622 /* Partial overlap. */
4623 tcp_dsack_set(sk, seq,
4624 TCP_SKB_CB(skb1)->end_seq);
4625 } else {
4626 if (skb_queue_is_first(&tp->out_of_order_queue,
4627 skb1))
4628 skb1 = NULL;
4629 else
4630 skb1 = skb_queue_prev(
4631 &tp->out_of_order_queue,
4632 skb1);
4633 }
4634 }
4635 if (!skb1)
4636 __skb_queue_head(&tp->out_of_order_queue, skb);
4637 else
4638 __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
4639
4640 /* And clean segments covered by new one as whole. */
4641 while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) {
4642 skb1 = skb_queue_next(&tp->out_of_order_queue, skb);
4643
4644 if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
4645 break;
4646 if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4647 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4648 end_seq);
4649 break;
4650 }
4651 __skb_unlink(skb1, &tp->out_of_order_queue);
4652 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4653 TCP_SKB_CB(skb1)->end_seq);
4654 __kfree_skb(skb1);
4655 }
4656
4657add_sack:
4658 if (tcp_is_sack(tp))
4659 tcp_sack_new_ofo_skb(sk, seq, end_seq);
4660 }
4661} 4696}
4662 4697
4663static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, 4698static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index fd54c5f8a255..3a25cf743f8b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -50,6 +50,7 @@
50 * a single port at the same time. 50 * a single port at the same time.
51 */ 51 */
52 52
53#define pr_fmt(fmt) "TCP: " fmt
53 54
54#include <linux/bottom_half.h> 55#include <linux/bottom_half.h>
55#include <linux/types.h> 56#include <linux/types.h>
@@ -90,16 +91,8 @@ EXPORT_SYMBOL(sysctl_tcp_low_latency);
90 91
91 92
92#ifdef CONFIG_TCP_MD5SIG 93#ifdef CONFIG_TCP_MD5SIG
93static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, 94static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
94 __be32 addr);
95static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
96 __be32 daddr, __be32 saddr, const struct tcphdr *th); 95 __be32 daddr, __be32 saddr, const struct tcphdr *th);
97#else
98static inline
99struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
100{
101 return NULL;
102}
103#endif 96#endif
104 97
105struct inet_hashinfo tcp_hashinfo; 98struct inet_hashinfo tcp_hashinfo;
@@ -601,6 +594,10 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
601 struct ip_reply_arg arg; 594 struct ip_reply_arg arg;
602#ifdef CONFIG_TCP_MD5SIG 595#ifdef CONFIG_TCP_MD5SIG
603 struct tcp_md5sig_key *key; 596 struct tcp_md5sig_key *key;
597 const __u8 *hash_location = NULL;
598 unsigned char newhash[16];
599 int genhash;
600 struct sock *sk1 = NULL;
604#endif 601#endif
605 struct net *net; 602 struct net *net;
606 603
@@ -631,7 +628,36 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
631 arg.iov[0].iov_len = sizeof(rep.th); 628 arg.iov[0].iov_len = sizeof(rep.th);
632 629
633#ifdef CONFIG_TCP_MD5SIG 630#ifdef CONFIG_TCP_MD5SIG
634 key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->saddr) : NULL; 631 hash_location = tcp_parse_md5sig_option(th);
632 if (!sk && hash_location) {
633 /*
634 * active side is lost. Try to find listening socket through
635 * source port, and then find md5 key through listening socket.
636 * we are not loose security here:
637 * Incoming packet is checked with md5 hash with finding key,
638 * no RST generated if md5 hash doesn't match.
639 */
640 sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
641 &tcp_hashinfo, ip_hdr(skb)->daddr,
642 ntohs(th->source), inet_iif(skb));
643 /* don't send rst if it can't find key */
644 if (!sk1)
645 return;
646 rcu_read_lock();
647 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
648 &ip_hdr(skb)->saddr, AF_INET);
649 if (!key)
650 goto release_sk1;
651
652 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
653 if (genhash || memcmp(hash_location, newhash, 16) != 0)
654 goto release_sk1;
655 } else {
656 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
657 &ip_hdr(skb)->saddr,
658 AF_INET) : NULL;
659 }
660
635 if (key) { 661 if (key) {
636 rep.opt[0] = htonl((TCPOPT_NOP << 24) | 662 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
637 (TCPOPT_NOP << 16) | 663 (TCPOPT_NOP << 16) |
@@ -664,6 +690,14 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
664 690
665 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); 691 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
666 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS); 692 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
693
694#ifdef CONFIG_TCP_MD5SIG
695release_sk1:
696 if (sk1) {
697 rcu_read_unlock();
698 sock_put(sk1);
699 }
700#endif
667} 701}
668 702
669/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states 703/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
@@ -764,7 +798,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
764 tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, 798 tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
765 req->ts_recent, 799 req->ts_recent,
766 0, 800 0,
767 tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr), 801 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
802 AF_INET),
768 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0, 803 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
769 ip_hdr(skb)->tos); 804 ip_hdr(skb)->tos);
770} 805}
@@ -842,8 +877,7 @@ int tcp_syn_flood_action(struct sock *sk,
842 lopt = inet_csk(sk)->icsk_accept_queue.listen_opt; 877 lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
843 if (!lopt->synflood_warned) { 878 if (!lopt->synflood_warned) {
844 lopt->synflood_warned = 1; 879 lopt->synflood_warned = 1;
845 pr_info("%s: Possible SYN flooding on port %d. %s. " 880 pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
846 " Check SNMP counters.\n",
847 proto, ntohs(tcp_hdr(skb)->dest), msg); 881 proto, ntohs(tcp_hdr(skb)->dest), msg);
848 } 882 }
849 return want_cookie; 883 return want_cookie;
@@ -881,153 +915,138 @@ static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
881 */ 915 */
882 916
883/* Find the Key structure for an address. */ 917/* Find the Key structure for an address. */
884static struct tcp_md5sig_key * 918struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
885 tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr) 919 const union tcp_md5_addr *addr,
920 int family)
886{ 921{
887 struct tcp_sock *tp = tcp_sk(sk); 922 struct tcp_sock *tp = tcp_sk(sk);
888 int i; 923 struct tcp_md5sig_key *key;
889 924 struct hlist_node *pos;
890 if (!tp->md5sig_info || !tp->md5sig_info->entries4) 925 unsigned int size = sizeof(struct in_addr);
926 struct tcp_md5sig_info *md5sig;
927
928 /* caller either holds rcu_read_lock() or socket lock */
929 md5sig = rcu_dereference_check(tp->md5sig_info,
930 sock_owned_by_user(sk) ||
931 lockdep_is_held(&sk->sk_lock.slock));
932 if (!md5sig)
891 return NULL; 933 return NULL;
892 for (i = 0; i < tp->md5sig_info->entries4; i++) { 934#if IS_ENABLED(CONFIG_IPV6)
893 if (tp->md5sig_info->keys4[i].addr == addr) 935 if (family == AF_INET6)
894 return &tp->md5sig_info->keys4[i].base; 936 size = sizeof(struct in6_addr);
937#endif
938 hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
939 if (key->family != family)
940 continue;
941 if (!memcmp(&key->addr, addr, size))
942 return key;
895 } 943 }
896 return NULL; 944 return NULL;
897} 945}
946EXPORT_SYMBOL(tcp_md5_do_lookup);
898 947
899struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, 948struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
900 struct sock *addr_sk) 949 struct sock *addr_sk)
901{ 950{
902 return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr); 951 union tcp_md5_addr *addr;
952
953 addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
954 return tcp_md5_do_lookup(sk, addr, AF_INET);
903} 955}
904EXPORT_SYMBOL(tcp_v4_md5_lookup); 956EXPORT_SYMBOL(tcp_v4_md5_lookup);
905 957
906static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk, 958static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
907 struct request_sock *req) 959 struct request_sock *req)
908{ 960{
909 return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr); 961 union tcp_md5_addr *addr;
962
963 addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
964 return tcp_md5_do_lookup(sk, addr, AF_INET);
910} 965}
911 966
912/* This can be called on a newly created socket, from other files */ 967/* This can be called on a newly created socket, from other files */
913int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, 968int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
914 u8 *newkey, u8 newkeylen) 969 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
915{ 970{
916 /* Add Key to the list */ 971 /* Add Key to the list */
917 struct tcp_md5sig_key *key; 972 struct tcp_md5sig_key *key;
918 struct tcp_sock *tp = tcp_sk(sk); 973 struct tcp_sock *tp = tcp_sk(sk);
919 struct tcp4_md5sig_key *keys; 974 struct tcp_md5sig_info *md5sig;
920 975
921 key = tcp_v4_md5_do_lookup(sk, addr); 976 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
922 if (key) { 977 if (key) {
923 /* Pre-existing entry - just update that one. */ 978 /* Pre-existing entry - just update that one. */
924 kfree(key->key); 979 memcpy(key->key, newkey, newkeylen);
925 key->key = newkey;
926 key->keylen = newkeylen; 980 key->keylen = newkeylen;
927 } else { 981 return 0;
928 struct tcp_md5sig_info *md5sig; 982 }
929
930 if (!tp->md5sig_info) {
931 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
932 GFP_ATOMIC);
933 if (!tp->md5sig_info) {
934 kfree(newkey);
935 return -ENOMEM;
936 }
937 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
938 }
939 983
940 md5sig = tp->md5sig_info; 984 md5sig = rcu_dereference_protected(tp->md5sig_info,
941 if (md5sig->entries4 == 0 && 985 sock_owned_by_user(sk));
942 tcp_alloc_md5sig_pool(sk) == NULL) { 986 if (!md5sig) {
943 kfree(newkey); 987 md5sig = kmalloc(sizeof(*md5sig), gfp);
988 if (!md5sig)
944 return -ENOMEM; 989 return -ENOMEM;
945 }
946
947 if (md5sig->alloced4 == md5sig->entries4) {
948 keys = kmalloc((sizeof(*keys) *
949 (md5sig->entries4 + 1)), GFP_ATOMIC);
950 if (!keys) {
951 kfree(newkey);
952 if (md5sig->entries4 == 0)
953 tcp_free_md5sig_pool();
954 return -ENOMEM;
955 }
956 990
957 if (md5sig->entries4) 991 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
958 memcpy(keys, md5sig->keys4, 992 INIT_HLIST_HEAD(&md5sig->head);
959 sizeof(*keys) * md5sig->entries4); 993 rcu_assign_pointer(tp->md5sig_info, md5sig);
994 }
960 995
961 /* Free old key list, and reference new one */ 996 key = sock_kmalloc(sk, sizeof(*key), gfp);
962 kfree(md5sig->keys4); 997 if (!key)
963 md5sig->keys4 = keys; 998 return -ENOMEM;
964 md5sig->alloced4++; 999 if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
965 } 1000 sock_kfree_s(sk, key, sizeof(*key));
966 md5sig->entries4++; 1001 return -ENOMEM;
967 md5sig->keys4[md5sig->entries4 - 1].addr = addr;
968 md5sig->keys4[md5sig->entries4 - 1].base.key = newkey;
969 md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
970 } 1002 }
971 return 0;
972}
973EXPORT_SYMBOL(tcp_v4_md5_do_add);
974 1003
975static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk, 1004 memcpy(key->key, newkey, newkeylen);
976 u8 *newkey, u8 newkeylen) 1005 key->keylen = newkeylen;
977{ 1006 key->family = family;
978 return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr, 1007 memcpy(&key->addr, addr,
979 newkey, newkeylen); 1008 (family == AF_INET6) ? sizeof(struct in6_addr) :
1009 sizeof(struct in_addr));
1010 hlist_add_head_rcu(&key->node, &md5sig->head);
1011 return 0;
980} 1012}
1013EXPORT_SYMBOL(tcp_md5_do_add);
981 1014
982int tcp_v4_md5_do_del(struct sock *sk, __be32 addr) 1015int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
983{ 1016{
984 struct tcp_sock *tp = tcp_sk(sk); 1017 struct tcp_sock *tp = tcp_sk(sk);
985 int i; 1018 struct tcp_md5sig_key *key;
986 1019 struct tcp_md5sig_info *md5sig;
987 for (i = 0; i < tp->md5sig_info->entries4; i++) { 1020
988 if (tp->md5sig_info->keys4[i].addr == addr) { 1021 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
989 /* Free the key */ 1022 if (!key)
990 kfree(tp->md5sig_info->keys4[i].base.key); 1023 return -ENOENT;
991 tp->md5sig_info->entries4--; 1024 hlist_del_rcu(&key->node);
992 1025 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
993 if (tp->md5sig_info->entries4 == 0) { 1026 kfree_rcu(key, rcu);
994 kfree(tp->md5sig_info->keys4); 1027 md5sig = rcu_dereference_protected(tp->md5sig_info,
995 tp->md5sig_info->keys4 = NULL; 1028 sock_owned_by_user(sk));
996 tp->md5sig_info->alloced4 = 0; 1029 if (hlist_empty(&md5sig->head))
997 tcp_free_md5sig_pool(); 1030 tcp_free_md5sig_pool();
998 } else if (tp->md5sig_info->entries4 != i) { 1031 return 0;
999 /* Need to do some manipulation */
1000 memmove(&tp->md5sig_info->keys4[i],
1001 &tp->md5sig_info->keys4[i+1],
1002 (tp->md5sig_info->entries4 - i) *
1003 sizeof(struct tcp4_md5sig_key));
1004 }
1005 return 0;
1006 }
1007 }
1008 return -ENOENT;
1009} 1032}
1010EXPORT_SYMBOL(tcp_v4_md5_do_del); 1033EXPORT_SYMBOL(tcp_md5_do_del);
1011 1034
1012static void tcp_v4_clear_md5_list(struct sock *sk) 1035void tcp_clear_md5_list(struct sock *sk)
1013{ 1036{
1014 struct tcp_sock *tp = tcp_sk(sk); 1037 struct tcp_sock *tp = tcp_sk(sk);
1038 struct tcp_md5sig_key *key;
1039 struct hlist_node *pos, *n;
1040 struct tcp_md5sig_info *md5sig;
1015 1041
1016 /* Free each key, then the set of key keys, 1042 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1017 * the crypto element, and then decrement our 1043
1018 * hold on the last resort crypto. 1044 if (!hlist_empty(&md5sig->head))
1019 */
1020 if (tp->md5sig_info->entries4) {
1021 int i;
1022 for (i = 0; i < tp->md5sig_info->entries4; i++)
1023 kfree(tp->md5sig_info->keys4[i].base.key);
1024 tp->md5sig_info->entries4 = 0;
1025 tcp_free_md5sig_pool(); 1045 tcp_free_md5sig_pool();
1026 } 1046 hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
1027 if (tp->md5sig_info->keys4) { 1047 hlist_del_rcu(&key->node);
1028 kfree(tp->md5sig_info->keys4); 1048 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1029 tp->md5sig_info->keys4 = NULL; 1049 kfree_rcu(key, rcu);
1030 tp->md5sig_info->alloced4 = 0;
1031 } 1050 }
1032} 1051}
1033 1052
@@ -1036,7 +1055,6 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1036{ 1055{
1037 struct tcp_md5sig cmd; 1056 struct tcp_md5sig cmd;
1038 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr; 1057 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1039 u8 *newkey;
1040 1058
1041 if (optlen < sizeof(cmd)) 1059 if (optlen < sizeof(cmd))
1042 return -EINVAL; 1060 return -EINVAL;
@@ -1047,32 +1065,16 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1047 if (sin->sin_family != AF_INET) 1065 if (sin->sin_family != AF_INET)
1048 return -EINVAL; 1066 return -EINVAL;
1049 1067
1050 if (!cmd.tcpm_key || !cmd.tcpm_keylen) { 1068 if (!cmd.tcpm_key || !cmd.tcpm_keylen)
1051 if (!tcp_sk(sk)->md5sig_info) 1069 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1052 return -ENOENT; 1070 AF_INET);
1053 return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
1054 }
1055 1071
1056 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN) 1072 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1057 return -EINVAL; 1073 return -EINVAL;
1058 1074
1059 if (!tcp_sk(sk)->md5sig_info) { 1075 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1060 struct tcp_sock *tp = tcp_sk(sk); 1076 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1061 struct tcp_md5sig_info *p; 1077 GFP_KERNEL);
1062
1063 p = kzalloc(sizeof(*p), sk->sk_allocation);
1064 if (!p)
1065 return -EINVAL;
1066
1067 tp->md5sig_info = p;
1068 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1069 }
1070
1071 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
1072 if (!newkey)
1073 return -ENOMEM;
1074 return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
1075 newkey, cmd.tcpm_keylen);
1076} 1078}
1077 1079
1078static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp, 1080static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
@@ -1098,7 +1100,7 @@ static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1098 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp)); 1100 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1099} 1101}
1100 1102
1101static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key, 1103static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1102 __be32 daddr, __be32 saddr, const struct tcphdr *th) 1104 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1103{ 1105{
1104 struct tcp_md5sig_pool *hp; 1106 struct tcp_md5sig_pool *hp;
@@ -1198,7 +1200,8 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1198 int genhash; 1200 int genhash;
1199 unsigned char newhash[16]; 1201 unsigned char newhash[16];
1200 1202
1201 hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr); 1203 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1204 AF_INET);
1202 hash_location = tcp_parse_md5sig_option(th); 1205 hash_location = tcp_parse_md5sig_option(th);
1203 1206
1204 /* We've parsed the options - do we have a hash? */ 1207 /* We've parsed the options - do we have a hash? */
@@ -1224,10 +1227,10 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1224 1227
1225 if (genhash || memcmp(hash_location, newhash, 16) != 0) { 1228 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1226 if (net_ratelimit()) { 1229 if (net_ratelimit()) {
1227 printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n", 1230 pr_info("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1228 &iph->saddr, ntohs(th->source), 1231 &iph->saddr, ntohs(th->source),
1229 &iph->daddr, ntohs(th->dest), 1232 &iph->daddr, ntohs(th->dest),
1230 genhash ? " tcp_v4_calc_md5_hash failed" : ""); 1233 genhash ? " tcp_v4_calc_md5_hash failed" : "");
1231 } 1234 }
1232 return 1; 1235 return 1;
1233 } 1236 }
@@ -1396,7 +1399,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1396 * to destinations, already remembered 1399 * to destinations, already remembered
1397 * to the moment of synflood. 1400 * to the moment of synflood.
1398 */ 1401 */
1399 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n", 1402 LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
1400 &saddr, ntohs(tcp_hdr(skb)->source)); 1403 &saddr, ntohs(tcp_hdr(skb)->source));
1401 goto drop_and_release; 1404 goto drop_and_release;
1402 } 1405 }
@@ -1461,6 +1464,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1461 ireq->opt = NULL; 1464 ireq->opt = NULL;
1462 newinet->mc_index = inet_iif(skb); 1465 newinet->mc_index = inet_iif(skb);
1463 newinet->mc_ttl = ip_hdr(skb)->ttl; 1466 newinet->mc_ttl = ip_hdr(skb)->ttl;
1467 newinet->rcv_tos = ip_hdr(skb)->tos;
1464 inet_csk(newsk)->icsk_ext_hdr_len = 0; 1468 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1465 if (inet_opt) 1469 if (inet_opt)
1466 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen; 1470 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
@@ -1490,7 +1494,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1490 1494
1491#ifdef CONFIG_TCP_MD5SIG 1495#ifdef CONFIG_TCP_MD5SIG
1492 /* Copy over the MD5 key from the original socket */ 1496 /* Copy over the MD5 key from the original socket */
1493 key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr); 1497 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1498 AF_INET);
1494 if (key != NULL) { 1499 if (key != NULL) {
1495 /* 1500 /*
1496 * We're using one, so create a matching key 1501 * We're using one, so create a matching key
@@ -1498,10 +1503,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1498 * memory, then we end up not copying the key 1503 * memory, then we end up not copying the key
1499 * across. Shucks. 1504 * across. Shucks.
1500 */ 1505 */
1501 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC); 1506 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1502 if (newkey != NULL) 1507 AF_INET, key->key, key->keylen, GFP_ATOMIC);
1503 tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
1504 newkey, key->keylen);
1505 sk_nocaps_add(newsk, NETIF_F_GSO_MASK); 1508 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1506 } 1509 }
1507#endif 1510#endif
@@ -1862,7 +1865,6 @@ EXPORT_SYMBOL(ipv4_specific);
1862static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = { 1865static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1863 .md5_lookup = tcp_v4_md5_lookup, 1866 .md5_lookup = tcp_v4_md5_lookup,
1864 .calc_md5_hash = tcp_v4_md5_hash_skb, 1867 .calc_md5_hash = tcp_v4_md5_hash_skb,
1865 .md5_add = tcp_v4_md5_add_func,
1866 .md5_parse = tcp_v4_parse_md5_keys, 1868 .md5_parse = tcp_v4_parse_md5_keys,
1867}; 1869};
1868#endif 1870#endif
@@ -1951,8 +1953,8 @@ void tcp_v4_destroy_sock(struct sock *sk)
1951#ifdef CONFIG_TCP_MD5SIG 1953#ifdef CONFIG_TCP_MD5SIG
1952 /* Clean up the MD5 key list, if any */ 1954 /* Clean up the MD5 key list, if any */
1953 if (tp->md5sig_info) { 1955 if (tp->md5sig_info) {
1954 tcp_v4_clear_md5_list(sk); 1956 tcp_clear_md5_list(sk);
1955 kfree(tp->md5sig_info); 1957 kfree_rcu(tp->md5sig_info, rcu);
1956 tp->md5sig_info = NULL; 1958 tp->md5sig_info = NULL;
1957 } 1959 }
1958#endif 1960#endif
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 550e755747e0..3cabafb5cdd1 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -359,13 +359,11 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
359 */ 359 */
360 do { 360 do {
361 struct tcp_md5sig_key *key; 361 struct tcp_md5sig_key *key;
362 memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key)); 362 tcptw->tw_md5_key = NULL;
363 tcptw->tw_md5_keylen = 0;
364 key = tp->af_specific->md5_lookup(sk, sk); 363 key = tp->af_specific->md5_lookup(sk, sk);
365 if (key != NULL) { 364 if (key != NULL) {
366 memcpy(&tcptw->tw_md5_key, key->key, key->keylen); 365 tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
367 tcptw->tw_md5_keylen = key->keylen; 366 if (tcptw->tw_md5_key && tcp_alloc_md5sig_pool(sk) == NULL)
368 if (tcp_alloc_md5sig_pool(sk) == NULL)
369 BUG(); 367 BUG();
370 } 368 }
371 } while (0); 369 } while (0);
@@ -405,8 +403,10 @@ void tcp_twsk_destructor(struct sock *sk)
405{ 403{
406#ifdef CONFIG_TCP_MD5SIG 404#ifdef CONFIG_TCP_MD5SIG
407 struct tcp_timewait_sock *twsk = tcp_twsk(sk); 405 struct tcp_timewait_sock *twsk = tcp_twsk(sk);
408 if (twsk->tw_md5_keylen) 406 if (twsk->tw_md5_key) {
409 tcp_free_md5sig_pool(); 407 tcp_free_md5sig_pool();
408 kfree_rcu(twsk->tw_md5_key, rcu);
409 }
410#endif 410#endif
411} 411}
412EXPORT_SYMBOL_GPL(tcp_twsk_destructor); 412EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 4ff3b6dc74fc..364784a91939 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2306,8 +2306,10 @@ begin_fwd:
2306 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS)) 2306 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
2307 continue; 2307 continue;
2308 2308
2309 if (tcp_retransmit_skb(sk, skb)) 2309 if (tcp_retransmit_skb(sk, skb)) {
2310 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
2310 return; 2311 return;
2312 }
2311 NET_INC_STATS_BH(sock_net(sk), mib_idx); 2313 NET_INC_STATS_BH(sock_net(sk), mib_idx);
2312 2314
2313 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery) 2315 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery)
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 85ee7eb7e38e..a981cdc0a6e9 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -18,6 +18,8 @@
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 19 */
20 20
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
21#include <linux/kernel.h> 23#include <linux/kernel.h>
22#include <linux/kprobes.h> 24#include <linux/kprobes.h>
23#include <linux/socket.h> 25#include <linux/socket.h>
@@ -239,7 +241,7 @@ static __init int tcpprobe_init(void)
239 if (ret) 241 if (ret)
240 goto err1; 242 goto err1;
241 243
242 pr_info("TCP probe registered (port=%d) bufsize=%u\n", port, bufsize); 244 pr_info("probe registered (port=%d) bufsize=%u\n", port, bufsize);
243 return 0; 245 return 0;
244 err1: 246 err1:
245 proc_net_remove(&init_net, procname); 247 proc_net_remove(&init_net, procname);
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index cd2e0723266d..34d4a02c2f16 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -333,16 +333,18 @@ void tcp_retransmit_timer(struct sock *sk)
333 */ 333 */
334 struct inet_sock *inet = inet_sk(sk); 334 struct inet_sock *inet = inet_sk(sk);
335 if (sk->sk_family == AF_INET) { 335 if (sk->sk_family == AF_INET) {
336 LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n", 336 LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"),
337 &inet->inet_daddr, ntohs(inet->inet_dport), 337 &inet->inet_daddr,
338 inet->inet_num, tp->snd_una, tp->snd_nxt); 338 ntohs(inet->inet_dport), inet->inet_num,
339 tp->snd_una, tp->snd_nxt);
339 } 340 }
340#if IS_ENABLED(CONFIG_IPV6) 341#if IS_ENABLED(CONFIG_IPV6)
341 else if (sk->sk_family == AF_INET6) { 342 else if (sk->sk_family == AF_INET6) {
342 struct ipv6_pinfo *np = inet6_sk(sk); 343 struct ipv6_pinfo *np = inet6_sk(sk);
343 LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n", 344 LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"),
344 &np->daddr, ntohs(inet->inet_dport), 345 &np->daddr,
345 inet->inet_num, tp->snd_una, tp->snd_nxt); 346 ntohs(inet->inet_dport), inet->inet_num,
347 tp->snd_una, tp->snd_nxt);
346 } 348 }
347#endif 349#endif
348 if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) { 350 if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index 01775983b997..0d0171830620 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -164,12 +164,12 @@ static const struct net_protocol tunnel64_protocol = {
164static int __init tunnel4_init(void) 164static int __init tunnel4_init(void)
165{ 165{
166 if (inet_add_protocol(&tunnel4_protocol, IPPROTO_IPIP)) { 166 if (inet_add_protocol(&tunnel4_protocol, IPPROTO_IPIP)) {
167 printk(KERN_ERR "tunnel4 init: can't add protocol\n"); 167 pr_err("%s: can't add protocol\n", __func__);
168 return -EAGAIN; 168 return -EAGAIN;
169 } 169 }
170#if IS_ENABLED(CONFIG_IPV6) 170#if IS_ENABLED(CONFIG_IPV6)
171 if (inet_add_protocol(&tunnel64_protocol, IPPROTO_IPV6)) { 171 if (inet_add_protocol(&tunnel64_protocol, IPPROTO_IPV6)) {
172 printk(KERN_ERR "tunnel64 init: can't add protocol\n"); 172 pr_err("tunnel64 init: can't add protocol\n");
173 inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP); 173 inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP);
174 return -EAGAIN; 174 return -EAGAIN;
175 } 175 }
@@ -181,10 +181,10 @@ static void __exit tunnel4_fini(void)
181{ 181{
182#if IS_ENABLED(CONFIG_IPV6) 182#if IS_ENABLED(CONFIG_IPV6)
183 if (inet_del_protocol(&tunnel64_protocol, IPPROTO_IPV6)) 183 if (inet_del_protocol(&tunnel64_protocol, IPPROTO_IPV6))
184 printk(KERN_ERR "tunnel64 close: can't remove protocol\n"); 184 pr_err("tunnel64 close: can't remove protocol\n");
185#endif 185#endif
186 if (inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP)) 186 if (inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP))
187 printk(KERN_ERR "tunnel4 close: can't remove protocol\n"); 187 pr_err("tunnel4 close: can't remove protocol\n");
188} 188}
189 189
190module_init(tunnel4_init); 190module_init(tunnel4_init);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5d075b5f70fc..d6f5feeb3eaf 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -77,6 +77,8 @@
77 * 2 of the License, or (at your option) any later version. 77 * 2 of the License, or (at your option) any later version.
78 */ 78 */
79 79
80#define pr_fmt(fmt) "UDP: " fmt
81
80#include <asm/system.h> 82#include <asm/system.h>
81#include <asm/uaccess.h> 83#include <asm/uaccess.h>
82#include <asm/ioctls.h> 84#include <asm/ioctls.h>
@@ -917,7 +919,8 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
917 if (!saddr) 919 if (!saddr)
918 saddr = inet->mc_addr; 920 saddr = inet->mc_addr;
919 connected = 0; 921 connected = 0;
920 } 922 } else if (!ipc.oif)
923 ipc.oif = inet->uc_index;
921 924
922 if (connected) 925 if (connected)
923 rt = (struct rtable *)sk_dst_check(sk, 0); 926 rt = (struct rtable *)sk_dst_check(sk, 0);
@@ -974,7 +977,7 @@ back_from_confirm:
974 /* ... which is an evident application bug. --ANK */ 977 /* ... which is an evident application bug. --ANK */
975 release_sock(sk); 978 release_sock(sk);
976 979
977 LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n"); 980 LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("cork app bug 2\n"));
978 err = -EINVAL; 981 err = -EINVAL;
979 goto out; 982 goto out;
980 } 983 }
@@ -1053,7 +1056,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
1053 if (unlikely(!up->pending)) { 1056 if (unlikely(!up->pending)) {
1054 release_sock(sk); 1057 release_sock(sk);
1055 1058
1056 LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 3\n"); 1059 LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("udp cork app bug 3\n"));
1057 return -EINVAL; 1060 return -EINVAL;
1058 } 1061 }
1059 1062
@@ -1166,7 +1169,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1166 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; 1169 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
1167 struct sk_buff *skb; 1170 struct sk_buff *skb;
1168 unsigned int ulen, copied; 1171 unsigned int ulen, copied;
1169 int peeked; 1172 int peeked, off = 0;
1170 int err; 1173 int err;
1171 int is_udplite = IS_UDPLITE(sk); 1174 int is_udplite = IS_UDPLITE(sk);
1172 bool slow; 1175 bool slow;
@@ -1182,7 +1185,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1182 1185
1183try_again: 1186try_again:
1184 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), 1187 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
1185 &peeked, &err); 1188 &peeked, &off, &err);
1186 if (!skb) 1189 if (!skb)
1187 goto out; 1190 goto out;
1188 1191
@@ -1446,9 +1449,8 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1446 * provided by the application." 1449 * provided by the application."
1447 */ 1450 */
1448 if (up->pcrlen == 0) { /* full coverage was set */ 1451 if (up->pcrlen == 0) { /* full coverage was set */
1449 LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage " 1452 LIMIT_NETDEBUG(KERN_WARNING "UDPLite: partial coverage %d while full coverage %d requested\n",
1450 "%d while full coverage %d requested\n", 1453 UDP_SKB_CB(skb)->cscov, skb->len);
1451 UDP_SKB_CB(skb)->cscov, skb->len);
1452 goto drop; 1454 goto drop;
1453 } 1455 }
1454 /* The next case involves violating the min. coverage requested 1456 /* The next case involves violating the min. coverage requested
@@ -1458,9 +1460,8 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1458 * Therefore the above ...()->partial_cov statement is essential. 1460 * Therefore the above ...()->partial_cov statement is essential.
1459 */ 1461 */
1460 if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { 1462 if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
1461 LIMIT_NETDEBUG(KERN_WARNING 1463 LIMIT_NETDEBUG(KERN_WARNING "UDPLite: coverage %d too small, need min %d\n",
1462 "UDPLITE: coverage %d too small, need min %d\n", 1464 UDP_SKB_CB(skb)->cscov, up->pcrlen);
1463 UDP_SKB_CB(skb)->cscov, up->pcrlen);
1464 goto drop; 1465 goto drop;
1465 } 1466 }
1466 } 1467 }
@@ -1688,13 +1689,10 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
1688 1689
1689short_packet: 1690short_packet:
1690 LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n", 1691 LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
1691 proto == IPPROTO_UDPLITE ? "-Lite" : "", 1692 proto == IPPROTO_UDPLITE ? "Lite" : "",
1692 &saddr, 1693 &saddr, ntohs(uh->source),
1693 ntohs(uh->source), 1694 ulen, skb->len,
1694 ulen, 1695 &daddr, ntohs(uh->dest));
1695 skb->len,
1696 &daddr,
1697 ntohs(uh->dest));
1698 goto drop; 1696 goto drop;
1699 1697
1700csum_error: 1698csum_error:
@@ -1703,11 +1701,8 @@ csum_error:
1703 * the network is concerned, anyway) as per 4.1.3.4 (MUST). 1701 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
1704 */ 1702 */
1705 LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n", 1703 LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
1706 proto == IPPROTO_UDPLITE ? "-Lite" : "", 1704 proto == IPPROTO_UDPLITE ? "Lite" : "",
1707 &saddr, 1705 &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
1708 ntohs(uh->source),
1709 &daddr,
1710 ntohs(uh->dest),
1711 ulen); 1706 ulen);
1712drop: 1707drop:
1713 UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); 1708 UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 12e9499a1a6c..2c46acd4cc36 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -10,6 +10,9 @@
10 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version. 11 * 2 of the License, or (at your option) any later version.
12 */ 12 */
13
14#define pr_fmt(fmt) "UDPLite: " fmt
15
13#include <linux/export.h> 16#include <linux/export.h>
14#include "udp_impl.h" 17#include "udp_impl.h"
15 18
@@ -129,11 +132,11 @@ void __init udplite4_register(void)
129 inet_register_protosw(&udplite4_protosw); 132 inet_register_protosw(&udplite4_protosw);
130 133
131 if (udplite4_proc_init()) 134 if (udplite4_proc_init())
132 printk(KERN_ERR "%s: Cannot register /proc!\n", __func__); 135 pr_err("%s: Cannot register /proc!\n", __func__);
133 return; 136 return;
134 137
135out_unregister_proto: 138out_unregister_proto:
136 proto_unregister(&udplite_prot); 139 proto_unregister(&udplite_prot);
137out_register_err: 140out_register_err:
138 printk(KERN_CRIT "%s: Cannot add UDP-Lite protocol.\n", __func__); 141 pr_crit("%s: Cannot add UDP-Lite protocol\n", __func__);
139} 142}
diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c
index 9247d9d70e9d..05a5df2febc9 100644
--- a/net/ipv4/xfrm4_tunnel.c
+++ b/net/ipv4/xfrm4_tunnel.c
@@ -3,6 +3,8 @@
3 * Copyright (C) 2003 David S. Miller (davem@redhat.com) 3 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
4 */ 4 */
5 5
6#define pr_fmt(fmt) "IPsec: " fmt
7
6#include <linux/skbuff.h> 8#include <linux/skbuff.h>
7#include <linux/module.h> 9#include <linux/module.h>
8#include <linux/mutex.h> 10#include <linux/mutex.h>
@@ -75,18 +77,18 @@ static struct xfrm_tunnel xfrm64_tunnel_handler __read_mostly = {
75static int __init ipip_init(void) 77static int __init ipip_init(void)
76{ 78{
77 if (xfrm_register_type(&ipip_type, AF_INET) < 0) { 79 if (xfrm_register_type(&ipip_type, AF_INET) < 0) {
78 printk(KERN_INFO "ipip init: can't add xfrm type\n"); 80 pr_info("%s: can't add xfrm type\n", __func__);
79 return -EAGAIN; 81 return -EAGAIN;
80 } 82 }
81 83
82 if (xfrm4_tunnel_register(&xfrm_tunnel_handler, AF_INET)) { 84 if (xfrm4_tunnel_register(&xfrm_tunnel_handler, AF_INET)) {
83 printk(KERN_INFO "ipip init: can't add xfrm handler for AF_INET\n"); 85 pr_info("%s: can't add xfrm handler for AF_INET\n", __func__);
84 xfrm_unregister_type(&ipip_type, AF_INET); 86 xfrm_unregister_type(&ipip_type, AF_INET);
85 return -EAGAIN; 87 return -EAGAIN;
86 } 88 }
87#if IS_ENABLED(CONFIG_IPV6) 89#if IS_ENABLED(CONFIG_IPV6)
88 if (xfrm4_tunnel_register(&xfrm64_tunnel_handler, AF_INET6)) { 90 if (xfrm4_tunnel_register(&xfrm64_tunnel_handler, AF_INET6)) {
89 printk(KERN_INFO "ipip init: can't add xfrm handler for AF_INET6\n"); 91 pr_info("%s: can't add xfrm handler for AF_INET6\n", __func__);
90 xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET); 92 xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET);
91 xfrm_unregister_type(&ipip_type, AF_INET); 93 xfrm_unregister_type(&ipip_type, AF_INET);
92 return -EAGAIN; 94 return -EAGAIN;
@@ -99,12 +101,14 @@ static void __exit ipip_fini(void)
99{ 101{
100#if IS_ENABLED(CONFIG_IPV6) 102#if IS_ENABLED(CONFIG_IPV6)
101 if (xfrm4_tunnel_deregister(&xfrm64_tunnel_handler, AF_INET6)) 103 if (xfrm4_tunnel_deregister(&xfrm64_tunnel_handler, AF_INET6))
102 printk(KERN_INFO "ipip close: can't remove xfrm handler for AF_INET6\n"); 104 pr_info("%s: can't remove xfrm handler for AF_INET6\n",
105 __func__);
103#endif 106#endif
104 if (xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET)) 107 if (xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET))
105 printk(KERN_INFO "ipip close: can't remove xfrm handler for AF_INET\n"); 108 pr_info("%s: can't remove xfrm handler for AF_INET\n",
109 __func__);
106 if (xfrm_unregister_type(&ipip_type, AF_INET) < 0) 110 if (xfrm_unregister_type(&ipip_type, AF_INET) < 0)
107 printk(KERN_INFO "ipip close: can't remove xfrm type\n"); 111 pr_info("%s: can't remove xfrm type\n", __func__);
108} 112}
109 113
110module_init(ipip_init); 114module_init(ipip_init);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 6b8ebc5da0e1..6a3bb6077e19 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -435,7 +435,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
435 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes); 435 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
436 436
437 /* Join all-router multicast group if forwarding is set */ 437 /* Join all-router multicast group if forwarding is set */
438 if (ndev->cnf.forwarding && dev && (dev->flags & IFF_MULTICAST)) 438 if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST))
439 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters); 439 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
440 440
441 return ndev; 441 return ndev;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 273f48d1df2e..5605f9dca87e 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -214,6 +214,7 @@ lookup_protocol:
214 inet->mc_ttl = 1; 214 inet->mc_ttl = 1;
215 inet->mc_index = 0; 215 inet->mc_index = 0;
216 inet->mc_list = NULL; 216 inet->mc_list = NULL;
217 inet->rcv_tos = 0;
217 218
218 if (ipv4_config.no_pmtu_disc) 219 if (ipv4_config.no_pmtu_disc)
219 inet->pmtudisc = IP_PMTUDISC_DONT; 220 inet->pmtudisc = IP_PMTUDISC_DONT;
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 59402b4637f9..db00d27ffb16 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -211,35 +211,6 @@ void ipv6_sock_ac_close(struct sock *sk)
211 rcu_read_unlock(); 211 rcu_read_unlock();
212} 212}
213 213
214#if 0
215/* The function is not used, which is funny. Apparently, author
216 * supposed to use it to filter out datagrams inside udp/raw but forgot.
217 *
218 * It is OK, anycasts are not special comparing to delivery to unicasts.
219 */
220
221int inet6_ac_check(struct sock *sk, struct in6_addr *addr, int ifindex)
222{
223 struct ipv6_ac_socklist *pac;
224 struct ipv6_pinfo *np = inet6_sk(sk);
225 int found;
226
227 found = 0;
228 read_lock(&ipv6_sk_ac_lock);
229 for (pac=np->ipv6_ac_list; pac; pac=pac->acl_next) {
230 if (ifindex && pac->acl_ifindex != ifindex)
231 continue;
232 found = ipv6_addr_equal(&pac->acl_addr, addr);
233 if (found)
234 break;
235 }
236 read_unlock(&ipv6_sk_ac_lock);
237
238 return found;
239}
240
241#endif
242
243static void aca_put(struct ifacaddr6 *ac) 214static void aca_put(struct ifacaddr6 *ac)
244{ 215{
245 if (atomic_dec_and_test(&ac->aca_refcnt)) { 216 if (atomic_dec_and_test(&ac->aca_refcnt)) {
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 251e7cd75e89..76832c8dc89d 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -485,7 +485,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
485 } 485 }
486 486
487 if (np->rxopt.bits.rxtclass) { 487 if (np->rxopt.bits.rxtclass) {
488 int tclass = (ntohl(*(__be32 *)ipv6_hdr(skb)) >> 20) & 0xff; 488 int tclass = ipv6_tclass(ipv6_hdr(skb));
489 put_cmsg(msg, SOL_IPV6, IPV6_TCLASS, sizeof(tclass), &tclass); 489 put_cmsg(msg, SOL_IPV6, IPV6_TCLASS, sizeof(tclass), &tclass);
490 } 490 }
491 491
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 01d46bff63c3..af88934e4d79 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -468,6 +468,8 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
468 468
469 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) 469 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
470 fl6.flowi6_oif = np->mcast_oif; 470 fl6.flowi6_oif = np->mcast_oif;
471 else if (!fl6.flowi6_oif)
472 fl6.flowi6_oif = np->ucast_oif;
471 473
472 dst = icmpv6_route_lookup(net, skb, sk, &fl6); 474 dst = icmpv6_route_lookup(net, skb, sk, &fl6);
473 if (IS_ERR(dst)) 475 if (IS_ERR(dst))
@@ -553,6 +555,8 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
553 555
554 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) 556 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
555 fl6.flowi6_oif = np->mcast_oif; 557 fl6.flowi6_oif = np->mcast_oif;
558 else if (!fl6.flowi6_oif)
559 fl6.flowi6_oif = np->ucast_oif;
556 560
557 err = ip6_dst_lookup(sk, &dst, &fl6); 561 err = ip6_dst_lookup(sk, &dst, &fl6);
558 if (err) 562 if (err)
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index b82bcde53f7a..5b27fbcae346 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1552,11 +1552,20 @@ static int fib6_age(struct rt6_info *rt, void *arg)
1552 time_after_eq(now, rt->dst.lastuse + gc_args.timeout)) { 1552 time_after_eq(now, rt->dst.lastuse + gc_args.timeout)) {
1553 RT6_TRACE("aging clone %p\n", rt); 1553 RT6_TRACE("aging clone %p\n", rt);
1554 return -1; 1554 return -1;
1555 } else if ((rt->rt6i_flags & RTF_GATEWAY) && 1555 } else if (rt->rt6i_flags & RTF_GATEWAY) {
1556 (!(dst_get_neighbour_noref_raw(&rt->dst)->flags & NTF_ROUTER))) { 1556 struct neighbour *neigh;
1557 RT6_TRACE("purging route %p via non-router but gateway\n", 1557 __u8 neigh_flags = 0;
1558 rt); 1558
1559 return -1; 1559 neigh = dst_neigh_lookup(&rt->dst, &rt->rt6i_gateway);
1560 if (neigh) {
1561 neigh_flags = neigh->flags;
1562 neigh_release(neigh);
1563 }
1564 if (neigh_flags & NTF_ROUTER) {
1565 RT6_TRACE("purging route %p via non-router but gateway\n",
1566 rt);
1567 return -1;
1568 }
1560 } 1569 }
1561 gc_args.more++; 1570 gc_args.more++;
1562 } 1571 }
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d97e07183ce9..b7ca46161cb9 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -388,7 +388,6 @@ int ip6_forward(struct sk_buff *skb)
388 struct ipv6hdr *hdr = ipv6_hdr(skb); 388 struct ipv6hdr *hdr = ipv6_hdr(skb);
389 struct inet6_skb_parm *opt = IP6CB(skb); 389 struct inet6_skb_parm *opt = IP6CB(skb);
390 struct net *net = dev_net(dst->dev); 390 struct net *net = dev_net(dst->dev);
391 struct neighbour *n;
392 u32 mtu; 391 u32 mtu;
393 392
394 if (net->ipv6.devconf_all->forwarding == 0) 393 if (net->ipv6.devconf_all->forwarding == 0)
@@ -463,8 +462,7 @@ int ip6_forward(struct sk_buff *skb)
463 send redirects to source routed frames. 462 send redirects to source routed frames.
464 We don't send redirects to frames decapsulated from IPsec. 463 We don't send redirects to frames decapsulated from IPsec.
465 */ 464 */
466 n = dst_get_neighbour_noref(dst); 465 if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
467 if (skb->dev == dst->dev && n && opt->srcrt == 0 && !skb_sec_path(skb)) {
468 struct in6_addr *target = NULL; 466 struct in6_addr *target = NULL;
469 struct rt6_info *rt; 467 struct rt6_info *rt;
470 468
@@ -474,8 +472,8 @@ int ip6_forward(struct sk_buff *skb)
474 */ 472 */
475 473
476 rt = (struct rt6_info *) dst; 474 rt = (struct rt6_info *) dst;
477 if ((rt->rt6i_flags & RTF_GATEWAY)) 475 if (rt->rt6i_flags & RTF_GATEWAY)
478 target = (struct in6_addr*)&n->primary_key; 476 target = &rt->rt6i_gateway;
479 else 477 else
480 target = &hdr->daddr; 478 target = &hdr->daddr;
481 479
@@ -486,7 +484,7 @@ int ip6_forward(struct sk_buff *skb)
486 and by source (inside ndisc_send_redirect) 484 and by source (inside ndisc_send_redirect)
487 */ 485 */
488 if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ)) 486 if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
489 ndisc_send_redirect(skb, n, target); 487 ndisc_send_redirect(skb, target);
490 } else { 488 } else {
491 int addrtype = ipv6_addr_type(&hdr->saddr); 489 int addrtype = ipv6_addr_type(&hdr->saddr);
492 490
@@ -1416,8 +1414,9 @@ alloc_new_skb:
1416 */ 1414 */
1417 skb->ip_summed = csummode; 1415 skb->ip_summed = csummode;
1418 skb->csum = 0; 1416 skb->csum = 0;
1419 /* reserve for fragmentation */ 1417 /* reserve for fragmentation and ipsec header */
1420 skb_reserve(skb, hh_len+sizeof(struct frag_hdr)); 1418 skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
1419 dst_exthdrlen);
1421 1420
1422 if (sk->sk_type == SOCK_DGRAM) 1421 if (sk->sk_type == SOCK_DGRAM)
1423 skb_shinfo(skb)->tx_flags = tx_flags; 1422 skb_shinfo(skb)->tx_flags = tx_flags;
@@ -1425,9 +1424,9 @@ alloc_new_skb:
1425 /* 1424 /*
1426 * Find where to start putting bytes 1425 * Find where to start putting bytes
1427 */ 1426 */
1428 data = skb_put(skb, fraglen + dst_exthdrlen); 1427 data = skb_put(skb, fraglen);
1429 skb_set_network_header(skb, exthdrlen + dst_exthdrlen); 1428 skb_set_network_header(skb, exthdrlen);
1430 data += fragheaderlen + dst_exthdrlen; 1429 data += fragheaderlen;
1431 skb->transport_header = (skb->network_header + 1430 skb->transport_header = (skb->network_header +
1432 fragheaderlen); 1431 fragheaderlen);
1433 if (fraggap) { 1432 if (fraggap) {
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 18a2719003c3..63dd1f89ed7d 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -516,6 +516,36 @@ done:
516 retv = 0; 516 retv = 0;
517 break; 517 break;
518 518
519 case IPV6_UNICAST_IF:
520 {
521 struct net_device *dev = NULL;
522 int ifindex;
523
524 if (optlen != sizeof(int))
525 goto e_inval;
526
527 ifindex = (__force int)ntohl((__force __be32)val);
528 if (ifindex == 0) {
529 np->ucast_oif = 0;
530 retv = 0;
531 break;
532 }
533
534 dev = dev_get_by_index(net, ifindex);
535 retv = -EADDRNOTAVAIL;
536 if (!dev)
537 break;
538 dev_put(dev);
539
540 retv = -EINVAL;
541 if (sk->sk_bound_dev_if)
542 break;
543
544 np->ucast_oif = ifindex;
545 retv = 0;
546 break;
547 }
548
519 case IPV6_MULTICAST_IF: 549 case IPV6_MULTICAST_IF:
520 if (sk->sk_type == SOCK_STREAM) 550 if (sk->sk_type == SOCK_STREAM)
521 break; 551 break;
@@ -987,6 +1017,10 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
987 int hlim = np->mcast_hops; 1017 int hlim = np->mcast_hops;
988 put_cmsg(&msg, SOL_IPV6, IPV6_HOPLIMIT, sizeof(hlim), &hlim); 1018 put_cmsg(&msg, SOL_IPV6, IPV6_HOPLIMIT, sizeof(hlim), &hlim);
989 } 1019 }
1020 if (np->rxopt.bits.rxtclass) {
1021 int tclass = np->rcv_tclass;
1022 put_cmsg(&msg, SOL_IPV6, IPV6_TCLASS, sizeof(tclass), &tclass);
1023 }
990 if (np->rxopt.bits.rxoinfo) { 1024 if (np->rxopt.bits.rxoinfo) {
991 struct in6_pktinfo src_info; 1025 struct in6_pktinfo src_info;
992 src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif : 1026 src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
@@ -1160,6 +1194,10 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
1160 val = np->mcast_oif; 1194 val = np->mcast_oif;
1161 break; 1195 break;
1162 1196
1197 case IPV6_UNICAST_IF:
1198 val = (__force int)htonl((__u32) np->ucast_oif);
1199 break;
1200
1163 case IPV6_MTU_DISCOVER: 1201 case IPV6_MTU_DISCOVER:
1164 val = np->pmtudisc; 1202 val = np->pmtudisc;
1165 break; 1203 break;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index c964958ac470..3dcdb81ec3e8 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1223,11 +1223,17 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1223 1223
1224 rt = rt6_get_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev); 1224 rt = rt6_get_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev);
1225 1225
1226 if (rt) 1226 if (rt) {
1227 neigh = dst_get_neighbour_noref(&rt->dst); 1227 neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr);
1228 1228 if (!neigh) {
1229 ND_PRINTK0(KERN_ERR
1230 "ICMPv6 RA: %s() got default router without neighbour.\n",
1231 __func__);
1232 dst_release(&rt->dst);
1233 return;
1234 }
1235 }
1229 if (rt && lifetime == 0) { 1236 if (rt && lifetime == 0) {
1230 neigh_clone(neigh);
1231 ip6_del_rt(rt); 1237 ip6_del_rt(rt);
1232 rt = NULL; 1238 rt = NULL;
1233 } 1239 }
@@ -1244,7 +1250,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1244 return; 1250 return;
1245 } 1251 }
1246 1252
1247 neigh = dst_get_neighbour_noref(&rt->dst); 1253 neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr);
1248 if (neigh == NULL) { 1254 if (neigh == NULL) {
1249 ND_PRINTK0(KERN_ERR 1255 ND_PRINTK0(KERN_ERR
1250 "ICMPv6 RA: %s() got default router without neighbour.\n", 1256 "ICMPv6 RA: %s() got default router without neighbour.\n",
@@ -1411,7 +1417,7 @@ skip_routeinfo:
1411out: 1417out:
1412 if (rt) 1418 if (rt)
1413 dst_release(&rt->dst); 1419 dst_release(&rt->dst);
1414 else if (neigh) 1420 if (neigh)
1415 neigh_release(neigh); 1421 neigh_release(neigh);
1416} 1422}
1417 1423
@@ -1506,8 +1512,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1506 } 1512 }
1507} 1513}
1508 1514
1509void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, 1515void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
1510 const struct in6_addr *target)
1511{ 1516{
1512 struct net_device *dev = skb->dev; 1517 struct net_device *dev = skb->dev;
1513 struct net *net = dev_net(dev); 1518 struct net *net = dev_net(dev);
@@ -1566,6 +1571,13 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1566 goto release; 1571 goto release;
1567 1572
1568 if (dev->addr_len) { 1573 if (dev->addr_len) {
1574 struct neighbour *neigh = dst_neigh_lookup(skb_dst(skb), target);
1575 if (!neigh) {
1576 ND_PRINTK2(KERN_WARNING
1577 "ICMPv6 Redirect: no neigh for target address\n");
1578 goto release;
1579 }
1580
1569 read_lock_bh(&neigh->lock); 1581 read_lock_bh(&neigh->lock);
1570 if (neigh->nud_state & NUD_VALID) { 1582 if (neigh->nud_state & NUD_VALID) {
1571 memcpy(ha_buf, neigh->ha, dev->addr_len); 1583 memcpy(ha_buf, neigh->ha, dev->addr_len);
@@ -1574,6 +1586,8 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1574 len += ndisc_opt_addr_space(dev); 1586 len += ndisc_opt_addr_space(dev);
1575 } else 1587 } else
1576 read_unlock_bh(&neigh->lock); 1588 read_unlock_bh(&neigh->lock);
1589
1590 neigh_release(neigh);
1577 } 1591 }
1578 1592
1579 rd_len = min_t(unsigned int, 1593 rd_len = min_t(unsigned int,
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 9a68fb5b9e77..d33cddd16fbb 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -154,15 +154,6 @@ config IP6_NF_TARGET_HL
154 (e.g. when running oldconfig). It selects 154 (e.g. when running oldconfig). It selects
155 CONFIG_NETFILTER_XT_TARGET_HL. 155 CONFIG_NETFILTER_XT_TARGET_HL.
156 156
157config IP6_NF_TARGET_LOG
158 tristate "LOG target support"
159 default m if NETFILTER_ADVANCED=n
160 help
161 This option adds a `LOG' target, which allows you to create rules in
162 any iptables table which records the packet header to the syslog.
163
164 To compile it as a module, choose M here. If unsure, say N.
165
166config IP6_NF_FILTER 157config IP6_NF_FILTER
167 tristate "Packet filtering" 158 tristate "Packet filtering"
168 default m if NETFILTER_ADVANCED=n 159 default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 2eaed96db02c..d4dfd0a21097 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -31,5 +31,4 @@ obj-$(CONFIG_IP6_NF_MATCH_RPFILTER) += ip6t_rpfilter.o
31obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o 31obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
32 32
33# targets 33# targets
34obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o
35obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o 34obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
deleted file mode 100644
index e6af8d72f26b..000000000000
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ /dev/null
@@ -1,527 +0,0 @@
1/*
2 * This is a module which is used for logging packets.
3 */
4
5/* (C) 2001 Jan Rekorajski <baggins@pld.org.pl>
6 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13#include <linux/module.h>
14#include <linux/skbuff.h>
15#include <linux/if_arp.h>
16#include <linux/ip.h>
17#include <linux/spinlock.h>
18#include <linux/icmpv6.h>
19#include <net/udp.h>
20#include <net/tcp.h>
21#include <net/ipv6.h>
22#include <linux/netfilter.h>
23#include <linux/netfilter/x_tables.h>
24#include <linux/netfilter_ipv6/ip6_tables.h>
25#include <net/netfilter/nf_log.h>
26#include <net/netfilter/xt_log.h>
27
28MODULE_AUTHOR("Jan Rekorajski <baggins@pld.org.pl>");
29MODULE_DESCRIPTION("Xtables: IPv6 packet logging to syslog");
30MODULE_LICENSE("GPL");
31
32struct in_device;
33#include <net/route.h>
34#include <linux/netfilter_ipv6/ip6t_LOG.h>
35
36/* One level of recursion won't kill us */
37static void dump_packet(struct sbuff *m,
38 const struct nf_loginfo *info,
39 const struct sk_buff *skb, unsigned int ip6hoff,
40 int recurse)
41{
42 u_int8_t currenthdr;
43 int fragment;
44 struct ipv6hdr _ip6h;
45 const struct ipv6hdr *ih;
46 unsigned int ptr;
47 unsigned int hdrlen = 0;
48 unsigned int logflags;
49
50 if (info->type == NF_LOG_TYPE_LOG)
51 logflags = info->u.log.logflags;
52 else
53 logflags = NF_LOG_MASK;
54
55 ih = skb_header_pointer(skb, ip6hoff, sizeof(_ip6h), &_ip6h);
56 if (ih == NULL) {
57 sb_add(m, "TRUNCATED");
58 return;
59 }
60
61 /* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000 " */
62 sb_add(m, "SRC=%pI6 DST=%pI6 ", &ih->saddr, &ih->daddr);
63
64 /* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */
65 sb_add(m, "LEN=%Zu TC=%u HOPLIMIT=%u FLOWLBL=%u ",
66 ntohs(ih->payload_len) + sizeof(struct ipv6hdr),
67 (ntohl(*(__be32 *)ih) & 0x0ff00000) >> 20,
68 ih->hop_limit,
69 (ntohl(*(__be32 *)ih) & 0x000fffff));
70
71 fragment = 0;
72 ptr = ip6hoff + sizeof(struct ipv6hdr);
73 currenthdr = ih->nexthdr;
74 while (currenthdr != NEXTHDR_NONE && ip6t_ext_hdr(currenthdr)) {
75 struct ipv6_opt_hdr _hdr;
76 const struct ipv6_opt_hdr *hp;
77
78 hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
79 if (hp == NULL) {
80 sb_add(m, "TRUNCATED");
81 return;
82 }
83
84 /* Max length: 48 "OPT (...) " */
85 if (logflags & IP6T_LOG_IPOPT)
86 sb_add(m, "OPT ( ");
87
88 switch (currenthdr) {
89 case IPPROTO_FRAGMENT: {
90 struct frag_hdr _fhdr;
91 const struct frag_hdr *fh;
92
93 sb_add(m, "FRAG:");
94 fh = skb_header_pointer(skb, ptr, sizeof(_fhdr),
95 &_fhdr);
96 if (fh == NULL) {
97 sb_add(m, "TRUNCATED ");
98 return;
99 }
100
101 /* Max length: 6 "65535 " */
102 sb_add(m, "%u ", ntohs(fh->frag_off) & 0xFFF8);
103
104 /* Max length: 11 "INCOMPLETE " */
105 if (fh->frag_off & htons(0x0001))
106 sb_add(m, "INCOMPLETE ");
107
108 sb_add(m, "ID:%08x ", ntohl(fh->identification));
109
110 if (ntohs(fh->frag_off) & 0xFFF8)
111 fragment = 1;
112
113 hdrlen = 8;
114
115 break;
116 }
117 case IPPROTO_DSTOPTS:
118 case IPPROTO_ROUTING:
119 case IPPROTO_HOPOPTS:
120 if (fragment) {
121 if (logflags & IP6T_LOG_IPOPT)
122 sb_add(m, ")");
123 return;
124 }
125 hdrlen = ipv6_optlen(hp);
126 break;
127 /* Max Length */
128 case IPPROTO_AH:
129 if (logflags & IP6T_LOG_IPOPT) {
130 struct ip_auth_hdr _ahdr;
131 const struct ip_auth_hdr *ah;
132
133 /* Max length: 3 "AH " */
134 sb_add(m, "AH ");
135
136 if (fragment) {
137 sb_add(m, ")");
138 return;
139 }
140
141 ah = skb_header_pointer(skb, ptr, sizeof(_ahdr),
142 &_ahdr);
143 if (ah == NULL) {
144 /*
145 * Max length: 26 "INCOMPLETE [65535
146 * bytes] )"
147 */
148 sb_add(m, "INCOMPLETE [%u bytes] )",
149 skb->len - ptr);
150 return;
151 }
152
153 /* Length: 15 "SPI=0xF1234567 */
154 sb_add(m, "SPI=0x%x ", ntohl(ah->spi));
155
156 }
157
158 hdrlen = (hp->hdrlen+2)<<2;
159 break;
160 case IPPROTO_ESP:
161 if (logflags & IP6T_LOG_IPOPT) {
162 struct ip_esp_hdr _esph;
163 const struct ip_esp_hdr *eh;
164
165 /* Max length: 4 "ESP " */
166 sb_add(m, "ESP ");
167
168 if (fragment) {
169 sb_add(m, ")");
170 return;
171 }
172
173 /*
174 * Max length: 26 "INCOMPLETE [65535 bytes] )"
175 */
176 eh = skb_header_pointer(skb, ptr, sizeof(_esph),
177 &_esph);
178 if (eh == NULL) {
179 sb_add(m, "INCOMPLETE [%u bytes] )",
180 skb->len - ptr);
181 return;
182 }
183
184 /* Length: 16 "SPI=0xF1234567 )" */
185 sb_add(m, "SPI=0x%x )", ntohl(eh->spi) );
186
187 }
188 return;
189 default:
190 /* Max length: 20 "Unknown Ext Hdr 255" */
191 sb_add(m, "Unknown Ext Hdr %u", currenthdr);
192 return;
193 }
194 if (logflags & IP6T_LOG_IPOPT)
195 sb_add(m, ") ");
196
197 currenthdr = hp->nexthdr;
198 ptr += hdrlen;
199 }
200
201 switch (currenthdr) {
202 case IPPROTO_TCP: {
203 struct tcphdr _tcph;
204 const struct tcphdr *th;
205
206 /* Max length: 10 "PROTO=TCP " */
207 sb_add(m, "PROTO=TCP ");
208
209 if (fragment)
210 break;
211
212 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
213 th = skb_header_pointer(skb, ptr, sizeof(_tcph), &_tcph);
214 if (th == NULL) {
215 sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr);
216 return;
217 }
218
219 /* Max length: 20 "SPT=65535 DPT=65535 " */
220 sb_add(m, "SPT=%u DPT=%u ",
221 ntohs(th->source), ntohs(th->dest));
222 /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
223 if (logflags & IP6T_LOG_TCPSEQ)
224 sb_add(m, "SEQ=%u ACK=%u ",
225 ntohl(th->seq), ntohl(th->ack_seq));
226 /* Max length: 13 "WINDOW=65535 " */
227 sb_add(m, "WINDOW=%u ", ntohs(th->window));
228 /* Max length: 9 "RES=0x3C " */
229 sb_add(m, "RES=0x%02x ", (u_int8_t)(ntohl(tcp_flag_word(th) & TCP_RESERVED_BITS) >> 22));
230 /* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */
231 if (th->cwr)
232 sb_add(m, "CWR ");
233 if (th->ece)
234 sb_add(m, "ECE ");
235 if (th->urg)
236 sb_add(m, "URG ");
237 if (th->ack)
238 sb_add(m, "ACK ");
239 if (th->psh)
240 sb_add(m, "PSH ");
241 if (th->rst)
242 sb_add(m, "RST ");
243 if (th->syn)
244 sb_add(m, "SYN ");
245 if (th->fin)
246 sb_add(m, "FIN ");
247 /* Max length: 11 "URGP=65535 " */
248 sb_add(m, "URGP=%u ", ntohs(th->urg_ptr));
249
250 if ((logflags & IP6T_LOG_TCPOPT) &&
251 th->doff * 4 > sizeof(struct tcphdr)) {
252 u_int8_t _opt[60 - sizeof(struct tcphdr)];
253 const u_int8_t *op;
254 unsigned int i;
255 unsigned int optsize = th->doff * 4
256 - sizeof(struct tcphdr);
257
258 op = skb_header_pointer(skb,
259 ptr + sizeof(struct tcphdr),
260 optsize, _opt);
261 if (op == NULL) {
262 sb_add(m, "OPT (TRUNCATED)");
263 return;
264 }
265
266 /* Max length: 127 "OPT (" 15*4*2chars ") " */
267 sb_add(m, "OPT (");
268 for (i =0; i < optsize; i++)
269 sb_add(m, "%02X", op[i]);
270 sb_add(m, ") ");
271 }
272 break;
273 }
274 case IPPROTO_UDP:
275 case IPPROTO_UDPLITE: {
276 struct udphdr _udph;
277 const struct udphdr *uh;
278
279 if (currenthdr == IPPROTO_UDP)
280 /* Max length: 10 "PROTO=UDP " */
281 sb_add(m, "PROTO=UDP " );
282 else /* Max length: 14 "PROTO=UDPLITE " */
283 sb_add(m, "PROTO=UDPLITE ");
284
285 if (fragment)
286 break;
287
288 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
289 uh = skb_header_pointer(skb, ptr, sizeof(_udph), &_udph);
290 if (uh == NULL) {
291 sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr);
292 return;
293 }
294
295 /* Max length: 20 "SPT=65535 DPT=65535 " */
296 sb_add(m, "SPT=%u DPT=%u LEN=%u ",
297 ntohs(uh->source), ntohs(uh->dest),
298 ntohs(uh->len));
299 break;
300 }
301 case IPPROTO_ICMPV6: {
302 struct icmp6hdr _icmp6h;
303 const struct icmp6hdr *ic;
304
305 /* Max length: 13 "PROTO=ICMPv6 " */
306 sb_add(m, "PROTO=ICMPv6 ");
307
308 if (fragment)
309 break;
310
311 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
312 ic = skb_header_pointer(skb, ptr, sizeof(_icmp6h), &_icmp6h);
313 if (ic == NULL) {
314 sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr);
315 return;
316 }
317
318 /* Max length: 18 "TYPE=255 CODE=255 " */
319 sb_add(m, "TYPE=%u CODE=%u ", ic->icmp6_type, ic->icmp6_code);
320
321 switch (ic->icmp6_type) {
322 case ICMPV6_ECHO_REQUEST:
323 case ICMPV6_ECHO_REPLY:
324 /* Max length: 19 "ID=65535 SEQ=65535 " */
325 sb_add(m, "ID=%u SEQ=%u ",
326 ntohs(ic->icmp6_identifier),
327 ntohs(ic->icmp6_sequence));
328 break;
329 case ICMPV6_MGM_QUERY:
330 case ICMPV6_MGM_REPORT:
331 case ICMPV6_MGM_REDUCTION:
332 break;
333
334 case ICMPV6_PARAMPROB:
335 /* Max length: 17 "POINTER=ffffffff " */
336 sb_add(m, "POINTER=%08x ", ntohl(ic->icmp6_pointer));
337 /* Fall through */
338 case ICMPV6_DEST_UNREACH:
339 case ICMPV6_PKT_TOOBIG:
340 case ICMPV6_TIME_EXCEED:
341 /* Max length: 3+maxlen */
342 if (recurse) {
343 sb_add(m, "[");
344 dump_packet(m, info, skb,
345 ptr + sizeof(_icmp6h), 0);
346 sb_add(m, "] ");
347 }
348
349 /* Max length: 10 "MTU=65535 " */
350 if (ic->icmp6_type == ICMPV6_PKT_TOOBIG)
351 sb_add(m, "MTU=%u ", ntohl(ic->icmp6_mtu));
352 }
353 break;
354 }
355 /* Max length: 10 "PROTO=255 " */
356 default:
357 sb_add(m, "PROTO=%u ", currenthdr);
358 }
359
360 /* Max length: 15 "UID=4294967295 " */
361 if ((logflags & IP6T_LOG_UID) && recurse && skb->sk) {
362 read_lock_bh(&skb->sk->sk_callback_lock);
363 if (skb->sk->sk_socket && skb->sk->sk_socket->file)
364 sb_add(m, "UID=%u GID=%u ",
365 skb->sk->sk_socket->file->f_cred->fsuid,
366 skb->sk->sk_socket->file->f_cred->fsgid);
367 read_unlock_bh(&skb->sk->sk_callback_lock);
368 }
369
370 /* Max length: 16 "MARK=0xFFFFFFFF " */
371 if (!recurse && skb->mark)
372 sb_add(m, "MARK=0x%x ", skb->mark);
373}
374
375static void dump_mac_header(struct sbuff *m,
376 const struct nf_loginfo *info,
377 const struct sk_buff *skb)
378{
379 struct net_device *dev = skb->dev;
380 unsigned int logflags = 0;
381
382 if (info->type == NF_LOG_TYPE_LOG)
383 logflags = info->u.log.logflags;
384
385 if (!(logflags & IP6T_LOG_MACDECODE))
386 goto fallback;
387
388 switch (dev->type) {
389 case ARPHRD_ETHER:
390 sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
391 eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
392 ntohs(eth_hdr(skb)->h_proto));
393 return;
394 default:
395 break;
396 }
397
398fallback:
399 sb_add(m, "MAC=");
400 if (dev->hard_header_len &&
401 skb->mac_header != skb->network_header) {
402 const unsigned char *p = skb_mac_header(skb);
403 unsigned int len = dev->hard_header_len;
404 unsigned int i;
405
406 if (dev->type == ARPHRD_SIT &&
407 (p -= ETH_HLEN) < skb->head)
408 p = NULL;
409
410 if (p != NULL) {
411 sb_add(m, "%02x", *p++);
412 for (i = 1; i < len; i++)
413 sb_add(m, ":%02x", *p++);
414 }
415 sb_add(m, " ");
416
417 if (dev->type == ARPHRD_SIT) {
418 const struct iphdr *iph =
419 (struct iphdr *)skb_mac_header(skb);
420 sb_add(m, "TUNNEL=%pI4->%pI4 ", &iph->saddr, &iph->daddr);
421 }
422 } else
423 sb_add(m, " ");
424}
425
426static struct nf_loginfo default_loginfo = {
427 .type = NF_LOG_TYPE_LOG,
428 .u = {
429 .log = {
430 .level = 5,
431 .logflags = NF_LOG_MASK,
432 },
433 },
434};
435
436static void
437ip6t_log_packet(u_int8_t pf,
438 unsigned int hooknum,
439 const struct sk_buff *skb,
440 const struct net_device *in,
441 const struct net_device *out,
442 const struct nf_loginfo *loginfo,
443 const char *prefix)
444{
445 struct sbuff *m = sb_open();
446
447 if (!loginfo)
448 loginfo = &default_loginfo;
449
450 sb_add(m, "<%d>%sIN=%s OUT=%s ", loginfo->u.log.level,
451 prefix,
452 in ? in->name : "",
453 out ? out->name : "");
454
455 if (in != NULL)
456 dump_mac_header(m, loginfo, skb);
457
458 dump_packet(m, loginfo, skb, skb_network_offset(skb), 1);
459
460 sb_close(m);
461}
462
463static unsigned int
464log_tg6(struct sk_buff *skb, const struct xt_action_param *par)
465{
466 const struct ip6t_log_info *loginfo = par->targinfo;
467 struct nf_loginfo li;
468
469 li.type = NF_LOG_TYPE_LOG;
470 li.u.log.level = loginfo->level;
471 li.u.log.logflags = loginfo->logflags;
472
473 ip6t_log_packet(NFPROTO_IPV6, par->hooknum, skb, par->in, par->out,
474 &li, loginfo->prefix);
475 return XT_CONTINUE;
476}
477
478
479static int log_tg6_check(const struct xt_tgchk_param *par)
480{
481 const struct ip6t_log_info *loginfo = par->targinfo;
482
483 if (loginfo->level >= 8) {
484 pr_debug("level %u >= 8\n", loginfo->level);
485 return -EINVAL;
486 }
487 if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') {
488 pr_debug("prefix not null-terminated\n");
489 return -EINVAL;
490 }
491 return 0;
492}
493
494static struct xt_target log_tg6_reg __read_mostly = {
495 .name = "LOG",
496 .family = NFPROTO_IPV6,
497 .target = log_tg6,
498 .targetsize = sizeof(struct ip6t_log_info),
499 .checkentry = log_tg6_check,
500 .me = THIS_MODULE,
501};
502
503static struct nf_logger ip6t_logger __read_mostly = {
504 .name = "ip6t_LOG",
505 .logfn = &ip6t_log_packet,
506 .me = THIS_MODULE,
507};
508
509static int __init log_tg6_init(void)
510{
511 int ret;
512
513 ret = xt_register_target(&log_tg6_reg);
514 if (ret < 0)
515 return ret;
516 nf_log_register(NFPROTO_IPV6, &ip6t_logger);
517 return 0;
518}
519
520static void __exit log_tg6_exit(void)
521{
522 nf_log_unregister(&ip6t_logger);
523 xt_unregister_target(&log_tg6_reg);
524}
525
526module_init(log_tg6_init);
527module_exit(log_tg6_exit);
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 7c05e7eacbc6..92cc9f2931ae 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -88,25 +88,31 @@ static int icmpv6_print_tuple(struct seq_file *s,
88 ntohs(tuple->src.u.icmp.id)); 88 ntohs(tuple->src.u.icmp.id));
89} 89}
90 90
91static unsigned int *icmpv6_get_timeouts(struct net *net)
92{
93 return &nf_ct_icmpv6_timeout;
94}
95
91/* Returns verdict for packet, or -1 for invalid. */ 96/* Returns verdict for packet, or -1 for invalid. */
92static int icmpv6_packet(struct nf_conn *ct, 97static int icmpv6_packet(struct nf_conn *ct,
93 const struct sk_buff *skb, 98 const struct sk_buff *skb,
94 unsigned int dataoff, 99 unsigned int dataoff,
95 enum ip_conntrack_info ctinfo, 100 enum ip_conntrack_info ctinfo,
96 u_int8_t pf, 101 u_int8_t pf,
97 unsigned int hooknum) 102 unsigned int hooknum,
103 unsigned int *timeout)
98{ 104{
99 /* Do not immediately delete the connection after the first 105 /* Do not immediately delete the connection after the first
100 successful reply to avoid excessive conntrackd traffic 106 successful reply to avoid excessive conntrackd traffic
101 and also to handle correctly ICMP echo reply duplicates. */ 107 and also to handle correctly ICMP echo reply duplicates. */
102 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_icmpv6_timeout); 108 nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
103 109
104 return NF_ACCEPT; 110 return NF_ACCEPT;
105} 111}
106 112
107/* Called when a new connection for this protocol found. */ 113/* Called when a new connection for this protocol found. */
108static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb, 114static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb,
109 unsigned int dataoff) 115 unsigned int dataoff, unsigned int *timeouts)
110{ 116{
111 static const u_int8_t valid_new[] = { 117 static const u_int8_t valid_new[] = {
112 [ICMPV6_ECHO_REQUEST - 128] = 1, 118 [ICMPV6_ECHO_REQUEST - 128] = 1,
@@ -270,6 +276,44 @@ static int icmpv6_nlattr_tuple_size(void)
270} 276}
271#endif 277#endif
272 278
279#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
280
281#include <linux/netfilter/nfnetlink.h>
282#include <linux/netfilter/nfnetlink_cttimeout.h>
283
284static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
285{
286 unsigned int *timeout = data;
287
288 if (tb[CTA_TIMEOUT_ICMPV6_TIMEOUT]) {
289 *timeout =
290 ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMPV6_TIMEOUT])) * HZ;
291 } else {
292 /* Set default ICMPv6 timeout. */
293 *timeout = nf_ct_icmpv6_timeout;
294 }
295 return 0;
296}
297
298static int
299icmpv6_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
300{
301 const unsigned int *timeout = data;
302
303 NLA_PUT_BE32(skb, CTA_TIMEOUT_ICMPV6_TIMEOUT, htonl(*timeout / HZ));
304
305 return 0;
306
307nla_put_failure:
308 return -ENOSPC;
309}
310
311static const struct nla_policy
312icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = {
313 [CTA_TIMEOUT_ICMPV6_TIMEOUT] = { .type = NLA_U32 },
314};
315#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
316
273#ifdef CONFIG_SYSCTL 317#ifdef CONFIG_SYSCTL
274static struct ctl_table_header *icmpv6_sysctl_header; 318static struct ctl_table_header *icmpv6_sysctl_header;
275static struct ctl_table icmpv6_sysctl_table[] = { 319static struct ctl_table icmpv6_sysctl_table[] = {
@@ -293,6 +337,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly =
293 .invert_tuple = icmpv6_invert_tuple, 337 .invert_tuple = icmpv6_invert_tuple,
294 .print_tuple = icmpv6_print_tuple, 338 .print_tuple = icmpv6_print_tuple,
295 .packet = icmpv6_packet, 339 .packet = icmpv6_packet,
340 .get_timeouts = icmpv6_get_timeouts,
296 .new = icmpv6_new, 341 .new = icmpv6_new,
297 .error = icmpv6_error, 342 .error = icmpv6_error,
298#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 343#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
@@ -301,6 +346,15 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly =
301 .nlattr_to_tuple = icmpv6_nlattr_to_tuple, 346 .nlattr_to_tuple = icmpv6_nlattr_to_tuple,
302 .nla_policy = icmpv6_nla_policy, 347 .nla_policy = icmpv6_nla_policy,
303#endif 348#endif
349#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
350 .ctnl_timeout = {
351 .nlattr_to_obj = icmpv6_timeout_nlattr_to_obj,
352 .obj_to_nlattr = icmpv6_timeout_obj_to_nlattr,
353 .nlattr_max = CTA_TIMEOUT_ICMP_MAX,
354 .obj_size = sizeof(unsigned int),
355 .nla_policy = icmpv6_timeout_nla_policy,
356 },
357#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
304#ifdef CONFIG_SYSCTL 358#ifdef CONFIG_SYSCTL
305 .ctl_table_header = &icmpv6_sysctl_header, 359 .ctl_table_header = &icmpv6_sysctl_header,
306 .ctl_table = icmpv6_sysctl_table, 360 .ctl_table = icmpv6_sysctl_table,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index d02f7e4dd611..5bddea778840 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -856,6 +856,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
856 856
857 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) 857 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
858 fl6.flowi6_oif = np->mcast_oif; 858 fl6.flowi6_oif = np->mcast_oif;
859 else if (!fl6.flowi6_oif)
860 fl6.flowi6_oif = np->ucast_oif;
859 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 861 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
860 862
861 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true); 863 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index b69fae76a6f1..9447bd69873a 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -336,12 +336,11 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
336 } 336 }
337 337
338found: 338found:
339 /* RFC5722, Section 4: 339 /* RFC5722, Section 4, amended by Errata ID : 3089
340 * When reassembling an IPv6 datagram, if 340 * When reassembling an IPv6 datagram, if
341 * one or more its constituent fragments is determined to be an 341 * one or more its constituent fragments is determined to be an
342 * overlapping fragment, the entire datagram (and any constituent 342 * overlapping fragment, the entire datagram (and any constituent
343 * fragments, including those not yet received) MUST be silently 343 * fragments) MUST be silently discarded.
344 * discarded.
345 */ 344 */
346 345
347 /* Check for overlap with preceding fragment. */ 346 /* Check for overlap with preceding fragment. */
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 22b766407de1..24c456e8aa1d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -121,9 +121,22 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
121 return p; 121 return p;
122} 122}
123 123
124static inline const void *choose_neigh_daddr(struct rt6_info *rt, const void *daddr)
125{
126 struct in6_addr *p = &rt->rt6i_gateway;
127
128 if (!ipv6_addr_any(p))
129 return (const void *) p;
130 return daddr;
131}
132
124static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, const void *daddr) 133static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, const void *daddr)
125{ 134{
126 struct neighbour *n = __ipv6_neigh_lookup(&nd_tbl, dst->dev, daddr); 135 struct rt6_info *rt = (struct rt6_info *) dst;
136 struct neighbour *n;
137
138 daddr = choose_neigh_daddr(rt, daddr);
139 n = __ipv6_neigh_lookup(&nd_tbl, dst->dev, daddr);
127 if (n) 140 if (n)
128 return n; 141 return n;
129 return neigh_create(&nd_tbl, daddr, dst->dev); 142 return neigh_create(&nd_tbl, daddr, dst->dev);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 133768e52912..c4ffd1743528 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -680,9 +680,10 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
680 /* ISATAP (RFC4214) - must come before 6to4 */ 680 /* ISATAP (RFC4214) - must come before 6to4 */
681 if (dev->priv_flags & IFF_ISATAP) { 681 if (dev->priv_flags & IFF_ISATAP) {
682 struct neighbour *neigh = NULL; 682 struct neighbour *neigh = NULL;
683 bool do_tx_error = false;
683 684
684 if (skb_dst(skb)) 685 if (skb_dst(skb))
685 neigh = dst_get_neighbour_noref(skb_dst(skb)); 686 neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
686 687
687 if (neigh == NULL) { 688 if (neigh == NULL) {
688 if (net_ratelimit()) 689 if (net_ratelimit())
@@ -697,6 +698,10 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
697 ipv6_addr_is_isatap(addr6)) 698 ipv6_addr_is_isatap(addr6))
698 dst = addr6->s6_addr32[3]; 699 dst = addr6->s6_addr32[3];
699 else 700 else
701 do_tx_error = true;
702
703 neigh_release(neigh);
704 if (do_tx_error)
700 goto tx_error; 705 goto tx_error;
701 } 706 }
702 707
@@ -705,9 +710,10 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
705 710
706 if (!dst) { 711 if (!dst) {
707 struct neighbour *neigh = NULL; 712 struct neighbour *neigh = NULL;
713 bool do_tx_error = false;
708 714
709 if (skb_dst(skb)) 715 if (skb_dst(skb))
710 neigh = dst_get_neighbour_noref(skb_dst(skb)); 716 neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
711 717
712 if (neigh == NULL) { 718 if (neigh == NULL) {
713 if (net_ratelimit()) 719 if (net_ratelimit())
@@ -723,10 +729,14 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
723 addr_type = ipv6_addr_type(addr6); 729 addr_type = ipv6_addr_type(addr6);
724 } 730 }
725 731
726 if ((addr_type & IPV6_ADDR_COMPATv4) == 0) 732 if ((addr_type & IPV6_ADDR_COMPATv4) != 0)
727 goto tx_error_icmp; 733 dst = addr6->s6_addr32[3];
734 else
735 do_tx_error = true;
728 736
729 dst = addr6->s6_addr32[3]; 737 neigh_release(neigh);
738 if (do_tx_error)
739 goto tx_error;
730 } 740 }
731 741
732 rt = ip_route_output_ports(dev_net(dev), &fl4, NULL, 742 rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 3edd05ae4388..12c6ece67f39 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -540,19 +540,7 @@ static void tcp_v6_reqsk_destructor(struct request_sock *req)
540static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, 540static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
541 const struct in6_addr *addr) 541 const struct in6_addr *addr)
542{ 542{
543 struct tcp_sock *tp = tcp_sk(sk); 543 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
544 int i;
545
546 BUG_ON(tp == NULL);
547
548 if (!tp->md5sig_info || !tp->md5sig_info->entries6)
549 return NULL;
550
551 for (i = 0; i < tp->md5sig_info->entries6; i++) {
552 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
553 return &tp->md5sig_info->keys6[i].base;
554 }
555 return NULL;
556} 544}
557 545
558static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk, 546static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
@@ -567,136 +555,11 @@ static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
567 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr); 555 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
568} 556}
569 557
570static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
571 char *newkey, u8 newkeylen)
572{
573 /* Add key to the list */
574 struct tcp_md5sig_key *key;
575 struct tcp_sock *tp = tcp_sk(sk);
576 struct tcp6_md5sig_key *keys;
577
578 key = tcp_v6_md5_do_lookup(sk, peer);
579 if (key) {
580 /* modify existing entry - just update that one */
581 kfree(key->key);
582 key->key = newkey;
583 key->keylen = newkeylen;
584 } else {
585 /* reallocate new list if current one is full. */
586 if (!tp->md5sig_info) {
587 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
588 if (!tp->md5sig_info) {
589 kfree(newkey);
590 return -ENOMEM;
591 }
592 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
593 }
594 if (tp->md5sig_info->entries6 == 0 &&
595 tcp_alloc_md5sig_pool(sk) == NULL) {
596 kfree(newkey);
597 return -ENOMEM;
598 }
599 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
600 keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
601 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
602
603 if (!keys) {
604 kfree(newkey);
605 if (tp->md5sig_info->entries6 == 0)
606 tcp_free_md5sig_pool();
607 return -ENOMEM;
608 }
609
610 if (tp->md5sig_info->entries6)
611 memmove(keys, tp->md5sig_info->keys6,
612 (sizeof (tp->md5sig_info->keys6[0]) *
613 tp->md5sig_info->entries6));
614
615 kfree(tp->md5sig_info->keys6);
616 tp->md5sig_info->keys6 = keys;
617 tp->md5sig_info->alloced6++;
618 }
619
620 tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr = *peer;
621 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
622 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
623
624 tp->md5sig_info->entries6++;
625 }
626 return 0;
627}
628
629static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
630 u8 *newkey, __u8 newkeylen)
631{
632 return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
633 newkey, newkeylen);
634}
635
636static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
637{
638 struct tcp_sock *tp = tcp_sk(sk);
639 int i;
640
641 for (i = 0; i < tp->md5sig_info->entries6; i++) {
642 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
643 /* Free the key */
644 kfree(tp->md5sig_info->keys6[i].base.key);
645 tp->md5sig_info->entries6--;
646
647 if (tp->md5sig_info->entries6 == 0) {
648 kfree(tp->md5sig_info->keys6);
649 tp->md5sig_info->keys6 = NULL;
650 tp->md5sig_info->alloced6 = 0;
651 tcp_free_md5sig_pool();
652 } else {
653 /* shrink the database */
654 if (tp->md5sig_info->entries6 != i)
655 memmove(&tp->md5sig_info->keys6[i],
656 &tp->md5sig_info->keys6[i+1],
657 (tp->md5sig_info->entries6 - i)
658 * sizeof (tp->md5sig_info->keys6[0]));
659 }
660 return 0;
661 }
662 }
663 return -ENOENT;
664}
665
666static void tcp_v6_clear_md5_list (struct sock *sk)
667{
668 struct tcp_sock *tp = tcp_sk(sk);
669 int i;
670
671 if (tp->md5sig_info->entries6) {
672 for (i = 0; i < tp->md5sig_info->entries6; i++)
673 kfree(tp->md5sig_info->keys6[i].base.key);
674 tp->md5sig_info->entries6 = 0;
675 tcp_free_md5sig_pool();
676 }
677
678 kfree(tp->md5sig_info->keys6);
679 tp->md5sig_info->keys6 = NULL;
680 tp->md5sig_info->alloced6 = 0;
681
682 if (tp->md5sig_info->entries4) {
683 for (i = 0; i < tp->md5sig_info->entries4; i++)
684 kfree(tp->md5sig_info->keys4[i].base.key);
685 tp->md5sig_info->entries4 = 0;
686 tcp_free_md5sig_pool();
687 }
688
689 kfree(tp->md5sig_info->keys4);
690 tp->md5sig_info->keys4 = NULL;
691 tp->md5sig_info->alloced4 = 0;
692}
693
694static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval, 558static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
695 int optlen) 559 int optlen)
696{ 560{
697 struct tcp_md5sig cmd; 561 struct tcp_md5sig cmd;
698 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr; 562 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
699 u8 *newkey;
700 563
701 if (optlen < sizeof(cmd)) 564 if (optlen < sizeof(cmd))
702 return -EINVAL; 565 return -EINVAL;
@@ -708,36 +571,22 @@ static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
708 return -EINVAL; 571 return -EINVAL;
709 572
710 if (!cmd.tcpm_keylen) { 573 if (!cmd.tcpm_keylen) {
711 if (!tcp_sk(sk)->md5sig_info)
712 return -ENOENT;
713 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) 574 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
714 return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]); 575 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
715 return tcp_v6_md5_do_del(sk, &sin6->sin6_addr); 576 AF_INET);
577 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
578 AF_INET6);
716 } 579 }
717 580
718 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN) 581 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
719 return -EINVAL; 582 return -EINVAL;
720 583
721 if (!tcp_sk(sk)->md5sig_info) { 584 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
722 struct tcp_sock *tp = tcp_sk(sk); 585 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
723 struct tcp_md5sig_info *p; 586 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
724
725 p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
726 if (!p)
727 return -ENOMEM;
728 587
729 tp->md5sig_info = p; 588 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
730 sk_nocaps_add(sk, NETIF_F_GSO_MASK); 589 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
731 }
732
733 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
734 if (!newkey)
735 return -ENOMEM;
736 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
737 return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
738 newkey, cmd.tcpm_keylen);
739 }
740 return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
741} 590}
742 591
743static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp, 592static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
@@ -1074,6 +923,13 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1074 const struct tcphdr *th = tcp_hdr(skb); 923 const struct tcphdr *th = tcp_hdr(skb);
1075 u32 seq = 0, ack_seq = 0; 924 u32 seq = 0, ack_seq = 0;
1076 struct tcp_md5sig_key *key = NULL; 925 struct tcp_md5sig_key *key = NULL;
926#ifdef CONFIG_TCP_MD5SIG
927 const __u8 *hash_location = NULL;
928 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
929 unsigned char newhash[16];
930 int genhash;
931 struct sock *sk1 = NULL;
932#endif
1077 933
1078 if (th->rst) 934 if (th->rst)
1079 return; 935 return;
@@ -1082,8 +938,32 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1082 return; 938 return;
1083 939
1084#ifdef CONFIG_TCP_MD5SIG 940#ifdef CONFIG_TCP_MD5SIG
1085 if (sk) 941 hash_location = tcp_parse_md5sig_option(th);
1086 key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr); 942 if (!sk && hash_location) {
943 /*
944 * active side is lost. Try to find listening socket through
945 * source port, and then find md5 key through listening socket.
946 * we are not loose security here:
947 * Incoming packet is checked with md5 hash with finding key,
948 * no RST generated if md5 hash doesn't match.
949 */
950 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
951 &tcp_hashinfo, &ipv6h->daddr,
952 ntohs(th->source), inet6_iif(skb));
953 if (!sk1)
954 return;
955
956 rcu_read_lock();
957 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
958 if (!key)
959 goto release_sk1;
960
961 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
962 if (genhash || memcmp(hash_location, newhash, 16) != 0)
963 goto release_sk1;
964 } else {
965 key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
966 }
1087#endif 967#endif
1088 968
1089 if (th->ack) 969 if (th->ack)
@@ -1093,6 +973,14 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1093 (th->doff << 2); 973 (th->doff << 2);
1094 974
1095 tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0); 975 tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);
976
977#ifdef CONFIG_TCP_MD5SIG
978release_sk1:
979 if (sk1) {
980 rcu_read_unlock();
981 sock_put(sk1);
982 }
983#endif
1096} 984}
1097 985
1098static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts, 986static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
@@ -1394,6 +1282,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1394 newnp->opt = NULL; 1282 newnp->opt = NULL;
1395 newnp->mcast_oif = inet6_iif(skb); 1283 newnp->mcast_oif = inet6_iif(skb);
1396 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; 1284 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1285 newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
1397 1286
1398 /* 1287 /*
1399 * No need to charge this sock to the relevant IPv6 refcnt debug socks count 1288 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
@@ -1472,6 +1361,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1472 newnp->opt = NULL; 1361 newnp->opt = NULL;
1473 newnp->mcast_oif = inet6_iif(skb); 1362 newnp->mcast_oif = inet6_iif(skb);
1474 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; 1363 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1364 newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
1475 1365
1476 /* Clone native IPv6 options from listening socket (if any) 1366 /* Clone native IPv6 options from listening socket (if any)
1477 1367
@@ -1510,10 +1400,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1510 * memory, then we end up not copying the key 1400 * memory, then we end up not copying the key
1511 * across. Shucks. 1401 * across. Shucks.
1512 */ 1402 */
1513 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC); 1403 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
1514 if (newkey != NULL) 1404 AF_INET6, key->key, key->keylen, GFP_ATOMIC);
1515 tcp_v6_md5_do_add(newsk, &newnp->daddr,
1516 newkey, key->keylen);
1517 } 1405 }
1518#endif 1406#endif
1519 1407
@@ -1676,6 +1564,8 @@ ipv6_pktoptions:
1676 np->mcast_oif = inet6_iif(opt_skb); 1564 np->mcast_oif = inet6_iif(opt_skb);
1677 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) 1565 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1678 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit; 1566 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1567 if (np->rxopt.bits.rxtclass)
1568 np->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
1679 if (ipv6_opt_accepted(sk, opt_skb)) { 1569 if (ipv6_opt_accepted(sk, opt_skb)) {
1680 skb_set_owner_r(opt_skb, sk); 1570 skb_set_owner_r(opt_skb, sk);
1681 opt_skb = xchg(&np->pktoptions, opt_skb); 1571 opt_skb = xchg(&np->pktoptions, opt_skb);
@@ -1898,7 +1788,6 @@ static const struct inet_connection_sock_af_ops ipv6_specific = {
1898static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = { 1788static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1899 .md5_lookup = tcp_v6_md5_lookup, 1789 .md5_lookup = tcp_v6_md5_lookup,
1900 .calc_md5_hash = tcp_v6_md5_hash_skb, 1790 .calc_md5_hash = tcp_v6_md5_hash_skb,
1901 .md5_add = tcp_v6_md5_add_func,
1902 .md5_parse = tcp_v6_parse_md5_keys, 1791 .md5_parse = tcp_v6_parse_md5_keys,
1903}; 1792};
1904#endif 1793#endif
@@ -1930,7 +1819,6 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
1930static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = { 1819static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1931 .md5_lookup = tcp_v4_md5_lookup, 1820 .md5_lookup = tcp_v4_md5_lookup,
1932 .calc_md5_hash = tcp_v4_md5_hash_skb, 1821 .calc_md5_hash = tcp_v4_md5_hash_skb,
1933 .md5_add = tcp_v6_md5_add_func,
1934 .md5_parse = tcp_v6_parse_md5_keys, 1822 .md5_parse = tcp_v6_parse_md5_keys,
1935}; 1823};
1936#endif 1824#endif
@@ -2004,11 +1892,6 @@ static int tcp_v6_init_sock(struct sock *sk)
2004 1892
2005static void tcp_v6_destroy_sock(struct sock *sk) 1893static void tcp_v6_destroy_sock(struct sock *sk)
2006{ 1894{
2007#ifdef CONFIG_TCP_MD5SIG
2008 /* Clean up the MD5 key list */
2009 if (tcp_sk(sk)->md5sig_info)
2010 tcp_v6_clear_md5_list(sk);
2011#endif
2012 tcp_v4_destroy_sock(sk); 1895 tcp_v4_destroy_sock(sk);
2013 inet6_destroy_sock(sk); 1896 inet6_destroy_sock(sk);
2014} 1897}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 4f96b5c63685..37b0699e95e5 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -342,7 +342,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
342 struct inet_sock *inet = inet_sk(sk); 342 struct inet_sock *inet = inet_sk(sk);
343 struct sk_buff *skb; 343 struct sk_buff *skb;
344 unsigned int ulen, copied; 344 unsigned int ulen, copied;
345 int peeked; 345 int peeked, off = 0;
346 int err; 346 int err;
347 int is_udplite = IS_UDPLITE(sk); 347 int is_udplite = IS_UDPLITE(sk);
348 int is_udp4; 348 int is_udp4;
@@ -359,7 +359,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
359 359
360try_again: 360try_again:
361 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), 361 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
362 &peeked, &err); 362 &peeked, &off, &err);
363 if (!skb) 363 if (!skb)
364 goto out; 364 goto out;
365 365
@@ -1130,7 +1130,8 @@ do_udp_sendmsg:
1130 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) { 1130 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
1131 fl6.flowi6_oif = np->mcast_oif; 1131 fl6.flowi6_oif = np->mcast_oif;
1132 connected = 0; 1132 connected = 0;
1133 } 1133 } else if (!fl6.flowi6_oif)
1134 fl6.flowi6_oif = np->ucast_oif;
1134 1135
1135 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 1136 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
1136 1137
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 4eeff89c1aaa..8755a3079d0f 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -146,7 +146,7 @@ static int __xfrm6_output(struct sk_buff *skb)
146 return -EMSGSIZE; 146 return -EMSGSIZE;
147 } 147 }
148 148
149 if ((x && x->props.mode == XFRM_MODE_TUNNEL) && 149 if (x->props.mode == XFRM_MODE_TUNNEL &&
150 ((skb->len > mtu && !skb_is_gso(skb)) || 150 ((skb->len > mtu && !skb_is_gso(skb)) ||
151 dst_allfrag(skb_dst(skb)))) { 151 dst_allfrag(skb_dst(skb)))) {
152 return ip6_fragment(skb, x->outer_mode->afinfo->output_finish); 152 return ip6_fragment(skb, x->outer_mode->afinfo->output_finish);
diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h
index 979ecb2435a7..564eb0b8afa3 100644
--- a/net/irda/irnet/irnet.h
+++ b/net/irda/irnet/irnet.h
@@ -254,7 +254,7 @@
254#include <linux/init.h> 254#include <linux/init.h>
255 255
256#include <linux/ppp_defs.h> 256#include <linux/ppp_defs.h>
257#include <linux/if_ppp.h> 257#include <linux/ppp-ioctl.h>
258#include <linux/ppp_channel.h> 258#include <linux/ppp_channel.h>
259 259
260#include <net/irda/irda.h> 260#include <net/irda/irda.h>
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index d5c5b8fd1d01..07d7d55a1b93 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -90,6 +90,7 @@ do { \
90 90
91static void iucv_sock_kill(struct sock *sk); 91static void iucv_sock_kill(struct sock *sk);
92static void iucv_sock_close(struct sock *sk); 92static void iucv_sock_close(struct sock *sk);
93static void iucv_sever_path(struct sock *, int);
93 94
94static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, 95static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
95 struct packet_type *pt, struct net_device *orig_dev); 96 struct packet_type *pt, struct net_device *orig_dev);
@@ -130,17 +131,6 @@ static inline void low_nmcpy(unsigned char *dst, char *src)
130 memcpy(&dst[8], src, 8); 131 memcpy(&dst[8], src, 8);
131} 132}
132 133
133static void iucv_skb_queue_purge(struct sk_buff_head *list)
134{
135 struct sk_buff *skb;
136
137 while ((skb = skb_dequeue(list)) != NULL) {
138 if (skb->dev)
139 dev_put(skb->dev);
140 kfree_skb(skb);
141 }
142}
143
144static int afiucv_pm_prepare(struct device *dev) 134static int afiucv_pm_prepare(struct device *dev)
145{ 135{
146#ifdef CONFIG_PM_DEBUG 136#ifdef CONFIG_PM_DEBUG
@@ -175,17 +165,11 @@ static int afiucv_pm_freeze(struct device *dev)
175 read_lock(&iucv_sk_list.lock); 165 read_lock(&iucv_sk_list.lock);
176 sk_for_each(sk, node, &iucv_sk_list.head) { 166 sk_for_each(sk, node, &iucv_sk_list.head) {
177 iucv = iucv_sk(sk); 167 iucv = iucv_sk(sk);
178 iucv_skb_queue_purge(&iucv->send_skb_q);
179 skb_queue_purge(&iucv->backlog_skb_q);
180 switch (sk->sk_state) { 168 switch (sk->sk_state) {
181 case IUCV_DISCONN: 169 case IUCV_DISCONN:
182 case IUCV_CLOSING: 170 case IUCV_CLOSING:
183 case IUCV_CONNECTED: 171 case IUCV_CONNECTED:
184 if (iucv->path) { 172 iucv_sever_path(sk, 0);
185 err = pr_iucv->path_sever(iucv->path, NULL);
186 iucv_path_free(iucv->path);
187 iucv->path = NULL;
188 }
189 break; 173 break;
190 case IUCV_OPEN: 174 case IUCV_OPEN:
191 case IUCV_BOUND: 175 case IUCV_BOUND:
@@ -194,6 +178,8 @@ static int afiucv_pm_freeze(struct device *dev)
194 default: 178 default:
195 break; 179 break;
196 } 180 }
181 skb_queue_purge(&iucv->send_skb_q);
182 skb_queue_purge(&iucv->backlog_skb_q);
197 } 183 }
198 read_unlock(&iucv_sk_list.lock); 184 read_unlock(&iucv_sk_list.lock);
199 return err; 185 return err;
@@ -338,7 +324,6 @@ static void iucv_sock_wake_msglim(struct sock *sk)
338static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock, 324static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
339 struct sk_buff *skb, u8 flags) 325 struct sk_buff *skb, u8 flags)
340{ 326{
341 struct net *net = sock_net(sock);
342 struct iucv_sock *iucv = iucv_sk(sock); 327 struct iucv_sock *iucv = iucv_sk(sock);
343 struct af_iucv_trans_hdr *phs_hdr; 328 struct af_iucv_trans_hdr *phs_hdr;
344 struct sk_buff *nskb; 329 struct sk_buff *nskb;
@@ -375,10 +360,10 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
375 if (imsg) 360 if (imsg)
376 memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message)); 361 memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
377 362
378 skb->dev = dev_get_by_index(net, sock->sk_bound_dev_if); 363 skb->dev = iucv->hs_dev;
379 if (!skb->dev) 364 if (!skb->dev)
380 return -ENODEV; 365 return -ENODEV;
381 if (!(skb->dev->flags & IFF_UP)) 366 if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
382 return -ENETDOWN; 367 return -ENETDOWN;
383 if (skb->len > skb->dev->mtu) { 368 if (skb->len > skb->dev->mtu) {
384 if (sock->sk_type == SOCK_SEQPACKET) 369 if (sock->sk_type == SOCK_SEQPACKET)
@@ -393,15 +378,14 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
393 return -ENOMEM; 378 return -ENOMEM;
394 skb_queue_tail(&iucv->send_skb_q, nskb); 379 skb_queue_tail(&iucv->send_skb_q, nskb);
395 err = dev_queue_xmit(skb); 380 err = dev_queue_xmit(skb);
396 if (err) { 381 if (net_xmit_eval(err)) {
397 skb_unlink(nskb, &iucv->send_skb_q); 382 skb_unlink(nskb, &iucv->send_skb_q);
398 dev_put(nskb->dev);
399 kfree_skb(nskb); 383 kfree_skb(nskb);
400 } else { 384 } else {
401 atomic_sub(confirm_recv, &iucv->msg_recv); 385 atomic_sub(confirm_recv, &iucv->msg_recv);
402 WARN_ON(atomic_read(&iucv->msg_recv) < 0); 386 WARN_ON(atomic_read(&iucv->msg_recv) < 0);
403 } 387 }
404 return err; 388 return net_xmit_eval(err);
405} 389}
406 390
407static struct sock *__iucv_get_sock_by_name(char *nm) 391static struct sock *__iucv_get_sock_by_name(char *nm)
@@ -419,7 +403,19 @@ static struct sock *__iucv_get_sock_by_name(char *nm)
419static void iucv_sock_destruct(struct sock *sk) 403static void iucv_sock_destruct(struct sock *sk)
420{ 404{
421 skb_queue_purge(&sk->sk_receive_queue); 405 skb_queue_purge(&sk->sk_receive_queue);
422 skb_queue_purge(&sk->sk_write_queue); 406 skb_queue_purge(&sk->sk_error_queue);
407
408 sk_mem_reclaim(sk);
409
410 if (!sock_flag(sk, SOCK_DEAD)) {
411 pr_err("Attempt to release alive iucv socket %p\n", sk);
412 return;
413 }
414
415 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
416 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
417 WARN_ON(sk->sk_wmem_queued);
418 WARN_ON(sk->sk_forward_alloc);
423} 419}
424 420
425/* Cleanup Listen */ 421/* Cleanup Listen */
@@ -447,14 +443,48 @@ static void iucv_sock_kill(struct sock *sk)
447 sock_put(sk); 443 sock_put(sk);
448} 444}
449 445
446/* Terminate an IUCV path */
447static void iucv_sever_path(struct sock *sk, int with_user_data)
448{
449 unsigned char user_data[16];
450 struct iucv_sock *iucv = iucv_sk(sk);
451 struct iucv_path *path = iucv->path;
452
453 if (iucv->path) {
454 iucv->path = NULL;
455 if (with_user_data) {
456 low_nmcpy(user_data, iucv->src_name);
457 high_nmcpy(user_data, iucv->dst_name);
458 ASCEBC(user_data, sizeof(user_data));
459 pr_iucv->path_sever(path, user_data);
460 } else
461 pr_iucv->path_sever(path, NULL);
462 iucv_path_free(path);
463 }
464}
465
466/* Send FIN through an IUCV socket for HIPER transport */
467static int iucv_send_ctrl(struct sock *sk, u8 flags)
468{
469 int err = 0;
470 int blen;
471 struct sk_buff *skb;
472
473 blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
474 skb = sock_alloc_send_skb(sk, blen, 1, &err);
475 if (skb) {
476 skb_reserve(skb, blen);
477 err = afiucv_hs_send(NULL, sk, skb, flags);
478 }
479 return err;
480}
481
450/* Close an IUCV socket */ 482/* Close an IUCV socket */
451static void iucv_sock_close(struct sock *sk) 483static void iucv_sock_close(struct sock *sk)
452{ 484{
453 unsigned char user_data[16];
454 struct iucv_sock *iucv = iucv_sk(sk); 485 struct iucv_sock *iucv = iucv_sk(sk);
455 unsigned long timeo; 486 unsigned long timeo;
456 int err, blen; 487 int err = 0;
457 struct sk_buff *skb;
458 488
459 lock_sock(sk); 489 lock_sock(sk);
460 490
@@ -465,14 +495,7 @@ static void iucv_sock_close(struct sock *sk)
465 495
466 case IUCV_CONNECTED: 496 case IUCV_CONNECTED:
467 if (iucv->transport == AF_IUCV_TRANS_HIPER) { 497 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
468 /* send fin */ 498 err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
469 blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
470 skb = sock_alloc_send_skb(sk, blen, 1, &err);
471 if (skb) {
472 skb_reserve(skb, blen);
473 err = afiucv_hs_send(NULL, sk, skb,
474 AF_IUCV_FLAG_FIN);
475 }
476 sk->sk_state = IUCV_DISCONN; 499 sk->sk_state = IUCV_DISCONN;
477 sk->sk_state_change(sk); 500 sk->sk_state_change(sk);
478 } 501 }
@@ -480,7 +503,7 @@ static void iucv_sock_close(struct sock *sk)
480 sk->sk_state = IUCV_CLOSING; 503 sk->sk_state = IUCV_CLOSING;
481 sk->sk_state_change(sk); 504 sk->sk_state_change(sk);
482 505
483 if (!skb_queue_empty(&iucv->send_skb_q)) { 506 if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
484 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) 507 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
485 timeo = sk->sk_lingertime; 508 timeo = sk->sk_lingertime;
486 else 509 else
@@ -494,25 +517,20 @@ static void iucv_sock_close(struct sock *sk)
494 sk->sk_state = IUCV_CLOSED; 517 sk->sk_state = IUCV_CLOSED;
495 sk->sk_state_change(sk); 518 sk->sk_state_change(sk);
496 519
497 if (iucv->path) {
498 low_nmcpy(user_data, iucv->src_name);
499 high_nmcpy(user_data, iucv->dst_name);
500 ASCEBC(user_data, sizeof(user_data));
501 pr_iucv->path_sever(iucv->path, user_data);
502 iucv_path_free(iucv->path);
503 iucv->path = NULL;
504 }
505
506 sk->sk_err = ECONNRESET; 520 sk->sk_err = ECONNRESET;
507 sk->sk_state_change(sk); 521 sk->sk_state_change(sk);
508 522
509 iucv_skb_queue_purge(&iucv->send_skb_q); 523 skb_queue_purge(&iucv->send_skb_q);
510 skb_queue_purge(&iucv->backlog_skb_q); 524 skb_queue_purge(&iucv->backlog_skb_q);
511 break;
512 525
513 default: 526 default: /* fall through */
514 /* nothing to do here */ 527 iucv_sever_path(sk, 1);
515 break; 528 }
529
530 if (iucv->hs_dev) {
531 dev_put(iucv->hs_dev);
532 iucv->hs_dev = NULL;
533 sk->sk_bound_dev_if = 0;
516 } 534 }
517 535
518 /* mark socket for deletion by iucv_sock_kill() */ 536 /* mark socket for deletion by iucv_sock_kill() */
@@ -706,7 +724,6 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
706 goto done_unlock; 724 goto done_unlock;
707 725
708 /* Bind the socket */ 726 /* Bind the socket */
709
710 if (pr_iucv) 727 if (pr_iucv)
711 if (!memcmp(sa->siucv_user_id, iucv_userid, 8)) 728 if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
712 goto vm_bind; /* VM IUCV transport */ 729 goto vm_bind; /* VM IUCV transport */
@@ -720,6 +737,8 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
720 memcpy(iucv->src_name, sa->siucv_name, 8); 737 memcpy(iucv->src_name, sa->siucv_name, 8);
721 memcpy(iucv->src_user_id, sa->siucv_user_id, 8); 738 memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
722 sk->sk_bound_dev_if = dev->ifindex; 739 sk->sk_bound_dev_if = dev->ifindex;
740 iucv->hs_dev = dev;
741 dev_hold(dev);
723 sk->sk_state = IUCV_BOUND; 742 sk->sk_state = IUCV_BOUND;
724 iucv->transport = AF_IUCV_TRANS_HIPER; 743 iucv->transport = AF_IUCV_TRANS_HIPER;
725 if (!iucv->msglimit) 744 if (!iucv->msglimit)
@@ -780,26 +799,6 @@ static int iucv_sock_autobind(struct sock *sk)
780 return err; 799 return err;
781} 800}
782 801
783static int afiucv_hs_connect(struct socket *sock)
784{
785 struct sock *sk = sock->sk;
786 struct sk_buff *skb;
787 int blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
788 int err = 0;
789
790 /* send syn */
791 skb = sock_alloc_send_skb(sk, blen, 1, &err);
792 if (!skb) {
793 err = -ENOMEM;
794 goto done;
795 }
796 skb->dev = NULL;
797 skb_reserve(skb, blen);
798 err = afiucv_hs_send(NULL, sk, skb, AF_IUCV_FLAG_SYN);
799done:
800 return err;
801}
802
803static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr) 802static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
804{ 803{
805 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr; 804 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
@@ -880,7 +879,7 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
880 memcpy(iucv->dst_name, sa->siucv_name, 8); 879 memcpy(iucv->dst_name, sa->siucv_name, 8);
881 880
882 if (iucv->transport == AF_IUCV_TRANS_HIPER) 881 if (iucv->transport == AF_IUCV_TRANS_HIPER)
883 err = afiucv_hs_connect(sock); 882 err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
884 else 883 else
885 err = afiucv_path_connect(sock, addr); 884 err = afiucv_path_connect(sock, addr);
886 if (err) 885 if (err)
@@ -894,11 +893,8 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
894 if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED) 893 if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
895 err = -ECONNREFUSED; 894 err = -ECONNREFUSED;
896 895
897 if (err && iucv->transport == AF_IUCV_TRANS_IUCV) { 896 if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
898 pr_iucv->path_sever(iucv->path, NULL); 897 iucv_sever_path(sk, 0);
899 iucv_path_free(iucv->path);
900 iucv->path = NULL;
901 }
902 898
903done: 899done:
904 release_sock(sk); 900 release_sock(sk);
@@ -1124,8 +1120,10 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
1124 noblock, &err); 1120 noblock, &err);
1125 else 1121 else
1126 skb = sock_alloc_send_skb(sk, len, noblock, &err); 1122 skb = sock_alloc_send_skb(sk, len, noblock, &err);
1127 if (!skb) 1123 if (!skb) {
1124 err = -ENOMEM;
1128 goto out; 1125 goto out;
1126 }
1129 if (iucv->transport == AF_IUCV_TRANS_HIPER) 1127 if (iucv->transport == AF_IUCV_TRANS_HIPER)
1130 skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN); 1128 skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
1131 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { 1129 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
@@ -1148,6 +1146,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
1148 /* increment and save iucv message tag for msg_completion cbk */ 1146 /* increment and save iucv message tag for msg_completion cbk */
1149 txmsg.tag = iucv->send_tag++; 1147 txmsg.tag = iucv->send_tag++;
1150 memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN); 1148 memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
1149
1151 if (iucv->transport == AF_IUCV_TRANS_HIPER) { 1150 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1152 atomic_inc(&iucv->msg_sent); 1151 atomic_inc(&iucv->msg_sent);
1153 err = afiucv_hs_send(&txmsg, sk, skb, 0); 1152 err = afiucv_hs_send(&txmsg, sk, skb, 0);
@@ -1202,8 +1201,6 @@ release:
1202 return len; 1201 return len;
1203 1202
1204fail: 1203fail:
1205 if (skb->dev)
1206 dev_put(skb->dev);
1207 kfree_skb(skb); 1204 kfree_skb(skb);
1208out: 1205out:
1209 release_sock(sk); 1206 release_sock(sk);
@@ -1332,8 +1329,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1332 struct sock *sk = sock->sk; 1329 struct sock *sk = sock->sk;
1333 struct iucv_sock *iucv = iucv_sk(sk); 1330 struct iucv_sock *iucv = iucv_sk(sk);
1334 unsigned int copied, rlen; 1331 unsigned int copied, rlen;
1335 struct sk_buff *skb, *rskb, *cskb, *sskb; 1332 struct sk_buff *skb, *rskb, *cskb;
1336 int blen;
1337 int err = 0; 1333 int err = 0;
1338 1334
1339 if ((sk->sk_state == IUCV_DISCONN) && 1335 if ((sk->sk_state == IUCV_DISCONN) &&
@@ -1356,6 +1352,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1356 1352
1357 rlen = skb->len; /* real length of skb */ 1353 rlen = skb->len; /* real length of skb */
1358 copied = min_t(unsigned int, rlen, len); 1354 copied = min_t(unsigned int, rlen, len);
1355 if (!rlen)
1356 sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
1359 1357
1360 cskb = skb; 1358 cskb = skb;
1361 if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) { 1359 if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
@@ -1396,7 +1394,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1396 } 1394 }
1397 1395
1398 kfree_skb(skb); 1396 kfree_skb(skb);
1399 atomic_inc(&iucv->msg_recv); 1397 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1398 atomic_inc(&iucv->msg_recv);
1399 if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
1400 WARN_ON(1);
1401 iucv_sock_close(sk);
1402 return -EFAULT;
1403 }
1404 }
1400 1405
1401 /* Queue backlog skbs */ 1406 /* Queue backlog skbs */
1402 spin_lock_bh(&iucv->message_q.lock); 1407 spin_lock_bh(&iucv->message_q.lock);
@@ -1415,15 +1420,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1415 iucv_process_message_q(sk); 1420 iucv_process_message_q(sk);
1416 if (atomic_read(&iucv->msg_recv) >= 1421 if (atomic_read(&iucv->msg_recv) >=
1417 iucv->msglimit / 2) { 1422 iucv->msglimit / 2) {
1418 /* send WIN to peer */ 1423 err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
1419 blen = sizeof(struct af_iucv_trans_hdr) +
1420 ETH_HLEN;
1421 sskb = sock_alloc_send_skb(sk, blen, 1, &err);
1422 if (sskb) {
1423 skb_reserve(sskb, blen);
1424 err = afiucv_hs_send(NULL, sk, sskb,
1425 AF_IUCV_FLAG_WIN);
1426 }
1427 if (err) { 1424 if (err) {
1428 sk->sk_state = IUCV_DISCONN; 1425 sk->sk_state = IUCV_DISCONN;
1429 sk->sk_state_change(sk); 1426 sk->sk_state_change(sk);
@@ -1486,7 +1483,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
1486 if (sk->sk_state == IUCV_DISCONN) 1483 if (sk->sk_state == IUCV_DISCONN)
1487 mask |= POLLIN; 1484 mask |= POLLIN;
1488 1485
1489 if (sock_writeable(sk)) 1486 if (sock_writeable(sk) && iucv_below_msglim(sk))
1490 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 1487 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
1491 else 1488 else
1492 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 1489 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
@@ -1508,42 +1505,47 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
1508 1505
1509 lock_sock(sk); 1506 lock_sock(sk);
1510 switch (sk->sk_state) { 1507 switch (sk->sk_state) {
1508 case IUCV_LISTEN:
1511 case IUCV_DISCONN: 1509 case IUCV_DISCONN:
1512 case IUCV_CLOSING: 1510 case IUCV_CLOSING:
1513 case IUCV_CLOSED: 1511 case IUCV_CLOSED:
1514 err = -ENOTCONN; 1512 err = -ENOTCONN;
1515 goto fail; 1513 goto fail;
1516
1517 default: 1514 default:
1518 sk->sk_shutdown |= how;
1519 break; 1515 break;
1520 } 1516 }
1521 1517
1522 if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) { 1518 if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
1523 txmsg.class = 0; 1519 if (iucv->transport == AF_IUCV_TRANS_IUCV) {
1524 txmsg.tag = 0; 1520 txmsg.class = 0;
1525 err = pr_iucv->message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 1521 txmsg.tag = 0;
1526 0, (void *) iprm_shutdown, 8); 1522 err = pr_iucv->message_send(iucv->path, &txmsg,
1527 if (err) { 1523 IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
1528 switch (err) { 1524 if (err) {
1529 case 1: 1525 switch (err) {
1530 err = -ENOTCONN; 1526 case 1:
1531 break; 1527 err = -ENOTCONN;
1532 case 2: 1528 break;
1533 err = -ECONNRESET; 1529 case 2:
1534 break; 1530 err = -ECONNRESET;
1535 default: 1531 break;
1536 err = -ENOTCONN; 1532 default:
1537 break; 1533 err = -ENOTCONN;
1534 break;
1535 }
1538 } 1536 }
1539 } 1537 } else
1538 iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
1540 } 1539 }
1541 1540
1541 sk->sk_shutdown |= how;
1542 if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) { 1542 if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
1543 err = pr_iucv->path_quiesce(iucv->path, NULL); 1543 if (iucv->transport == AF_IUCV_TRANS_IUCV) {
1544 if (err) 1544 err = pr_iucv->path_quiesce(iucv->path, NULL);
1545 err = -ENOTCONN; 1545 if (err)
1546 1546 err = -ENOTCONN;
1547/* skb_queue_purge(&sk->sk_receive_queue); */
1548 }
1547 skb_queue_purge(&sk->sk_receive_queue); 1549 skb_queue_purge(&sk->sk_receive_queue);
1548 } 1550 }
1549 1551
@@ -1565,13 +1567,6 @@ static int iucv_sock_release(struct socket *sock)
1565 1567
1566 iucv_sock_close(sk); 1568 iucv_sock_close(sk);
1567 1569
1568 /* Unregister with IUCV base support */
1569 if (iucv_sk(sk)->path) {
1570 pr_iucv->path_sever(iucv_sk(sk)->path, NULL);
1571 iucv_path_free(iucv_sk(sk)->path);
1572 iucv_sk(sk)->path = NULL;
1573 }
1574
1575 sock_orphan(sk); 1570 sock_orphan(sk);
1576 iucv_sock_kill(sk); 1571 iucv_sock_kill(sk);
1577 return err; 1572 return err;
@@ -1633,7 +1628,8 @@ static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
1633{ 1628{
1634 struct sock *sk = sock->sk; 1629 struct sock *sk = sock->sk;
1635 struct iucv_sock *iucv = iucv_sk(sk); 1630 struct iucv_sock *iucv = iucv_sk(sk);
1636 int val, len; 1631 unsigned int val;
1632 int len;
1637 1633
1638 if (level != SOL_IUCV) 1634 if (level != SOL_IUCV)
1639 return -ENOPROTOOPT; 1635 return -ENOPROTOOPT;
@@ -1656,6 +1652,13 @@ static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
1656 : iucv->msglimit; /* default */ 1652 : iucv->msglimit; /* default */
1657 release_sock(sk); 1653 release_sock(sk);
1658 break; 1654 break;
1655 case SO_MSGSIZE:
1656 if (sk->sk_state == IUCV_OPEN)
1657 return -EBADFD;
1658 val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
1659 sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
1660 0x7fffffff;
1661 break;
1659 default: 1662 default:
1660 return -ENOPROTOOPT; 1663 return -ENOPROTOOPT;
1661 } 1664 }
@@ -1750,8 +1753,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
1750 path->msglim = iucv->msglimit; 1753 path->msglim = iucv->msglimit;
1751 err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk); 1754 err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
1752 if (err) { 1755 if (err) {
1753 err = pr_iucv->path_sever(path, user_data); 1756 iucv_sever_path(nsk, 1);
1754 iucv_path_free(path);
1755 iucv_sock_kill(nsk); 1757 iucv_sock_kill(nsk);
1756 goto fail; 1758 goto fail;
1757 } 1759 }
@@ -1828,6 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
1828 struct sk_buff *list_skb = list->next; 1830 struct sk_buff *list_skb = list->next;
1829 unsigned long flags; 1831 unsigned long flags;
1830 1832
1833 bh_lock_sock(sk);
1831 if (!skb_queue_empty(list)) { 1834 if (!skb_queue_empty(list)) {
1832 spin_lock_irqsave(&list->lock, flags); 1835 spin_lock_irqsave(&list->lock, flags);
1833 1836
@@ -1849,7 +1852,6 @@ static void iucv_callback_txdone(struct iucv_path *path,
1849 iucv_sock_wake_msglim(sk); 1852 iucv_sock_wake_msglim(sk);
1850 } 1853 }
1851 } 1854 }
1852 BUG_ON(!this);
1853 1855
1854 if (sk->sk_state == IUCV_CLOSING) { 1856 if (sk->sk_state == IUCV_CLOSING) {
1855 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { 1857 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
@@ -1857,6 +1859,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
1857 sk->sk_state_change(sk); 1859 sk->sk_state_change(sk);
1858 } 1860 }
1859 } 1861 }
1862 bh_unlock_sock(sk);
1860 1863
1861} 1864}
1862 1865
@@ -1864,9 +1867,15 @@ static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
1864{ 1867{
1865 struct sock *sk = path->private; 1868 struct sock *sk = path->private;
1866 1869
1870 if (sk->sk_state == IUCV_CLOSED)
1871 return;
1872
1873 bh_lock_sock(sk);
1874 iucv_sever_path(sk, 1);
1867 sk->sk_state = IUCV_DISCONN; 1875 sk->sk_state = IUCV_DISCONN;
1868 1876
1869 sk->sk_state_change(sk); 1877 sk->sk_state_change(sk);
1878 bh_unlock_sock(sk);
1870} 1879}
1871 1880
1872/* called if the other communication side shuts down its RECV direction; 1881/* called if the other communication side shuts down its RECV direction;
@@ -1954,6 +1963,8 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
1954 memcpy(niucv->src_name, iucv->src_name, 8); 1963 memcpy(niucv->src_name, iucv->src_name, 8);
1955 memcpy(niucv->src_user_id, iucv->src_user_id, 8); 1964 memcpy(niucv->src_user_id, iucv->src_user_id, 8);
1956 nsk->sk_bound_dev_if = sk->sk_bound_dev_if; 1965 nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
1966 niucv->hs_dev = iucv->hs_dev;
1967 dev_hold(niucv->hs_dev);
1957 afiucv_swap_src_dest(skb); 1968 afiucv_swap_src_dest(skb);
1958 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK; 1969 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
1959 trans_hdr->window = niucv->msglimit; 1970 trans_hdr->window = niucv->msglimit;
@@ -2022,12 +2033,15 @@ static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
2022 struct iucv_sock *iucv = iucv_sk(sk); 2033 struct iucv_sock *iucv = iucv_sk(sk);
2023 2034
2024 /* other end of connection closed */ 2035 /* other end of connection closed */
2025 if (iucv) { 2036 if (!iucv)
2026 bh_lock_sock(sk); 2037 goto out;
2038 bh_lock_sock(sk);
2039 if (sk->sk_state == IUCV_CONNECTED) {
2027 sk->sk_state = IUCV_DISCONN; 2040 sk->sk_state = IUCV_DISCONN;
2028 sk->sk_state_change(sk); 2041 sk->sk_state_change(sk);
2029 bh_unlock_sock(sk);
2030 } 2042 }
2043 bh_unlock_sock(sk);
2044out:
2031 kfree_skb(skb); 2045 kfree_skb(skb);
2032 return NET_RX_SUCCESS; 2046 return NET_RX_SUCCESS;
2033} 2047}
@@ -2069,8 +2083,13 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
2069 return NET_RX_SUCCESS; 2083 return NET_RX_SUCCESS;
2070 } 2084 }
2071 2085
2086 if (sk->sk_shutdown & RCV_SHUTDOWN) {
2087 kfree_skb(skb);
2088 return NET_RX_SUCCESS;
2089 }
2090
2072 /* write stuff from iucv_msg to skb cb */ 2091 /* write stuff from iucv_msg to skb cb */
2073 if (skb->len <= sizeof(struct af_iucv_trans_hdr)) { 2092 if (skb->len < sizeof(struct af_iucv_trans_hdr)) {
2074 kfree_skb(skb); 2093 kfree_skb(skb);
2075 return NET_RX_SUCCESS; 2094 return NET_RX_SUCCESS;
2076 } 2095 }
@@ -2172,11 +2191,14 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
2172 break; 2191 break;
2173 case (AF_IUCV_FLAG_WIN): 2192 case (AF_IUCV_FLAG_WIN):
2174 err = afiucv_hs_callback_win(sk, skb); 2193 err = afiucv_hs_callback_win(sk, skb);
2175 if (skb->len > sizeof(struct af_iucv_trans_hdr)) 2194 if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
2176 err = afiucv_hs_callback_rx(sk, skb); 2195 kfree_skb(skb);
2177 else 2196 break;
2178 kfree(skb); 2197 }
2179 break; 2198 /* fall through and receive non-zero length data */
2199 case (AF_IUCV_FLAG_SHT):
2200 /* shutdown request */
2201 /* fall through and receive zero length data */
2180 case 0: 2202 case 0:
2181 /* plain data frame */ 2203 /* plain data frame */
2182 memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class, 2204 memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
@@ -2202,65 +2224,64 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
2202 struct iucv_sock *iucv = NULL; 2224 struct iucv_sock *iucv = NULL;
2203 struct sk_buff_head *list; 2225 struct sk_buff_head *list;
2204 struct sk_buff *list_skb; 2226 struct sk_buff *list_skb;
2205 struct sk_buff *this = NULL; 2227 struct sk_buff *nskb;
2206 unsigned long flags; 2228 unsigned long flags;
2207 struct hlist_node *node; 2229 struct hlist_node *node;
2208 2230
2209 read_lock(&iucv_sk_list.lock); 2231 read_lock_irqsave(&iucv_sk_list.lock, flags);
2210 sk_for_each(sk, node, &iucv_sk_list.head) 2232 sk_for_each(sk, node, &iucv_sk_list.head)
2211 if (sk == isk) { 2233 if (sk == isk) {
2212 iucv = iucv_sk(sk); 2234 iucv = iucv_sk(sk);
2213 break; 2235 break;
2214 } 2236 }
2215 read_unlock(&iucv_sk_list.lock); 2237 read_unlock_irqrestore(&iucv_sk_list.lock, flags);
2216 2238
2217 if (!iucv) 2239 if (!iucv || sock_flag(sk, SOCK_ZAPPED))
2218 return; 2240 return;
2219 2241
2220 bh_lock_sock(sk);
2221 list = &iucv->send_skb_q; 2242 list = &iucv->send_skb_q;
2222 list_skb = list->next; 2243 spin_lock_irqsave(&list->lock, flags);
2223 if (skb_queue_empty(list)) 2244 if (skb_queue_empty(list))
2224 goto out_unlock; 2245 goto out_unlock;
2225 2246 list_skb = list->next;
2226 spin_lock_irqsave(&list->lock, flags); 2247 nskb = list_skb->next;
2227 while (list_skb != (struct sk_buff *)list) { 2248 while (list_skb != (struct sk_buff *)list) {
2228 if (skb_shinfo(list_skb) == skb_shinfo(skb)) { 2249 if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
2229 this = list_skb;
2230 switch (n) { 2250 switch (n) {
2231 case TX_NOTIFY_OK: 2251 case TX_NOTIFY_OK:
2232 __skb_unlink(this, list); 2252 __skb_unlink(list_skb, list);
2253 kfree_skb(list_skb);
2233 iucv_sock_wake_msglim(sk); 2254 iucv_sock_wake_msglim(sk);
2234 dev_put(this->dev);
2235 kfree_skb(this);
2236 break; 2255 break;
2237 case TX_NOTIFY_PENDING: 2256 case TX_NOTIFY_PENDING:
2238 atomic_inc(&iucv->pendings); 2257 atomic_inc(&iucv->pendings);
2239 break; 2258 break;
2240 case TX_NOTIFY_DELAYED_OK: 2259 case TX_NOTIFY_DELAYED_OK:
2241 __skb_unlink(this, list); 2260 __skb_unlink(list_skb, list);
2242 atomic_dec(&iucv->pendings); 2261 atomic_dec(&iucv->pendings);
2243 if (atomic_read(&iucv->pendings) <= 0) 2262 if (atomic_read(&iucv->pendings) <= 0)
2244 iucv_sock_wake_msglim(sk); 2263 iucv_sock_wake_msglim(sk);
2245 dev_put(this->dev); 2264 kfree_skb(list_skb);
2246 kfree_skb(this);
2247 break; 2265 break;
2248 case TX_NOTIFY_UNREACHABLE: 2266 case TX_NOTIFY_UNREACHABLE:
2249 case TX_NOTIFY_DELAYED_UNREACHABLE: 2267 case TX_NOTIFY_DELAYED_UNREACHABLE:
2250 case TX_NOTIFY_TPQFULL: /* not yet used */ 2268 case TX_NOTIFY_TPQFULL: /* not yet used */
2251 case TX_NOTIFY_GENERALERROR: 2269 case TX_NOTIFY_GENERALERROR:
2252 case TX_NOTIFY_DELAYED_GENERALERROR: 2270 case TX_NOTIFY_DELAYED_GENERALERROR:
2253 __skb_unlink(this, list); 2271 __skb_unlink(list_skb, list);
2254 dev_put(this->dev); 2272 kfree_skb(list_skb);
2255 kfree_skb(this); 2273 if (sk->sk_state == IUCV_CONNECTED) {
2256 sk->sk_state = IUCV_DISCONN; 2274 sk->sk_state = IUCV_DISCONN;
2257 sk->sk_state_change(sk); 2275 sk->sk_state_change(sk);
2276 }
2258 break; 2277 break;
2259 } 2278 }
2260 break; 2279 break;
2261 } 2280 }
2262 list_skb = list_skb->next; 2281 list_skb = nskb;
2282 nskb = nskb->next;
2263 } 2283 }
2284out_unlock:
2264 spin_unlock_irqrestore(&list->lock, flags); 2285 spin_unlock_irqrestore(&list->lock, flags);
2265 2286
2266 if (sk->sk_state == IUCV_CLOSING) { 2287 if (sk->sk_state == IUCV_CLOSING) {
@@ -2270,9 +2291,45 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
2270 } 2291 }
2271 } 2292 }
2272 2293
2273out_unlock:
2274 bh_unlock_sock(sk);
2275} 2294}
2295
2296/*
2297 * afiucv_netdev_event: handle netdev notifier chain events
2298 */
2299static int afiucv_netdev_event(struct notifier_block *this,
2300 unsigned long event, void *ptr)
2301{
2302 struct net_device *event_dev = (struct net_device *)ptr;
2303 struct hlist_node *node;
2304 struct sock *sk;
2305 struct iucv_sock *iucv;
2306
2307 switch (event) {
2308 case NETDEV_REBOOT:
2309 case NETDEV_GOING_DOWN:
2310 sk_for_each(sk, node, &iucv_sk_list.head) {
2311 iucv = iucv_sk(sk);
2312 if ((iucv->hs_dev == event_dev) &&
2313 (sk->sk_state == IUCV_CONNECTED)) {
2314 if (event == NETDEV_GOING_DOWN)
2315 iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
2316 sk->sk_state = IUCV_DISCONN;
2317 sk->sk_state_change(sk);
2318 }
2319 }
2320 break;
2321 case NETDEV_DOWN:
2322 case NETDEV_UNREGISTER:
2323 default:
2324 break;
2325 }
2326 return NOTIFY_DONE;
2327}
2328
2329static struct notifier_block afiucv_netdev_notifier = {
2330 .notifier_call = afiucv_netdev_event,
2331};
2332
2276static const struct proto_ops iucv_sock_ops = { 2333static const struct proto_ops iucv_sock_ops = {
2277 .family = PF_IUCV, 2334 .family = PF_IUCV,
2278 .owner = THIS_MODULE, 2335 .owner = THIS_MODULE,
@@ -2372,7 +2429,8 @@ static int __init afiucv_init(void)
2372 err = afiucv_iucv_init(); 2429 err = afiucv_iucv_init();
2373 if (err) 2430 if (err)
2374 goto out_sock; 2431 goto out_sock;
2375 } 2432 } else
2433 register_netdevice_notifier(&afiucv_netdev_notifier);
2376 dev_add_pack(&iucv_packet_type); 2434 dev_add_pack(&iucv_packet_type);
2377 return 0; 2435 return 0;
2378 2436
@@ -2393,7 +2451,8 @@ static void __exit afiucv_exit(void)
2393 driver_unregister(&af_iucv_driver); 2451 driver_unregister(&af_iucv_driver);
2394 pr_iucv->iucv_unregister(&af_iucv_handler, 0); 2452 pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2395 symbol_put(iucv_if); 2453 symbol_put(iucv_if);
2396 } 2454 } else
2455 unregister_netdevice_notifier(&afiucv_netdev_notifier);
2397 dev_remove_pack(&iucv_packet_type); 2456 dev_remove_pack(&iucv_packet_type);
2398 sock_unregister(PF_IUCV); 2457 sock_unregister(PF_IUCV);
2399 proto_unregister(&iucv_proto); 2458 proto_unregister(&iucv_proto);
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index d2726a74597d..63fe5f353f04 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -64,7 +64,7 @@ static int l2tp_eth_dev_init(struct net_device *dev)
64 struct l2tp_eth *priv = netdev_priv(dev); 64 struct l2tp_eth *priv = netdev_priv(dev);
65 65
66 priv->dev = dev; 66 priv->dev = dev;
67 random_ether_addr(dev->dev_addr); 67 eth_hw_addr_random(dev);
68 memset(&dev->broadcast[0], 0xff, 6); 68 memset(&dev->broadcast[0], 0xff, 6);
69 69
70 return 0; 70 return 0;
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 8a90d756c904..9b071910b4ba 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -82,7 +82,7 @@
82#include <net/sock.h> 82#include <net/sock.h>
83#include <linux/ppp_channel.h> 83#include <linux/ppp_channel.h>
84#include <linux/ppp_defs.h> 84#include <linux/ppp_defs.h>
85#include <linux/if_ppp.h> 85#include <linux/ppp-ioctl.h>
86#include <linux/file.h> 86#include <linux/file.h>
87#include <linux/hash.h> 87#include <linux/hash.h>
88#include <linux/sort.h> 88#include <linux/sort.h>
@@ -915,7 +915,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
915 goto end_put_sess; 915 goto end_put_sess;
916 } 916 }
917 917
918 inet = inet_sk(sk); 918 inet = inet_sk(tunnel->sock);
919 if (tunnel->version == 2) { 919 if (tunnel->version == 2) {
920 struct sockaddr_pppol2tp sp; 920 struct sockaddr_pppol2tp sp;
921 len = sizeof(sp); 921 len = sizeof(sp);
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index d540c3b160f3..1be7a454aa77 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -9,7 +9,7 @@ mac80211-y := \
9 scan.o offchannel.o \ 9 scan.o offchannel.o \
10 ht.o agg-tx.o agg-rx.o \ 10 ht.o agg-tx.o agg-rx.o \
11 ibss.o \ 11 ibss.o \
12 mlme.o work.o \ 12 work.o \
13 iface.o \ 13 iface.o \
14 rate.o \ 14 rate.o \
15 michael.o \ 15 michael.o \
@@ -25,7 +25,7 @@ mac80211-y := \
25 wme.o \ 25 wme.o \
26 event.o \ 26 event.o \
27 chan.o \ 27 chan.o \
28 driver-trace.o 28 driver-trace.o mlme.o
29 29
30mac80211-$(CONFIG_MAC80211_LEDS) += led.o 30mac80211-$(CONFIG_MAC80211_LEDS) += led.o
31mac80211-$(CONFIG_MAC80211_DEBUGFS) += \ 31mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 296620d6ca0c..677d65929780 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -336,6 +336,20 @@ static void rate_idx_to_bitrate(struct rate_info *rate, struct sta_info *sta, in
336 rate->mcs = idx; 336 rate->mcs = idx;
337} 337}
338 338
339void sta_set_rate_info_tx(struct sta_info *sta,
340 const struct ieee80211_tx_rate *rate,
341 struct rate_info *rinfo)
342{
343 rinfo->flags = 0;
344 if (rate->flags & IEEE80211_TX_RC_MCS)
345 rinfo->flags |= RATE_INFO_FLAGS_MCS;
346 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
347 rinfo->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
348 if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
349 rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
350 rate_idx_to_bitrate(rinfo, sta, rate->idx);
351}
352
339static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) 353static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
340{ 354{
341 struct ieee80211_sub_if_data *sdata = sta->sdata; 355 struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -378,14 +392,7 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
378 sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal); 392 sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal);
379 } 393 }
380 394
381 sinfo->txrate.flags = 0; 395 sta_set_rate_info_tx(sta, &sta->last_tx_rate, &sinfo->txrate);
382 if (sta->last_tx_rate.flags & IEEE80211_TX_RC_MCS)
383 sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
384 if (sta->last_tx_rate.flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
385 sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
386 if (sta->last_tx_rate.flags & IEEE80211_TX_RC_SHORT_GI)
387 sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
388 rate_idx_to_bitrate(&sinfo->txrate, sta, sta->last_tx_rate.idx);
389 396
390 sinfo->rxrate.flags = 0; 397 sinfo->rxrate.flags = 0;
391 if (sta->last_rx_rate_flag & RX_FLAG_HT) 398 if (sta->last_rx_rate_flag & RX_FLAG_HT)
@@ -489,27 +496,13 @@ static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
489 return ret; 496 return ret;
490} 497}
491 498
492static void ieee80211_config_ap_ssid(struct ieee80211_sub_if_data *sdata,
493 struct beacon_parameters *params)
494{
495 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
496
497 bss_conf->ssid_len = params->ssid_len;
498
499 if (params->ssid_len)
500 memcpy(bss_conf->ssid, params->ssid, params->ssid_len);
501
502 bss_conf->hidden_ssid =
503 (params->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE);
504}
505
506static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata, 499static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
507 u8 *resp, size_t resp_len) 500 const u8 *resp, size_t resp_len)
508{ 501{
509 struct sk_buff *new, *old; 502 struct sk_buff *new, *old;
510 503
511 if (!resp || !resp_len) 504 if (!resp || !resp_len)
512 return -EINVAL; 505 return 1;
513 506
514 old = rtnl_dereference(sdata->u.ap.probe_resp); 507 old = rtnl_dereference(sdata->u.ap.probe_resp);
515 508
@@ -520,50 +513,28 @@ static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
520 memcpy(skb_put(new, resp_len), resp, resp_len); 513 memcpy(skb_put(new, resp_len), resp, resp_len);
521 514
522 rcu_assign_pointer(sdata->u.ap.probe_resp, new); 515 rcu_assign_pointer(sdata->u.ap.probe_resp, new);
523 synchronize_rcu(); 516 if (old) {
524 517 /* TODO: use call_rcu() */
525 if (old) 518 synchronize_rcu();
526 dev_kfree_skb(old); 519 dev_kfree_skb(old);
520 }
527 521
528 return 0; 522 return 0;
529} 523}
530 524
531/* 525static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
532 * This handles both adding a beacon and setting new beacon info 526 struct cfg80211_beacon_data *params)
533 */
534static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
535 struct beacon_parameters *params)
536{ 527{
537 struct beacon_data *new, *old; 528 struct beacon_data *new, *old;
538 int new_head_len, new_tail_len; 529 int new_head_len, new_tail_len;
539 int size; 530 int size, err;
540 int err = -EINVAL; 531 u32 changed = BSS_CHANGED_BEACON;
541 u32 changed = 0;
542 532
543 old = rtnl_dereference(sdata->u.ap.beacon); 533 old = rtnl_dereference(sdata->u.ap.beacon);
544 534
545 /* head must not be zero-length */
546 if (params->head && !params->head_len)
547 return -EINVAL;
548
549 /*
550 * This is a kludge. beacon interval should really be part
551 * of the beacon information.
552 */
553 if (params->interval &&
554 (sdata->vif.bss_conf.beacon_int != params->interval)) {
555 sdata->vif.bss_conf.beacon_int = params->interval;
556 ieee80211_bss_info_change_notify(sdata,
557 BSS_CHANGED_BEACON_INT);
558 }
559
560 /* Need to have a beacon head if we don't have one yet */ 535 /* Need to have a beacon head if we don't have one yet */
561 if (!params->head && !old) 536 if (!params->head && !old)
562 return err; 537 return -EINVAL;
563
564 /* sorry, no way to start beaconing without dtim period */
565 if (!params->dtim_period && !old)
566 return err;
567 538
568 /* new or old head? */ 539 /* new or old head? */
569 if (params->head) 540 if (params->head)
@@ -586,12 +557,6 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
586 557
587 /* start filling the new info now */ 558 /* start filling the new info now */
588 559
589 /* new or old dtim period? */
590 if (params->dtim_period)
591 new->dtim_period = params->dtim_period;
592 else
593 new->dtim_period = old->dtim_period;
594
595 /* 560 /*
596 * pointers go into the block we allocated, 561 * pointers go into the block we allocated,
597 * memory is | beacon_data | head | tail | 562 * memory is | beacon_data | head | tail |
@@ -614,46 +579,37 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
614 if (old) 579 if (old)
615 memcpy(new->tail, old->tail, new_tail_len); 580 memcpy(new->tail, old->tail, new_tail_len);
616 581
617 sdata->vif.bss_conf.dtim_period = new->dtim_period;
618
619 rcu_assign_pointer(sdata->u.ap.beacon, new);
620
621 synchronize_rcu();
622
623 kfree(old);
624
625 err = ieee80211_set_probe_resp(sdata, params->probe_resp, 582 err = ieee80211_set_probe_resp(sdata, params->probe_resp,
626 params->probe_resp_len); 583 params->probe_resp_len);
627 if (!err) 584 if (err < 0)
585 return err;
586 if (err == 0)
628 changed |= BSS_CHANGED_AP_PROBE_RESP; 587 changed |= BSS_CHANGED_AP_PROBE_RESP;
629 588
630 ieee80211_config_ap_ssid(sdata, params); 589 rcu_assign_pointer(sdata->u.ap.beacon, new);
631 changed |= BSS_CHANGED_BEACON_ENABLED |
632 BSS_CHANGED_BEACON |
633 BSS_CHANGED_SSID;
634 590
635 ieee80211_bss_info_change_notify(sdata, changed); 591 if (old)
636 return 0; 592 kfree_rcu(old, rcu_head);
593
594 return changed;
637} 595}
638 596
639static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev, 597static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
640 struct beacon_parameters *params) 598 struct cfg80211_ap_settings *params)
641{ 599{
642 struct ieee80211_sub_if_data *sdata; 600 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
643 struct beacon_data *old; 601 struct beacon_data *old;
644 struct ieee80211_sub_if_data *vlan; 602 struct ieee80211_sub_if_data *vlan;
645 int ret; 603 u32 changed = BSS_CHANGED_BEACON_INT |
646 604 BSS_CHANGED_BEACON_ENABLED |
647 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 605 BSS_CHANGED_BEACON |
606 BSS_CHANGED_SSID;
607 int err;
648 608
649 old = rtnl_dereference(sdata->u.ap.beacon); 609 old = rtnl_dereference(sdata->u.ap.beacon);
650 if (old) 610 if (old)
651 return -EALREADY; 611 return -EALREADY;
652 612
653 ret = ieee80211_config_beacon(sdata, params);
654 if (ret)
655 return ret;
656
657 /* 613 /*
658 * Apply control port protocol, this allows us to 614 * Apply control port protocol, this allows us to
659 * not encrypt dynamic WEP control frames. 615 * not encrypt dynamic WEP control frames.
@@ -667,14 +623,32 @@ static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev,
667 params->crypto.control_port_no_encrypt; 623 params->crypto.control_port_no_encrypt;
668 } 624 }
669 625
626 sdata->vif.bss_conf.beacon_int = params->beacon_interval;
627 sdata->vif.bss_conf.dtim_period = params->dtim_period;
628
629 sdata->vif.bss_conf.ssid_len = params->ssid_len;
630 if (params->ssid_len)
631 memcpy(sdata->vif.bss_conf.ssid, params->ssid,
632 params->ssid_len);
633 sdata->vif.bss_conf.hidden_ssid =
634 (params->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE);
635
636 err = ieee80211_assign_beacon(sdata, &params->beacon);
637 if (err < 0)
638 return err;
639 changed |= err;
640
641 ieee80211_bss_info_change_notify(sdata, changed);
642
670 return 0; 643 return 0;
671} 644}
672 645
673static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev, 646static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
674 struct beacon_parameters *params) 647 struct cfg80211_beacon_data *params)
675{ 648{
676 struct ieee80211_sub_if_data *sdata; 649 struct ieee80211_sub_if_data *sdata;
677 struct beacon_data *old; 650 struct beacon_data *old;
651 int err;
678 652
679 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 653 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
680 654
@@ -682,10 +656,14 @@ static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev,
682 if (!old) 656 if (!old)
683 return -ENOENT; 657 return -ENOENT;
684 658
685 return ieee80211_config_beacon(sdata, params); 659 err = ieee80211_assign_beacon(sdata, params);
660 if (err < 0)
661 return err;
662 ieee80211_bss_info_change_notify(sdata, err);
663 return 0;
686} 664}
687 665
688static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev) 666static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
689{ 667{
690 struct ieee80211_sub_if_data *sdata; 668 struct ieee80211_sub_if_data *sdata;
691 struct beacon_data *old; 669 struct beacon_data *old;
@@ -697,10 +675,11 @@ static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev)
697 return -ENOENT; 675 return -ENOENT;
698 676
699 RCU_INIT_POINTER(sdata->u.ap.beacon, NULL); 677 RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
700 synchronize_rcu(); 678
701 kfree(old); 679 kfree_rcu(old, rcu_head);
702 680
703 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); 681 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
682
704 return 0; 683 return 0;
705} 684}
706 685
@@ -776,12 +755,10 @@ static int sta_apply_parameters(struct ieee80211_local *local,
776 755
777 if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED) && 756 if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED) &&
778 !test_sta_flag(sta, WLAN_STA_AUTH)) { 757 !test_sta_flag(sta, WLAN_STA_AUTH)) {
779 ret = sta_info_move_state_checked(sta, 758 ret = sta_info_move_state(sta, IEEE80211_STA_AUTH);
780 IEEE80211_STA_AUTH);
781 if (ret) 759 if (ret)
782 return ret; 760 return ret;
783 ret = sta_info_move_state_checked(sta, 761 ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
784 IEEE80211_STA_ASSOC);
785 if (ret) 762 if (ret)
786 return ret; 763 return ret;
787 } 764 }
@@ -789,11 +766,9 @@ static int sta_apply_parameters(struct ieee80211_local *local,
789 766
790 if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) { 767 if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
791 if (set & BIT(NL80211_STA_FLAG_AUTHORIZED)) 768 if (set & BIT(NL80211_STA_FLAG_AUTHORIZED))
792 ret = sta_info_move_state_checked(sta, 769 ret = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
793 IEEE80211_STA_AUTHORIZED);
794 else if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) 770 else if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
795 ret = sta_info_move_state_checked(sta, 771 ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
796 IEEE80211_STA_ASSOC);
797 if (ret) 772 if (ret)
798 return ret; 773 return ret;
799 } 774 }
@@ -805,12 +780,10 @@ static int sta_apply_parameters(struct ieee80211_local *local,
805 780
806 if (!(set & BIT(NL80211_STA_FLAG_AUTHENTICATED)) && 781 if (!(set & BIT(NL80211_STA_FLAG_AUTHENTICATED)) &&
807 test_sta_flag(sta, WLAN_STA_AUTH)) { 782 test_sta_flag(sta, WLAN_STA_AUTH)) {
808 ret = sta_info_move_state_checked(sta, 783 ret = sta_info_move_state(sta, IEEE80211_STA_AUTH);
809 IEEE80211_STA_AUTH);
810 if (ret) 784 if (ret)
811 return ret; 785 return ret;
812 ret = sta_info_move_state_checked(sta, 786 ret = sta_info_move_state(sta, IEEE80211_STA_NONE);
813 IEEE80211_STA_NONE);
814 if (ret) 787 if (ret)
815 return ret; 788 return ret;
816 } 789 }
@@ -944,8 +917,8 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
944 if (!sta) 917 if (!sta)
945 return -ENOMEM; 918 return -ENOMEM;
946 919
947 sta_info_move_state(sta, IEEE80211_STA_AUTH); 920 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
948 sta_info_move_state(sta, IEEE80211_STA_ASSOC); 921 sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
949 922
950 err = sta_apply_parameters(local, sta, params); 923 err = sta_apply_parameters(local, sta, params);
951 if (err) { 924 if (err) {
@@ -1001,6 +974,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
1001 struct ieee80211_local *local = wiphy_priv(wiphy); 974 struct ieee80211_local *local = wiphy_priv(wiphy);
1002 struct sta_info *sta; 975 struct sta_info *sta;
1003 struct ieee80211_sub_if_data *vlansdata; 976 struct ieee80211_sub_if_data *vlansdata;
977 int err;
1004 978
1005 mutex_lock(&local->sta_mtx); 979 mutex_lock(&local->sta_mtx);
1006 980
@@ -1040,7 +1014,11 @@ static int ieee80211_change_station(struct wiphy *wiphy,
1040 ieee80211_send_layer2_update(sta); 1014 ieee80211_send_layer2_update(sta);
1041 } 1015 }
1042 1016
1043 sta_apply_parameters(local, sta, params); 1017 err = sta_apply_parameters(local, sta, params);
1018 if (err) {
1019 mutex_unlock(&local->sta_mtx);
1020 return err;
1021 }
1044 1022
1045 if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && params->supported_rates) 1023 if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && params->supported_rates)
1046 rate_control_rate_init(sta); 1024 rate_control_rate_init(sta);
@@ -1341,6 +1319,16 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
1341 conf->dot11MeshHWMPRannInterval = 1319 conf->dot11MeshHWMPRannInterval =
1342 nconf->dot11MeshHWMPRannInterval; 1320 nconf->dot11MeshHWMPRannInterval;
1343 } 1321 }
1322 if (_chg_mesh_attr(NL80211_MESHCONF_FORWARDING, mask))
1323 conf->dot11MeshForwarding = nconf->dot11MeshForwarding;
1324 if (_chg_mesh_attr(NL80211_MESHCONF_RSSI_THRESHOLD, mask)) {
1325 /* our RSSI threshold implementation is supported only for
1326 * devices that report signal in dBm.
1327 */
1328 if (!(sdata->local->hw.flags & IEEE80211_HW_SIGNAL_DBM))
1329 return -ENOTSUPP;
1330 conf->rssi_threshold = nconf->rssi_threshold;
1331 }
1344 return 0; 1332 return 0;
1345} 1333}
1346 1334
@@ -1622,19 +1610,15 @@ static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev,
1622} 1610}
1623 1611
1624static int ieee80211_deauth(struct wiphy *wiphy, struct net_device *dev, 1612static int ieee80211_deauth(struct wiphy *wiphy, struct net_device *dev,
1625 struct cfg80211_deauth_request *req, 1613 struct cfg80211_deauth_request *req)
1626 void *cookie)
1627{ 1614{
1628 return ieee80211_mgd_deauth(IEEE80211_DEV_TO_SUB_IF(dev), 1615 return ieee80211_mgd_deauth(IEEE80211_DEV_TO_SUB_IF(dev), req);
1629 req, cookie);
1630} 1616}
1631 1617
1632static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev, 1618static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev,
1633 struct cfg80211_disassoc_request *req, 1619 struct cfg80211_disassoc_request *req)
1634 void *cookie)
1635{ 1620{
1636 return ieee80211_mgd_disassoc(IEEE80211_DEV_TO_SUB_IF(dev), 1621 return ieee80211_mgd_disassoc(IEEE80211_DEV_TO_SUB_IF(dev), req);
1637 req, cookie);
1638} 1622}
1639 1623
1640static int ieee80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, 1624static int ieee80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
@@ -1868,7 +1852,6 @@ static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy,
1868 s32 rssi_thold, u32 rssi_hyst) 1852 s32 rssi_thold, u32 rssi_hyst)
1869{ 1853{
1870 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1854 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1871 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1872 struct ieee80211_vif *vif = &sdata->vif; 1855 struct ieee80211_vif *vif = &sdata->vif;
1873 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 1856 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
1874 1857
@@ -1879,14 +1862,9 @@ static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy,
1879 bss_conf->cqm_rssi_thold = rssi_thold; 1862 bss_conf->cqm_rssi_thold = rssi_thold;
1880 bss_conf->cqm_rssi_hyst = rssi_hyst; 1863 bss_conf->cqm_rssi_hyst = rssi_hyst;
1881 1864
1882 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) {
1883 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1884 return -EOPNOTSUPP;
1885 return 0;
1886 }
1887
1888 /* tell the driver upon association, unless already associated */ 1865 /* tell the driver upon association, unless already associated */
1889 if (sdata->u.mgd.associated) 1866 if (sdata->u.mgd.associated &&
1867 sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)
1890 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_CQM); 1868 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_CQM);
1891 1869
1892 return 0; 1870 return 0;
@@ -1907,8 +1885,11 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
1907 return ret; 1885 return ret;
1908 } 1886 }
1909 1887
1910 for (i = 0; i < IEEE80211_NUM_BANDS; i++) 1888 for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
1911 sdata->rc_rateidx_mask[i] = mask->control[i].legacy; 1889 sdata->rc_rateidx_mask[i] = mask->control[i].legacy;
1890 memcpy(sdata->rc_rateidx_mcs_mask[i], mask->control[i].mcs,
1891 sizeof(mask->control[i].mcs));
1892 }
1912 1893
1913 return 0; 1894 return 0;
1914} 1895}
@@ -2030,7 +2011,7 @@ ieee80211_offchan_tx_done(struct ieee80211_work *wk, struct sk_buff *skb)
2030 if (wk->offchan_tx.wait && !wk->offchan_tx.status) 2011 if (wk->offchan_tx.wait && !wk->offchan_tx.status)
2031 cfg80211_mgmt_tx_status(wk->sdata->dev, 2012 cfg80211_mgmt_tx_status(wk->sdata->dev,
2032 (unsigned long) wk->offchan_tx.frame, 2013 (unsigned long) wk->offchan_tx.frame,
2033 wk->ie, wk->ie_len, false, GFP_KERNEL); 2014 wk->data, wk->data_len, false, GFP_KERNEL);
2034 2015
2035 return WORK_DONE_DESTROY; 2016 return WORK_DONE_DESTROY;
2036} 2017}
@@ -2181,8 +2162,8 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
2181 wk->done = ieee80211_offchan_tx_done; 2162 wk->done = ieee80211_offchan_tx_done;
2182 wk->offchan_tx.frame = skb; 2163 wk->offchan_tx.frame = skb;
2183 wk->offchan_tx.wait = wait; 2164 wk->offchan_tx.wait = wait;
2184 wk->ie_len = len; 2165 wk->data_len = len;
2185 memcpy(wk->ie, buf, len); 2166 memcpy(wk->data, buf, len);
2186 2167
2187 ieee80211_add_work(wk); 2168 ieee80211_add_work(wk);
2188 return 0; 2169 return 0;
@@ -2701,9 +2682,9 @@ struct cfg80211_ops mac80211_config_ops = {
2701 .get_key = ieee80211_get_key, 2682 .get_key = ieee80211_get_key,
2702 .set_default_key = ieee80211_config_default_key, 2683 .set_default_key = ieee80211_config_default_key,
2703 .set_default_mgmt_key = ieee80211_config_default_mgmt_key, 2684 .set_default_mgmt_key = ieee80211_config_default_mgmt_key,
2704 .add_beacon = ieee80211_add_beacon, 2685 .start_ap = ieee80211_start_ap,
2705 .set_beacon = ieee80211_set_beacon, 2686 .change_beacon = ieee80211_change_beacon,
2706 .del_beacon = ieee80211_del_beacon, 2687 .stop_ap = ieee80211_stop_ap,
2707 .add_station = ieee80211_add_station, 2688 .add_station = ieee80211_add_station,
2708 .del_station = ieee80211_del_station, 2689 .del_station = ieee80211_del_station,
2709 .change_station = ieee80211_change_station, 2690 .change_station = ieee80211_change_station,
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 889c3e93e0f4..e00ce8c3e28e 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -3,6 +3,7 @@
3 */ 3 */
4 4
5#include <linux/nl80211.h> 5#include <linux/nl80211.h>
6#include <net/cfg80211.h>
6#include "ieee80211_i.h" 7#include "ieee80211_i.h"
7 8
8static enum ieee80211_chan_mode 9static enum ieee80211_chan_mode
@@ -20,23 +21,29 @@ __ieee80211_get_channel_mode(struct ieee80211_local *local,
20 if (!ieee80211_sdata_running(sdata)) 21 if (!ieee80211_sdata_running(sdata))
21 continue; 22 continue;
22 23
23 if (sdata->vif.type == NL80211_IFTYPE_MONITOR) 24 switch (sdata->vif.type) {
25 case NL80211_IFTYPE_MONITOR:
24 continue; 26 continue;
25 27 case NL80211_IFTYPE_STATION:
26 if (sdata->vif.type == NL80211_IFTYPE_STATION && 28 if (!sdata->u.mgd.associated)
27 !sdata->u.mgd.associated) 29 continue;
28 continue; 30 break;
29 31 case NL80211_IFTYPE_ADHOC:
30 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
31 if (!sdata->u.ibss.ssid_len) 32 if (!sdata->u.ibss.ssid_len)
32 continue; 33 continue;
33 if (!sdata->u.ibss.fixed_channel) 34 if (!sdata->u.ibss.fixed_channel)
34 return CHAN_MODE_HOPPING; 35 return CHAN_MODE_HOPPING;
35 } 36 break;
36 37 case NL80211_IFTYPE_AP_VLAN:
37 if (sdata->vif.type == NL80211_IFTYPE_AP && 38 /* will also have _AP interface */
38 !sdata->u.ap.beacon)
39 continue; 39 continue;
40 case NL80211_IFTYPE_AP:
41 if (!sdata->u.ap.beacon)
42 continue;
43 break;
44 default:
45 break;
46 }
40 47
41 return CHAN_MODE_FIXED; 48 return CHAN_MODE_FIXED;
42 } 49 }
@@ -128,3 +135,29 @@ bool ieee80211_set_channel_type(struct ieee80211_local *local,
128 135
129 return result; 136 return result;
130} 137}
138
139/*
140 * ieee80211_get_tx_channel_type returns the channel type we should
141 * use for packet transmission, given the channel capability and
142 * whatever regulatory flags we have been given.
143 */
144enum nl80211_channel_type ieee80211_get_tx_channel_type(
145 struct ieee80211_local *local,
146 enum nl80211_channel_type channel_type)
147{
148 switch (channel_type) {
149 case NL80211_CHAN_HT40PLUS:
150 if (local->hw.conf.channel->flags &
151 IEEE80211_CHAN_NO_HT40PLUS)
152 return NL80211_CHAN_HT20;
153 break;
154 case NL80211_CHAN_HT40MINUS:
155 if (local->hw.conf.channel->flags &
156 IEEE80211_CHAN_NO_HT40MINUS)
157 return NL80211_CHAN_HT20;
158 break;
159 default:
160 break;
161 }
162 return channel_type;
163}
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 90baea53e7c5..cc5b7a6e7e0b 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -97,85 +97,6 @@ static const struct file_operations reset_ops = {
97 .llseek = noop_llseek, 97 .llseek = noop_llseek,
98}; 98};
99 99
100static ssize_t uapsd_queues_read(struct file *file, char __user *user_buf,
101 size_t count, loff_t *ppos)
102{
103 struct ieee80211_local *local = file->private_data;
104 return mac80211_format_buffer(user_buf, count, ppos, "0x%x\n",
105 local->uapsd_queues);
106}
107
108static ssize_t uapsd_queues_write(struct file *file,
109 const char __user *user_buf,
110 size_t count, loff_t *ppos)
111{
112 struct ieee80211_local *local = file->private_data;
113 u8 val;
114 int ret;
115
116 ret = kstrtou8_from_user(user_buf, count, 0, &val);
117 if (ret)
118 return ret;
119
120 if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
121 return -ERANGE;
122
123 local->uapsd_queues = val;
124
125 return count;
126}
127
128static const struct file_operations uapsd_queues_ops = {
129 .read = uapsd_queues_read,
130 .write = uapsd_queues_write,
131 .open = mac80211_open_file_generic,
132 .llseek = default_llseek,
133};
134
135static ssize_t uapsd_max_sp_len_read(struct file *file, char __user *user_buf,
136 size_t count, loff_t *ppos)
137{
138 struct ieee80211_local *local = file->private_data;
139
140 return mac80211_format_buffer(user_buf, count, ppos, "0x%x\n",
141 local->uapsd_max_sp_len);
142}
143
144static ssize_t uapsd_max_sp_len_write(struct file *file,
145 const char __user *user_buf,
146 size_t count, loff_t *ppos)
147{
148 struct ieee80211_local *local = file->private_data;
149 unsigned long val;
150 char buf[10];
151 size_t len;
152 int ret;
153
154 len = min(count, sizeof(buf) - 1);
155 if (copy_from_user(buf, user_buf, len))
156 return -EFAULT;
157 buf[len] = '\0';
158
159 ret = kstrtoul(buf, 0, &val);
160
161 if (ret)
162 return -EINVAL;
163
164 if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
165 return -ERANGE;
166
167 local->uapsd_max_sp_len = val;
168
169 return count;
170}
171
172static const struct file_operations uapsd_max_sp_len_ops = {
173 .read = uapsd_max_sp_len_read,
174 .write = uapsd_max_sp_len_write,
175 .open = mac80211_open_file_generic,
176 .llseek = default_llseek,
177};
178
179static ssize_t channel_type_read(struct file *file, char __user *user_buf, 100static ssize_t channel_type_read(struct file *file, char __user *user_buf,
180 size_t count, loff_t *ppos) 101 size_t count, loff_t *ppos)
181{ 102{
@@ -247,8 +168,6 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
247 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_PS\n"); 168 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_PS\n");
248 if (local->hw.flags & IEEE80211_HW_MFP_CAPABLE) 169 if (local->hw.flags & IEEE80211_HW_MFP_CAPABLE)
249 sf += snprintf(buf + sf, mxln - sf, "MFP_CAPABLE\n"); 170 sf += snprintf(buf + sf, mxln - sf, "MFP_CAPABLE\n");
250 if (local->hw.flags & IEEE80211_HW_BEACON_FILTER)
251 sf += snprintf(buf + sf, mxln - sf, "BEACON_FILTER\n");
252 if (local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS) 171 if (local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS)
253 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_STATIC_SMPS\n"); 172 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_STATIC_SMPS\n");
254 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS) 173 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS)
@@ -259,14 +178,14 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
259 sf += snprintf(buf + sf, mxln - sf, "REPORTS_TX_ACK_STATUS\n"); 178 sf += snprintf(buf + sf, mxln - sf, "REPORTS_TX_ACK_STATUS\n");
260 if (local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR) 179 if (local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
261 sf += snprintf(buf + sf, mxln - sf, "CONNECTION_MONITOR\n"); 180 sf += snprintf(buf + sf, mxln - sf, "CONNECTION_MONITOR\n");
262 if (local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)
263 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_CQM_RSSI\n");
264 if (local->hw.flags & IEEE80211_HW_SUPPORTS_PER_STA_GTK) 181 if (local->hw.flags & IEEE80211_HW_SUPPORTS_PER_STA_GTK)
265 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n"); 182 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n");
266 if (local->hw.flags & IEEE80211_HW_AP_LINK_PS) 183 if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
267 sf += snprintf(buf + sf, mxln - sf, "AP_LINK_PS\n"); 184 sf += snprintf(buf + sf, mxln - sf, "AP_LINK_PS\n");
268 if (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW) 185 if (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)
269 sf += snprintf(buf + sf, mxln - sf, "TX_AMPDU_SETUP_IN_HW\n"); 186 sf += snprintf(buf + sf, mxln - sf, "TX_AMPDU_SETUP_IN_HW\n");
187 if (local->hw.flags & IEEE80211_HW_SCAN_WHILE_IDLE)
188 sf += snprintf(buf + sf, mxln - sf, "SCAN_WHILE_IDLE\n");
270 189
271 rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); 190 rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
272 kfree(buf); 191 kfree(buf);
@@ -364,8 +283,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
364 DEBUGFS_ADD(wep_iv); 283 DEBUGFS_ADD(wep_iv);
365 DEBUGFS_ADD(queues); 284 DEBUGFS_ADD(queues);
366 DEBUGFS_ADD_MODE(reset, 0200); 285 DEBUGFS_ADD_MODE(reset, 0200);
367 DEBUGFS_ADD(uapsd_queues);
368 DEBUGFS_ADD(uapsd_max_sp_len);
369 DEBUGFS_ADD(channel_type); 286 DEBUGFS_ADD(channel_type);
370 DEBUGFS_ADD(hwflags); 287 DEBUGFS_ADD(hwflags);
371 DEBUGFS_ADD(user_power); 288 DEBUGFS_ADD(user_power);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 176c08ffb13c..a32eeda04aa3 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -49,16 +49,15 @@ static ssize_t ieee80211_if_write(
49 size_t count, loff_t *ppos, 49 size_t count, loff_t *ppos,
50 ssize_t (*write)(struct ieee80211_sub_if_data *, const char *, int)) 50 ssize_t (*write)(struct ieee80211_sub_if_data *, const char *, int))
51{ 51{
52 u8 *buf; 52 char buf[64];
53 ssize_t ret; 53 ssize_t ret;
54 54
55 buf = kmalloc(count, GFP_KERNEL); 55 if (count >= sizeof(buf))
56 if (!buf) 56 return -E2BIG;
57 return -ENOMEM;
58 57
59 ret = -EFAULT;
60 if (copy_from_user(buf, userbuf, count)) 58 if (copy_from_user(buf, userbuf, count))
61 goto freebuf; 59 return -EFAULT;
60 buf[count] = '\0';
62 61
63 ret = -ENODEV; 62 ret = -ENODEV;
64 rtnl_lock(); 63 rtnl_lock();
@@ -66,8 +65,6 @@ static ssize_t ieee80211_if_write(
66 ret = (*write)(sdata, buf, count); 65 ret = (*write)(sdata, buf, count);
67 rtnl_unlock(); 66 rtnl_unlock();
68 67
69freebuf:
70 kfree(buf);
71 return ret; 68 return ret;
72} 69}
73 70
@@ -87,6 +84,21 @@ static ssize_t ieee80211_if_fmt_##name( \
87#define IEEE80211_IF_FMT_SIZE(name, field) \ 84#define IEEE80211_IF_FMT_SIZE(name, field) \
88 IEEE80211_IF_FMT(name, field, "%zd\n") 85 IEEE80211_IF_FMT(name, field, "%zd\n")
89 86
87#define IEEE80211_IF_FMT_HEXARRAY(name, field) \
88static ssize_t ieee80211_if_fmt_##name( \
89 const struct ieee80211_sub_if_data *sdata, \
90 char *buf, int buflen) \
91{ \
92 char *p = buf; \
93 int i; \
94 for (i = 0; i < sizeof(sdata->field); i++) { \
95 p += scnprintf(p, buflen + buf - p, "%.2x ", \
96 sdata->field[i]); \
97 } \
98 p += scnprintf(p, buflen + buf - p, "\n"); \
99 return p - buf; \
100}
101
90#define IEEE80211_IF_FMT_ATOMIC(name, field) \ 102#define IEEE80211_IF_FMT_ATOMIC(name, field) \
91static ssize_t ieee80211_if_fmt_##name( \ 103static ssize_t ieee80211_if_fmt_##name( \
92 const struct ieee80211_sub_if_data *sdata, \ 104 const struct ieee80211_sub_if_data *sdata, \
@@ -148,6 +160,11 @@ IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[IEEE80211_BAND_2GHZ],
148 HEX); 160 HEX);
149IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ], 161IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ],
150 HEX); 162 HEX);
163IEEE80211_IF_FILE(rc_rateidx_mcs_mask_2ghz,
164 rc_rateidx_mcs_mask[IEEE80211_BAND_2GHZ], HEXARRAY);
165IEEE80211_IF_FILE(rc_rateidx_mcs_mask_5ghz,
166 rc_rateidx_mcs_mask[IEEE80211_BAND_5GHZ], HEXARRAY);
167
151IEEE80211_IF_FILE(flags, flags, HEX); 168IEEE80211_IF_FILE(flags, flags, HEX);
152IEEE80211_IF_FILE(state, state, LHEX); 169IEEE80211_IF_FILE(state, state, LHEX);
153IEEE80211_IF_FILE(channel_type, vif.bss_conf.channel_type, DEC); 170IEEE80211_IF_FILE(channel_type, vif.bss_conf.channel_type, DEC);
@@ -320,6 +337,62 @@ static ssize_t ieee80211_if_parse_tkip_mic_test(
320 337
321__IEEE80211_IF_FILE_W(tkip_mic_test); 338__IEEE80211_IF_FILE_W(tkip_mic_test);
322 339
340static ssize_t ieee80211_if_fmt_uapsd_queues(
341 const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
342{
343 const struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
344
345 return snprintf(buf, buflen, "0x%x\n", ifmgd->uapsd_queues);
346}
347
348static ssize_t ieee80211_if_parse_uapsd_queues(
349 struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
350{
351 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
352 u8 val;
353 int ret;
354
355 ret = kstrtou8(buf, 0, &val);
356 if (ret)
357 return ret;
358
359 if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
360 return -ERANGE;
361
362 ifmgd->uapsd_queues = val;
363
364 return buflen;
365}
366__IEEE80211_IF_FILE_W(uapsd_queues);
367
368static ssize_t ieee80211_if_fmt_uapsd_max_sp_len(
369 const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
370{
371 const struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
372
373 return snprintf(buf, buflen, "0x%x\n", ifmgd->uapsd_max_sp_len);
374}
375
376static ssize_t ieee80211_if_parse_uapsd_max_sp_len(
377 struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
378{
379 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
380 unsigned long val;
381 int ret;
382
383 ret = kstrtoul(buf, 0, &val);
384 if (ret)
385 return -EINVAL;
386
387 if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
388 return -ERANGE;
389
390 ifmgd->uapsd_max_sp_len = val;
391
392 return buflen;
393}
394__IEEE80211_IF_FILE_W(uapsd_max_sp_len);
395
323/* AP attributes */ 396/* AP attributes */
324IEEE80211_IF_FILE(num_sta_authorized, u.ap.num_sta_authorized, ATOMIC); 397IEEE80211_IF_FILE(num_sta_authorized, u.ap.num_sta_authorized, ATOMIC);
325IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC); 398IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
@@ -422,6 +495,8 @@ IEEE80211_IF_FILE(dot11MeshGateAnnouncementProtocol,
422 u.mesh.mshcfg.dot11MeshGateAnnouncementProtocol, DEC); 495 u.mesh.mshcfg.dot11MeshGateAnnouncementProtocol, DEC);
423IEEE80211_IF_FILE(dot11MeshHWMPRannInterval, 496IEEE80211_IF_FILE(dot11MeshHWMPRannInterval,
424 u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC); 497 u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC);
498IEEE80211_IF_FILE(dot11MeshForwarding, u.mesh.mshcfg.dot11MeshForwarding, DEC);
499IEEE80211_IF_FILE(rssi_threshold, u.mesh.mshcfg.rssi_threshold, DEC);
425#endif 500#endif
426 501
427 502
@@ -441,6 +516,8 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
441 DEBUGFS_ADD(channel_type); 516 DEBUGFS_ADD(channel_type);
442 DEBUGFS_ADD(rc_rateidx_mask_2ghz); 517 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
443 DEBUGFS_ADD(rc_rateidx_mask_5ghz); 518 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
519 DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
520 DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
444 521
445 DEBUGFS_ADD(bssid); 522 DEBUGFS_ADD(bssid);
446 DEBUGFS_ADD(aid); 523 DEBUGFS_ADD(aid);
@@ -448,6 +525,8 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
448 DEBUGFS_ADD(ave_beacon); 525 DEBUGFS_ADD(ave_beacon);
449 DEBUGFS_ADD_MODE(smps, 0600); 526 DEBUGFS_ADD_MODE(smps, 0600);
450 DEBUGFS_ADD_MODE(tkip_mic_test, 0200); 527 DEBUGFS_ADD_MODE(tkip_mic_test, 0200);
528 DEBUGFS_ADD_MODE(uapsd_queues, 0600);
529 DEBUGFS_ADD_MODE(uapsd_max_sp_len, 0600);
451} 530}
452 531
453static void add_ap_files(struct ieee80211_sub_if_data *sdata) 532static void add_ap_files(struct ieee80211_sub_if_data *sdata)
@@ -458,6 +537,8 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata)
458 DEBUGFS_ADD(channel_type); 537 DEBUGFS_ADD(channel_type);
459 DEBUGFS_ADD(rc_rateidx_mask_2ghz); 538 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
460 DEBUGFS_ADD(rc_rateidx_mask_5ghz); 539 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
540 DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
541 DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
461 542
462 DEBUGFS_ADD(num_sta_authorized); 543 DEBUGFS_ADD(num_sta_authorized);
463 DEBUGFS_ADD(num_sta_ps); 544 DEBUGFS_ADD(num_sta_ps);
@@ -468,6 +549,12 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata)
468 549
469static void add_ibss_files(struct ieee80211_sub_if_data *sdata) 550static void add_ibss_files(struct ieee80211_sub_if_data *sdata)
470{ 551{
552 DEBUGFS_ADD(channel_type);
553 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
554 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
555 DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
556 DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
557
471 DEBUGFS_ADD_MODE(tsf, 0600); 558 DEBUGFS_ADD_MODE(tsf, 0600);
472} 559}
473 560
@@ -479,6 +566,8 @@ static void add_wds_files(struct ieee80211_sub_if_data *sdata)
479 DEBUGFS_ADD(channel_type); 566 DEBUGFS_ADD(channel_type);
480 DEBUGFS_ADD(rc_rateidx_mask_2ghz); 567 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
481 DEBUGFS_ADD(rc_rateidx_mask_5ghz); 568 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
569 DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
570 DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
482 571
483 DEBUGFS_ADD(peer); 572 DEBUGFS_ADD(peer);
484} 573}
@@ -491,6 +580,8 @@ static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
491 DEBUGFS_ADD(channel_type); 580 DEBUGFS_ADD(channel_type);
492 DEBUGFS_ADD(rc_rateidx_mask_2ghz); 581 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
493 DEBUGFS_ADD(rc_rateidx_mask_5ghz); 582 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
583 DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
584 DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
494} 585}
495 586
496static void add_monitor_files(struct ieee80211_sub_if_data *sdata) 587static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
@@ -502,11 +593,15 @@ static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
502 593
503#ifdef CONFIG_MAC80211_MESH 594#ifdef CONFIG_MAC80211_MESH
504 595
596static void add_mesh_files(struct ieee80211_sub_if_data *sdata)
597{
598 DEBUGFS_ADD_MODE(tsf, 0600);
599}
600
505static void add_mesh_stats(struct ieee80211_sub_if_data *sdata) 601static void add_mesh_stats(struct ieee80211_sub_if_data *sdata)
506{ 602{
507 struct dentry *dir = debugfs_create_dir("mesh_stats", 603 struct dentry *dir = debugfs_create_dir("mesh_stats",
508 sdata->debugfs.dir); 604 sdata->debugfs.dir);
509
510#define MESHSTATS_ADD(name)\ 605#define MESHSTATS_ADD(name)\
511 debugfs_create_file(#name, 0400, dir, sdata, &name##_ops); 606 debugfs_create_file(#name, 0400, dir, sdata, &name##_ops);
512 607
@@ -546,6 +641,7 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
546 MESHPARAMS_ADD(dot11MeshHWMPRootMode); 641 MESHPARAMS_ADD(dot11MeshHWMPRootMode);
547 MESHPARAMS_ADD(dot11MeshHWMPRannInterval); 642 MESHPARAMS_ADD(dot11MeshHWMPRannInterval);
548 MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol); 643 MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol);
644 MESHPARAMS_ADD(rssi_threshold);
549#undef MESHPARAMS_ADD 645#undef MESHPARAMS_ADD
550} 646}
551#endif 647#endif
@@ -558,6 +654,7 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
558 switch (sdata->vif.type) { 654 switch (sdata->vif.type) {
559 case NL80211_IFTYPE_MESH_POINT: 655 case NL80211_IFTYPE_MESH_POINT:
560#ifdef CONFIG_MAC80211_MESH 656#ifdef CONFIG_MAC80211_MESH
657 add_mesh_files(sdata);
561 add_mesh_stats(sdata); 658 add_mesh_stats(sdata);
562 add_mesh_config(sdata); 659 add_mesh_config(sdata);
563#endif 660#endif
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index d86217d56bd7..6d45804d09bc 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -63,14 +63,15 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
63 test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : "" 63 test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : ""
64 64
65 int res = scnprintf(buf, sizeof(buf), 65 int res = scnprintf(buf, sizeof(buf),
66 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", 66 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
67 TEST(AUTH), TEST(ASSOC), TEST(PS_STA), 67 TEST(AUTH), TEST(ASSOC), TEST(PS_STA),
68 TEST(PS_DRIVER), TEST(AUTHORIZED), 68 TEST(PS_DRIVER), TEST(AUTHORIZED),
69 TEST(SHORT_PREAMBLE), 69 TEST(SHORT_PREAMBLE),
70 TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT), 70 TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT),
71 TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL), 71 TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL),
72 TEST(UAPSD), TEST(SP), TEST(TDLS_PEER), 72 TEST(UAPSD), TEST(SP), TEST(TDLS_PEER),
73 TEST(TDLS_PEER_AUTH), TEST(RATE_CONTROL)); 73 TEST(TDLS_PEER_AUTH), TEST(4ADDR_EVENT),
74 TEST(INSERTED), TEST(RATE_CONTROL));
74#undef TEST 75#undef TEST
75 return simple_read_from_buffer(userbuf, count, ppos, buf, res); 76 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
76} 77}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index e8960ae39861..af4691fed645 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -168,41 +168,6 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
168 trace_drv_return_void(local); 168 trace_drv_return_void(local);
169} 169}
170 170
171static inline int drv_tx_sync(struct ieee80211_local *local,
172 struct ieee80211_sub_if_data *sdata,
173 const u8 *bssid,
174 enum ieee80211_tx_sync_type type)
175{
176 int ret = 0;
177
178 might_sleep();
179
180 check_sdata_in_driver(sdata);
181
182 trace_drv_tx_sync(local, sdata, bssid, type);
183 if (local->ops->tx_sync)
184 ret = local->ops->tx_sync(&local->hw, &sdata->vif,
185 bssid, type);
186 trace_drv_return_int(local, ret);
187 return ret;
188}
189
190static inline void drv_finish_tx_sync(struct ieee80211_local *local,
191 struct ieee80211_sub_if_data *sdata,
192 const u8 *bssid,
193 enum ieee80211_tx_sync_type type)
194{
195 might_sleep();
196
197 check_sdata_in_driver(sdata);
198
199 trace_drv_finish_tx_sync(local, sdata, bssid, type);
200 if (local->ops->finish_tx_sync)
201 local->ops->finish_tx_sync(&local->hw, &sdata->vif,
202 bssid, type);
203 trace_drv_return_void(local);
204}
205
206static inline u64 drv_prepare_multicast(struct ieee80211_local *local, 171static inline u64 drv_prepare_multicast(struct ieee80211_local *local,
207 struct netdev_hw_addr_list *mc_list) 172 struct netdev_hw_addr_list *mc_list)
208{ 173{
@@ -253,6 +218,7 @@ static inline int drv_set_key(struct ieee80211_local *local,
253 218
254 might_sleep(); 219 might_sleep();
255 220
221 sdata = get_bss_sdata(sdata);
256 check_sdata_in_driver(sdata); 222 check_sdata_in_driver(sdata);
257 223
258 trace_drv_set_key(local, cmd, sdata, sta, key); 224 trace_drv_set_key(local, cmd, sdata, sta, key);
@@ -272,6 +238,7 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local,
272 if (sta) 238 if (sta)
273 ista = &sta->sta; 239 ista = &sta->sta;
274 240
241 sdata = get_bss_sdata(sdata);
275 check_sdata_in_driver(sdata); 242 check_sdata_in_driver(sdata);
276 243
277 trace_drv_update_tkip_key(local, sdata, conf, ista, iv32); 244 trace_drv_update_tkip_key(local, sdata, conf, ista, iv32);
@@ -476,6 +443,37 @@ static inline void drv_sta_remove(struct ieee80211_local *local,
476 trace_drv_return_void(local); 443 trace_drv_return_void(local);
477} 444}
478 445
446static inline __must_check
447int drv_sta_state(struct ieee80211_local *local,
448 struct ieee80211_sub_if_data *sdata,
449 struct sta_info *sta,
450 enum ieee80211_sta_state old_state,
451 enum ieee80211_sta_state new_state)
452{
453 int ret = 0;
454
455 might_sleep();
456
457 sdata = get_bss_sdata(sdata);
458 check_sdata_in_driver(sdata);
459
460 trace_drv_sta_state(local, sdata, &sta->sta, old_state, new_state);
461 if (local->ops->sta_state) {
462 ret = local->ops->sta_state(&local->hw, &sdata->vif, &sta->sta,
463 old_state, new_state);
464 } else if (old_state == IEEE80211_STA_AUTH &&
465 new_state == IEEE80211_STA_ASSOC) {
466 ret = drv_sta_add(local, sdata, &sta->sta);
467 if (ret == 0)
468 sta->uploaded = true;
469 } else if (old_state == IEEE80211_STA_ASSOC &&
470 new_state == IEEE80211_STA_AUTH) {
471 drv_sta_remove(local, sdata, &sta->sta);
472 }
473 trace_drv_return_int(local, ret);
474 return ret;
475}
476
479static inline int drv_conf_tx(struct ieee80211_local *local, 477static inline int drv_conf_tx(struct ieee80211_local *local,
480 struct ieee80211_sub_if_data *sdata, u16 queue, 478 struct ieee80211_sub_if_data *sdata, u16 queue,
481 const struct ieee80211_tx_queue_params *params) 479 const struct ieee80211_tx_queue_params *params)
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 6e9df8fd8fb8..21d6f5290a1c 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -296,7 +296,7 @@ TRACE_EVENT(drv_bss_info_changed,
296 __entry->dtimper = info->dtim_period; 296 __entry->dtimper = info->dtim_period;
297 __entry->bcnint = info->beacon_int; 297 __entry->bcnint = info->beacon_int;
298 __entry->assoc_cap = info->assoc_capability; 298 __entry->assoc_cap = info->assoc_capability;
299 __entry->timestamp = info->timestamp; 299 __entry->timestamp = info->last_tsf;
300 __entry->basic_rates = info->basic_rates; 300 __entry->basic_rates = info->basic_rates;
301 __entry->enable_beacon = info->enable_beacon; 301 __entry->enable_beacon = info->enable_beacon;
302 __entry->ht_operation_mode = info->ht_operation_mode; 302 __entry->ht_operation_mode = info->ht_operation_mode;
@@ -308,49 +308,6 @@ TRACE_EVENT(drv_bss_info_changed,
308 ) 308 )
309); 309);
310 310
311DECLARE_EVENT_CLASS(tx_sync_evt,
312 TP_PROTO(struct ieee80211_local *local,
313 struct ieee80211_sub_if_data *sdata,
314 const u8 *bssid,
315 enum ieee80211_tx_sync_type type),
316 TP_ARGS(local, sdata, bssid, type),
317
318 TP_STRUCT__entry(
319 LOCAL_ENTRY
320 VIF_ENTRY
321 __array(char, bssid, ETH_ALEN)
322 __field(u32, sync_type)
323 ),
324
325 TP_fast_assign(
326 LOCAL_ASSIGN;
327 VIF_ASSIGN;
328 memcpy(__entry->bssid, bssid, ETH_ALEN);
329 __entry->sync_type = type;
330 ),
331
332 TP_printk(
333 LOCAL_PR_FMT VIF_PR_FMT " bssid:%pM type:%d",
334 LOCAL_PR_ARG, VIF_PR_ARG, __entry->bssid, __entry->sync_type
335 )
336);
337
338DEFINE_EVENT(tx_sync_evt, drv_tx_sync,
339 TP_PROTO(struct ieee80211_local *local,
340 struct ieee80211_sub_if_data *sdata,
341 const u8 *bssid,
342 enum ieee80211_tx_sync_type type),
343 TP_ARGS(local, sdata, bssid, type)
344);
345
346DEFINE_EVENT(tx_sync_evt, drv_finish_tx_sync,
347 TP_PROTO(struct ieee80211_local *local,
348 struct ieee80211_sub_if_data *sdata,
349 const u8 *bssid,
350 enum ieee80211_tx_sync_type type),
351 TP_ARGS(local, sdata, bssid, type)
352);
353
354TRACE_EVENT(drv_prepare_multicast, 311TRACE_EVENT(drv_prepare_multicast,
355 TP_PROTO(struct ieee80211_local *local, int mc_count), 312 TP_PROTO(struct ieee80211_local *local, int mc_count),
356 313
@@ -635,6 +592,38 @@ TRACE_EVENT(drv_sta_notify,
635 ) 592 )
636); 593);
637 594
595TRACE_EVENT(drv_sta_state,
596 TP_PROTO(struct ieee80211_local *local,
597 struct ieee80211_sub_if_data *sdata,
598 struct ieee80211_sta *sta,
599 enum ieee80211_sta_state old_state,
600 enum ieee80211_sta_state new_state),
601
602 TP_ARGS(local, sdata, sta, old_state, new_state),
603
604 TP_STRUCT__entry(
605 LOCAL_ENTRY
606 VIF_ENTRY
607 STA_ENTRY
608 __field(u32, old_state)
609 __field(u32, new_state)
610 ),
611
612 TP_fast_assign(
613 LOCAL_ASSIGN;
614 VIF_ASSIGN;
615 STA_ASSIGN;
616 __entry->old_state = old_state;
617 __entry->new_state = new_state;
618 ),
619
620 TP_printk(
621 LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " state: %d->%d",
622 LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG,
623 __entry->old_state, __entry->new_state
624 )
625);
626
638TRACE_EVENT(drv_sta_add, 627TRACE_EVENT(drv_sta_add,
639 TP_PROTO(struct ieee80211_local *local, 628 TP_PROTO(struct ieee80211_local *local,
640 struct ieee80211_sub_if_data *sdata, 629 struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index a4643969a13b..33fd8d9f714e 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -20,7 +20,6 @@
20#include <linux/etherdevice.h> 20#include <linux/etherdevice.h>
21#include <linux/rtnetlink.h> 21#include <linux/rtnetlink.h>
22#include <net/mac80211.h> 22#include <net/mac80211.h>
23#include <asm/unaligned.h>
24 23
25#include "ieee80211_i.h" 24#include "ieee80211_i.h"
26#include "driver-ops.h" 25#include "driver-ops.h"
@@ -36,31 +35,6 @@
36#define IEEE80211_IBSS_MAX_STA_ENTRIES 128 35#define IEEE80211_IBSS_MAX_STA_ENTRIES 128
37 36
38 37
39static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
40 struct ieee80211_mgmt *mgmt,
41 size_t len)
42{
43 u16 auth_alg, auth_transaction;
44
45 lockdep_assert_held(&sdata->u.ibss.mtx);
46
47 if (len < 24 + 6)
48 return;
49
50 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
51 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
52
53 /*
54 * IEEE 802.11 standard does not require authentication in IBSS
55 * networks and most implementations do not seem to use it.
56 * However, try to reply to authentication attempts if someone
57 * has actually implemented this.
58 */
59 if (auth_alg == WLAN_AUTH_OPEN && auth_transaction == 1)
60 ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, NULL, 0,
61 sdata->u.ibss.bssid, NULL, 0, 0);
62}
63
64static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, 38static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
65 const u8 *bssid, const int beacon_int, 39 const u8 *bssid, const int beacon_int,
66 struct ieee80211_channel *chan, 40 struct ieee80211_channel *chan,
@@ -92,7 +66,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
92 skb_reset_tail_pointer(skb); 66 skb_reset_tail_pointer(skb);
93 skb_reserve(skb, sdata->local->hw.extra_tx_headroom); 67 skb_reserve(skb, sdata->local->hw.extra_tx_headroom);
94 68
95 if (memcmp(ifibss->bssid, bssid, ETH_ALEN)) 69 if (compare_ether_addr(ifibss->bssid, bssid))
96 sta_info_flush(sdata->local, sdata); 70 sta_info_flush(sdata->local, sdata);
97 71
98 /* if merging, indicate to driver that we leave the old IBSS */ 72 /* if merging, indicate to driver that we leave the old IBSS */
@@ -276,7 +250,8 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
276 cbss->tsf); 250 cbss->tsf);
277} 251}
278 252
279static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta) 253static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
254 bool auth)
280 __acquires(RCU) 255 __acquires(RCU)
281{ 256{
282 struct ieee80211_sub_if_data *sdata = sta->sdata; 257 struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -290,22 +265,34 @@ static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta)
290 addr, sdata->name); 265 addr, sdata->name);
291#endif 266#endif
292 267
293 sta_info_move_state(sta, IEEE80211_STA_AUTH); 268 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
294 sta_info_move_state(sta, IEEE80211_STA_ASSOC); 269 sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
295 sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED); 270 /* authorize the station only if the network is not RSN protected. If
271 * not wait for the userspace to authorize it */
272 if (!sta->sdata->u.ibss.control_port)
273 sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED);
296 274
297 rate_control_rate_init(sta); 275 rate_control_rate_init(sta);
298 276
299 /* If it fails, maybe we raced another insertion? */ 277 /* If it fails, maybe we raced another insertion? */
300 if (sta_info_insert_rcu(sta)) 278 if (sta_info_insert_rcu(sta))
301 return sta_info_get(sdata, addr); 279 return sta_info_get(sdata, addr);
280 if (auth) {
281#ifdef CONFIG_MAC80211_IBSS_DEBUG
282 printk(KERN_DEBUG "TX Auth SA=%pM DA=%pM BSSID=%pM"
283 "(auth_transaction=1)\n", sdata->vif.addr,
284 sdata->u.ibss.bssid, addr);
285#endif
286 ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, NULL, 0,
287 addr, sdata->u.ibss.bssid, NULL, 0, 0);
288 }
302 return sta; 289 return sta;
303} 290}
304 291
305static struct sta_info * 292static struct sta_info *
306ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, 293ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
307 const u8 *bssid, const u8 *addr, 294 const u8 *bssid, const u8 *addr,
308 u32 supp_rates) 295 u32 supp_rates, bool auth)
309 __acquires(RCU) 296 __acquires(RCU)
310{ 297{
311 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 298 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
@@ -347,7 +334,42 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
347 sta->sta.supp_rates[band] = supp_rates | 334 sta->sta.supp_rates[band] = supp_rates |
348 ieee80211_mandatory_rates(local, band); 335 ieee80211_mandatory_rates(local, band);
349 336
350 return ieee80211_ibss_finish_sta(sta); 337 return ieee80211_ibss_finish_sta(sta, auth);
338}
339
340static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
341 struct ieee80211_mgmt *mgmt,
342 size_t len)
343{
344 u16 auth_alg, auth_transaction;
345
346 lockdep_assert_held(&sdata->u.ibss.mtx);
347
348 if (len < 24 + 6)
349 return;
350
351 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
352 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
353
354 if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
355 return;
356#ifdef CONFIG_MAC80211_IBSS_DEBUG
357 printk(KERN_DEBUG "%s: RX Auth SA=%pM DA=%pM BSSID=%pM."
358 "(auth_transaction=%d)\n",
359 sdata->name, mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
360#endif
361 sta_info_destroy_addr(sdata, mgmt->sa);
362 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 0, false);
363 rcu_read_unlock();
364
365 /*
366 * IEEE 802.11 standard does not require authentication in IBSS
367 * networks and most implementations do not seem to use it.
368 * However, try to reply to authentication attempts if someone
369 * has actually implemented this.
370 */
371 ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, NULL, 0,
372 mgmt->sa, sdata->u.ibss.bssid, NULL, 0, 0);
351} 373}
352 374
353static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, 375static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
@@ -381,7 +403,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
381 return; 403 return;
382 404
383 if (sdata->vif.type == NL80211_IFTYPE_ADHOC && 405 if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
384 memcmp(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0) { 406 compare_ether_addr(mgmt->bssid, sdata->u.ibss.bssid) == 0) {
385 407
386 rcu_read_lock(); 408 rcu_read_lock();
387 sta = sta_info_get(sdata, mgmt->sa); 409 sta = sta_info_get(sdata, mgmt->sa);
@@ -412,7 +434,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
412 } else { 434 } else {
413 rcu_read_unlock(); 435 rcu_read_unlock();
414 sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid, 436 sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid,
415 mgmt->sa, supp_rates); 437 mgmt->sa, supp_rates, true);
416 } 438 }
417 } 439 }
418 440
@@ -486,7 +508,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
486 goto put_bss; 508 goto put_bss;
487 509
488 /* same BSSID */ 510 /* same BSSID */
489 if (memcmp(cbss->bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0) 511 if (compare_ether_addr(cbss->bssid, sdata->u.ibss.bssid) == 0)
490 goto put_bss; 512 goto put_bss;
491 513
492 if (rx_status->flag & RX_FLAG_MACTIME_MPDU) { 514 if (rx_status->flag & RX_FLAG_MACTIME_MPDU) {
@@ -540,7 +562,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
540 ieee80211_sta_join_ibss(sdata, bss); 562 ieee80211_sta_join_ibss(sdata, bss);
541 supp_rates = ieee80211_sta_get_rates(local, elems, band); 563 supp_rates = ieee80211_sta_get_rates(local, elems, band);
542 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 564 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
543 supp_rates); 565 supp_rates, true);
544 rcu_read_unlock(); 566 rcu_read_unlock();
545 } 567 }
546 568
@@ -643,8 +665,7 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
643 "IBSS networks with same SSID (merge)\n", sdata->name); 665 "IBSS networks with same SSID (merge)\n", sdata->name);
644 666
645 ieee80211_request_internal_scan(sdata, 667 ieee80211_request_internal_scan(sdata,
646 ifibss->ssid, ifibss->ssid_len, 668 ifibss->ssid, ifibss->ssid_len, NULL);
647 ifibss->fixed_channel ? ifibss->channel : NULL);
648} 669}
649 670
650static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) 671static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
@@ -810,8 +831,8 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
810 if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da)) 831 if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
811 return; 832 return;
812 833
813 if (memcmp(mgmt->bssid, ifibss->bssid, ETH_ALEN) != 0 && 834 if (compare_ether_addr(mgmt->bssid, ifibss->bssid) != 0 &&
814 memcmp(mgmt->bssid, "\xff\xff\xff\xff\xff\xff", ETH_ALEN) != 0) 835 !is_broadcast_ether_addr(mgmt->bssid))
815 return; 836 return;
816 837
817 end = ((u8 *) mgmt) + len; 838 end = ((u8 *) mgmt) + len;
@@ -855,9 +876,6 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
855 size_t baselen; 876 size_t baselen;
856 struct ieee802_11_elems elems; 877 struct ieee802_11_elems elems;
857 878
858 if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN))
859 return; /* ignore ProbeResp to foreign address */
860
861 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; 879 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
862 if (baselen > len) 880 if (baselen > len)
863 return; 881 return;
@@ -945,7 +963,7 @@ void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata)
945 list_del(&sta->list); 963 list_del(&sta->list);
946 spin_unlock_bh(&ifibss->incomplete_lock); 964 spin_unlock_bh(&ifibss->incomplete_lock);
947 965
948 ieee80211_ibss_finish_sta(sta); 966 ieee80211_ibss_finish_sta(sta, true);
949 rcu_read_unlock(); 967 rcu_read_unlock();
950 spin_lock_bh(&ifibss->incomplete_lock); 968 spin_lock_bh(&ifibss->incomplete_lock);
951 } 969 }
@@ -1059,6 +1077,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
1059 sdata->u.ibss.fixed_bssid = false; 1077 sdata->u.ibss.fixed_bssid = false;
1060 1078
1061 sdata->u.ibss.privacy = params->privacy; 1079 sdata->u.ibss.privacy = params->privacy;
1080 sdata->u.ibss.control_port = params->control_port;
1062 sdata->u.ibss.basic_rates = params->basic_rates; 1081 sdata->u.ibss.basic_rates = params->basic_rates;
1063 memcpy(sdata->vif.bss_conf.mcast_rate, params->mcast_rate, 1082 memcpy(sdata->vif.bss_conf.mcast_rate, params->mcast_rate,
1064 sizeof(params->mcast_rate)); 1083 sizeof(params->mcast_rate));
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 2f0642d9e154..d9798a307f20 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -105,6 +105,44 @@ struct ieee80211_bss {
105 */ 105 */
106 bool has_erp_value; 106 bool has_erp_value;
107 u8 erp_value; 107 u8 erp_value;
108
109 /* Keep track of the corruption of the last beacon/probe response. */
110 u8 corrupt_data;
111
112 /* Keep track of what bits of information we have valid info for. */
113 u8 valid_data;
114};
115
116/**
117 * enum ieee80211_corrupt_data_flags - BSS data corruption flags
118 * @IEEE80211_BSS_CORRUPT_BEACON: last beacon frame received was corrupted
119 * @IEEE80211_BSS_CORRUPT_PROBE_RESP: last probe response received was corrupted
120 *
121 * These are bss flags that are attached to a bss in the
122 * @corrupt_data field of &struct ieee80211_bss.
123 */
124enum ieee80211_bss_corrupt_data_flags {
125 IEEE80211_BSS_CORRUPT_BEACON = BIT(0),
126 IEEE80211_BSS_CORRUPT_PROBE_RESP = BIT(1)
127};
128
129/**
130 * enum ieee80211_valid_data_flags - BSS valid data flags
131 * @IEEE80211_BSS_VALID_DTIM: DTIM data was gathered from non-corrupt IE
132 * @IEEE80211_BSS_VALID_WMM: WMM/UAPSD data was gathered from non-corrupt IE
133 * @IEEE80211_BSS_VALID_RATES: Supported rates were gathered from non-corrupt IE
134 * @IEEE80211_BSS_VALID_ERP: ERP flag was gathered from non-corrupt IE
135 *
136 * These are bss flags that are attached to a bss in the
137 * @valid_data field of &struct ieee80211_bss. They show which parts
138 * of the data structure were recieved as a result of an un-corrupted
139 * beacon/probe response.
140 */
141enum ieee80211_bss_valid_data_flags {
142 IEEE80211_BSS_VALID_DTIM = BIT(0),
143 IEEE80211_BSS_VALID_WMM = BIT(1),
144 IEEE80211_BSS_VALID_RATES = BIT(2),
145 IEEE80211_BSS_VALID_ERP = BIT(3)
108}; 146};
109 147
110static inline u8 *bss_mesh_cfg(struct ieee80211_bss *bss) 148static inline u8 *bss_mesh_cfg(struct ieee80211_bss *bss)
@@ -228,7 +266,7 @@ struct ieee80211_rx_data {
228struct beacon_data { 266struct beacon_data {
229 u8 *head, *tail; 267 u8 *head, *tail;
230 int head_len, tail_len; 268 int head_len, tail_len;
231 int dtim_period; 269 struct rcu_head rcu_head;
232}; 270};
233 271
234struct ieee80211_if_ap { 272struct ieee80211_if_ap {
@@ -280,10 +318,6 @@ struct mesh_preq_queue {
280 318
281enum ieee80211_work_type { 319enum ieee80211_work_type {
282 IEEE80211_WORK_ABORT, 320 IEEE80211_WORK_ABORT,
283 IEEE80211_WORK_DIRECT_PROBE,
284 IEEE80211_WORK_AUTH,
285 IEEE80211_WORK_ASSOC_BEACON_WAIT,
286 IEEE80211_WORK_ASSOC,
287 IEEE80211_WORK_REMAIN_ON_CHANNEL, 321 IEEE80211_WORK_REMAIN_ON_CHANNEL,
288 IEEE80211_WORK_OFFCHANNEL_TX, 322 IEEE80211_WORK_OFFCHANNEL_TX,
289}; 323};
@@ -316,36 +350,10 @@ struct ieee80211_work {
316 unsigned long timeout; 350 unsigned long timeout;
317 enum ieee80211_work_type type; 351 enum ieee80211_work_type type;
318 352
319 u8 filter_ta[ETH_ALEN];
320
321 bool started; 353 bool started;
322 354
323 union { 355 union {
324 struct { 356 struct {
325 int tries;
326 u16 algorithm, transaction;
327 u8 ssid[IEEE80211_MAX_SSID_LEN];
328 u8 ssid_len;
329 u8 key[WLAN_KEY_LEN_WEP104];
330 u8 key_len, key_idx;
331 bool privacy;
332 bool synced;
333 } probe_auth;
334 struct {
335 struct cfg80211_bss *bss;
336 const u8 *supp_rates;
337 const u8 *ht_information_ie;
338 enum ieee80211_smps_mode smps;
339 int tries;
340 u16 capability;
341 u8 prev_bssid[ETH_ALEN];
342 u8 ssid[IEEE80211_MAX_SSID_LEN];
343 u8 ssid_len;
344 u8 supp_rates_len;
345 bool wmm_used, use_11n, uapsd_used;
346 bool synced;
347 } assoc;
348 struct {
349 u32 duration; 357 u32 duration;
350 } remain; 358 } remain;
351 struct { 359 struct {
@@ -355,9 +363,8 @@ struct ieee80211_work {
355 } offchan_tx; 363 } offchan_tx;
356 }; 364 };
357 365
358 int ie_len; 366 size_t data_len;
359 /* must be last */ 367 u8 data[];
360 u8 ie[0];
361}; 368};
362 369
363/* flags used in struct ieee80211_if_managed.flags */ 370/* flags used in struct ieee80211_if_managed.flags */
@@ -373,6 +380,42 @@ enum ieee80211_sta_flags {
373 IEEE80211_STA_RESET_SIGNAL_AVE = BIT(9), 380 IEEE80211_STA_RESET_SIGNAL_AVE = BIT(9),
374}; 381};
375 382
383struct ieee80211_mgd_auth_data {
384 struct cfg80211_bss *bss;
385 unsigned long timeout;
386 int tries;
387 u16 algorithm, expected_transaction;
388
389 u8 key[WLAN_KEY_LEN_WEP104];
390 u8 key_len, key_idx;
391 bool done;
392
393 size_t ie_len;
394 u8 ie[];
395};
396
397struct ieee80211_mgd_assoc_data {
398 struct cfg80211_bss *bss;
399 const u8 *supp_rates;
400 const u8 *ht_information_ie;
401
402 unsigned long timeout;
403 int tries;
404
405 u16 capability;
406 u8 prev_bssid[ETH_ALEN];
407 u8 ssid[IEEE80211_MAX_SSID_LEN];
408 u8 ssid_len;
409 u8 supp_rates_len;
410 bool wmm, uapsd;
411 bool have_beacon;
412 bool sent_assoc;
413 bool synced;
414
415 size_t ie_len;
416 u8 ie[];
417};
418
376struct ieee80211_if_managed { 419struct ieee80211_if_managed {
377 struct timer_list timer; 420 struct timer_list timer;
378 struct timer_list conn_mon_timer; 421 struct timer_list conn_mon_timer;
@@ -389,6 +432,8 @@ struct ieee80211_if_managed {
389 432
390 struct mutex mtx; 433 struct mutex mtx;
391 struct cfg80211_bss *associated; 434 struct cfg80211_bss *associated;
435 struct ieee80211_mgd_auth_data *auth_data;
436 struct ieee80211_mgd_assoc_data *assoc_data;
392 437
393 u8 bssid[ETH_ALEN]; 438 u8 bssid[ETH_ALEN];
394 439
@@ -414,6 +459,20 @@ struct ieee80211_if_managed {
414 IEEE80211_MFP_REQUIRED 459 IEEE80211_MFP_REQUIRED
415 } mfp; /* management frame protection */ 460 } mfp; /* management frame protection */
416 461
462 /*
463 * Bitmask of enabled u-apsd queues,
464 * IEEE80211_WMM_IE_STA_QOSINFO_AC_BE & co. Needs a new association
465 * to take effect.
466 */
467 unsigned int uapsd_queues;
468
469 /*
470 * Maximum number of buffered frames AP can deliver during a
471 * service period, IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL or similar.
472 * Needs a new association to take effect.
473 */
474 unsigned int uapsd_max_sp_len;
475
417 int wmm_last_param_set; 476 int wmm_last_param_set;
418 477
419 u8 use_4addr; 478 u8 use_4addr;
@@ -470,7 +529,9 @@ struct ieee80211_if_ibss {
470 bool fixed_channel; 529 bool fixed_channel;
471 bool privacy; 530 bool privacy;
472 531
473 u8 bssid[ETH_ALEN]; 532 bool control_port;
533
534 u8 bssid[ETH_ALEN] __aligned(2);
474 u8 ssid[IEEE80211_MAX_SSID_LEN]; 535 u8 ssid[IEEE80211_MAX_SSID_LEN];
475 u8 ssid_len, ie_len; 536 u8 ssid_len, ie_len;
476 u8 *ie; 537 u8 *ie;
@@ -646,6 +707,7 @@ struct ieee80211_sub_if_data {
646 707
647 /* bitmap of allowed (non-MCS) rate indexes for rate control */ 708 /* bitmap of allowed (non-MCS) rate indexes for rate control */
648 u32 rc_rateidx_mask[IEEE80211_NUM_BANDS]; 709 u32 rc_rateidx_mask[IEEE80211_NUM_BANDS];
710 u8 rc_rateidx_mcs_mask[IEEE80211_NUM_BANDS][IEEE80211_HT_MCS_MASK_LEN];
649 711
650 union { 712 union {
651 struct ieee80211_if_ap ap; 713 struct ieee80211_if_ap ap;
@@ -769,7 +831,6 @@ struct ieee80211_local {
769 struct list_head work_list; 831 struct list_head work_list;
770 struct timer_list work_timer; 832 struct timer_list work_timer;
771 struct work_struct work_work; 833 struct work_struct work_work;
772 struct sk_buff_head work_skb_queue;
773 834
774 /* 835 /*
775 * private workqueue to mac80211. mac80211 makes this accessible 836 * private workqueue to mac80211. mac80211 makes this accessible
@@ -970,20 +1031,6 @@ struct ieee80211_local {
970 */ 1031 */
971 unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */ 1032 unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
972 1033
973 /*
974 * Bitmask of enabled u-apsd queues,
975 * IEEE80211_WMM_IE_STA_QOSINFO_AC_BE & co. Needs a new association
976 * to take effect.
977 */
978 unsigned int uapsd_queues;
979
980 /*
981 * Maximum number of buffered frames AP can deliver during a
982 * service period, IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL or similar.
983 * Needs a new association to take effect.
984 */
985 unsigned int uapsd_max_sp_len;
986
987 bool pspolling; 1034 bool pspolling;
988 bool offchannel_ps_enabled; 1035 bool offchannel_ps_enabled;
989 /* 1036 /*
@@ -1110,6 +1157,9 @@ struct ieee802_11_elems {
1110 u8 quiet_elem_len; 1157 u8 quiet_elem_len;
1111 u8 num_of_quiet_elem; /* can be more the one */ 1158 u8 num_of_quiet_elem; /* can be more the one */
1112 u8 timeout_int_len; 1159 u8 timeout_int_len;
1160
1161 /* whether a parse error occurred while retrieving these elements */
1162 bool parse_error;
1113}; 1163};
1114 1164
1115static inline struct ieee80211_local *hw_to_local( 1165static inline struct ieee80211_local *hw_to_local(
@@ -1118,12 +1168,6 @@ static inline struct ieee80211_local *hw_to_local(
1118 return container_of(hw, struct ieee80211_local, hw); 1168 return container_of(hw, struct ieee80211_local, hw);
1119} 1169}
1120 1170
1121static inline struct ieee80211_hw *local_to_hw(
1122 struct ieee80211_local *local)
1123{
1124 return &local->hw;
1125}
1126
1127 1171
1128static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr) 1172static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
1129{ 1173{
@@ -1146,11 +1190,9 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
1146int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, 1190int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
1147 struct cfg80211_assoc_request *req); 1191 struct cfg80211_assoc_request *req);
1148int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, 1192int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
1149 struct cfg80211_deauth_request *req, 1193 struct cfg80211_deauth_request *req);
1150 void *cookie);
1151int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, 1194int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
1152 struct cfg80211_disassoc_request *req, 1195 struct cfg80211_disassoc_request *req);
1153 void *cookie);
1154void ieee80211_send_pspoll(struct ieee80211_local *local, 1196void ieee80211_send_pspoll(struct ieee80211_local *local,
1155 struct ieee80211_sub_if_data *sdata); 1197 struct ieee80211_sub_if_data *sdata);
1156void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency); 1198void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency);
@@ -1168,6 +1210,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1168 struct sk_buff *skb); 1210 struct sk_buff *skb);
1169void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata); 1211void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata);
1170void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata); 1212void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
1213void ieee80211_mgd_teardown(struct ieee80211_sub_if_data *sdata);
1171 1214
1172/* IBSS code */ 1215/* IBSS code */
1173void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); 1216void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
@@ -1345,7 +1388,8 @@ int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
1345void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx, 1388void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
1346 struct ieee80211_hdr *hdr, const u8 *tsc, 1389 struct ieee80211_hdr *hdr, const u8 *tsc,
1347 gfp_t gfp); 1390 gfp_t gfp);
1348void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata); 1391void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
1392 bool bss_notify);
1349void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); 1393void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
1350 1394
1351void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata, 1395void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
@@ -1396,7 +1440,7 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
1396void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, 1440void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
1397 u16 transaction, u16 auth_alg, 1441 u16 transaction, u16 auth_alg,
1398 u8 *extra, size_t extra_len, const u8 *bssid, 1442 u8 *extra, size_t extra_len, const u8 *bssid,
1399 const u8 *key, u8 key_len, u8 key_idx); 1443 const u8 *da, const u8 *key, u8 key_len, u8 key_idx);
1400int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, 1444int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
1401 const u8 *ie, size_t ie_len, 1445 const u8 *ie, size_t ie_len,
1402 enum ieee80211_band band, u32 rate_mask, 1446 enum ieee80211_band band, u32 rate_mask,
@@ -1436,8 +1480,6 @@ void ieee80211_work_init(struct ieee80211_local *local);
1436void ieee80211_add_work(struct ieee80211_work *wk); 1480void ieee80211_add_work(struct ieee80211_work *wk);
1437void free_work(struct ieee80211_work *wk); 1481void free_work(struct ieee80211_work *wk);
1438void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata); 1482void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata);
1439ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata,
1440 struct sk_buff *skb);
1441int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata, 1483int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
1442 struct ieee80211_channel *chan, 1484 struct ieee80211_channel *chan,
1443 enum nl80211_channel_type channel_type, 1485 enum nl80211_channel_type channel_type,
@@ -1460,6 +1502,9 @@ bool ieee80211_set_channel_type(struct ieee80211_local *local,
1460 enum nl80211_channel_type chantype); 1502 enum nl80211_channel_type chantype);
1461enum nl80211_channel_type 1503enum nl80211_channel_type
1462ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info); 1504ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info);
1505enum nl80211_channel_type ieee80211_get_tx_channel_type(
1506 struct ieee80211_local *local,
1507 enum nl80211_channel_type channel_type);
1463 1508
1464#ifdef CONFIG_MAC80211_NOINLINE 1509#ifdef CONFIG_MAC80211_NOINLINE
1465#define debug_noinline noinline 1510#define debug_noinline noinline
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 8e2137bd87e2..401c01f0731e 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -304,7 +304,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
304 * need to initialise the hardware if the hardware 304 * need to initialise the hardware if the hardware
305 * doesn't start up with sane defaults 305 * doesn't start up with sane defaults
306 */ 306 */
307 ieee80211_set_wmm_default(sdata); 307 ieee80211_set_wmm_default(sdata, true);
308 } 308 }
309 309
310 set_bit(SDATA_STATE_RUNNING, &sdata->state); 310 set_bit(SDATA_STATE_RUNNING, &sdata->state);
@@ -318,9 +318,9 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
318 goto err_del_interface; 318 goto err_del_interface;
319 } 319 }
320 320
321 sta_info_move_state(sta, IEEE80211_STA_AUTH); 321 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
322 sta_info_move_state(sta, IEEE80211_STA_ASSOC); 322 sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
323 sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED); 323 sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED);
324 324
325 res = sta_info_insert(sta); 325 res = sta_info_insert(sta);
326 if (res) { 326 if (res) {
@@ -644,6 +644,8 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
644 644
645 if (ieee80211_vif_is_mesh(&sdata->vif)) 645 if (ieee80211_vif_is_mesh(&sdata->vif))
646 mesh_rmc_free(sdata); 646 mesh_rmc_free(sdata);
647 else if (sdata->vif.type == NL80211_IFTYPE_STATION)
648 ieee80211_mgd_teardown(sdata);
647 649
648 flushed = sta_info_flush(local, sdata); 650 flushed = sta_info_flush(local, sdata);
649 WARN_ON(flushed); 651 WARN_ON(flushed);
@@ -1181,6 +1183,13 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1181 sband = local->hw.wiphy->bands[i]; 1183 sband = local->hw.wiphy->bands[i];
1182 sdata->rc_rateidx_mask[i] = 1184 sdata->rc_rateidx_mask[i] =
1183 sband ? (1 << sband->n_bitrates) - 1 : 0; 1185 sband ? (1 << sband->n_bitrates) - 1 : 0;
1186 if (sband)
1187 memcpy(sdata->rc_rateidx_mcs_mask[i],
1188 sband->ht_cap.mcs.rx_mask,
1189 sizeof(sdata->rc_rateidx_mcs_mask[i]));
1190 else
1191 memset(sdata->rc_rateidx_mcs_mask[i], 0,
1192 sizeof(sdata->rc_rateidx_mcs_mask[i]));
1184 } 1193 }
1185 1194
1186 /* setup type-dependent data */ 1195 /* setup type-dependent data */
@@ -1303,7 +1312,9 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
1303 1312
1304 /* do not count disabled managed interfaces */ 1313 /* do not count disabled managed interfaces */
1305 if (sdata->vif.type == NL80211_IFTYPE_STATION && 1314 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
1306 !sdata->u.mgd.associated) { 1315 !sdata->u.mgd.associated &&
1316 !sdata->u.mgd.auth_data &&
1317 !sdata->u.mgd.assoc_data) {
1307 sdata->vif.bss_conf.idle = true; 1318 sdata->vif.bss_conf.idle = true;
1308 continue; 1319 continue;
1309 } 1320 }
@@ -1323,7 +1334,8 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
1323 wk->sdata->vif.bss_conf.idle = false; 1334 wk->sdata->vif.bss_conf.idle = false;
1324 } 1335 }
1325 1336
1326 if (local->scan_sdata) { 1337 if (local->scan_sdata &&
1338 !(local->hw.flags & IEEE80211_HW_SCAN_WHILE_IDLE)) {
1327 scanning = true; 1339 scanning = true;
1328 local->scan_sdata->vif.bss_conf.idle = false; 1340 local->scan_sdata->vif.bss_conf.idle = false;
1329 } 1341 }
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 87a89741432d..5bb600d93d77 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -17,6 +17,7 @@
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/export.h> 18#include <linux/export.h>
19#include <net/mac80211.h> 19#include <net/mac80211.h>
20#include <asm/unaligned.h>
20#include "ieee80211_i.h" 21#include "ieee80211_i.h"
21#include "driver-ops.h" 22#include "driver-ops.h"
22#include "debugfs_key.h" 23#include "debugfs_key.h"
@@ -54,14 +55,6 @@ static void assert_key_lock(struct ieee80211_local *local)
54 lockdep_assert_held(&local->key_mtx); 55 lockdep_assert_held(&local->key_mtx);
55} 56}
56 57
57static struct ieee80211_sta *get_sta_for_key(struct ieee80211_key *key)
58{
59 if (key->sta)
60 return &key->sta->sta;
61
62 return NULL;
63}
64
65static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata) 58static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
66{ 59{
67 /* 60 /*
@@ -95,7 +88,7 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
95static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) 88static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
96{ 89{
97 struct ieee80211_sub_if_data *sdata; 90 struct ieee80211_sub_if_data *sdata;
98 struct ieee80211_sta *sta; 91 struct sta_info *sta;
99 int ret; 92 int ret;
100 93
101 might_sleep(); 94 might_sleep();
@@ -105,7 +98,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
105 98
106 assert_key_lock(key->local); 99 assert_key_lock(key->local);
107 100
108 sta = get_sta_for_key(key); 101 sta = key->sta;
109 102
110 /* 103 /*
111 * If this is a per-STA GTK, check if it 104 * If this is a per-STA GTK, check if it
@@ -115,6 +108,9 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
115 !(key->local->hw.flags & IEEE80211_HW_SUPPORTS_PER_STA_GTK)) 108 !(key->local->hw.flags & IEEE80211_HW_SUPPORTS_PER_STA_GTK))
116 goto out_unsupported; 109 goto out_unsupported;
117 110
111 if (sta && !sta->uploaded)
112 goto out_unsupported;
113
118 sdata = key->sdata; 114 sdata = key->sdata;
119 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { 115 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
120 /* 116 /*
@@ -123,12 +119,10 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
123 */ 119 */
124 if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) 120 if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE))
125 goto out_unsupported; 121 goto out_unsupported;
126 sdata = container_of(sdata->bss,
127 struct ieee80211_sub_if_data,
128 u.ap);
129 } 122 }
130 123
131 ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf); 124 ret = drv_set_key(key->local, SET_KEY, sdata,
125 sta ? &sta->sta : NULL, &key->conf);
132 126
133 if (!ret) { 127 if (!ret) {
134 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; 128 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
@@ -147,7 +141,8 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
147 if (ret != -ENOSPC && ret != -EOPNOTSUPP) 141 if (ret != -ENOSPC && ret != -EOPNOTSUPP)
148 wiphy_err(key->local->hw.wiphy, 142 wiphy_err(key->local->hw.wiphy,
149 "failed to set key (%d, %pM) to hardware (%d)\n", 143 "failed to set key (%d, %pM) to hardware (%d)\n",
150 key->conf.keyidx, sta ? sta->addr : bcast_addr, ret); 144 key->conf.keyidx,
145 sta ? sta->sta.addr : bcast_addr, ret);
151 146
152 out_unsupported: 147 out_unsupported:
153 switch (key->conf.cipher) { 148 switch (key->conf.cipher) {
@@ -166,7 +161,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
166static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) 161static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
167{ 162{
168 struct ieee80211_sub_if_data *sdata; 163 struct ieee80211_sub_if_data *sdata;
169 struct ieee80211_sta *sta; 164 struct sta_info *sta;
170 int ret; 165 int ret;
171 166
172 might_sleep(); 167 might_sleep();
@@ -179,7 +174,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
179 if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) 174 if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
180 return; 175 return;
181 176
182 sta = get_sta_for_key(key); 177 sta = key->sta;
183 sdata = key->sdata; 178 sdata = key->sdata;
184 179
185 if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || 180 if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
@@ -187,18 +182,14 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
187 (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) 182 (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
188 increment_tailroom_need_count(sdata); 183 increment_tailroom_need_count(sdata);
189 184
190 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
191 sdata = container_of(sdata->bss,
192 struct ieee80211_sub_if_data,
193 u.ap);
194
195 ret = drv_set_key(key->local, DISABLE_KEY, sdata, 185 ret = drv_set_key(key->local, DISABLE_KEY, sdata,
196 sta, &key->conf); 186 sta ? &sta->sta : NULL, &key->conf);
197 187
198 if (ret) 188 if (ret)
199 wiphy_err(key->local->hw.wiphy, 189 wiphy_err(key->local->hw.wiphy,
200 "failed to remove key (%d, %pM) from hardware (%d)\n", 190 "failed to remove key (%d, %pM) from hardware (%d)\n",
201 key->conf.keyidx, sta ? sta->addr : bcast_addr, ret); 191 key->conf.keyidx,
192 sta ? sta->sta.addr : bcast_addr, ret);
202 193
203 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; 194 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
204} 195}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index b142bd4c2390..b581a24fa15c 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -155,7 +155,8 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
155 power = chan->max_power; 155 power = chan->max_power;
156 else 156 else
157 power = local->power_constr_level ? 157 power = local->power_constr_level ?
158 (chan->max_power - local->power_constr_level) : 158 min(chan->max_power,
159 (chan->max_reg_power - local->power_constr_level)) :
159 chan->max_power; 160 chan->max_power;
160 161
161 if (local->user_power_level >= 0) 162 if (local->user_power_level >= 0)
@@ -198,15 +199,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
198 return; 199 return;
199 200
200 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 201 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
201 /* 202 sdata->vif.bss_conf.bssid = sdata->u.mgd.bssid;
202 * While not associated, claim a BSSID of all-zeroes
203 * so that drivers don't do any weird things with the
204 * BSSID at that time.
205 */
206 if (sdata->vif.bss_conf.assoc)
207 sdata->vif.bss_conf.bssid = sdata->u.mgd.bssid;
208 else
209 sdata->vif.bss_conf.bssid = zero;
210 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 203 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
211 sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid; 204 sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid;
212 else if (sdata->vif.type == NL80211_IFTYPE_AP) 205 else if (sdata->vif.type == NL80211_IFTYPE_AP)
@@ -293,11 +286,11 @@ static void ieee80211_tasklet_handler(unsigned long data)
293 /* Clear skb->pkt_type in order to not confuse kernel 286 /* Clear skb->pkt_type in order to not confuse kernel
294 * netstack. */ 287 * netstack. */
295 skb->pkt_type = 0; 288 skb->pkt_type = 0;
296 ieee80211_rx(local_to_hw(local), skb); 289 ieee80211_rx(&local->hw, skb);
297 break; 290 break;
298 case IEEE80211_TX_STATUS_MSG: 291 case IEEE80211_TX_STATUS_MSG:
299 skb->pkt_type = 0; 292 skb->pkt_type = 0;
300 ieee80211_tx_status(local_to_hw(local), skb); 293 ieee80211_tx_status(&local->hw, skb);
301 break; 294 break;
302 case IEEE80211_EOSP_MSG: 295 case IEEE80211_EOSP_MSG:
303 eosp_data = (void *)skb->cb; 296 eosp_data = (void *)skb->cb;
@@ -534,6 +527,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
534 int priv_size, i; 527 int priv_size, i;
535 struct wiphy *wiphy; 528 struct wiphy *wiphy;
536 529
530 if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove)))
531 return NULL;
532
537 /* Ensure 32-byte alignment of our private data and hw private data. 533 /* Ensure 32-byte alignment of our private data and hw private data.
538 * We use the wiphy priv data for both our ieee80211_local and for 534 * We use the wiphy priv data for both our ieee80211_local and for
539 * the driver's private data 535 * the driver's private data
@@ -599,8 +595,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
599 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; 595 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
600 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; 596 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
601 local->user_power_level = -1; 597 local->user_power_level = -1;
602 local->uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES;
603 local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN;
604 wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask; 598 wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask;
605 599
606 INIT_LIST_HEAD(&local->interfaces); 600 INIT_LIST_HEAD(&local->interfaces);
@@ -672,7 +666,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
672 666
673 ieee80211_hw_roc_setup(local); 667 ieee80211_hw_roc_setup(local);
674 668
675 return local_to_hw(local); 669 return &local->hw;
676} 670}
677EXPORT_SYMBOL(ieee80211_alloc_hw); 671EXPORT_SYMBOL(ieee80211_alloc_hw);
678 672
@@ -701,6 +695,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
701 ) 695 )
702 return -EINVAL; 696 return -EINVAL;
703 697
698 if ((hw->flags & IEEE80211_HW_SCAN_WHILE_IDLE) && !local->ops->hw_scan)
699 return -EINVAL;
700
704 if (hw->max_report_rates == 0) 701 if (hw->max_report_rates == 0)
705 hw->max_report_rates = hw->max_rates; 702 hw->max_report_rates = hw->max_rates;
706 703
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index c707c8bf6d2c..e5fbb7cf3562 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -204,7 +204,7 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
204 kmem_cache_free(rm_cache, p); 204 kmem_cache_free(rm_cache, p);
205 --entries; 205 --entries;
206 } else if ((seqnum == p->seqnum) && 206 } else if ((seqnum == p->seqnum) &&
207 (memcmp(sa, p->sa, ETH_ALEN) == 0)) 207 (compare_ether_addr(sa, p->sa) == 0))
208 return -1; 208 return -1;
209 } 209 }
210 210
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index bd14bd26a2b6..8d53b71378e3 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -13,7 +13,6 @@
13 13
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/jhash.h> 15#include <linux/jhash.h>
16#include <asm/unaligned.h>
17#include "ieee80211_i.h" 16#include "ieee80211_i.h"
18 17
19 18
@@ -86,6 +85,8 @@ enum mesh_deferred_task_flags {
86 * @state_lock: mesh path state lock used to protect changes to the 85 * @state_lock: mesh path state lock used to protect changes to the
87 * mpath itself. No need to take this lock when adding or removing 86 * mpath itself. No need to take this lock when adding or removing
88 * an mpath to a hash bucket on a path table. 87 * an mpath to a hash bucket on a path table.
88 * @rann_snd_addr: the RANN sender address
89 * @is_root: the destination station of this path is a root node
89 * @is_gate: the destination station of this path is a mesh gate 90 * @is_gate: the destination station of this path is a mesh gate
90 * 91 *
91 * 92 *
@@ -110,6 +111,8 @@ struct mesh_path {
110 u8 discovery_retries; 111 u8 discovery_retries;
111 enum mesh_path_flags flags; 112 enum mesh_path_flags flags;
112 spinlock_t state_lock; 113 spinlock_t state_lock;
114 u8 rann_snd_addr[ETH_ALEN];
115 bool is_root;
113 bool is_gate; 116 bool is_gate;
114}; 117};
115 118
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 54df1b2bafd2..1c6f3d02aebf 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -8,6 +8,8 @@
8 */ 8 */
9 9
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include <linux/etherdevice.h>
12#include <asm/unaligned.h>
11#include "wme.h" 13#include "wme.h"
12#include "mesh.h" 14#include "mesh.h"
13 15
@@ -322,6 +324,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
322 struct sta_info *sta) 324 struct sta_info *sta)
323{ 325{
324 struct ieee80211_supported_band *sband; 326 struct ieee80211_supported_band *sband;
327 struct rate_info rinfo;
325 /* This should be adjusted for each device */ 328 /* This should be adjusted for each device */
326 int device_constant = 1 << ARITH_SHIFT; 329 int device_constant = 1 << ARITH_SHIFT;
327 int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT; 330 int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT;
@@ -335,7 +338,9 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
335 if (sta->fail_avg >= 100) 338 if (sta->fail_avg >= 100)
336 return MAX_METRIC; 339 return MAX_METRIC;
337 340
338 if (sta->last_tx_rate.flags & IEEE80211_TX_RC_MCS) 341 sta_set_rate_info_tx(sta, &sta->last_tx_rate, &rinfo);
342 rate = cfg80211_calculate_bitrate(&rinfo);
343 if (WARN_ON(!rate))
339 return MAX_METRIC; 344 return MAX_METRIC;
340 345
341 err = (sta->fail_avg << ARITH_SHIFT) / 100; 346 err = (sta->fail_avg << ARITH_SHIFT) / 100;
@@ -343,7 +348,6 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
343 /* bitrate is in units of 100 Kbps, while we need rate in units of 348 /* bitrate is in units of 100 Kbps, while we need rate in units of
344 * 1Mbps. This will be corrected on tx_time computation. 349 * 1Mbps. This will be corrected on tx_time computation.
345 */ 350 */
346 rate = sband->bitrates[sta->last_tx_rate.idx].bitrate;
347 tx_time = (device_constant + 10 * test_frame_len / rate); 351 tx_time = (device_constant + 10 * test_frame_len / rate);
348 estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err)); 352 estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
349 result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT) ; 353 result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT) ;
@@ -418,7 +422,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
418 new_metric = MAX_METRIC; 422 new_metric = MAX_METRIC;
419 exp_time = TU_TO_EXP_TIME(orig_lifetime); 423 exp_time = TU_TO_EXP_TIME(orig_lifetime);
420 424
421 if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0) { 425 if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0) {
422 /* This MP is the originator, we are not interested in this 426 /* This MP is the originator, we are not interested in this
423 * frame, except for updating transmitter's path info. 427 * frame, except for updating transmitter's path info.
424 */ 428 */
@@ -468,7 +472,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
468 472
469 /* Update and check transmitter routing info */ 473 /* Update and check transmitter routing info */
470 ta = mgmt->sa; 474 ta = mgmt->sa;
471 if (memcmp(orig_addr, ta, ETH_ALEN) == 0) 475 if (compare_ether_addr(orig_addr, ta) == 0)
472 fresh_info = false; 476 fresh_info = false;
473 else { 477 else {
474 fresh_info = true; 478 fresh_info = true;
@@ -512,8 +516,9 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
512 u8 *preq_elem, u32 metric) 516 u8 *preq_elem, u32 metric)
513{ 517{
514 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 518 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
515 struct mesh_path *mpath; 519 struct mesh_path *mpath = NULL;
516 u8 *target_addr, *orig_addr; 520 u8 *target_addr, *orig_addr;
521 const u8 *da;
517 u8 target_flags, ttl; 522 u8 target_flags, ttl;
518 u32 orig_sn, target_sn, lifetime; 523 u32 orig_sn, target_sn, lifetime;
519 bool reply = false; 524 bool reply = false;
@@ -528,7 +533,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
528 533
529 mhwmp_dbg("received PREQ from %pM", orig_addr); 534 mhwmp_dbg("received PREQ from %pM", orig_addr);
530 535
531 if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0) { 536 if (compare_ether_addr(target_addr, sdata->vif.addr) == 0) {
532 mhwmp_dbg("PREQ is for us"); 537 mhwmp_dbg("PREQ is for us");
533 forward = false; 538 forward = false;
534 reply = true; 539 reply = true;
@@ -575,7 +580,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
575 ifmsh->mshstats.dropped_frames_ttl++; 580 ifmsh->mshstats.dropped_frames_ttl++;
576 } 581 }
577 582
578 if (forward) { 583 if (forward && ifmsh->mshcfg.dot11MeshForwarding) {
579 u32 preq_id; 584 u32 preq_id;
580 u8 hopcount, flags; 585 u8 hopcount, flags;
581 586
@@ -590,9 +595,11 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
590 flags = PREQ_IE_FLAGS(preq_elem); 595 flags = PREQ_IE_FLAGS(preq_elem);
591 preq_id = PREQ_IE_PREQ_ID(preq_elem); 596 preq_id = PREQ_IE_PREQ_ID(preq_elem);
592 hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1; 597 hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
598 da = (mpath && mpath->is_root) ?
599 mpath->rann_snd_addr : broadcast_addr;
593 mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr, 600 mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
594 cpu_to_le32(orig_sn), target_flags, target_addr, 601 cpu_to_le32(orig_sn), target_flags, target_addr,
595 cpu_to_le32(target_sn), broadcast_addr, 602 cpu_to_le32(target_sn), da,
596 hopcount, ttl, cpu_to_le32(lifetime), 603 hopcount, ttl, cpu_to_le32(lifetime),
597 cpu_to_le32(metric), cpu_to_le32(preq_id), 604 cpu_to_le32(metric), cpu_to_le32(preq_id),
598 sdata); 605 sdata);
@@ -614,6 +621,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
614 struct ieee80211_mgmt *mgmt, 621 struct ieee80211_mgmt *mgmt,
615 u8 *prep_elem, u32 metric) 622 u8 *prep_elem, u32 metric)
616{ 623{
624 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
617 struct mesh_path *mpath; 625 struct mesh_path *mpath;
618 u8 *target_addr, *orig_addr; 626 u8 *target_addr, *orig_addr;
619 u8 ttl, hopcount, flags; 627 u8 ttl, hopcount, flags;
@@ -623,10 +631,13 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
623 mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem)); 631 mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem));
624 632
625 orig_addr = PREP_IE_ORIG_ADDR(prep_elem); 633 orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
626 if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0) 634 if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0)
627 /* destination, no forwarding required */ 635 /* destination, no forwarding required */
628 return; 636 return;
629 637
638 if (!ifmsh->mshcfg.dot11MeshForwarding)
639 return;
640
630 ttl = PREP_IE_TTL(prep_elem); 641 ttl = PREP_IE_TTL(prep_elem);
631 if (ttl <= 1) { 642 if (ttl <= 1) {
632 sdata->u.mesh.mshstats.dropped_frames_ttl++; 643 sdata->u.mesh.mshstats.dropped_frames_ttl++;
@@ -693,21 +704,26 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
693 rcu_read_lock(); 704 rcu_read_lock();
694 mpath = mesh_path_lookup(target_addr, sdata); 705 mpath = mesh_path_lookup(target_addr, sdata);
695 if (mpath) { 706 if (mpath) {
707 struct sta_info *sta;
708
696 spin_lock_bh(&mpath->state_lock); 709 spin_lock_bh(&mpath->state_lock);
710 sta = next_hop_deref_protected(mpath);
697 if (mpath->flags & MESH_PATH_ACTIVE && 711 if (mpath->flags & MESH_PATH_ACTIVE &&
698 memcmp(ta, next_hop_deref_protected(mpath)->sta.addr, 712 compare_ether_addr(ta, sta->sta.addr) == 0 &&
699 ETH_ALEN) == 0 &&
700 (!(mpath->flags & MESH_PATH_SN_VALID) || 713 (!(mpath->flags & MESH_PATH_SN_VALID) ||
701 SN_GT(target_sn, mpath->sn))) { 714 SN_GT(target_sn, mpath->sn))) {
702 mpath->flags &= ~MESH_PATH_ACTIVE; 715 mpath->flags &= ~MESH_PATH_ACTIVE;
703 mpath->sn = target_sn; 716 mpath->sn = target_sn;
704 spin_unlock_bh(&mpath->state_lock); 717 spin_unlock_bh(&mpath->state_lock);
718 if (!ifmsh->mshcfg.dot11MeshForwarding)
719 goto endperr;
705 mesh_path_error_tx(ttl, target_addr, cpu_to_le32(target_sn), 720 mesh_path_error_tx(ttl, target_addr, cpu_to_le32(target_sn),
706 cpu_to_le16(target_rcode), 721 cpu_to_le16(target_rcode),
707 broadcast_addr, sdata); 722 broadcast_addr, sdata);
708 } else 723 } else
709 spin_unlock_bh(&mpath->state_lock); 724 spin_unlock_bh(&mpath->state_lock);
710 } 725 }
726endperr:
711 rcu_read_unlock(); 727 rcu_read_unlock();
712} 728}
713 729
@@ -738,11 +754,11 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
738 metric = rann->rann_metric; 754 metric = rann->rann_metric;
739 755
740 /* Ignore our own RANNs */ 756 /* Ignore our own RANNs */
741 if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0) 757 if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0)
742 return; 758 return;
743 759
744 mhwmp_dbg("received RANN from %pM (is_gate=%d)", orig_addr, 760 mhwmp_dbg("received RANN from %pM via neighbour %pM (is_gate=%d)",
745 root_is_gate); 761 orig_addr, mgmt->sa, root_is_gate);
746 762
747 rcu_read_lock(); 763 rcu_read_lock();
748 mpath = mesh_path_lookup(orig_addr, sdata); 764 mpath = mesh_path_lookup(orig_addr, sdata);
@@ -764,7 +780,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
764 mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH); 780 mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
765 } 781 }
766 782
767 if (mpath->sn < orig_sn) { 783 if (mpath->sn < orig_sn && ifmsh->mshcfg.dot11MeshForwarding) {
768 mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr, 784 mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
769 cpu_to_le32(orig_sn), 785 cpu_to_le32(orig_sn),
770 0, NULL, 0, broadcast_addr, 786 0, NULL, 0, broadcast_addr,
@@ -773,6 +789,11 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
773 0, sdata); 789 0, sdata);
774 mpath->sn = orig_sn; 790 mpath->sn = orig_sn;
775 } 791 }
792
793 /* Using individually addressed PREQ for root node */
794 memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
795 mpath->is_root = true;
796
776 if (root_is_gate) 797 if (root_is_gate)
777 mesh_path_add_gate(mpath); 798 mesh_path_add_gate(mpath);
778 799
@@ -908,6 +929,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
908 struct mesh_preq_queue *preq_node; 929 struct mesh_preq_queue *preq_node;
909 struct mesh_path *mpath; 930 struct mesh_path *mpath;
910 u8 ttl, target_flags; 931 u8 ttl, target_flags;
932 const u8 *da;
911 u32 lifetime; 933 u32 lifetime;
912 934
913 spin_lock_bh(&ifmsh->mesh_preq_queue_lock); 935 spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
@@ -970,9 +992,10 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
970 target_flags = MP_F_RF; 992 target_flags = MP_F_RF;
971 993
972 spin_unlock_bh(&mpath->state_lock); 994 spin_unlock_bh(&mpath->state_lock);
995 da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr;
973 mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, 996 mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr,
974 cpu_to_le32(ifmsh->sn), target_flags, mpath->dst, 997 cpu_to_le32(ifmsh->sn), target_flags, mpath->dst,
975 cpu_to_le32(mpath->sn), broadcast_addr, 0, 998 cpu_to_le32(mpath->sn), da, 0,
976 ttl, cpu_to_le32(lifetime), 0, 999 ttl, cpu_to_le32(lifetime), 0,
977 cpu_to_le32(ifmsh->preq_id++), sdata); 1000 cpu_to_le32(ifmsh->preq_id++), sdata);
978 mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); 1001 mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
@@ -1063,7 +1086,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
1063 if (time_after(jiffies, 1086 if (time_after(jiffies,
1064 mpath->exp_time - 1087 mpath->exp_time -
1065 msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) && 1088 msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
1066 !memcmp(sdata->vif.addr, hdr->addr4, ETH_ALEN) && 1089 !compare_ether_addr(sdata->vif.addr, hdr->addr4) &&
1067 !(mpath->flags & MESH_PATH_RESOLVING) && 1090 !(mpath->flags & MESH_PATH_RESOLVING) &&
1068 !(mpath->flags & MESH_PATH_FIXED)) 1091 !(mpath->flags & MESH_PATH_FIXED))
1069 mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH); 1092 mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 30420bc1f699..49aaefd99635 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -336,7 +336,7 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
336} 336}
337 337
338 338
339static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst, 339static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
340 struct ieee80211_sub_if_data *sdata) 340 struct ieee80211_sub_if_data *sdata)
341{ 341{
342 struct mesh_path *mpath; 342 struct mesh_path *mpath;
@@ -348,7 +348,7 @@ static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst,
348 hlist_for_each_entry_rcu(node, n, bucket, list) { 348 hlist_for_each_entry_rcu(node, n, bucket, list) {
349 mpath = node->mpath; 349 mpath = node->mpath;
350 if (mpath->sdata == sdata && 350 if (mpath->sdata == sdata &&
351 memcmp(dst, mpath->dst, ETH_ALEN) == 0) { 351 compare_ether_addr(dst, mpath->dst) == 0) {
352 if (MPATH_EXPIRED(mpath)) { 352 if (MPATH_EXPIRED(mpath)) {
353 spin_lock_bh(&mpath->state_lock); 353 spin_lock_bh(&mpath->state_lock);
354 mpath->flags &= ~MESH_PATH_ACTIVE; 354 mpath->flags &= ~MESH_PATH_ACTIVE;
@@ -371,12 +371,12 @@ static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst,
371 */ 371 */
372struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) 372struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
373{ 373{
374 return path_lookup(rcu_dereference(mesh_paths), dst, sdata); 374 return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
375} 375}
376 376
377struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) 377struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
378{ 378{
379 return path_lookup(rcu_dereference(mpp_paths), dst, sdata); 379 return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
380} 380}
381 381
382 382
@@ -517,7 +517,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
517 int err = 0; 517 int err = 0;
518 u32 hash_idx; 518 u32 hash_idx;
519 519
520 if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0) 520 if (compare_ether_addr(dst, sdata->vif.addr) == 0)
521 /* never add ourselves as neighbours */ 521 /* never add ourselves as neighbours */
522 return -ENOTSUPP; 522 return -ENOTSUPP;
523 523
@@ -553,12 +553,13 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
553 hash_idx = mesh_table_hash(dst, sdata, tbl); 553 hash_idx = mesh_table_hash(dst, sdata, tbl);
554 bucket = &tbl->hash_buckets[hash_idx]; 554 bucket = &tbl->hash_buckets[hash_idx];
555 555
556 spin_lock_bh(&tbl->hashwlock[hash_idx]); 556 spin_lock(&tbl->hashwlock[hash_idx]);
557 557
558 err = -EEXIST; 558 err = -EEXIST;
559 hlist_for_each_entry(node, n, bucket, list) { 559 hlist_for_each_entry(node, n, bucket, list) {
560 mpath = node->mpath; 560 mpath = node->mpath;
561 if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0) 561 if (mpath->sdata == sdata &&
562 compare_ether_addr(dst, mpath->dst) == 0)
562 goto err_exists; 563 goto err_exists;
563 } 564 }
564 565
@@ -569,7 +570,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
569 570
570 mesh_paths_generation++; 571 mesh_paths_generation++;
571 572
572 spin_unlock_bh(&tbl->hashwlock[hash_idx]); 573 spin_unlock(&tbl->hashwlock[hash_idx]);
573 read_unlock_bh(&pathtbl_resize_lock); 574 read_unlock_bh(&pathtbl_resize_lock);
574 if (grow) { 575 if (grow) {
575 set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); 576 set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
@@ -578,7 +579,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
578 return 0; 579 return 0;
579 580
580err_exists: 581err_exists:
581 spin_unlock_bh(&tbl->hashwlock[hash_idx]); 582 spin_unlock(&tbl->hashwlock[hash_idx]);
582 read_unlock_bh(&pathtbl_resize_lock); 583 read_unlock_bh(&pathtbl_resize_lock);
583 kfree(new_node); 584 kfree(new_node);
584err_node_alloc: 585err_node_alloc:
@@ -649,7 +650,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
649 int err = 0; 650 int err = 0;
650 u32 hash_idx; 651 u32 hash_idx;
651 652
652 if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0) 653 if (compare_ether_addr(dst, sdata->vif.addr) == 0)
653 /* never add ourselves as neighbours */ 654 /* never add ourselves as neighbours */
654 return -ENOTSUPP; 655 return -ENOTSUPP;
655 656
@@ -681,12 +682,13 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
681 hash_idx = mesh_table_hash(dst, sdata, tbl); 682 hash_idx = mesh_table_hash(dst, sdata, tbl);
682 bucket = &tbl->hash_buckets[hash_idx]; 683 bucket = &tbl->hash_buckets[hash_idx];
683 684
684 spin_lock_bh(&tbl->hashwlock[hash_idx]); 685 spin_lock(&tbl->hashwlock[hash_idx]);
685 686
686 err = -EEXIST; 687 err = -EEXIST;
687 hlist_for_each_entry(node, n, bucket, list) { 688 hlist_for_each_entry(node, n, bucket, list) {
688 mpath = node->mpath; 689 mpath = node->mpath;
689 if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0) 690 if (mpath->sdata == sdata &&
691 compare_ether_addr(dst, mpath->dst) == 0)
690 goto err_exists; 692 goto err_exists;
691 } 693 }
692 694
@@ -695,7 +697,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
695 tbl->mean_chain_len * (tbl->hash_mask + 1)) 697 tbl->mean_chain_len * (tbl->hash_mask + 1))
696 grow = 1; 698 grow = 1;
697 699
698 spin_unlock_bh(&tbl->hashwlock[hash_idx]); 700 spin_unlock(&tbl->hashwlock[hash_idx]);
699 read_unlock_bh(&pathtbl_resize_lock); 701 read_unlock_bh(&pathtbl_resize_lock);
700 if (grow) { 702 if (grow) {
701 set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); 703 set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
@@ -704,7 +706,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
704 return 0; 706 return 0;
705 707
706err_exists: 708err_exists:
707 spin_unlock_bh(&tbl->hashwlock[hash_idx]); 709 spin_unlock(&tbl->hashwlock[hash_idx]);
708 read_unlock_bh(&pathtbl_resize_lock); 710 read_unlock_bh(&pathtbl_resize_lock);
709 kfree(new_node); 711 kfree(new_node);
710err_node_alloc: 712err_node_alloc:
@@ -803,9 +805,9 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
803 for_each_mesh_entry(tbl, p, node, i) { 805 for_each_mesh_entry(tbl, p, node, i) {
804 mpath = node->mpath; 806 mpath = node->mpath;
805 if (rcu_dereference(mpath->next_hop) == sta) { 807 if (rcu_dereference(mpath->next_hop) == sta) {
806 spin_lock_bh(&tbl->hashwlock[i]); 808 spin_lock(&tbl->hashwlock[i]);
807 __mesh_path_del(tbl, node); 809 __mesh_path_del(tbl, node);
808 spin_unlock_bh(&tbl->hashwlock[i]); 810 spin_unlock(&tbl->hashwlock[i]);
809 } 811 }
810 } 812 }
811 read_unlock_bh(&pathtbl_resize_lock); 813 read_unlock_bh(&pathtbl_resize_lock);
@@ -876,11 +878,11 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
876 hash_idx = mesh_table_hash(addr, sdata, tbl); 878 hash_idx = mesh_table_hash(addr, sdata, tbl);
877 bucket = &tbl->hash_buckets[hash_idx]; 879 bucket = &tbl->hash_buckets[hash_idx];
878 880
879 spin_lock_bh(&tbl->hashwlock[hash_idx]); 881 spin_lock(&tbl->hashwlock[hash_idx]);
880 hlist_for_each_entry(node, n, bucket, list) { 882 hlist_for_each_entry(node, n, bucket, list) {
881 mpath = node->mpath; 883 mpath = node->mpath;
882 if (mpath->sdata == sdata && 884 if (mpath->sdata == sdata &&
883 memcmp(addr, mpath->dst, ETH_ALEN) == 0) { 885 compare_ether_addr(addr, mpath->dst) == 0) {
884 __mesh_path_del(tbl, node); 886 __mesh_path_del(tbl, node);
885 goto enddel; 887 goto enddel;
886 } 888 }
@@ -889,7 +891,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
889 err = -ENXIO; 891 err = -ENXIO;
890enddel: 892enddel:
891 mesh_paths_generation++; 893 mesh_paths_generation++;
892 spin_unlock_bh(&tbl->hashwlock[hash_idx]); 894 spin_unlock(&tbl->hashwlock[hash_idx]);
893 read_unlock_bh(&pathtbl_resize_lock); 895 read_unlock_bh(&pathtbl_resize_lock);
894 return err; 896 return err;
895} 897}
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index a17251730b9e..4e53c4cbca9e 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -31,6 +31,12 @@
31#define dot11MeshHoldingTimeout(s) (s->u.mesh.mshcfg.dot11MeshHoldingTimeout) 31#define dot11MeshHoldingTimeout(s) (s->u.mesh.mshcfg.dot11MeshHoldingTimeout)
32#define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks) 32#define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks)
33 33
34/* We only need a valid sta if user configured a minimum rssi_threshold. */
35#define rssi_threshold_check(sta, sdata) \
36 (sdata->u.mesh.mshcfg.rssi_threshold == 0 ||\
37 (sta && (s8) -ewma_read(&sta->avg_signal) > \
38 sdata->u.mesh.mshcfg.rssi_threshold))
39
34enum plink_event { 40enum plink_event {
35 PLINK_UNDEFINED, 41 PLINK_UNDEFINED,
36 OPN_ACPT, 42 OPN_ACPT,
@@ -96,9 +102,9 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
96 if (!sta) 102 if (!sta)
97 return NULL; 103 return NULL;
98 104
99 sta_info_move_state(sta, IEEE80211_STA_AUTH); 105 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
100 sta_info_move_state(sta, IEEE80211_STA_ASSOC); 106 sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
101 sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED); 107 sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED);
102 108
103 set_sta_flag(sta, WLAN_STA_WME); 109 set_sta_flag(sta, WLAN_STA_WME);
104 110
@@ -301,7 +307,8 @@ void mesh_neighbour_update(u8 *hw_addr, u32 rates,
301 if (mesh_peer_accepts_plinks(elems) && 307 if (mesh_peer_accepts_plinks(elems) &&
302 sta->plink_state == NL80211_PLINK_LISTEN && 308 sta->plink_state == NL80211_PLINK_LISTEN &&
303 sdata->u.mesh.accepting_plinks && 309 sdata->u.mesh.accepting_plinks &&
304 sdata->u.mesh.mshcfg.auto_open_plinks) 310 sdata->u.mesh.mshcfg.auto_open_plinks &&
311 rssi_threshold_check(sta, sdata))
305 mesh_plink_open(sta); 312 mesh_plink_open(sta);
306 313
307 rcu_read_unlock(); 314 rcu_read_unlock();
@@ -531,6 +538,14 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
531 return; 538 return;
532 } 539 }
533 540
541 if (ftype == WLAN_SP_MESH_PEERING_OPEN &&
542 !rssi_threshold_check(sta, sdata)) {
543 mpl_dbg("Mesh plink: %pM does not meet rssi threshold\n",
544 mgmt->sa);
545 rcu_read_unlock();
546 return;
547 }
548
534 if (sta && !test_sta_flag(sta, WLAN_STA_AUTH)) { 549 if (sta && !test_sta_flag(sta, WLAN_STA_AUTH)) {
535 mpl_dbg("Mesh plink: Action frame from non-authed peer\n"); 550 mpl_dbg("Mesh plink: Action frame from non-authed peer\n");
536 rcu_read_unlock(); 551 rcu_read_unlock();
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 295be92f7c77..576fb25456dd 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -30,6 +30,12 @@
30#include "rate.h" 30#include "rate.h"
31#include "led.h" 31#include "led.h"
32 32
33#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
34#define IEEE80211_AUTH_MAX_TRIES 3
35#define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5)
36#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
37#define IEEE80211_ASSOC_MAX_TRIES 3
38
33static int max_nullfunc_tries = 2; 39static int max_nullfunc_tries = 2;
34module_param(max_nullfunc_tries, int, 0644); 40module_param(max_nullfunc_tries, int, 0644);
35MODULE_PARM_DESC(max_nullfunc_tries, 41MODULE_PARM_DESC(max_nullfunc_tries,
@@ -82,6 +88,8 @@ MODULE_PARM_DESC(probe_wait_ms,
82#define TMR_RUNNING_TIMER 0 88#define TMR_RUNNING_TIMER 0
83#define TMR_RUNNING_CHANSW 1 89#define TMR_RUNNING_CHANSW 1
84 90
91#define DEAUTH_DISASSOC_LEN (24 /* hdr */ + 2 /* reason */)
92
85/* 93/*
86 * All cfg80211 functions have to be called outside a locked 94 * All cfg80211 functions have to be called outside a locked
87 * section so that they can acquire a lock themselves... This 95 * section so that they can acquire a lock themselves... This
@@ -97,6 +105,15 @@ enum rx_mgmt_action {
97 105
98 /* caller must call cfg80211_send_disassoc() */ 106 /* caller must call cfg80211_send_disassoc() */
99 RX_MGMT_CFG80211_DISASSOC, 107 RX_MGMT_CFG80211_DISASSOC,
108
109 /* caller must call cfg80211_send_rx_auth() */
110 RX_MGMT_CFG80211_RX_AUTH,
111
112 /* caller must call cfg80211_send_rx_assoc() */
113 RX_MGMT_CFG80211_RX_ASSOC,
114
115 /* caller must call cfg80211_send_assoc_timeout() */
116 RX_MGMT_CFG80211_ASSOC_TIMEOUT,
100}; 117};
101 118
102/* utils */ 119/* utils */
@@ -115,8 +132,7 @@ static inline void ASSERT_MGD_MTX(struct ieee80211_if_managed *ifmgd)
115 * has happened -- the work that runs from this timer will 132 * has happened -- the work that runs from this timer will
116 * do that. 133 * do that.
117 */ 134 */
118static void run_again(struct ieee80211_if_managed *ifmgd, 135static void run_again(struct ieee80211_if_managed *ifmgd, unsigned long timeout)
119 unsigned long timeout)
120{ 136{
121 ASSERT_MGD_MTX(ifmgd); 137 ASSERT_MGD_MTX(ifmgd);
122 138
@@ -127,7 +143,7 @@ static void run_again(struct ieee80211_if_managed *ifmgd,
127 143
128void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata) 144void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata)
129{ 145{
130 if (sdata->local->hw.flags & IEEE80211_HW_BEACON_FILTER) 146 if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)
131 return; 147 return;
132 148
133 mod_timer(&sdata->u.mgd.bcn_mon_timer, 149 mod_timer(&sdata->u.mgd.bcn_mon_timer,
@@ -173,40 +189,35 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
173 u16 ht_opmode; 189 u16 ht_opmode;
174 bool enable_ht = true; 190 bool enable_ht = true;
175 enum nl80211_channel_type prev_chantype; 191 enum nl80211_channel_type prev_chantype;
176 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; 192 enum nl80211_channel_type rx_channel_type = NL80211_CHAN_NO_HT;
193 enum nl80211_channel_type tx_channel_type;
177 194
178 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 195 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
179
180 prev_chantype = sdata->vif.bss_conf.channel_type; 196 prev_chantype = sdata->vif.bss_conf.channel_type;
181 197
182 /* HT is not supported */
183 if (!sband->ht_cap.ht_supported)
184 enable_ht = false;
185 198
186 if (enable_ht) { 199 hti_cfreq = ieee80211_channel_to_frequency(hti->control_chan,
187 hti_cfreq = ieee80211_channel_to_frequency(hti->control_chan, 200 sband->band);
188 sband->band); 201 /* check that channel matches the right operating channel */
189 /* check that channel matches the right operating channel */ 202 if (local->hw.conf.channel->center_freq != hti_cfreq) {
190 if (local->hw.conf.channel->center_freq != hti_cfreq) { 203 /* Some APs mess this up, evidently.
191 /* Some APs mess this up, evidently. 204 * Netgear WNDR3700 sometimes reports 4 higher than
192 * Netgear WNDR3700 sometimes reports 4 higher than 205 * the actual channel, for instance.
193 * the actual channel, for instance. 206 */
194 */ 207 printk(KERN_DEBUG
195 printk(KERN_DEBUG 208 "%s: Wrong control channel in association"
196 "%s: Wrong control channel in association" 209 " response: configured center-freq: %d"
197 " response: configured center-freq: %d" 210 " hti-cfreq: %d hti->control_chan: %d"
198 " hti-cfreq: %d hti->control_chan: %d" 211 " band: %d. Disabling HT.\n",
199 " band: %d. Disabling HT.\n", 212 sdata->name,
200 sdata->name, 213 local->hw.conf.channel->center_freq,
201 local->hw.conf.channel->center_freq, 214 hti_cfreq, hti->control_chan,
202 hti_cfreq, hti->control_chan, 215 sband->band);
203 sband->band); 216 enable_ht = false;
204 enable_ht = false;
205 }
206 } 217 }
207 218
208 if (enable_ht) { 219 if (enable_ht) {
209 channel_type = NL80211_CHAN_HT20; 220 rx_channel_type = NL80211_CHAN_HT20;
210 221
211 if (!(ap_ht_cap_flags & IEEE80211_HT_CAP_40MHZ_INTOLERANT) && 222 if (!(ap_ht_cap_flags & IEEE80211_HT_CAP_40MHZ_INTOLERANT) &&
212 !ieee80111_cfg_override_disables_ht40(sdata) && 223 !ieee80111_cfg_override_disables_ht40(sdata) &&
@@ -214,29 +225,28 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
214 (hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) { 225 (hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) {
215 switch(hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { 226 switch(hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
216 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 227 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
217 if (!(local->hw.conf.channel->flags & 228 rx_channel_type = NL80211_CHAN_HT40PLUS;
218 IEEE80211_CHAN_NO_HT40PLUS))
219 channel_type = NL80211_CHAN_HT40PLUS;
220 break; 229 break;
221 case IEEE80211_HT_PARAM_CHA_SEC_BELOW: 230 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
222 if (!(local->hw.conf.channel->flags & 231 rx_channel_type = NL80211_CHAN_HT40MINUS;
223 IEEE80211_CHAN_NO_HT40MINUS))
224 channel_type = NL80211_CHAN_HT40MINUS;
225 break; 232 break;
226 } 233 }
227 } 234 }
228 } 235 }
229 236
237 tx_channel_type = ieee80211_get_tx_channel_type(local, rx_channel_type);
238
230 if (local->tmp_channel) 239 if (local->tmp_channel)
231 local->tmp_channel_type = channel_type; 240 local->tmp_channel_type = rx_channel_type;
232 241
233 if (!ieee80211_set_channel_type(local, sdata, channel_type)) { 242 if (!ieee80211_set_channel_type(local, sdata, rx_channel_type)) {
234 /* can only fail due to HT40+/- mismatch */ 243 /* can only fail due to HT40+/- mismatch */
235 channel_type = NL80211_CHAN_HT20; 244 rx_channel_type = NL80211_CHAN_HT20;
236 WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type)); 245 WARN_ON(!ieee80211_set_channel_type(local, sdata,
246 rx_channel_type));
237 } 247 }
238 248
239 if (beacon_htcap_ie && (prev_chantype != channel_type)) { 249 if (beacon_htcap_ie && (prev_chantype != rx_channel_type)) {
240 /* 250 /*
241 * Whenever the AP announces the HT mode change that can be 251 * Whenever the AP announces the HT mode change that can be
242 * 40MHz intolerant or etc., it would be safer to stop tx 252 * 40MHz intolerant or etc., it would be safer to stop tx
@@ -254,13 +264,13 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
254 /* channel_type change automatically detected */ 264 /* channel_type change automatically detected */
255 ieee80211_hw_config(local, 0); 265 ieee80211_hw_config(local, 0);
256 266
257 if (prev_chantype != channel_type) { 267 if (prev_chantype != tx_channel_type) {
258 rcu_read_lock(); 268 rcu_read_lock();
259 sta = sta_info_get(sdata, bssid); 269 sta = sta_info_get(sdata, bssid);
260 if (sta) 270 if (sta)
261 rate_control_rate_update(local, sband, sta, 271 rate_control_rate_update(local, sband, sta,
262 IEEE80211_RC_HT_CHANGED, 272 IEEE80211_RC_HT_CHANGED,
263 channel_type); 273 tx_channel_type);
264 rcu_read_unlock(); 274 rcu_read_unlock();
265 275
266 if (beacon_htcap_ie) 276 if (beacon_htcap_ie)
@@ -273,7 +283,7 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
273 /* if bss configuration changed store the new one */ 283 /* if bss configuration changed store the new one */
274 if (sdata->ht_opmode_valid != enable_ht || 284 if (sdata->ht_opmode_valid != enable_ht ||
275 sdata->vif.bss_conf.ht_operation_mode != ht_opmode || 285 sdata->vif.bss_conf.ht_operation_mode != ht_opmode ||
276 prev_chantype != channel_type) { 286 prev_chantype != rx_channel_type) {
277 changed |= BSS_CHANGED_HT; 287 changed |= BSS_CHANGED_HT;
278 sdata->vif.bss_conf.ht_operation_mode = ht_opmode; 288 sdata->vif.bss_conf.ht_operation_mode = ht_opmode;
279 sdata->ht_opmode_valid = enable_ht; 289 sdata->ht_opmode_valid = enable_ht;
@@ -284,48 +294,351 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
284 294
285/* frame sending functions */ 295/* frame sending functions */
286 296
287static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, 297static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
288 const u8 *bssid, u16 stype, u16 reason, 298 struct ieee80211_supported_band *sband,
289 void *cookie, bool send_frame) 299 u32 *rates)
300{
301 int i, j, count;
302 *rates = 0;
303 count = 0;
304 for (i = 0; i < supp_rates_len; i++) {
305 int rate = (supp_rates[i] & 0x7F) * 5;
306
307 for (j = 0; j < sband->n_bitrates; j++)
308 if (sband->bitrates[j].bitrate == rate) {
309 *rates |= BIT(j);
310 count++;
311 break;
312 }
313 }
314
315 return count;
316}
317
318static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
319 struct sk_buff *skb, const u8 *ht_info_ie,
320 struct ieee80211_supported_band *sband,
321 struct ieee80211_channel *channel,
322 enum ieee80211_smps_mode smps)
323{
324 struct ieee80211_ht_info *ht_info;
325 u8 *pos;
326 u32 flags = channel->flags;
327 u16 cap;
328 struct ieee80211_sta_ht_cap ht_cap;
329
330 BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap));
331
332 if (!ht_info_ie)
333 return;
334
335 if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info))
336 return;
337
338 memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
339 ieee80211_apply_htcap_overrides(sdata, &ht_cap);
340
341 ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2);
342
343 /* determine capability flags */
344 cap = ht_cap.cap;
345
346 switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
347 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
348 if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
349 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
350 cap &= ~IEEE80211_HT_CAP_SGI_40;
351 }
352 break;
353 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
354 if (flags & IEEE80211_CHAN_NO_HT40MINUS) {
355 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
356 cap &= ~IEEE80211_HT_CAP_SGI_40;
357 }
358 break;
359 }
360
361 /* set SM PS mode properly */
362 cap &= ~IEEE80211_HT_CAP_SM_PS;
363 switch (smps) {
364 case IEEE80211_SMPS_AUTOMATIC:
365 case IEEE80211_SMPS_NUM_MODES:
366 WARN_ON(1);
367 case IEEE80211_SMPS_OFF:
368 cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
369 IEEE80211_HT_CAP_SM_PS_SHIFT;
370 break;
371 case IEEE80211_SMPS_STATIC:
372 cap |= WLAN_HT_CAP_SM_PS_STATIC <<
373 IEEE80211_HT_CAP_SM_PS_SHIFT;
374 break;
375 case IEEE80211_SMPS_DYNAMIC:
376 cap |= WLAN_HT_CAP_SM_PS_DYNAMIC <<
377 IEEE80211_HT_CAP_SM_PS_SHIFT;
378 break;
379 }
380
381 /* reserve and fill IE */
382 pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
383 ieee80211_ie_build_ht_cap(pos, &ht_cap, cap);
384}
385
386static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
290{ 387{
291 struct ieee80211_local *local = sdata->local; 388 struct ieee80211_local *local = sdata->local;
292 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 389 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
390 struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
293 struct sk_buff *skb; 391 struct sk_buff *skb;
294 struct ieee80211_mgmt *mgmt; 392 struct ieee80211_mgmt *mgmt;
393 u8 *pos, qos_info;
394 size_t offset = 0, noffset;
395 int i, count, rates_len, supp_rates_len;
396 u16 capab;
397 struct ieee80211_supported_band *sband;
398 u32 rates = 0;
399
400 lockdep_assert_held(&ifmgd->mtx);
401
402 sband = local->hw.wiphy->bands[local->oper_channel->band];
295 403
296 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); 404 if (assoc_data->supp_rates_len) {
405 /*
406 * Get all rates supported by the device and the AP as
407 * some APs don't like getting a superset of their rates
408 * in the association request (e.g. D-Link DAP 1353 in
409 * b-only mode)...
410 */
411 rates_len = ieee80211_compatible_rates(assoc_data->supp_rates,
412 assoc_data->supp_rates_len,
413 sband, &rates);
414 } else {
415 /*
416 * In case AP not provide any supported rates information
417 * before association, we send information element(s) with
418 * all rates that we support.
419 */
420 rates = ~0;
421 rates_len = sband->n_bitrates;
422 }
423
424 skb = alloc_skb(local->hw.extra_tx_headroom +
425 sizeof(*mgmt) + /* bit too much but doesn't matter */
426 2 + assoc_data->ssid_len + /* SSID */
427 4 + rates_len + /* (extended) rates */
428 4 + /* power capability */
429 2 + 2 * sband->n_channels + /* supported channels */
430 2 + sizeof(struct ieee80211_ht_cap) + /* HT */
431 assoc_data->ie_len + /* extra IEs */
432 9, /* WMM */
433 GFP_KERNEL);
297 if (!skb) 434 if (!skb)
298 return; 435 return;
299 436
300 skb_reserve(skb, local->hw.extra_tx_headroom); 437 skb_reserve(skb, local->hw.extra_tx_headroom);
301 438
439 capab = WLAN_CAPABILITY_ESS;
440
441 if (sband->band == IEEE80211_BAND_2GHZ) {
442 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
443 capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
444 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
445 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
446 }
447
448 if (assoc_data->capability & WLAN_CAPABILITY_PRIVACY)
449 capab |= WLAN_CAPABILITY_PRIVACY;
450
451 if ((assoc_data->capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
452 (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
453 capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
454
302 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 455 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
303 memset(mgmt, 0, 24); 456 memset(mgmt, 0, 24);
457 memcpy(mgmt->da, assoc_data->bss->bssid, ETH_ALEN);
458 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
459 memcpy(mgmt->bssid, assoc_data->bss->bssid, ETH_ALEN);
460
461 if (!is_zero_ether_addr(assoc_data->prev_bssid)) {
462 skb_put(skb, 10);
463 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
464 IEEE80211_STYPE_REASSOC_REQ);
465 mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
466 mgmt->u.reassoc_req.listen_interval =
467 cpu_to_le16(local->hw.conf.listen_interval);
468 memcpy(mgmt->u.reassoc_req.current_ap, assoc_data->prev_bssid,
469 ETH_ALEN);
470 } else {
471 skb_put(skb, 4);
472 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
473 IEEE80211_STYPE_ASSOC_REQ);
474 mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
475 mgmt->u.assoc_req.listen_interval =
476 cpu_to_le16(local->hw.conf.listen_interval);
477 }
478
479 /* SSID */
480 pos = skb_put(skb, 2 + assoc_data->ssid_len);
481 *pos++ = WLAN_EID_SSID;
482 *pos++ = assoc_data->ssid_len;
483 memcpy(pos, assoc_data->ssid, assoc_data->ssid_len);
484
485 /* add all rates which were marked to be used above */
486 supp_rates_len = rates_len;
487 if (supp_rates_len > 8)
488 supp_rates_len = 8;
489
490 pos = skb_put(skb, supp_rates_len + 2);
491 *pos++ = WLAN_EID_SUPP_RATES;
492 *pos++ = supp_rates_len;
493
494 count = 0;
495 for (i = 0; i < sband->n_bitrates; i++) {
496 if (BIT(i) & rates) {
497 int rate = sband->bitrates[i].bitrate;
498 *pos++ = (u8) (rate / 5);
499 if (++count == 8)
500 break;
501 }
502 }
503
504 if (rates_len > count) {
505 pos = skb_put(skb, rates_len - count + 2);
506 *pos++ = WLAN_EID_EXT_SUPP_RATES;
507 *pos++ = rates_len - count;
508
509 for (i++; i < sband->n_bitrates; i++) {
510 if (BIT(i) & rates) {
511 int rate = sband->bitrates[i].bitrate;
512 *pos++ = (u8) (rate / 5);
513 }
514 }
515 }
516
517 if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) {
518 /* 1. power capabilities */
519 pos = skb_put(skb, 4);
520 *pos++ = WLAN_EID_PWR_CAPABILITY;
521 *pos++ = 2;
522 *pos++ = 0; /* min tx power */
523 *pos++ = local->oper_channel->max_power; /* max tx power */
524
525 /* 2. supported channels */
526 /* TODO: get this in reg domain format */
527 pos = skb_put(skb, 2 * sband->n_channels + 2);
528 *pos++ = WLAN_EID_SUPPORTED_CHANNELS;
529 *pos++ = 2 * sband->n_channels;
530 for (i = 0; i < sband->n_channels; i++) {
531 *pos++ = ieee80211_frequency_to_channel(
532 sband->channels[i].center_freq);
533 *pos++ = 1; /* one channel in the subband*/
534 }
535 }
536
537 /* if present, add any custom IEs that go before HT */
538 if (assoc_data->ie_len && assoc_data->ie) {
539 static const u8 before_ht[] = {
540 WLAN_EID_SSID,
541 WLAN_EID_SUPP_RATES,
542 WLAN_EID_EXT_SUPP_RATES,
543 WLAN_EID_PWR_CAPABILITY,
544 WLAN_EID_SUPPORTED_CHANNELS,
545 WLAN_EID_RSN,
546 WLAN_EID_QOS_CAPA,
547 WLAN_EID_RRM_ENABLED_CAPABILITIES,
548 WLAN_EID_MOBILITY_DOMAIN,
549 WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
550 };
551 noffset = ieee80211_ie_split(assoc_data->ie, assoc_data->ie_len,
552 before_ht, ARRAY_SIZE(before_ht),
553 offset);
554 pos = skb_put(skb, noffset - offset);
555 memcpy(pos, assoc_data->ie + offset, noffset - offset);
556 offset = noffset;
557 }
558
559 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
560 ieee80211_add_ht_ie(sdata, skb, assoc_data->ht_information_ie,
561 sband, local->oper_channel, ifmgd->ap_smps);
562
563 /* if present, add any custom non-vendor IEs that go after HT */
564 if (assoc_data->ie_len && assoc_data->ie) {
565 noffset = ieee80211_ie_split_vendor(assoc_data->ie,
566 assoc_data->ie_len,
567 offset);
568 pos = skb_put(skb, noffset - offset);
569 memcpy(pos, assoc_data->ie + offset, noffset - offset);
570 offset = noffset;
571 }
572
573 if (assoc_data->wmm) {
574 if (assoc_data->uapsd) {
575 qos_info = ifmgd->uapsd_queues;
576 qos_info |= (ifmgd->uapsd_max_sp_len <<
577 IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT);
578 } else {
579 qos_info = 0;
580 }
581
582 pos = skb_put(skb, 9);
583 *pos++ = WLAN_EID_VENDOR_SPECIFIC;
584 *pos++ = 7; /* len */
585 *pos++ = 0x00; /* Microsoft OUI 00:50:F2 */
586 *pos++ = 0x50;
587 *pos++ = 0xf2;
588 *pos++ = 2; /* WME */
589 *pos++ = 0; /* WME info */
590 *pos++ = 1; /* WME ver */
591 *pos++ = qos_info;
592 }
593
594 /* add any remaining custom (i.e. vendor specific here) IEs */
595 if (assoc_data->ie_len && assoc_data->ie) {
596 noffset = assoc_data->ie_len;
597 pos = skb_put(skb, noffset - offset);
598 memcpy(pos, assoc_data->ie + offset, noffset - offset);
599 }
600
601 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
602 ieee80211_tx_skb(sdata, skb);
603}
604
605static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
606 const u8 *bssid, u16 stype,
607 u16 reason, bool send_frame,
608 u8 *frame_buf)
609{
610 struct ieee80211_local *local = sdata->local;
611 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
612 struct sk_buff *skb;
613 struct ieee80211_mgmt *mgmt = (void *)frame_buf;
614
615 /* build frame */
616 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
617 mgmt->duration = 0; /* initialize only */
618 mgmt->seq_ctrl = 0; /* initialize only */
304 memcpy(mgmt->da, bssid, ETH_ALEN); 619 memcpy(mgmt->da, bssid, ETH_ALEN);
305 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); 620 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
306 memcpy(mgmt->bssid, bssid, ETH_ALEN); 621 memcpy(mgmt->bssid, bssid, ETH_ALEN);
307 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
308 skb_put(skb, 2);
309 /* u.deauth.reason_code == u.disassoc.reason_code */ 622 /* u.deauth.reason_code == u.disassoc.reason_code */
310 mgmt->u.deauth.reason_code = cpu_to_le16(reason); 623 mgmt->u.deauth.reason_code = cpu_to_le16(reason);
311 624
312 if (stype == IEEE80211_STYPE_DEAUTH) 625 if (send_frame) {
313 if (cookie) 626 skb = dev_alloc_skb(local->hw.extra_tx_headroom +
314 __cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); 627 DEAUTH_DISASSOC_LEN);
315 else 628 if (!skb)
316 cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); 629 return;
317 else
318 if (cookie)
319 __cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len);
320 else
321 cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len);
322 if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED))
323 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
324 630
325 if (send_frame) 631 skb_reserve(skb, local->hw.extra_tx_headroom);
632
633 /* copy in frame */
634 memcpy(skb_put(skb, DEAUTH_DISASSOC_LEN),
635 mgmt, DEAUTH_DISASSOC_LEN);
636
637 if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED))
638 IEEE80211_SKB_CB(skb)->flags |=
639 IEEE80211_TX_INTFL_DONT_ENCRYPT;
326 ieee80211_tx_skb(sdata, skb); 640 ieee80211_tx_skb(sdata, skb);
327 else 641 }
328 kfree_skb(skb);
329} 642}
330 643
331void ieee80211_send_pspoll(struct ieee80211_local *local, 644void ieee80211_send_pspoll(struct ieee80211_local *local,
@@ -547,7 +860,7 @@ static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
547 if (pwr_constr_elem_len != 1) 860 if (pwr_constr_elem_len != 1)
548 return; 861 return;
549 862
550 if ((*pwr_constr_elem <= conf->channel->max_power) && 863 if ((*pwr_constr_elem <= conf->channel->max_reg_power) &&
551 (*pwr_constr_elem != sdata->local->power_constr_level)) { 864 (*pwr_constr_elem != sdata->local->power_constr_level)) {
552 sdata->local->power_constr_level = *pwr_constr_elem; 865 sdata->local->power_constr_level = *pwr_constr_elem;
553 ieee80211_hw_config(sdata->local, 0); 866 ieee80211_hw_config(sdata->local, 0);
@@ -879,7 +1192,7 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
879 return; 1192 return;
880 1193
881 if (ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) 1194 if (ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
882 uapsd_queues = local->uapsd_queues; 1195 uapsd_queues = ifmgd->uapsd_queues;
883 1196
884 count = wmm_param[6] & 0x0f; 1197 count = wmm_param[6] & 0x0f;
885 if (count == ifmgd->wmm_last_param_set) 1198 if (count == ifmgd->wmm_last_param_set)
@@ -953,7 +1266,6 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
953 1266
954 /* enable WMM or activate new settings */ 1267 /* enable WMM or activate new settings */
955 sdata->vif.bss_conf.qos = true; 1268 sdata->vif.bss_conf.qos = true;
956 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_QOS);
957} 1269}
958 1270
959static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, 1271static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
@@ -1006,7 +1318,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
1006 bss_info_changed |= BSS_CHANGED_ASSOC; 1318 bss_info_changed |= BSS_CHANGED_ASSOC;
1007 /* set timing information */ 1319 /* set timing information */
1008 bss_conf->beacon_int = cbss->beacon_interval; 1320 bss_conf->beacon_int = cbss->beacon_interval;
1009 bss_conf->timestamp = cbss->tsf; 1321 bss_conf->last_tsf = cbss->tsf;
1010 1322
1011 bss_info_changed |= BSS_CHANGED_BEACON_INT; 1323 bss_info_changed |= BSS_CHANGED_BEACON_INT;
1012 bss_info_changed |= ieee80211_handle_bss_capability(sdata, 1324 bss_info_changed |= ieee80211_handle_bss_capability(sdata,
@@ -1032,18 +1344,9 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
1032 bss_conf->dtim_period = 0; 1344 bss_conf->dtim_period = 0;
1033 1345
1034 bss_conf->assoc = 1; 1346 bss_conf->assoc = 1;
1035 /*
1036 * For now just always ask the driver to update the basic rateset
1037 * when we have associated, we aren't checking whether it actually
1038 * changed or not.
1039 */
1040 bss_info_changed |= BSS_CHANGED_BASIC_RATES;
1041
1042 /* And the BSSID changed - we're associated now */
1043 bss_info_changed |= BSS_CHANGED_BSSID;
1044 1347
1045 /* Tell the driver to monitor connection quality (if supported) */ 1348 /* Tell the driver to monitor connection quality (if supported) */
1046 if ((local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI) && 1349 if (sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI &&
1047 bss_conf->cqm_rssi_thold) 1350 bss_conf->cqm_rssi_thold)
1048 bss_info_changed |= BSS_CHANGED_CQM; 1351 bss_info_changed |= BSS_CHANGED_CQM;
1049 1352
@@ -1065,16 +1368,20 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
1065} 1368}
1066 1369
1067static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, 1370static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1068 bool remove_sta, bool tx) 1371 u16 stype, u16 reason, bool tx,
1372 u8 *frame_buf)
1069{ 1373{
1070 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1374 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1071 struct ieee80211_local *local = sdata->local; 1375 struct ieee80211_local *local = sdata->local;
1072 struct sta_info *sta; 1376 struct sta_info *sta;
1073 u32 changed = 0, config_changed = 0; 1377 u32 changed = 0;
1074 u8 bssid[ETH_ALEN]; 1378 u8 bssid[ETH_ALEN];
1075 1379
1076 ASSERT_MGD_MTX(ifmgd); 1380 ASSERT_MGD_MTX(ifmgd);
1077 1381
1382 if (WARN_ON_ONCE(tx && !frame_buf))
1383 return;
1384
1078 if (WARN_ON(!ifmgd->associated)) 1385 if (WARN_ON(!ifmgd->associated))
1079 return; 1386 return;
1080 1387
@@ -1108,17 +1415,25 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1108 } 1415 }
1109 mutex_unlock(&local->sta_mtx); 1416 mutex_unlock(&local->sta_mtx);
1110 1417
1418 /* deauthenticate/disassociate now */
1419 if (tx || frame_buf)
1420 ieee80211_send_deauth_disassoc(sdata, bssid, stype, reason,
1421 tx, frame_buf);
1422
1423 /* flush out frame */
1424 if (tx)
1425 drv_flush(local, false);
1426
1427 /* remove AP and TDLS peers */
1428 sta_info_flush(local, sdata);
1429
1430 /* finally reset all BSS / config parameters */
1111 changed |= ieee80211_reset_erp_info(sdata); 1431 changed |= ieee80211_reset_erp_info(sdata);
1112 1432
1113 ieee80211_led_assoc(local, 0); 1433 ieee80211_led_assoc(local, 0);
1114 changed |= BSS_CHANGED_ASSOC; 1434 changed |= BSS_CHANGED_ASSOC;
1115 sdata->vif.bss_conf.assoc = false; 1435 sdata->vif.bss_conf.assoc = false;
1116 1436
1117 ieee80211_set_wmm_default(sdata);
1118
1119 /* channel(_type) changes are handled by ieee80211_hw_config */
1120 WARN_ON(!ieee80211_set_channel_type(local, sdata, NL80211_CHAN_NO_HT));
1121
1122 /* on the next assoc, re-program HT parameters */ 1437 /* on the next assoc, re-program HT parameters */
1123 sdata->ht_opmode_valid = false; 1438 sdata->ht_opmode_valid = false;
1124 memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa)); 1439 memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa));
@@ -1131,25 +1446,29 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1131 1446
1132 if (local->hw.conf.flags & IEEE80211_CONF_PS) { 1447 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
1133 local->hw.conf.flags &= ~IEEE80211_CONF_PS; 1448 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
1134 config_changed |= IEEE80211_CONF_CHANGE_PS; 1449 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
1135 } 1450 }
1136 local->ps_sdata = NULL; 1451 local->ps_sdata = NULL;
1137 1452
1138 ieee80211_hw_config(local, config_changed);
1139
1140 /* Disable ARP filtering */ 1453 /* Disable ARP filtering */
1141 if (sdata->vif.bss_conf.arp_filter_enabled) { 1454 if (sdata->vif.bss_conf.arp_filter_enabled) {
1142 sdata->vif.bss_conf.arp_filter_enabled = false; 1455 sdata->vif.bss_conf.arp_filter_enabled = false;
1143 changed |= BSS_CHANGED_ARP_FILTER; 1456 changed |= BSS_CHANGED_ARP_FILTER;
1144 } 1457 }
1145 1458
1459 sdata->vif.bss_conf.qos = false;
1460 changed |= BSS_CHANGED_QOS;
1461
1146 /* The BSSID (not really interesting) and HT changed */ 1462 /* The BSSID (not really interesting) and HT changed */
1147 changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT; 1463 changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT;
1148 ieee80211_bss_info_change_notify(sdata, changed); 1464 ieee80211_bss_info_change_notify(sdata, changed);
1149 1465
1150 /* remove AP and TDLS peers */ 1466 /* channel(_type) changes are handled by ieee80211_hw_config */
1151 if (remove_sta) 1467 WARN_ON(!ieee80211_set_channel_type(local, sdata, NL80211_CHAN_NO_HT));
1152 sta_info_flush(local, sdata); 1468 ieee80211_hw_config(local, 0);
1469
1470 /* disassociated - set to defaults now */
1471 ieee80211_set_wmm_default(sdata, false);
1153 1472
1154 del_timer_sync(&sdata->u.mgd.conn_mon_timer); 1473 del_timer_sync(&sdata->u.mgd.conn_mon_timer);
1155 del_timer_sync(&sdata->u.mgd.bcn_mon_timer); 1474 del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
@@ -1347,6 +1666,7 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
1347 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1666 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1348 struct ieee80211_local *local = sdata->local; 1667 struct ieee80211_local *local = sdata->local;
1349 u8 bssid[ETH_ALEN]; 1668 u8 bssid[ETH_ALEN];
1669 u8 frame_buf[DEAUTH_DISASSOC_LEN];
1350 1670
1351 mutex_lock(&ifmgd->mtx); 1671 mutex_lock(&ifmgd->mtx);
1352 if (!ifmgd->associated) { 1672 if (!ifmgd->associated) {
@@ -1359,17 +1679,16 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
1359 printk(KERN_DEBUG "%s: Connection to AP %pM lost.\n", 1679 printk(KERN_DEBUG "%s: Connection to AP %pM lost.\n",
1360 sdata->name, bssid); 1680 sdata->name, bssid);
1361 1681
1362 ieee80211_set_disassoc(sdata, true, true); 1682 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
1683 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
1684 false, frame_buf);
1363 mutex_unlock(&ifmgd->mtx); 1685 mutex_unlock(&ifmgd->mtx);
1364 1686
1365 /* 1687 /*
1366 * must be outside lock due to cfg80211, 1688 * must be outside lock due to cfg80211,
1367 * but that's not a problem. 1689 * but that's not a problem.
1368 */ 1690 */
1369 ieee80211_send_deauth_disassoc(sdata, bssid, 1691 cfg80211_send_deauth(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN);
1370 IEEE80211_STYPE_DEAUTH,
1371 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
1372 NULL, true);
1373 1692
1374 mutex_lock(&local->mtx); 1693 mutex_lock(&local->mtx);
1375 ieee80211_recalc_idle(local); 1694 ieee80211_recalc_idle(local);
@@ -1423,6 +1742,126 @@ void ieee80211_connection_loss(struct ieee80211_vif *vif)
1423EXPORT_SYMBOL(ieee80211_connection_loss); 1742EXPORT_SYMBOL(ieee80211_connection_loss);
1424 1743
1425 1744
1745static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
1746 bool assoc)
1747{
1748 struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data;
1749
1750 lockdep_assert_held(&sdata->u.mgd.mtx);
1751
1752 if (!assoc) {
1753 sta_info_destroy_addr(sdata, auth_data->bss->bssid);
1754
1755 memset(sdata->u.mgd.bssid, 0, ETH_ALEN);
1756 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
1757 }
1758
1759 cfg80211_put_bss(auth_data->bss);
1760 kfree(auth_data);
1761 sdata->u.mgd.auth_data = NULL;
1762}
1763
1764static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
1765 struct ieee80211_mgmt *mgmt, size_t len)
1766{
1767 struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data;
1768 u8 *pos;
1769 struct ieee802_11_elems elems;
1770
1771 pos = mgmt->u.auth.variable;
1772 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1773 if (!elems.challenge)
1774 return;
1775 auth_data->expected_transaction = 4;
1776 ieee80211_send_auth(sdata, 3, auth_data->algorithm,
1777 elems.challenge - 2, elems.challenge_len + 2,
1778 auth_data->bss->bssid, auth_data->bss->bssid,
1779 auth_data->key, auth_data->key_len,
1780 auth_data->key_idx);
1781}
1782
1783static enum rx_mgmt_action __must_check
1784ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
1785 struct ieee80211_mgmt *mgmt, size_t len)
1786{
1787 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1788 u8 bssid[ETH_ALEN];
1789 u16 auth_alg, auth_transaction, status_code;
1790 struct sta_info *sta;
1791
1792 lockdep_assert_held(&ifmgd->mtx);
1793
1794 if (len < 24 + 6)
1795 return RX_MGMT_NONE;
1796
1797 if (!ifmgd->auth_data || ifmgd->auth_data->done)
1798 return RX_MGMT_NONE;
1799
1800 memcpy(bssid, ifmgd->auth_data->bss->bssid, ETH_ALEN);
1801
1802 if (compare_ether_addr(bssid, mgmt->bssid))
1803 return RX_MGMT_NONE;
1804
1805 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
1806 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
1807 status_code = le16_to_cpu(mgmt->u.auth.status_code);
1808
1809 if (auth_alg != ifmgd->auth_data->algorithm ||
1810 auth_transaction != ifmgd->auth_data->expected_transaction)
1811 return RX_MGMT_NONE;
1812
1813 if (status_code != WLAN_STATUS_SUCCESS) {
1814 printk(KERN_DEBUG "%s: %pM denied authentication (status %d)\n",
1815 sdata->name, mgmt->sa, status_code);
1816 goto out;
1817 }
1818
1819 switch (ifmgd->auth_data->algorithm) {
1820 case WLAN_AUTH_OPEN:
1821 case WLAN_AUTH_LEAP:
1822 case WLAN_AUTH_FT:
1823 break;
1824 case WLAN_AUTH_SHARED_KEY:
1825 if (ifmgd->auth_data->expected_transaction != 4) {
1826 ieee80211_auth_challenge(sdata, mgmt, len);
1827 /* need another frame */
1828 return RX_MGMT_NONE;
1829 }
1830 break;
1831 default:
1832 WARN_ONCE(1, "invalid auth alg %d",
1833 ifmgd->auth_data->algorithm);
1834 return RX_MGMT_NONE;
1835 }
1836
1837 printk(KERN_DEBUG "%s: authenticated\n", sdata->name);
1838 out:
1839 ifmgd->auth_data->done = true;
1840 ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC;
1841 run_again(ifmgd, ifmgd->auth_data->timeout);
1842
1843 /* move station state to auth */
1844 mutex_lock(&sdata->local->sta_mtx);
1845 sta = sta_info_get(sdata, bssid);
1846 if (!sta) {
1847 WARN_ONCE(1, "%s: STA %pM not found", sdata->name, bssid);
1848 goto out_err;
1849 }
1850 if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) {
1851 printk(KERN_DEBUG "%s: failed moving %pM to auth\n",
1852 sdata->name, bssid);
1853 goto out_err;
1854 }
1855 mutex_unlock(&sdata->local->sta_mtx);
1856
1857 return RX_MGMT_CFG80211_RX_AUTH;
1858 out_err:
1859 mutex_unlock(&sdata->local->sta_mtx);
1860 /* ignore frame -- wait for timeout */
1861 return RX_MGMT_NONE;
1862}
1863
1864
1426static enum rx_mgmt_action __must_check 1865static enum rx_mgmt_action __must_check
1427ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, 1866ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1428 struct ieee80211_mgmt *mgmt, size_t len) 1867 struct ieee80211_mgmt *mgmt, size_t len)
@@ -1431,10 +1870,14 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1431 const u8 *bssid = NULL; 1870 const u8 *bssid = NULL;
1432 u16 reason_code; 1871 u16 reason_code;
1433 1872
1873 lockdep_assert_held(&ifmgd->mtx);
1874
1434 if (len < 24 + 2) 1875 if (len < 24 + 2)
1435 return RX_MGMT_NONE; 1876 return RX_MGMT_NONE;
1436 1877
1437 ASSERT_MGD_MTX(ifmgd); 1878 if (!ifmgd->associated ||
1879 compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid))
1880 return RX_MGMT_NONE;
1438 1881
1439 bssid = ifmgd->associated->bssid; 1882 bssid = ifmgd->associated->bssid;
1440 1883
@@ -1443,7 +1886,8 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1443 printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n", 1886 printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n",
1444 sdata->name, bssid, reason_code); 1887 sdata->name, bssid, reason_code);
1445 1888
1446 ieee80211_set_disassoc(sdata, true, false); 1889 ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
1890
1447 mutex_lock(&sdata->local->mtx); 1891 mutex_lock(&sdata->local->mtx);
1448 ieee80211_recalc_idle(sdata->local); 1892 ieee80211_recalc_idle(sdata->local);
1449 mutex_unlock(&sdata->local->mtx); 1893 mutex_unlock(&sdata->local->mtx);
@@ -1459,15 +1903,13 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
1459 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1903 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1460 u16 reason_code; 1904 u16 reason_code;
1461 1905
1462 if (len < 24 + 2) 1906 lockdep_assert_held(&ifmgd->mtx);
1463 return RX_MGMT_NONE;
1464
1465 ASSERT_MGD_MTX(ifmgd);
1466 1907
1467 if (WARN_ON(!ifmgd->associated)) 1908 if (len < 24 + 2)
1468 return RX_MGMT_NONE; 1909 return RX_MGMT_NONE;
1469 1910
1470 if (WARN_ON(memcmp(ifmgd->associated->bssid, mgmt->sa, ETH_ALEN))) 1911 if (!ifmgd->associated ||
1912 compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid))
1471 return RX_MGMT_NONE; 1913 return RX_MGMT_NONE;
1472 1914
1473 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); 1915 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
@@ -1475,10 +1917,12 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
1475 printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n", 1917 printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n",
1476 sdata->name, mgmt->sa, reason_code); 1918 sdata->name, mgmt->sa, reason_code);
1477 1919
1478 ieee80211_set_disassoc(sdata, true, false); 1920 ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
1921
1479 mutex_lock(&sdata->local->mtx); 1922 mutex_lock(&sdata->local->mtx);
1480 ieee80211_recalc_idle(sdata->local); 1923 ieee80211_recalc_idle(sdata->local);
1481 mutex_unlock(&sdata->local->mtx); 1924 mutex_unlock(&sdata->local->mtx);
1925
1482 return RX_MGMT_CFG80211_DISASSOC; 1926 return RX_MGMT_CFG80211_DISASSOC;
1483} 1927}
1484 1928
@@ -1524,25 +1968,39 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
1524 } 1968 }
1525} 1969}
1526 1970
1527static bool ieee80211_assoc_success(struct ieee80211_work *wk, 1971static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
1972 bool assoc)
1973{
1974 struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
1975
1976 lockdep_assert_held(&sdata->u.mgd.mtx);
1977
1978 if (!assoc) {
1979 sta_info_destroy_addr(sdata, assoc_data->bss->bssid);
1980
1981 memset(sdata->u.mgd.bssid, 0, ETH_ALEN);
1982 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
1983 }
1984
1985 kfree(assoc_data);
1986 sdata->u.mgd.assoc_data = NULL;
1987}
1988
1989static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
1990 struct cfg80211_bss *cbss,
1528 struct ieee80211_mgmt *mgmt, size_t len) 1991 struct ieee80211_mgmt *mgmt, size_t len)
1529{ 1992{
1530 struct ieee80211_sub_if_data *sdata = wk->sdata;
1531 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1993 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1532 struct ieee80211_local *local = sdata->local; 1994 struct ieee80211_local *local = sdata->local;
1533 struct ieee80211_supported_band *sband; 1995 struct ieee80211_supported_band *sband;
1534 struct sta_info *sta; 1996 struct sta_info *sta;
1535 struct cfg80211_bss *cbss = wk->assoc.bss;
1536 u8 *pos; 1997 u8 *pos;
1537 u32 rates, basic_rates;
1538 u16 capab_info, aid; 1998 u16 capab_info, aid;
1539 struct ieee802_11_elems elems; 1999 struct ieee802_11_elems elems;
1540 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; 2000 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
1541 u32 changed = 0; 2001 u32 changed = 0;
1542 int err; 2002 int err;
1543 bool have_higher_than_11mbit = false;
1544 u16 ap_ht_cap_flags; 2003 u16 ap_ht_cap_flags;
1545 int min_rate = INT_MAX, min_rate_index = -1;
1546 2004
1547 /* AssocResp and ReassocResp have identical structure */ 2005 /* AssocResp and ReassocResp have identical structure */
1548 2006
@@ -1581,49 +2039,13 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1581 * station info was already allocated and inserted before 2039 * station info was already allocated and inserted before
1582 * the association and should be available to us 2040 * the association and should be available to us
1583 */ 2041 */
1584 sta = sta_info_get_rx(sdata, cbss->bssid); 2042 sta = sta_info_get(sdata, cbss->bssid);
1585 if (WARN_ON(!sta)) { 2043 if (WARN_ON(!sta)) {
1586 mutex_unlock(&sdata->local->sta_mtx); 2044 mutex_unlock(&sdata->local->sta_mtx);
1587 return false; 2045 return false;
1588 } 2046 }
1589 2047
1590 sta_info_move_state(sta, IEEE80211_STA_AUTH); 2048 sband = local->hw.wiphy->bands[local->oper_channel->band];
1591 sta_info_move_state(sta, IEEE80211_STA_ASSOC);
1592 if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
1593 sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
1594
1595 rates = 0;
1596 basic_rates = 0;
1597 sband = local->hw.wiphy->bands[wk->chan->band];
1598
1599 ieee80211_get_rates(sband, elems.supp_rates, elems.supp_rates_len,
1600 &rates, &basic_rates, &have_higher_than_11mbit,
1601 &min_rate, &min_rate_index);
1602
1603 ieee80211_get_rates(sband, elems.ext_supp_rates,
1604 elems.ext_supp_rates_len, &rates, &basic_rates,
1605 &have_higher_than_11mbit,
1606 &min_rate, &min_rate_index);
1607
1608 /*
1609 * some buggy APs don't advertise basic_rates. use the lowest
1610 * supported rate instead.
1611 */
1612 if (unlikely(!basic_rates) && min_rate_index >= 0) {
1613 printk(KERN_DEBUG "%s: No basic rates in AssocResp. "
1614 "Using min supported rate instead.\n", sdata->name);
1615 basic_rates = BIT(min_rate_index);
1616 }
1617
1618 sta->sta.supp_rates[wk->chan->band] = rates;
1619 sdata->vif.bss_conf.basic_rates = basic_rates;
1620
1621 /* cf. IEEE 802.11 9.2.12 */
1622 if (wk->chan->band == IEEE80211_BAND_2GHZ &&
1623 have_higher_than_11mbit)
1624 sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
1625 else
1626 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
1627 2049
1628 if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) 2050 if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
1629 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, 2051 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
@@ -1639,15 +2061,22 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1639 if (elems.wmm_param) 2061 if (elems.wmm_param)
1640 set_sta_flag(sta, WLAN_STA_WME); 2062 set_sta_flag(sta, WLAN_STA_WME);
1641 2063
1642 /* sta_info_reinsert will also unlock the mutex lock */ 2064 err = sta_info_move_state(sta, IEEE80211_STA_AUTH);
1643 err = sta_info_reinsert(sta); 2065 if (!err)
1644 sta = NULL; 2066 err = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
2067 if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
2068 err = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
1645 if (err) { 2069 if (err) {
1646 printk(KERN_DEBUG "%s: failed to insert STA entry for" 2070 printk(KERN_DEBUG
1647 " the AP (error %d)\n", sdata->name, err); 2071 "%s: failed to move station %pM to desired state\n",
2072 sdata->name, sta->sta.addr);
2073 WARN_ON(__sta_info_destroy(sta));
2074 mutex_unlock(&sdata->local->sta_mtx);
1648 return false; 2075 return false;
1649 } 2076 }
1650 2077
2078 mutex_unlock(&sdata->local->sta_mtx);
2079
1651 /* 2080 /*
1652 * Always handle WMM once after association regardless 2081 * Always handle WMM once after association regardless
1653 * of the first value the AP uses. Setting -1 here has 2082 * of the first value the AP uses. Setting -1 here has
@@ -1660,12 +2089,10 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1660 ieee80211_sta_wmm_params(local, sdata, elems.wmm_param, 2089 ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
1661 elems.wmm_param_len); 2090 elems.wmm_param_len);
1662 else 2091 else
1663 ieee80211_set_wmm_default(sdata); 2092 ieee80211_set_wmm_default(sdata, false);
1664 2093 changed |= BSS_CHANGED_QOS;
1665 local->oper_channel = wk->chan;
1666 2094
1667 if (elems.ht_info_elem && elems.wmm_param && 2095 if (elems.ht_info_elem && elems.wmm_param &&
1668 (sdata->local->hw.queues >= 4) &&
1669 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) 2096 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
1670 changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem, 2097 changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
1671 cbss->bssid, ap_ht_cap_flags, 2098 cbss->bssid, ap_ht_cap_flags,
@@ -1694,7 +2121,88 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1694 return true; 2121 return true;
1695} 2122}
1696 2123
2124static enum rx_mgmt_action __must_check
2125ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2126 struct ieee80211_mgmt *mgmt, size_t len,
2127 struct cfg80211_bss **bss)
2128{
2129 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2130 struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
2131 u16 capab_info, status_code, aid;
2132 struct ieee802_11_elems elems;
2133 u8 *pos;
2134 bool reassoc;
2135
2136 lockdep_assert_held(&ifmgd->mtx);
1697 2137
2138 if (!assoc_data)
2139 return RX_MGMT_NONE;
2140 if (compare_ether_addr(assoc_data->bss->bssid, mgmt->bssid))
2141 return RX_MGMT_NONE;
2142
2143 /*
2144 * AssocResp and ReassocResp have identical structure, so process both
2145 * of them in this function.
2146 */
2147
2148 if (len < 24 + 6)
2149 return RX_MGMT_NONE;
2150
2151 reassoc = ieee80211_is_reassoc_req(mgmt->frame_control);
2152 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
2153 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
2154 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
2155
2156 printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x "
2157 "status=%d aid=%d)\n",
2158 sdata->name, reassoc ? "Rea" : "A", mgmt->sa,
2159 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
2160
2161 pos = mgmt->u.assoc_resp.variable;
2162 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
2163
2164 if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY &&
2165 elems.timeout_int && elems.timeout_int_len == 5 &&
2166 elems.timeout_int[0] == WLAN_TIMEOUT_ASSOC_COMEBACK) {
2167 u32 tu, ms;
2168 tu = get_unaligned_le32(elems.timeout_int + 1);
2169 ms = tu * 1024 / 1000;
2170 printk(KERN_DEBUG "%s: %pM rejected association temporarily; "
2171 "comeback duration %u TU (%u ms)\n",
2172 sdata->name, mgmt->sa, tu, ms);
2173 assoc_data->timeout = jiffies + msecs_to_jiffies(ms);
2174 if (ms > IEEE80211_ASSOC_TIMEOUT)
2175 run_again(ifmgd, assoc_data->timeout);
2176 return RX_MGMT_NONE;
2177 }
2178
2179 *bss = assoc_data->bss;
2180
2181 if (status_code != WLAN_STATUS_SUCCESS) {
2182 printk(KERN_DEBUG "%s: %pM denied association (code=%d)\n",
2183 sdata->name, mgmt->sa, status_code);
2184 ieee80211_destroy_assoc_data(sdata, false);
2185 } else {
2186 printk(KERN_DEBUG "%s: associated\n", sdata->name);
2187
2188 if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
2189 /* oops -- internal error -- send timeout for now */
2190 ieee80211_destroy_assoc_data(sdata, true);
2191 sta_info_destroy_addr(sdata, mgmt->bssid);
2192 cfg80211_put_bss(*bss);
2193 return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
2194 }
2195
2196 /*
2197 * destroy assoc_data afterwards, as otherwise an idle
2198 * recalc after assoc_data is NULL but before associated
2199 * is set can cause the interface to go idle
2200 */
2201 ieee80211_destroy_assoc_data(sdata, true);
2202 }
2203
2204 return RX_MGMT_CFG80211_RX_ASSOC;
2205}
1698static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, 2206static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
1699 struct ieee80211_mgmt *mgmt, 2207 struct ieee80211_mgmt *mgmt,
1700 size_t len, 2208 size_t len,
@@ -1708,7 +2216,9 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
1708 struct ieee80211_channel *channel; 2216 struct ieee80211_channel *channel;
1709 bool need_ps = false; 2217 bool need_ps = false;
1710 2218
1711 if (sdata->u.mgd.associated) { 2219 if (sdata->u.mgd.associated &&
2220 compare_ether_addr(mgmt->bssid, sdata->u.mgd.associated->bssid)
2221 == 0) {
1712 bss = (void *)sdata->u.mgd.associated->priv; 2222 bss = (void *)sdata->u.mgd.associated->priv;
1713 /* not previously set so we may need to recalc */ 2223 /* not previously set so we may need to recalc */
1714 need_ps = !bss->dtim_period; 2224 need_ps = !bss->dtim_period;
@@ -1763,7 +2273,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
1763 2273
1764 ASSERT_MGD_MTX(ifmgd); 2274 ASSERT_MGD_MTX(ifmgd);
1765 2275
1766 if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN)) 2276 if (compare_ether_addr(mgmt->da, sdata->vif.addr))
1767 return; /* ignore ProbeResp to foreign address */ 2277 return; /* ignore ProbeResp to foreign address */
1768 2278
1769 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; 2279 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -1776,8 +2286,18 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
1776 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false); 2286 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false);
1777 2287
1778 if (ifmgd->associated && 2288 if (ifmgd->associated &&
1779 memcmp(mgmt->bssid, ifmgd->associated->bssid, ETH_ALEN) == 0) 2289 compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid) == 0)
1780 ieee80211_reset_ap_probe(sdata); 2290 ieee80211_reset_ap_probe(sdata);
2291
2292 if (ifmgd->auth_data && !ifmgd->auth_data->bss->proberesp_ies &&
2293 compare_ether_addr(mgmt->bssid, ifmgd->auth_data->bss->bssid)
2294 == 0) {
2295 /* got probe response, continue with auth */
2296 printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name);
2297 ifmgd->auth_data->tries = 0;
2298 ifmgd->auth_data->timeout = jiffies;
2299 run_again(ifmgd, ifmgd->auth_data->timeout);
2300 }
1781} 2301}
1782 2302
1783/* 2303/*
@@ -1817,7 +2337,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1817 u32 ncrc; 2337 u32 ncrc;
1818 u8 *bssid; 2338 u8 *bssid;
1819 2339
1820 ASSERT_MGD_MTX(ifmgd); 2340 lockdep_assert_held(&ifmgd->mtx);
1821 2341
1822 /* Process beacon from the current BSS */ 2342 /* Process beacon from the current BSS */
1823 baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt; 2343 baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt;
@@ -1827,21 +2347,26 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1827 if (rx_status->freq != local->hw.conf.channel->center_freq) 2347 if (rx_status->freq != local->hw.conf.channel->center_freq)
1828 return; 2348 return;
1829 2349
1830 /* 2350 if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon &&
1831 * We might have received a number of frames, among them a 2351 compare_ether_addr(mgmt->bssid, ifmgd->assoc_data->bss->bssid)
1832 * disassoc frame and a beacon... 2352 == 0) {
1833 */ 2353 ieee802_11_parse_elems(mgmt->u.beacon.variable,
1834 if (!ifmgd->associated) 2354 len - baselen, &elems);
1835 return;
1836 2355
1837 bssid = ifmgd->associated->bssid; 2356 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems,
2357 false);
2358 ifmgd->assoc_data->have_beacon = true;
2359 ifmgd->assoc_data->sent_assoc = false;
2360 /* continue assoc process */
2361 ifmgd->assoc_data->timeout = jiffies;
2362 run_again(ifmgd, ifmgd->assoc_data->timeout);
2363 return;
2364 }
1838 2365
1839 /* 2366 if (!ifmgd->associated ||
1840 * And in theory even frames from a different AP we were just 2367 compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid))
1841 * associated to a split-second ago!
1842 */
1843 if (memcmp(bssid, mgmt->bssid, ETH_ALEN) != 0)
1844 return; 2368 return;
2369 bssid = ifmgd->associated->bssid;
1845 2370
1846 /* Track average RSSI from the Beacon frames of the current AP */ 2371 /* Track average RSSI from the Beacon frames of the current AP */
1847 ifmgd->last_beacon_signal = rx_status->signal; 2372 ifmgd->last_beacon_signal = rx_status->signal;
@@ -1882,7 +2407,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1882 2407
1883 if (bss_conf->cqm_rssi_thold && 2408 if (bss_conf->cqm_rssi_thold &&
1884 ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT && 2409 ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT &&
1885 !(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) { 2410 !(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)) {
1886 int sig = ifmgd->ave_beacon_signal / 16; 2411 int sig = ifmgd->ave_beacon_signal / 16;
1887 int last_event = ifmgd->last_cqm_event_signal; 2412 int last_event = ifmgd->last_cqm_event_signal;
1888 int thold = bss_conf->cqm_rssi_thold; 2413 int thold = bss_conf->cqm_rssi_thold;
@@ -2025,6 +2550,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
2025 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2550 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2026 struct ieee80211_rx_status *rx_status; 2551 struct ieee80211_rx_status *rx_status;
2027 struct ieee80211_mgmt *mgmt; 2552 struct ieee80211_mgmt *mgmt;
2553 struct cfg80211_bss *bss = NULL;
2028 enum rx_mgmt_action rma = RX_MGMT_NONE; 2554 enum rx_mgmt_action rma = RX_MGMT_NONE;
2029 u16 fc; 2555 u16 fc;
2030 2556
@@ -2034,92 +2560,59 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
2034 2560
2035 mutex_lock(&ifmgd->mtx); 2561 mutex_lock(&ifmgd->mtx);
2036 2562
2037 if (ifmgd->associated && 2563 switch (fc & IEEE80211_FCTL_STYPE) {
2038 memcmp(ifmgd->associated->bssid, mgmt->bssid, ETH_ALEN) == 0) { 2564 case IEEE80211_STYPE_BEACON:
2039 switch (fc & IEEE80211_FCTL_STYPE) { 2565 ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, rx_status);
2040 case IEEE80211_STYPE_BEACON: 2566 break;
2041 ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, 2567 case IEEE80211_STYPE_PROBE_RESP:
2042 rx_status); 2568 ieee80211_rx_mgmt_probe_resp(sdata, skb);
2043 break; 2569 break;
2044 case IEEE80211_STYPE_PROBE_RESP: 2570 case IEEE80211_STYPE_AUTH:
2045 ieee80211_rx_mgmt_probe_resp(sdata, skb); 2571 rma = ieee80211_rx_mgmt_auth(sdata, mgmt, skb->len);
2046 break; 2572 break;
2047 case IEEE80211_STYPE_DEAUTH: 2573 case IEEE80211_STYPE_DEAUTH:
2048 rma = ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len); 2574 rma = ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len);
2049 break; 2575 break;
2050 case IEEE80211_STYPE_DISASSOC: 2576 case IEEE80211_STYPE_DISASSOC:
2051 rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); 2577 rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
2052 break; 2578 break;
2053 case IEEE80211_STYPE_ACTION: 2579 case IEEE80211_STYPE_ASSOC_RESP:
2054 switch (mgmt->u.action.category) { 2580 case IEEE80211_STYPE_REASSOC_RESP:
2055 case WLAN_CATEGORY_SPECTRUM_MGMT: 2581 rma = ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len, &bss);
2056 ieee80211_sta_process_chanswitch(sdata, 2582 break;
2057 &mgmt->u.action.u.chan_switch.sw_elem, 2583 case IEEE80211_STYPE_ACTION:
2058 (void *)ifmgd->associated->priv, 2584 switch (mgmt->u.action.category) {
2059 rx_status->mactime); 2585 case WLAN_CATEGORY_SPECTRUM_MGMT:
2060 break; 2586 ieee80211_sta_process_chanswitch(sdata,
2061 } 2587 &mgmt->u.action.u.chan_switch.sw_elem,
2062 } 2588 (void *)ifmgd->associated->priv,
2063 mutex_unlock(&ifmgd->mtx); 2589 rx_status->mactime);
2064
2065 switch (rma) {
2066 case RX_MGMT_NONE:
2067 /* no action */
2068 break;
2069 case RX_MGMT_CFG80211_DEAUTH:
2070 cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
2071 break;
2072 case RX_MGMT_CFG80211_DISASSOC:
2073 cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len);
2074 break; 2590 break;
2075 default:
2076 WARN(1, "unexpected: %d", rma);
2077 } 2591 }
2078 return;
2079 } 2592 }
2080
2081 mutex_unlock(&ifmgd->mtx); 2593 mutex_unlock(&ifmgd->mtx);
2082 2594
2083 if (skb->len >= 24 + 2 /* mgmt + deauth reason */ && 2595 switch (rma) {
2084 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH) { 2596 case RX_MGMT_NONE:
2085 struct ieee80211_local *local = sdata->local; 2597 /* no action */
2086 struct ieee80211_work *wk; 2598 break;
2087 2599 case RX_MGMT_CFG80211_DEAUTH:
2088 mutex_lock(&local->mtx);
2089 list_for_each_entry(wk, &local->work_list, list) {
2090 if (wk->sdata != sdata)
2091 continue;
2092
2093 if (wk->type != IEEE80211_WORK_ASSOC &&
2094 wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
2095 continue;
2096
2097 if (memcmp(mgmt->bssid, wk->filter_ta, ETH_ALEN))
2098 continue;
2099 if (memcmp(mgmt->sa, wk->filter_ta, ETH_ALEN))
2100 continue;
2101
2102 /*
2103 * Printing the message only here means we can't
2104 * spuriously print it, but it also means that it
2105 * won't be printed when the frame comes in before
2106 * we even tried to associate or in similar cases.
2107 *
2108 * Ultimately, I suspect cfg80211 should print the
2109 * messages instead.
2110 */
2111 printk(KERN_DEBUG
2112 "%s: deauthenticated from %pM (Reason: %u)\n",
2113 sdata->name, mgmt->bssid,
2114 le16_to_cpu(mgmt->u.deauth.reason_code));
2115
2116 list_del_rcu(&wk->list);
2117 free_work(wk);
2118 break;
2119 }
2120 mutex_unlock(&local->mtx);
2121
2122 cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); 2600 cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
2601 break;
2602 case RX_MGMT_CFG80211_DISASSOC:
2603 cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len);
2604 break;
2605 case RX_MGMT_CFG80211_RX_AUTH:
2606 cfg80211_send_rx_auth(sdata->dev, (u8 *)mgmt, skb->len);
2607 break;
2608 case RX_MGMT_CFG80211_RX_ASSOC:
2609 cfg80211_send_rx_assoc(sdata->dev, bss, (u8 *)mgmt, skb->len);
2610 break;
2611 case RX_MGMT_CFG80211_ASSOC_TIMEOUT:
2612 cfg80211_send_assoc_timeout(sdata->dev, mgmt->bssid);
2613 break;
2614 default:
2615 WARN(1, "unexpected: %d", rma);
2123 } 2616 }
2124} 2617}
2125 2618
@@ -2143,19 +2636,20 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
2143{ 2636{
2144 struct ieee80211_local *local = sdata->local; 2637 struct ieee80211_local *local = sdata->local;
2145 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2638 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2639 u8 frame_buf[DEAUTH_DISASSOC_LEN];
2146 2640
2147 ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL | 2641 ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
2148 IEEE80211_STA_BEACON_POLL); 2642 IEEE80211_STA_BEACON_POLL);
2149 2643
2150 ieee80211_set_disassoc(sdata, true, true); 2644 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason,
2645 false, frame_buf);
2151 mutex_unlock(&ifmgd->mtx); 2646 mutex_unlock(&ifmgd->mtx);
2647
2152 /* 2648 /*
2153 * must be outside lock due to cfg80211, 2649 * must be outside lock due to cfg80211,
2154 * but that's not a problem. 2650 * but that's not a problem.
2155 */ 2651 */
2156 ieee80211_send_deauth_disassoc(sdata, bssid, 2652 cfg80211_send_deauth(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN);
2157 IEEE80211_STYPE_DEAUTH, reason,
2158 NULL, true);
2159 2653
2160 mutex_lock(&local->mtx); 2654 mutex_lock(&local->mtx);
2161 ieee80211_recalc_idle(local); 2655 ieee80211_recalc_idle(local);
@@ -2164,14 +2658,144 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
2164 mutex_lock(&ifmgd->mtx); 2658 mutex_lock(&ifmgd->mtx);
2165} 2659}
2166 2660
2661static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
2662{
2663 struct ieee80211_local *local = sdata->local;
2664 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2665 struct ieee80211_mgd_auth_data *auth_data = ifmgd->auth_data;
2666
2667 lockdep_assert_held(&ifmgd->mtx);
2668
2669 if (WARN_ON_ONCE(!auth_data))
2670 return -EINVAL;
2671
2672 auth_data->tries++;
2673
2674 if (auth_data->tries > IEEE80211_AUTH_MAX_TRIES) {
2675 printk(KERN_DEBUG "%s: authentication with %pM timed out\n",
2676 sdata->name, auth_data->bss->bssid);
2677
2678 /*
2679 * Most likely AP is not in the range so remove the
2680 * bss struct for that AP.
2681 */
2682 cfg80211_unlink_bss(local->hw.wiphy, auth_data->bss);
2683
2684 return -ETIMEDOUT;
2685 }
2686
2687 if (auth_data->bss->proberesp_ies) {
2688 printk(KERN_DEBUG "%s: send auth to %pM (try %d/%d)\n",
2689 sdata->name, auth_data->bss->bssid, auth_data->tries,
2690 IEEE80211_AUTH_MAX_TRIES);
2691
2692 auth_data->expected_transaction = 2;
2693 ieee80211_send_auth(sdata, 1, auth_data->algorithm,
2694 auth_data->ie, auth_data->ie_len,
2695 auth_data->bss->bssid,
2696 auth_data->bss->bssid, NULL, 0, 0);
2697 } else {
2698 const u8 *ssidie;
2699
2700 printk(KERN_DEBUG "%s: direct probe to %pM (try %d/%i)\n",
2701 sdata->name, auth_data->bss->bssid, auth_data->tries,
2702 IEEE80211_AUTH_MAX_TRIES);
2703
2704 ssidie = ieee80211_bss_get_ie(auth_data->bss, WLAN_EID_SSID);
2705 if (!ssidie)
2706 return -EINVAL;
2707 /*
2708 * Direct probe is sent to broadcast address as some APs
2709 * will not answer to direct packet in unassociated state.
2710 */
2711 ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1],
2712 NULL, 0, (u32) -1, true, false);
2713 }
2714
2715 auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
2716 run_again(ifmgd, auth_data->timeout);
2717
2718 return 0;
2719}
2720
2721static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
2722{
2723 struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
2724 struct ieee80211_local *local = sdata->local;
2725
2726 lockdep_assert_held(&sdata->u.mgd.mtx);
2727
2728 assoc_data->tries++;
2729 if (assoc_data->tries > IEEE80211_ASSOC_MAX_TRIES) {
2730 printk(KERN_DEBUG "%s: association with %pM timed out\n",
2731 sdata->name, assoc_data->bss->bssid);
2732
2733 /*
2734 * Most likely AP is not in the range so remove the
2735 * bss struct for that AP.
2736 */
2737 cfg80211_unlink_bss(local->hw.wiphy, assoc_data->bss);
2738
2739 return -ETIMEDOUT;
2740 }
2741
2742 printk(KERN_DEBUG "%s: associate with %pM (try %d/%d)\n",
2743 sdata->name, assoc_data->bss->bssid, assoc_data->tries,
2744 IEEE80211_ASSOC_MAX_TRIES);
2745 ieee80211_send_assoc(sdata);
2746
2747 assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
2748 run_again(&sdata->u.mgd, assoc_data->timeout);
2749
2750 return 0;
2751}
2752
2167void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) 2753void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
2168{ 2754{
2169 struct ieee80211_local *local = sdata->local; 2755 struct ieee80211_local *local = sdata->local;
2170 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2756 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2171 2757
2172 /* then process the rest of the work */
2173 mutex_lock(&ifmgd->mtx); 2758 mutex_lock(&ifmgd->mtx);
2174 2759
2760 if (ifmgd->auth_data &&
2761 time_after(jiffies, ifmgd->auth_data->timeout)) {
2762 if (ifmgd->auth_data->done) {
2763 /*
2764 * ok ... we waited for assoc but userspace didn't,
2765 * so let's just kill the auth data
2766 */
2767 ieee80211_destroy_auth_data(sdata, false);
2768 } else if (ieee80211_probe_auth(sdata)) {
2769 u8 bssid[ETH_ALEN];
2770
2771 memcpy(bssid, ifmgd->auth_data->bss->bssid, ETH_ALEN);
2772
2773 ieee80211_destroy_auth_data(sdata, false);
2774
2775 mutex_unlock(&ifmgd->mtx);
2776 cfg80211_send_auth_timeout(sdata->dev, bssid);
2777 mutex_lock(&ifmgd->mtx);
2778 }
2779 } else if (ifmgd->auth_data)
2780 run_again(ifmgd, ifmgd->auth_data->timeout);
2781
2782 if (ifmgd->assoc_data &&
2783 time_after(jiffies, ifmgd->assoc_data->timeout)) {
2784 if (!ifmgd->assoc_data->have_beacon ||
2785 ieee80211_do_assoc(sdata)) {
2786 u8 bssid[ETH_ALEN];
2787
2788 memcpy(bssid, ifmgd->assoc_data->bss->bssid, ETH_ALEN);
2789
2790 ieee80211_destroy_assoc_data(sdata, false);
2791
2792 mutex_unlock(&ifmgd->mtx);
2793 cfg80211_send_assoc_timeout(sdata->dev, bssid);
2794 mutex_lock(&ifmgd->mtx);
2795 }
2796 } else if (ifmgd->assoc_data)
2797 run_again(ifmgd, ifmgd->assoc_data->timeout);
2798
2175 if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL | 2799 if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
2176 IEEE80211_STA_CONNECTION_POLL) && 2800 IEEE80211_STA_CONNECTION_POLL) &&
2177 ifmgd->associated) { 2801 ifmgd->associated) {
@@ -2247,6 +2871,10 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
2247 } 2871 }
2248 2872
2249 mutex_unlock(&ifmgd->mtx); 2873 mutex_unlock(&ifmgd->mtx);
2874
2875 mutex_lock(&local->mtx);
2876 ieee80211_recalc_idle(local);
2877 mutex_unlock(&local->mtx);
2250} 2878}
2251 2879
2252static void ieee80211_sta_bcn_mon_timer(unsigned long data) 2880static void ieee80211_sta_bcn_mon_timer(unsigned long data)
@@ -2286,13 +2914,17 @@ static void ieee80211_sta_monitor_work(struct work_struct *work)
2286 2914
2287static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) 2915static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
2288{ 2916{
2917 u32 flags;
2918
2289 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 2919 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
2290 sdata->u.mgd.flags &= ~(IEEE80211_STA_BEACON_POLL | 2920 sdata->u.mgd.flags &= ~(IEEE80211_STA_BEACON_POLL |
2291 IEEE80211_STA_CONNECTION_POLL); 2921 IEEE80211_STA_CONNECTION_POLL);
2292 2922
2293 /* let's probe the connection once */ 2923 /* let's probe the connection once */
2294 ieee80211_queue_work(&sdata->local->hw, 2924 flags = sdata->local->hw.flags;
2295 &sdata->u.mgd.monitor_work); 2925 if (!(flags & IEEE80211_HW_CONNECTION_MONITOR))
2926 ieee80211_queue_work(&sdata->local->hw,
2927 &sdata->u.mgd.monitor_work);
2296 /* and do all the other regular work too */ 2928 /* and do all the other regular work too */
2297 ieee80211_queue_work(&sdata->local->hw, &sdata->work); 2929 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
2298 } 2930 }
@@ -2356,7 +2988,6 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
2356 add_timer(&ifmgd->chswitch_timer); 2988 add_timer(&ifmgd->chswitch_timer);
2357 ieee80211_sta_reset_beacon_monitor(sdata); 2989 ieee80211_sta_reset_beacon_monitor(sdata);
2358 ieee80211_restart_sta_timer(sdata); 2990 ieee80211_restart_sta_timer(sdata);
2359 ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.monitor_work);
2360} 2991}
2361#endif 2992#endif
2362 2993
@@ -2382,6 +3013,8 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
2382 3013
2383 ifmgd->flags = 0; 3014 ifmgd->flags = 0;
2384 ifmgd->powersave = sdata->wdev.ps; 3015 ifmgd->powersave = sdata->wdev.ps;
3016 ifmgd->uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES;
3017 ifmgd->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN;
2385 3018
2386 mutex_init(&ifmgd->mtx); 3019 mutex_init(&ifmgd->mtx);
2387 3020
@@ -2418,54 +3051,119 @@ int ieee80211_max_network_latency(struct notifier_block *nb,
2418 return 0; 3051 return 0;
2419} 3052}
2420 3053
2421/* config hooks */ 3054static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
2422static enum work_done_result 3055 struct cfg80211_bss *cbss, bool assoc)
2423ieee80211_probe_auth_done(struct ieee80211_work *wk,
2424 struct sk_buff *skb)
2425{ 3056{
2426 struct ieee80211_local *local = wk->sdata->local; 3057 struct ieee80211_local *local = sdata->local;
3058 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3059 struct ieee80211_bss *bss = (void *)cbss->priv;
3060 struct sta_info *sta;
3061 bool have_sta = false;
3062 int err;
2427 3063
2428 if (!skb) { 3064 if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
2429 cfg80211_send_auth_timeout(wk->sdata->dev, wk->filter_ta); 3065 return -EINVAL;
2430 goto destroy; 3066
3067 if (assoc) {
3068 rcu_read_lock();
3069 have_sta = sta_info_get(sdata, cbss->bssid);
3070 rcu_read_unlock();
2431 } 3071 }
2432 3072
2433 if (wk->type == IEEE80211_WORK_AUTH) { 3073 if (!have_sta) {
2434 cfg80211_send_rx_auth(wk->sdata->dev, skb->data, skb->len); 3074 sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
2435 goto destroy; 3075 if (!sta)
3076 return -ENOMEM;
2436 } 3077 }
2437 3078
2438 mutex_lock(&wk->sdata->u.mgd.mtx); 3079 mutex_lock(&local->mtx);
2439 ieee80211_rx_mgmt_probe_resp(wk->sdata, skb); 3080 ieee80211_recalc_idle(sdata->local);
2440 mutex_unlock(&wk->sdata->u.mgd.mtx); 3081 mutex_unlock(&local->mtx);
3082
3083 /* switch to the right channel */
3084 local->oper_channel = cbss->channel;
3085 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
3086
3087 if (!have_sta) {
3088 struct ieee80211_supported_band *sband;
3089 u32 rates = 0, basic_rates = 0;
3090 bool have_higher_than_11mbit;
3091 int min_rate = INT_MAX, min_rate_index = -1;
3092
3093 sband = sdata->local->hw.wiphy->bands[cbss->channel->band];
3094
3095 ieee80211_get_rates(sband, bss->supp_rates,
3096 bss->supp_rates_len,
3097 &rates, &basic_rates,
3098 &have_higher_than_11mbit,
3099 &min_rate, &min_rate_index);
3100
3101 /*
3102 * This used to be a workaround for basic rates missing
3103 * in the association response frame. Now that we no
3104 * longer use the basic rates from there, it probably
3105 * doesn't happen any more, but keep the workaround so
3106 * in case some *other* APs are buggy in different ways
3107 * we can connect -- with a warning.
3108 */
3109 if (!basic_rates && min_rate_index >= 0) {
3110 printk(KERN_DEBUG
3111 "%s: No basic rates, using min rate instead.\n",
3112 sdata->name);
3113 basic_rates = BIT(min_rate_index);
3114 }
3115
3116 sta->sta.supp_rates[cbss->channel->band] = rates;
3117 sdata->vif.bss_conf.basic_rates = basic_rates;
3118
3119 /* cf. IEEE 802.11 9.2.12 */
3120 if (local->oper_channel->band == IEEE80211_BAND_2GHZ &&
3121 have_higher_than_11mbit)
3122 sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
3123 else
3124 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
2441 3125
2442 wk->type = IEEE80211_WORK_AUTH; 3126 memcpy(ifmgd->bssid, cbss->bssid, ETH_ALEN);
2443 wk->probe_auth.tries = 0;
2444 return WORK_DONE_REQUEUE;
2445 destroy:
2446 if (wk->probe_auth.synced)
2447 drv_finish_tx_sync(local, wk->sdata, wk->filter_ta,
2448 IEEE80211_TX_SYNC_AUTH);
2449 3127
2450 return WORK_DONE_DESTROY; 3128 /* tell driver about BSSID and basic rates */
3129 ieee80211_bss_info_change_notify(sdata,
3130 BSS_CHANGED_BSSID | BSS_CHANGED_BASIC_RATES);
3131
3132 if (assoc)
3133 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
3134
3135 err = sta_info_insert(sta);
3136 sta = NULL;
3137 if (err) {
3138 printk(KERN_DEBUG
3139 "%s: failed to insert STA entry for the AP (error %d)\n",
3140 sdata->name, err);
3141 return err;
3142 }
3143 } else
3144 WARN_ON_ONCE(compare_ether_addr(ifmgd->bssid, cbss->bssid));
3145
3146 return 0;
2451} 3147}
2452 3148
3149/* config hooks */
2453int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, 3150int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
2454 struct cfg80211_auth_request *req) 3151 struct cfg80211_auth_request *req)
2455{ 3152{
2456 const u8 *ssid; 3153 struct ieee80211_local *local = sdata->local;
2457 struct ieee80211_work *wk; 3154 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3155 struct ieee80211_mgd_auth_data *auth_data;
2458 u16 auth_alg; 3156 u16 auth_alg;
3157 int err;
2459 3158
2460 if (req->local_state_change) 3159 /* prepare auth data structure */
2461 return 0; /* no need to update mac80211 state */
2462 3160
2463 switch (req->auth_type) { 3161 switch (req->auth_type) {
2464 case NL80211_AUTHTYPE_OPEN_SYSTEM: 3162 case NL80211_AUTHTYPE_OPEN_SYSTEM:
2465 auth_alg = WLAN_AUTH_OPEN; 3163 auth_alg = WLAN_AUTH_OPEN;
2466 break; 3164 break;
2467 case NL80211_AUTHTYPE_SHARED_KEY: 3165 case NL80211_AUTHTYPE_SHARED_KEY:
2468 if (IS_ERR(sdata->local->wep_tx_tfm)) 3166 if (IS_ERR(local->wep_tx_tfm))
2469 return -EOPNOTSUPP; 3167 return -EOPNOTSUPP;
2470 auth_alg = WLAN_AUTH_SHARED_KEY; 3168 auth_alg = WLAN_AUTH_SHARED_KEY;
2471 break; 3169 break;
@@ -2479,201 +3177,154 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
2479 return -EOPNOTSUPP; 3177 return -EOPNOTSUPP;
2480 } 3178 }
2481 3179
2482 wk = kzalloc(sizeof(*wk) + req->ie_len, GFP_KERNEL); 3180 auth_data = kzalloc(sizeof(*auth_data) + req->ie_len, GFP_KERNEL);
2483 if (!wk) 3181 if (!auth_data)
2484 return -ENOMEM; 3182 return -ENOMEM;
2485 3183
2486 memcpy(wk->filter_ta, req->bss->bssid, ETH_ALEN); 3184 auth_data->bss = req->bss;
2487 3185
2488 if (req->ie && req->ie_len) { 3186 if (req->ie && req->ie_len) {
2489 memcpy(wk->ie, req->ie, req->ie_len); 3187 memcpy(auth_data->ie, req->ie, req->ie_len);
2490 wk->ie_len = req->ie_len; 3188 auth_data->ie_len = req->ie_len;
2491 } 3189 }
2492 3190
2493 if (req->key && req->key_len) { 3191 if (req->key && req->key_len) {
2494 wk->probe_auth.key_len = req->key_len; 3192 auth_data->key_len = req->key_len;
2495 wk->probe_auth.key_idx = req->key_idx; 3193 auth_data->key_idx = req->key_idx;
2496 memcpy(wk->probe_auth.key, req->key, req->key_len); 3194 memcpy(auth_data->key, req->key, req->key_len);
2497 } 3195 }
2498 3196
2499 ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID); 3197 auth_data->algorithm = auth_alg;
2500 memcpy(wk->probe_auth.ssid, ssid + 2, ssid[1]);
2501 wk->probe_auth.ssid_len = ssid[1];
2502 3198
2503 wk->probe_auth.algorithm = auth_alg; 3199 /* try to authenticate/probe */
2504 wk->probe_auth.privacy = req->bss->capability & WLAN_CAPABILITY_PRIVACY;
2505 3200
2506 /* if we already have a probe, don't probe again */ 3201 mutex_lock(&ifmgd->mtx);
2507 if (req->bss->proberesp_ies)
2508 wk->type = IEEE80211_WORK_AUTH;
2509 else
2510 wk->type = IEEE80211_WORK_DIRECT_PROBE;
2511 wk->chan = req->bss->channel;
2512 wk->chan_type = NL80211_CHAN_NO_HT;
2513 wk->sdata = sdata;
2514 wk->done = ieee80211_probe_auth_done;
2515
2516 ieee80211_add_work(wk);
2517 return 0;
2518}
2519
2520/* create and insert a dummy station entry */
2521static int ieee80211_pre_assoc(struct ieee80211_sub_if_data *sdata,
2522 u8 *bssid) {
2523 struct sta_info *sta;
2524 int err;
2525
2526 sta = sta_info_alloc(sdata, bssid, GFP_KERNEL);
2527 if (!sta)
2528 return -ENOMEM;
2529
2530 sta->dummy = true;
2531
2532 err = sta_info_insert(sta);
2533 sta = NULL;
2534 if (err) {
2535 printk(KERN_DEBUG "%s: failed to insert Dummy STA entry for"
2536 " the AP (error %d)\n", sdata->name, err);
2537 return err;
2538 }
2539
2540 return 0;
2541}
2542
2543static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
2544 struct sk_buff *skb)
2545{
2546 struct ieee80211_local *local = wk->sdata->local;
2547 struct ieee80211_mgmt *mgmt;
2548 struct ieee80211_rx_status *rx_status;
2549 struct ieee802_11_elems elems;
2550 struct cfg80211_bss *cbss = wk->assoc.bss;
2551 u16 status;
2552 3202
2553 if (!skb) { 3203 if ((ifmgd->auth_data && !ifmgd->auth_data->done) ||
2554 sta_info_destroy_addr(wk->sdata, cbss->bssid); 3204 ifmgd->assoc_data) {
2555 cfg80211_send_assoc_timeout(wk->sdata->dev, wk->filter_ta); 3205 err = -EBUSY;
2556 goto destroy; 3206 goto err_free;
2557 } 3207 }
2558 3208
2559 if (wk->type == IEEE80211_WORK_ASSOC_BEACON_WAIT) { 3209 if (ifmgd->auth_data)
2560 mutex_lock(&wk->sdata->u.mgd.mtx); 3210 ieee80211_destroy_auth_data(sdata, false);
2561 rx_status = (void *) skb->cb;
2562 ieee802_11_parse_elems(skb->data + 24 + 12, skb->len - 24 - 12, &elems);
2563 ieee80211_rx_bss_info(wk->sdata, (void *)skb->data, skb->len, rx_status,
2564 &elems, true);
2565 mutex_unlock(&wk->sdata->u.mgd.mtx);
2566 3211
2567 wk->type = IEEE80211_WORK_ASSOC; 3212 /* prep auth_data so we don't go into idle on disassoc */
2568 /* not really done yet */ 3213 ifmgd->auth_data = auth_data;
2569 return WORK_DONE_REQUEUE;
2570 }
2571 3214
2572 mgmt = (void *)skb->data; 3215 if (ifmgd->associated)
2573 status = le16_to_cpu(mgmt->u.assoc_resp.status_code); 3216 ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
2574 3217
2575 if (status == WLAN_STATUS_SUCCESS) { 3218 printk(KERN_DEBUG "%s: authenticate with %pM\n",
2576 if (wk->assoc.synced) 3219 sdata->name, req->bss->bssid);
2577 drv_finish_tx_sync(local, wk->sdata, wk->filter_ta,
2578 IEEE80211_TX_SYNC_ASSOC);
2579 3220
2580 mutex_lock(&wk->sdata->u.mgd.mtx); 3221 err = ieee80211_prep_connection(sdata, req->bss, false);
2581 if (!ieee80211_assoc_success(wk, mgmt, skb->len)) { 3222 if (err)
2582 mutex_unlock(&wk->sdata->u.mgd.mtx); 3223 goto err_clear;
2583 /* oops -- internal error -- send timeout for now */
2584 sta_info_destroy_addr(wk->sdata, cbss->bssid);
2585 cfg80211_send_assoc_timeout(wk->sdata->dev,
2586 wk->filter_ta);
2587 return WORK_DONE_DESTROY;
2588 }
2589 3224
2590 mutex_unlock(&wk->sdata->u.mgd.mtx); 3225 err = ieee80211_probe_auth(sdata);
2591 } else { 3226 if (err) {
2592 /* assoc failed - destroy the dummy station entry */ 3227 sta_info_destroy_addr(sdata, req->bss->bssid);
2593 sta_info_destroy_addr(wk->sdata, cbss->bssid); 3228 goto err_clear;
2594 } 3229 }
2595 3230
2596 cfg80211_send_rx_assoc(wk->sdata->dev, skb->data, skb->len); 3231 /* hold our own reference */
2597 destroy: 3232 cfg80211_ref_bss(auth_data->bss);
2598 if (wk->assoc.synced) 3233 err = 0;
2599 drv_finish_tx_sync(local, wk->sdata, wk->filter_ta, 3234 goto out_unlock;
2600 IEEE80211_TX_SYNC_ASSOC); 3235
3236 err_clear:
3237 ifmgd->auth_data = NULL;
3238 err_free:
3239 kfree(auth_data);
3240 out_unlock:
3241 mutex_unlock(&ifmgd->mtx);
2601 3242
2602 return WORK_DONE_DESTROY; 3243 return err;
2603} 3244}
2604 3245
2605int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, 3246int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2606 struct cfg80211_assoc_request *req) 3247 struct cfg80211_assoc_request *req)
2607{ 3248{
3249 struct ieee80211_local *local = sdata->local;
2608 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 3250 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2609 struct ieee80211_bss *bss = (void *)req->bss->priv; 3251 struct ieee80211_bss *bss = (void *)req->bss->priv;
2610 struct ieee80211_work *wk; 3252 struct ieee80211_mgd_assoc_data *assoc_data;
2611 const u8 *ssid; 3253 struct ieee80211_supported_band *sband;
3254 const u8 *ssidie;
2612 int i, err; 3255 int i, err;
2613 3256
3257 ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
3258 if (!ssidie)
3259 return -EINVAL;
3260
3261 assoc_data = kzalloc(sizeof(*assoc_data) + req->ie_len, GFP_KERNEL);
3262 if (!assoc_data)
3263 return -ENOMEM;
3264
2614 mutex_lock(&ifmgd->mtx); 3265 mutex_lock(&ifmgd->mtx);
2615 if (ifmgd->associated) {
2616 if (!req->prev_bssid ||
2617 memcmp(req->prev_bssid, ifmgd->associated->bssid,
2618 ETH_ALEN)) {
2619 /*
2620 * We are already associated and the request was not a
2621 * reassociation request from the current BSS, so
2622 * reject it.
2623 */
2624 mutex_unlock(&ifmgd->mtx);
2625 return -EALREADY;
2626 }
2627 3266
2628 /* Trying to reassociate - clear previous association state */ 3267 if (ifmgd->associated)
2629 ieee80211_set_disassoc(sdata, true, false); 3268 ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
3269
3270 if (ifmgd->auth_data && !ifmgd->auth_data->done) {
3271 err = -EBUSY;
3272 goto err_free;
2630 } 3273 }
2631 mutex_unlock(&ifmgd->mtx);
2632 3274
2633 wk = kzalloc(sizeof(*wk) + req->ie_len, GFP_KERNEL); 3275 if (ifmgd->assoc_data) {
2634 if (!wk) 3276 err = -EBUSY;
2635 return -ENOMEM; 3277 goto err_free;
3278 }
2636 3279
2637 /* 3280 if (ifmgd->auth_data) {
2638 * create a dummy station info entry in order 3281 bool match;
2639 * to start accepting incoming EAPOL packets from the station 3282
2640 */ 3283 /* keep sta info, bssid if matching */
2641 err = ieee80211_pre_assoc(sdata, req->bss->bssid); 3284 match = compare_ether_addr(ifmgd->bssid, req->bss->bssid) == 0;
2642 if (err) { 3285 ieee80211_destroy_auth_data(sdata, match);
2643 kfree(wk);
2644 return err;
2645 } 3286 }
2646 3287
3288 /* prepare assoc data */
3289
2647 ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N; 3290 ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
2648 ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED; 3291 ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
2649 3292
2650 ifmgd->beacon_crc_valid = false; 3293 ifmgd->beacon_crc_valid = false;
2651 3294
3295 /*
3296 * IEEE802.11n does not allow TKIP/WEP as pairwise ciphers in HT mode.
3297 * We still associate in non-HT mode (11a/b/g) if any one of these
3298 * ciphers is configured as pairwise.
3299 * We can set this to true for non-11n hardware, that'll be checked
3300 * separately along with the peer capabilities.
3301 */
2652 for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) 3302 for (i = 0; i < req->crypto.n_ciphers_pairwise; i++)
2653 if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 || 3303 if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
2654 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP || 3304 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
2655 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) 3305 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104)
2656 ifmgd->flags |= IEEE80211_STA_DISABLE_11N; 3306 ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
2657 3307
2658
2659 if (req->flags & ASSOC_REQ_DISABLE_HT) 3308 if (req->flags & ASSOC_REQ_DISABLE_HT)
2660 ifmgd->flags |= IEEE80211_STA_DISABLE_11N; 3309 ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
2661 3310
3311 /* Also disable HT if we don't support it or the AP doesn't use WMM */
3312 sband = local->hw.wiphy->bands[req->bss->channel->band];
3313 if (!sband->ht_cap.ht_supported ||
3314 local->hw.queues < 4 || !bss->wmm_used)
3315 ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
3316
2662 memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa)); 3317 memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
2663 memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask, 3318 memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
2664 sizeof(ifmgd->ht_capa_mask)); 3319 sizeof(ifmgd->ht_capa_mask));
2665 3320
2666 if (req->ie && req->ie_len) { 3321 if (req->ie && req->ie_len) {
2667 memcpy(wk->ie, req->ie, req->ie_len); 3322 memcpy(assoc_data->ie, req->ie, req->ie_len);
2668 wk->ie_len = req->ie_len; 3323 assoc_data->ie_len = req->ie_len;
2669 } else 3324 }
2670 wk->ie_len = 0;
2671
2672 wk->assoc.bss = req->bss;
2673 3325
2674 memcpy(wk->filter_ta, req->bss->bssid, ETH_ALEN); 3326 assoc_data->bss = req->bss;
2675 3327
2676 /* new association always uses requested smps mode */
2677 if (ifmgd->req_smps == IEEE80211_SMPS_AUTOMATIC) { 3328 if (ifmgd->req_smps == IEEE80211_SMPS_AUTOMATIC) {
2678 if (ifmgd->powersave) 3329 if (ifmgd->powersave)
2679 ifmgd->ap_smps = IEEE80211_SMPS_DYNAMIC; 3330 ifmgd->ap_smps = IEEE80211_SMPS_DYNAMIC;
@@ -2682,47 +3333,27 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2682 } else 3333 } else
2683 ifmgd->ap_smps = ifmgd->req_smps; 3334 ifmgd->ap_smps = ifmgd->req_smps;
2684 3335
2685 wk->assoc.smps = ifmgd->ap_smps; 3336 assoc_data->capability = req->bss->capability;
2686 /* 3337 assoc_data->wmm = bss->wmm_used && (local->hw.queues >= 4);
2687 * IEEE802.11n does not allow TKIP/WEP as pairwise ciphers in HT mode. 3338 assoc_data->supp_rates = bss->supp_rates;
2688 * We still associate in non-HT mode (11a/b/g) if any one of these 3339 assoc_data->supp_rates_len = bss->supp_rates_len;
2689 * ciphers is configured as pairwise. 3340 assoc_data->ht_information_ie =
2690 * We can set this to true for non-11n hardware, that'll be checked
2691 * separately along with the peer capabilities.
2692 */
2693 wk->assoc.use_11n = !(ifmgd->flags & IEEE80211_STA_DISABLE_11N);
2694 wk->assoc.capability = req->bss->capability;
2695 wk->assoc.wmm_used = bss->wmm_used;
2696 wk->assoc.supp_rates = bss->supp_rates;
2697 wk->assoc.supp_rates_len = bss->supp_rates_len;
2698 wk->assoc.ht_information_ie =
2699 ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_INFORMATION); 3341 ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_INFORMATION);
2700 3342
2701 if (bss->wmm_used && bss->uapsd_supported && 3343 if (bss->wmm_used && bss->uapsd_supported &&
2702 (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) { 3344 (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
2703 wk->assoc.uapsd_used = true; 3345 assoc_data->uapsd = true;
2704 ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED; 3346 ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED;
2705 } else { 3347 } else {
2706 wk->assoc.uapsd_used = false; 3348 assoc_data->uapsd = false;
2707 ifmgd->flags &= ~IEEE80211_STA_UAPSD_ENABLED; 3349 ifmgd->flags &= ~IEEE80211_STA_UAPSD_ENABLED;
2708 } 3350 }
2709 3351
2710 ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID); 3352 memcpy(assoc_data->ssid, ssidie + 2, ssidie[1]);
2711 memcpy(wk->assoc.ssid, ssid + 2, ssid[1]); 3353 assoc_data->ssid_len = ssidie[1];
2712 wk->assoc.ssid_len = ssid[1];
2713 3354
2714 if (req->prev_bssid) 3355 if (req->prev_bssid)
2715 memcpy(wk->assoc.prev_bssid, req->prev_bssid, ETH_ALEN); 3356 memcpy(assoc_data->prev_bssid, req->prev_bssid, ETH_ALEN);
2716
2717 wk->chan = req->bss->channel;
2718 wk->chan_type = NL80211_CHAN_NO_HT;
2719 wk->sdata = sdata;
2720 wk->done = ieee80211_assoc_done;
2721 if (!bss->dtim_period &&
2722 sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD)
2723 wk->type = IEEE80211_WORK_ASSOC_BEACON_WAIT;
2724 else
2725 wk->type = IEEE80211_WORK_ASSOC;
2726 3357
2727 if (req->use_mfp) { 3358 if (req->use_mfp) {
2728 ifmgd->mfp = IEEE80211_MFP_REQUIRED; 3359 ifmgd->mfp = IEEE80211_MFP_REQUIRED;
@@ -2740,91 +3371,87 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2740 sdata->control_port_protocol = req->crypto.control_port_ethertype; 3371 sdata->control_port_protocol = req->crypto.control_port_ethertype;
2741 sdata->control_port_no_encrypt = req->crypto.control_port_no_encrypt; 3372 sdata->control_port_no_encrypt = req->crypto.control_port_no_encrypt;
2742 3373
2743 ieee80211_add_work(wk); 3374 /* kick off associate process */
2744 return 0; 3375
3376 ifmgd->assoc_data = assoc_data;
3377
3378 err = ieee80211_prep_connection(sdata, req->bss, true);
3379 if (err)
3380 goto err_clear;
3381
3382 if (!bss->dtim_period &&
3383 sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) {
3384 /*
3385 * Wait up to one beacon interval ...
3386 * should this be more if we miss one?
3387 */
3388 printk(KERN_DEBUG "%s: waiting for beacon from %pM\n",
3389 sdata->name, ifmgd->bssid);
3390 assoc_data->timeout = jiffies +
3391 TU_TO_EXP_TIME(req->bss->beacon_interval);
3392 } else {
3393 assoc_data->have_beacon = true;
3394 assoc_data->sent_assoc = false;
3395 assoc_data->timeout = jiffies;
3396 }
3397 run_again(ifmgd, assoc_data->timeout);
3398
3399 if (bss->corrupt_data) {
3400 char *corrupt_type = "data";
3401 if (bss->corrupt_data & IEEE80211_BSS_CORRUPT_BEACON) {
3402 if (bss->corrupt_data &
3403 IEEE80211_BSS_CORRUPT_PROBE_RESP)
3404 corrupt_type = "beacon and probe response";
3405 else
3406 corrupt_type = "beacon";
3407 } else if (bss->corrupt_data & IEEE80211_BSS_CORRUPT_PROBE_RESP)
3408 corrupt_type = "probe response";
3409 printk(KERN_DEBUG "%s: associating with AP with corrupt %s\n",
3410 sdata->name, corrupt_type);
3411 }
3412
3413 err = 0;
3414 goto out;
3415 err_clear:
3416 ifmgd->assoc_data = NULL;
3417 err_free:
3418 kfree(assoc_data);
3419 out:
3420 mutex_unlock(&ifmgd->mtx);
3421
3422 return err;
2745} 3423}
2746 3424
2747int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, 3425int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2748 struct cfg80211_deauth_request *req, 3426 struct cfg80211_deauth_request *req)
2749 void *cookie)
2750{ 3427{
2751 struct ieee80211_local *local = sdata->local;
2752 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 3428 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2753 u8 bssid[ETH_ALEN]; 3429 u8 frame_buf[DEAUTH_DISASSOC_LEN];
2754 bool assoc_bss = false;
2755 3430
2756 mutex_lock(&ifmgd->mtx); 3431 mutex_lock(&ifmgd->mtx);
2757 3432
2758 memcpy(bssid, req->bss->bssid, ETH_ALEN); 3433 if (ifmgd->auth_data) {
2759 if (ifmgd->associated == req->bss) { 3434 ieee80211_destroy_auth_data(sdata, false);
2760 ieee80211_set_disassoc(sdata, false, true);
2761 mutex_unlock(&ifmgd->mtx);
2762 assoc_bss = true;
2763 } else {
2764 bool not_auth_yet = false;
2765 struct ieee80211_work *tmp, *wk = NULL;
2766
2767 mutex_unlock(&ifmgd->mtx); 3435 mutex_unlock(&ifmgd->mtx);
2768 3436 return 0;
2769 mutex_lock(&local->mtx);
2770 list_for_each_entry(tmp, &local->work_list, list) {
2771 if (tmp->sdata != sdata)
2772 continue;
2773
2774 if (tmp->type != IEEE80211_WORK_DIRECT_PROBE &&
2775 tmp->type != IEEE80211_WORK_AUTH &&
2776 tmp->type != IEEE80211_WORK_ASSOC &&
2777 tmp->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
2778 continue;
2779
2780 if (memcmp(req->bss->bssid, tmp->filter_ta, ETH_ALEN))
2781 continue;
2782
2783 not_auth_yet = tmp->type == IEEE80211_WORK_DIRECT_PROBE;
2784 list_del_rcu(&tmp->list);
2785 synchronize_rcu();
2786 wk = tmp;
2787 break;
2788 }
2789 mutex_unlock(&local->mtx);
2790
2791 if (wk && wk->type == IEEE80211_WORK_ASSOC) {
2792 /* clean up dummy sta & TX sync */
2793 sta_info_destroy_addr(wk->sdata, wk->filter_ta);
2794 if (wk->assoc.synced)
2795 drv_finish_tx_sync(local, wk->sdata,
2796 wk->filter_ta,
2797 IEEE80211_TX_SYNC_ASSOC);
2798 } else if (wk && wk->type == IEEE80211_WORK_AUTH) {
2799 if (wk->probe_auth.synced)
2800 drv_finish_tx_sync(local, wk->sdata,
2801 wk->filter_ta,
2802 IEEE80211_TX_SYNC_AUTH);
2803 }
2804 kfree(wk);
2805
2806 /*
2807 * If somebody requests authentication and we haven't
2808 * sent out an auth frame yet there's no need to send
2809 * out a deauth frame either. If the state was PROBE,
2810 * then this is the case. If it's AUTH we have sent a
2811 * frame, and if it's IDLE we have completed the auth
2812 * process already.
2813 */
2814 if (not_auth_yet) {
2815 __cfg80211_auth_canceled(sdata->dev, bssid);
2816 return 0;
2817 }
2818 } 3437 }
2819 3438
2820 printk(KERN_DEBUG "%s: deauthenticating from %pM by local choice (reason=%d)\n", 3439 printk(KERN_DEBUG
2821 sdata->name, bssid, req->reason_code); 3440 "%s: deauthenticating from %pM by local choice (reason=%d)\n",
3441 sdata->name, req->bssid, req->reason_code);
3442
3443 if (ifmgd->associated &&
3444 compare_ether_addr(ifmgd->associated->bssid, req->bssid) == 0)
3445 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
3446 req->reason_code, true, frame_buf);
3447 else
3448 ieee80211_send_deauth_disassoc(sdata, req->bssid,
3449 IEEE80211_STYPE_DEAUTH,
3450 req->reason_code, true,
3451 frame_buf);
3452 mutex_unlock(&ifmgd->mtx);
2822 3453
2823 ieee80211_send_deauth_disassoc(sdata, bssid, IEEE80211_STYPE_DEAUTH, 3454 __cfg80211_send_deauth(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN);
2824 req->reason_code, cookie,
2825 !req->local_state_change);
2826 if (assoc_bss)
2827 sta_info_flush(sdata->local, sdata);
2828 3455
2829 mutex_lock(&sdata->local->mtx); 3456 mutex_lock(&sdata->local->mtx);
2830 ieee80211_recalc_idle(sdata->local); 3457 ieee80211_recalc_idle(sdata->local);
@@ -2834,11 +3461,11 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2834} 3461}
2835 3462
2836int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, 3463int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
2837 struct cfg80211_disassoc_request *req, 3464 struct cfg80211_disassoc_request *req)
2838 void *cookie)
2839{ 3465{
2840 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 3466 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2841 u8 bssid[ETH_ALEN]; 3467 u8 bssid[ETH_ALEN];
3468 u8 frame_buf[DEAUTH_DISASSOC_LEN];
2842 3469
2843 mutex_lock(&ifmgd->mtx); 3470 mutex_lock(&ifmgd->mtx);
2844 3471
@@ -2857,14 +3484,12 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
2857 sdata->name, req->bss->bssid, req->reason_code); 3484 sdata->name, req->bss->bssid, req->reason_code);
2858 3485
2859 memcpy(bssid, req->bss->bssid, ETH_ALEN); 3486 memcpy(bssid, req->bss->bssid, ETH_ALEN);
2860 ieee80211_set_disassoc(sdata, false, true); 3487 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DISASSOC,
2861 3488 req->reason_code, !req->local_state_change,
3489 frame_buf);
2862 mutex_unlock(&ifmgd->mtx); 3490 mutex_unlock(&ifmgd->mtx);
2863 3491
2864 ieee80211_send_deauth_disassoc(sdata, req->bss->bssid, 3492 __cfg80211_send_disassoc(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN);
2865 IEEE80211_STYPE_DISASSOC, req->reason_code,
2866 cookie, !req->local_state_change);
2867 sta_info_flush(sdata->local, sdata);
2868 3493
2869 mutex_lock(&sdata->local->mtx); 3494 mutex_lock(&sdata->local->mtx);
2870 ieee80211_recalc_idle(sdata->local); 3495 ieee80211_recalc_idle(sdata->local);
@@ -2873,6 +3498,19 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
2873 return 0; 3498 return 0;
2874} 3499}
2875 3500
3501void ieee80211_mgd_teardown(struct ieee80211_sub_if_data *sdata)
3502{
3503 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3504
3505 mutex_lock(&ifmgd->mtx);
3506 if (ifmgd->assoc_data)
3507 ieee80211_destroy_assoc_data(sdata, false);
3508 if (ifmgd->auth_data)
3509 ieee80211_destroy_auth_data(sdata, false);
3510 del_timer_sync(&ifmgd->timer);
3511 mutex_unlock(&ifmgd->mtx);
3512}
3513
2876void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, 3514void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
2877 enum nl80211_cqm_rssi_threshold_event rssi_event, 3515 enum nl80211_cqm_rssi_threshold_event rssi_event,
2878 gfp_t gfp) 3516 gfp_t gfp)
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 596efaf50e09..ef8eba1d736d 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -98,13 +98,12 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
98 mutex_lock(&local->sta_mtx); 98 mutex_lock(&local->sta_mtx);
99 list_for_each_entry(sta, &local->sta_list, list) { 99 list_for_each_entry(sta, &local->sta_list, list) {
100 if (sta->uploaded) { 100 if (sta->uploaded) {
101 sdata = sta->sdata; 101 enum ieee80211_sta_state state;
102 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
103 sdata = container_of(sdata->bss,
104 struct ieee80211_sub_if_data,
105 u.ap);
106 102
107 drv_sta_remove(local, sdata, &sta->sta); 103 state = sta->sta_state;
104 for (; state > IEEE80211_STA_NOTEXIST; state--)
105 WARN_ON(drv_sta_state(local, sta->sdata, sta,
106 state, state - 1));
108 } 107 }
109 108
110 mesh_plink_quiesce(sta); 109 mesh_plink_quiesce(sta);
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index f9b8e819ca63..b4f7600a3e36 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -159,7 +159,6 @@ static struct rate_control_ref *rate_control_alloc(const char *name,
159 ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL); 159 ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL);
160 if (!ref) 160 if (!ref)
161 goto fail_ref; 161 goto fail_ref;
162 kref_init(&ref->kref);
163 ref->local = local; 162 ref->local = local;
164 ref->ops = ieee80211_rate_control_ops_get(name); 163 ref->ops = ieee80211_rate_control_ops_get(name);
165 if (!ref->ops) 164 if (!ref->ops)
@@ -184,11 +183,8 @@ fail_ref:
184 return NULL; 183 return NULL;
185} 184}
186 185
187static void rate_control_release(struct kref *kref) 186static void rate_control_free(struct rate_control_ref *ctrl_ref)
188{ 187{
189 struct rate_control_ref *ctrl_ref;
190
191 ctrl_ref = container_of(kref, struct rate_control_ref, kref);
192 ctrl_ref->ops->free(ctrl_ref->priv); 188 ctrl_ref->ops->free(ctrl_ref->priv);
193 189
194#ifdef CONFIG_MAC80211_DEBUGFS 190#ifdef CONFIG_MAC80211_DEBUGFS
@@ -293,8 +289,8 @@ bool rate_control_send_low(struct ieee80211_sta *sta,
293} 289}
294EXPORT_SYMBOL(rate_control_send_low); 290EXPORT_SYMBOL(rate_control_send_low);
295 291
296static void rate_idx_match_mask(struct ieee80211_tx_rate *rate, 292static bool rate_idx_match_legacy_mask(struct ieee80211_tx_rate *rate,
297 int n_bitrates, u32 mask) 293 int n_bitrates, u32 mask)
298{ 294{
299 int j; 295 int j;
300 296
@@ -303,7 +299,7 @@ static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
303 if (mask & (1 << j)) { 299 if (mask & (1 << j)) {
304 /* Okay, found a suitable rate. Use it. */ 300 /* Okay, found a suitable rate. Use it. */
305 rate->idx = j; 301 rate->idx = j;
306 return; 302 return true;
307 } 303 }
308 } 304 }
309 305
@@ -312,6 +308,112 @@ static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
312 if (mask & (1 << j)) { 308 if (mask & (1 << j)) {
313 /* Okay, found a suitable rate. Use it. */ 309 /* Okay, found a suitable rate. Use it. */
314 rate->idx = j; 310 rate->idx = j;
311 return true;
312 }
313 }
314 return false;
315}
316
317static bool rate_idx_match_mcs_mask(struct ieee80211_tx_rate *rate,
318 u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
319{
320 int i, j;
321 int ridx, rbit;
322
323 ridx = rate->idx / 8;
324 rbit = rate->idx % 8;
325
326 /* sanity check */
327 if (ridx < 0 || ridx >= IEEE80211_HT_MCS_MASK_LEN)
328 return false;
329
330 /* See whether the selected rate or anything below it is allowed. */
331 for (i = ridx; i >= 0; i--) {
332 for (j = rbit; j >= 0; j--)
333 if (mcs_mask[i] & BIT(j)) {
334 rate->idx = i * 8 + j;
335 return true;
336 }
337 rbit = 7;
338 }
339
340 /* Try to find a higher rate that would be allowed */
341 ridx = (rate->idx + 1) / 8;
342 rbit = (rate->idx + 1) % 8;
343
344 for (i = ridx; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
345 for (j = rbit; j < 8; j++)
346 if (mcs_mask[i] & BIT(j)) {
347 rate->idx = i * 8 + j;
348 return true;
349 }
350 rbit = 0;
351 }
352 return false;
353}
354
355
356
357static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
358 struct ieee80211_tx_rate_control *txrc,
359 u32 mask,
360 u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
361{
362 struct ieee80211_tx_rate alt_rate;
363
364 /* handle HT rates */
365 if (rate->flags & IEEE80211_TX_RC_MCS) {
366 if (rate_idx_match_mcs_mask(rate, mcs_mask))
367 return;
368
369 /* also try the legacy rates. */
370 alt_rate.idx = 0;
371 /* keep protection flags */
372 alt_rate.flags = rate->flags &
373 (IEEE80211_TX_RC_USE_RTS_CTS |
374 IEEE80211_TX_RC_USE_CTS_PROTECT |
375 IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
376 alt_rate.count = rate->count;
377 if (rate_idx_match_legacy_mask(&alt_rate,
378 txrc->sband->n_bitrates,
379 mask)) {
380 *rate = alt_rate;
381 return;
382 }
383 } else {
384 struct sk_buff *skb = txrc->skb;
385 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
386 __le16 fc;
387
388 /* handle legacy rates */
389 if (rate_idx_match_legacy_mask(rate, txrc->sband->n_bitrates,
390 mask))
391 return;
392
393 /* if HT BSS, and we handle a data frame, also try HT rates */
394 if (txrc->bss_conf->channel_type == NL80211_CHAN_NO_HT)
395 return;
396
397 fc = hdr->frame_control;
398 if (!ieee80211_is_data(fc))
399 return;
400
401 alt_rate.idx = 0;
402 /* keep protection flags */
403 alt_rate.flags = rate->flags &
404 (IEEE80211_TX_RC_USE_RTS_CTS |
405 IEEE80211_TX_RC_USE_CTS_PROTECT |
406 IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
407 alt_rate.count = rate->count;
408
409 alt_rate.flags |= IEEE80211_TX_RC_MCS;
410
411 if ((txrc->bss_conf->channel_type == NL80211_CHAN_HT40MINUS) ||
412 (txrc->bss_conf->channel_type == NL80211_CHAN_HT40PLUS))
413 alt_rate.flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
414
415 if (rate_idx_match_mcs_mask(&alt_rate, mcs_mask)) {
416 *rate = alt_rate;
315 return; 417 return;
316 } 418 }
317 } 419 }
@@ -335,6 +437,7 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
335 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb); 437 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
336 int i; 438 int i;
337 u32 mask; 439 u32 mask;
440 u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN];
338 441
339 if (sta && test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) { 442 if (sta && test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) {
340 ista = &sta->sta; 443 ista = &sta->sta;
@@ -358,10 +461,14 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
358 * the common case. 461 * the common case.
359 */ 462 */
360 mask = sdata->rc_rateidx_mask[info->band]; 463 mask = sdata->rc_rateidx_mask[info->band];
464 memcpy(mcs_mask, sdata->rc_rateidx_mcs_mask[info->band],
465 sizeof(mcs_mask));
361 if (mask != (1 << txrc->sband->n_bitrates) - 1) { 466 if (mask != (1 << txrc->sband->n_bitrates) - 1) {
362 if (sta) { 467 if (sta) {
363 /* Filter out rates that the STA does not support */ 468 /* Filter out rates that the STA does not support */
364 mask &= sta->sta.supp_rates[info->band]; 469 mask &= sta->sta.supp_rates[info->band];
470 for (i = 0; i < sizeof(mcs_mask); i++)
471 mcs_mask[i] &= sta->sta.ht_cap.mcs.rx_mask[i];
365 } 472 }
366 /* 473 /*
367 * Make sure the rate index selected for each TX rate is 474 * Make sure the rate index selected for each TX rate is
@@ -372,32 +479,18 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
372 /* Skip invalid rates */ 479 /* Skip invalid rates */
373 if (info->control.rates[i].idx < 0) 480 if (info->control.rates[i].idx < 0)
374 break; 481 break;
375 /* Rate masking supports only legacy rates for now */ 482 rate_idx_match_mask(&info->control.rates[i], txrc,
376 if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS) 483 mask, mcs_mask);
377 continue;
378 rate_idx_match_mask(&info->control.rates[i],
379 txrc->sband->n_bitrates, mask);
380 } 484 }
381 } 485 }
382 486
383 BUG_ON(info->control.rates[0].idx < 0); 487 BUG_ON(info->control.rates[0].idx < 0);
384} 488}
385 489
386struct rate_control_ref *rate_control_get(struct rate_control_ref *ref)
387{
388 kref_get(&ref->kref);
389 return ref;
390}
391
392void rate_control_put(struct rate_control_ref *ref)
393{
394 kref_put(&ref->kref, rate_control_release);
395}
396
397int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, 490int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
398 const char *name) 491 const char *name)
399{ 492{
400 struct rate_control_ref *ref, *old; 493 struct rate_control_ref *ref;
401 494
402 ASSERT_RTNL(); 495 ASSERT_RTNL();
403 496
@@ -417,12 +510,8 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
417 return -ENOENT; 510 return -ENOENT;
418 } 511 }
419 512
420 old = local->rate_ctrl; 513 WARN_ON(local->rate_ctrl);
421 local->rate_ctrl = ref; 514 local->rate_ctrl = ref;
422 if (old) {
423 rate_control_put(old);
424 sta_info_flush(local, NULL);
425 }
426 515
427 wiphy_debug(local->hw.wiphy, "Selected rate control algorithm '%s'\n", 516 wiphy_debug(local->hw.wiphy, "Selected rate control algorithm '%s'\n",
428 ref->ops->name); 517 ref->ops->name);
@@ -440,6 +529,6 @@ void rate_control_deinitialize(struct ieee80211_local *local)
440 return; 529 return;
441 530
442 local->rate_ctrl = NULL; 531 local->rate_ctrl = NULL;
443 rate_control_put(ref); 532 rate_control_free(ref);
444} 533}
445 534
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 80cfc006dd74..fbb1efdc4d04 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -14,7 +14,6 @@
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/skbuff.h> 15#include <linux/skbuff.h>
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/kref.h>
18#include <net/mac80211.h> 17#include <net/mac80211.h>
19#include "ieee80211_i.h" 18#include "ieee80211_i.h"
20#include "sta_info.h" 19#include "sta_info.h"
@@ -23,14 +22,11 @@ struct rate_control_ref {
23 struct ieee80211_local *local; 22 struct ieee80211_local *local;
24 struct rate_control_ops *ops; 23 struct rate_control_ops *ops;
25 void *priv; 24 void *priv;
26 struct kref kref;
27}; 25};
28 26
29void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, 27void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
30 struct sta_info *sta, 28 struct sta_info *sta,
31 struct ieee80211_tx_rate_control *txrc); 29 struct ieee80211_tx_rate_control *txrc);
32struct rate_control_ref *rate_control_get(struct rate_control_ref *ref);
33void rate_control_put(struct rate_control_ref *ref);
34 30
35static inline void rate_control_tx_status(struct ieee80211_local *local, 31static inline void rate_control_tx_status(struct ieee80211_local *local,
36 struct ieee80211_supported_band *sband, 32 struct ieee80211_supported_band *sband,
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index ff5f7b84e825..16e0b277b9a8 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -568,6 +568,13 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
568 minstrel_next_sample_idx(mi); 568 minstrel_next_sample_idx(mi);
569 569
570 /* 570 /*
571 * Sampling might add some overhead (RTS, no aggregation)
572 * to the frame. Hence, don't use sampling for the currently
573 * used max TP rate.
574 */
575 if (sample_idx == mi->max_tp_rate)
576 return -1;
577 /*
571 * When not using MRR, do not sample if the probability is already 578 * When not using MRR, do not sample if the probability is already
572 * higher than 95% to avoid wasting airtime 579 * higher than 95% to avoid wasting airtime
573 */ 580 */
@@ -692,6 +699,7 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
692 int ack_dur; 699 int ack_dur;
693 int stbc; 700 int stbc;
694 int i; 701 int i;
702 unsigned int smps;
695 703
696 /* fall back to the old minstrel for legacy stations */ 704 /* fall back to the old minstrel for legacy stations */
697 if (!sta->ht_cap.ht_supported) 705 if (!sta->ht_cap.ht_supported)
@@ -731,6 +739,9 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
731 oper_chan_type != NL80211_CHAN_HT40PLUS) 739 oper_chan_type != NL80211_CHAN_HT40PLUS)
732 sta_cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; 740 sta_cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
733 741
742 smps = (sta_cap & IEEE80211_HT_CAP_SM_PS) >>
743 IEEE80211_HT_CAP_SM_PS_SHIFT;
744
734 for (i = 0; i < ARRAY_SIZE(mi->groups); i++) { 745 for (i = 0; i < ARRAY_SIZE(mi->groups); i++) {
735 u16 req = 0; 746 u16 req = 0;
736 747
@@ -748,6 +759,11 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
748 if ((sta_cap & req) != req) 759 if ((sta_cap & req) != req)
749 continue; 760 continue;
750 761
762 /* Mark MCS > 7 as unsupported if STA is in static SMPS mode */
763 if (smps == WLAN_HT_CAP_SM_PS_STATIC &&
764 minstrel_mcs_groups[i].streams > 1)
765 continue;
766
751 mi->groups[i].supported = 767 mi->groups[i].supported =
752 mcs->rx_mask[minstrel_mcs_groups[i].streams - 1]; 768 mcs->rx_mask[minstrel_mcs_groups[i].streams - 1];
753 769
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 5a5e504a8ffb..bcfe8c77c839 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -19,6 +19,7 @@
19#include <linux/export.h> 19#include <linux/export.h>
20#include <net/mac80211.h> 20#include <net/mac80211.h>
21#include <net/ieee80211_radiotap.h> 21#include <net/ieee80211_radiotap.h>
22#include <asm/unaligned.h>
22 23
23#include "ieee80211_i.h" 24#include "ieee80211_i.h"
24#include "driver-ops.h" 25#include "driver-ops.h"
@@ -176,7 +177,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
176 pos += 2; 177 pos += 2;
177 178
178 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */ 179 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
179 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) { 180 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM &&
181 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
180 *pos = status->signal; 182 *pos = status->signal;
181 rthdr->it_present |= 183 rthdr->it_present |=
182 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL); 184 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
@@ -226,7 +228,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
226{ 228{
227 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb); 229 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
228 struct ieee80211_sub_if_data *sdata; 230 struct ieee80211_sub_if_data *sdata;
229 int needed_headroom = 0; 231 int needed_headroom;
230 struct sk_buff *skb, *skb2; 232 struct sk_buff *skb, *skb2;
231 struct net_device *prev_dev = NULL; 233 struct net_device *prev_dev = NULL;
232 int present_fcs_len = 0; 234 int present_fcs_len = 0;
@@ -488,12 +490,12 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
488 if (ieee80211_has_tods(hdr->frame_control) || 490 if (ieee80211_has_tods(hdr->frame_control) ||
489 !ieee80211_has_fromds(hdr->frame_control)) 491 !ieee80211_has_fromds(hdr->frame_control))
490 return RX_DROP_MONITOR; 492 return RX_DROP_MONITOR;
491 if (memcmp(hdr->addr3, dev_addr, ETH_ALEN) == 0) 493 if (compare_ether_addr(hdr->addr3, dev_addr) == 0)
492 return RX_DROP_MONITOR; 494 return RX_DROP_MONITOR;
493 } else { 495 } else {
494 if (!ieee80211_has_a4(hdr->frame_control)) 496 if (!ieee80211_has_a4(hdr->frame_control))
495 return RX_DROP_MONITOR; 497 return RX_DROP_MONITOR;
496 if (memcmp(hdr->addr4, dev_addr, ETH_ALEN) == 0) 498 if (compare_ether_addr(hdr->addr4, dev_addr) == 0)
497 return RX_DROP_MONITOR; 499 return RX_DROP_MONITOR;
498 } 500 }
499 } 501 }
@@ -859,7 +861,12 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
859 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC && 861 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
860 rx->sdata->vif.type != NL80211_IFTYPE_WDS && 862 rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
861 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) { 863 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
862 if (rx->sta && rx->sta->dummy && 864 /*
865 * accept port control frames from the AP even when it's not
866 * yet marked ASSOC to prevent a race where we don't set the
867 * assoc bit quickly enough before it sends the first frame
868 */
869 if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
863 ieee80211_is_data_present(hdr->frame_control)) { 870 ieee80211_is_data_present(hdr->frame_control)) {
864 u16 ethertype; 871 u16 ethertype;
865 u8 *payload; 872 u8 *payload;
@@ -1056,20 +1063,9 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
1056 return RX_DROP_MONITOR; 1063 return RX_DROP_MONITOR;
1057 } 1064 }
1058 1065
1059 if (skb_linearize(rx->skb))
1060 return RX_DROP_UNUSABLE;
1061 /* the hdr variable is invalid now! */
1062
1063 switch (rx->key->conf.cipher) { 1066 switch (rx->key->conf.cipher) {
1064 case WLAN_CIPHER_SUITE_WEP40: 1067 case WLAN_CIPHER_SUITE_WEP40:
1065 case WLAN_CIPHER_SUITE_WEP104: 1068 case WLAN_CIPHER_SUITE_WEP104:
1066 /* Check for weak IVs if possible */
1067 if (rx->sta && ieee80211_is_data(fc) &&
1068 (!(status->flag & RX_FLAG_IV_STRIPPED) ||
1069 !(status->flag & RX_FLAG_DECRYPTED)) &&
1070 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
1071 rx->sta->wep_weak_iv_count++;
1072
1073 result = ieee80211_crypto_wep_decrypt(rx); 1069 result = ieee80211_crypto_wep_decrypt(rx);
1074 break; 1070 break;
1075 case WLAN_CIPHER_SUITE_TKIP: 1071 case WLAN_CIPHER_SUITE_TKIP:
@@ -1089,6 +1085,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
1089 return RX_DROP_UNUSABLE; 1085 return RX_DROP_UNUSABLE;
1090 } 1086 }
1091 1087
1088 /* the hdr variable is invalid after the decrypt handlers */
1089
1092 /* either the frame has been decrypted or will be dropped */ 1090 /* either the frame has been decrypted or will be dropped */
1093 status->flag |= RX_FLAG_DECRYPTED; 1091 status->flag |= RX_FLAG_DECRYPTED;
1094 1092
@@ -1145,19 +1143,15 @@ static void ap_sta_ps_start(struct sta_info *sta)
1145 1143
1146static void ap_sta_ps_end(struct sta_info *sta) 1144static void ap_sta_ps_end(struct sta_info *sta)
1147{ 1145{
1148 struct ieee80211_sub_if_data *sdata = sta->sdata;
1149
1150 atomic_dec(&sdata->bss->num_sta_ps);
1151
1152#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1146#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1153 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n", 1147 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
1154 sdata->name, sta->sta.addr, sta->sta.aid); 1148 sta->sdata->name, sta->sta.addr, sta->sta.aid);
1155#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1149#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1156 1150
1157 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) { 1151 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
1158#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1152#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1159 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n", 1153 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
1160 sdata->name, sta->sta.addr, sta->sta.aid); 1154 sta->sdata->name, sta->sta.addr, sta->sta.aid);
1161#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1155#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1162 return; 1156 return;
1163 } 1157 }
@@ -1307,8 +1301,10 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1307 1301
1308 sta->rx_fragments++; 1302 sta->rx_fragments++;
1309 sta->rx_bytes += rx->skb->len; 1303 sta->rx_bytes += rx->skb->len;
1310 sta->last_signal = status->signal; 1304 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
1311 ewma_add(&sta->avg_signal, -status->signal); 1305 sta->last_signal = status->signal;
1306 ewma_add(&sta->avg_signal, -status->signal);
1307 }
1312 1308
1313 /* 1309 /*
1314 * Change STA power saving mode only at the end of a frame 1310 * Change STA power saving mode only at the end of a frame
@@ -1955,6 +1951,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1955 return RX_DROP_MONITOR; 1951 return RX_DROP_MONITOR;
1956 } 1952 }
1957 1953
1954 if (!ifmsh->mshcfg.dot11MeshForwarding)
1955 goto out;
1956
1958 fwd_skb = skb_copy(skb, GFP_ATOMIC); 1957 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1959 if (!fwd_skb) { 1958 if (!fwd_skb) {
1960 if (net_ratelimit()) 1959 if (net_ratelimit())
@@ -2180,12 +2179,14 @@ ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
2180 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && 2179 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
2181 ieee80211_is_beacon(mgmt->frame_control) && 2180 ieee80211_is_beacon(mgmt->frame_control) &&
2182 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) { 2181 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
2183 struct ieee80211_rx_status *status; 2182 int sig = 0;
2183
2184 if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
2185 sig = status->signal;
2184 2186
2185 status = IEEE80211_SKB_RXCB(rx->skb);
2186 cfg80211_report_obss_beacon(rx->local->hw.wiphy, 2187 cfg80211_report_obss_beacon(rx->local->hw.wiphy,
2187 rx->skb->data, rx->skb->len, 2188 rx->skb->data, rx->skb->len,
2188 status->freq, GFP_ATOMIC); 2189 status->freq, sig, GFP_ATOMIC);
2189 rx->flags |= IEEE80211_RX_BEACON_REPORTED; 2190 rx->flags |= IEEE80211_RX_BEACON_REPORTED;
2190 } 2191 }
2191 2192
@@ -2268,9 +2269,11 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2268 2269
2269 sband = rx->local->hw.wiphy->bands[status->band]; 2270 sband = rx->local->hw.wiphy->bands[status->band];
2270 2271
2271 rate_control_rate_update(local, sband, rx->sta, 2272 rate_control_rate_update(
2272 IEEE80211_RC_SMPS_CHANGED, 2273 local, sband, rx->sta,
2273 local->_oper_channel_type); 2274 IEEE80211_RC_SMPS_CHANGED,
2275 ieee80211_get_tx_channel_type(
2276 local, local->_oper_channel_type));
2274 goto handled; 2277 goto handled;
2275 } 2278 }
2276 default: 2279 default:
@@ -2337,7 +2340,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2337 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2340 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2338 break; 2341 break;
2339 2342
2340 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN)) 2343 if (compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid))
2341 break; 2344 break;
2342 2345
2343 goto queue; 2346 goto queue;
@@ -2409,6 +2412,7 @@ static ieee80211_rx_result debug_noinline
2409ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) 2412ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
2410{ 2413{
2411 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2414 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2415 int sig = 0;
2412 2416
2413 /* skip known-bad action frames and return them in the next handler */ 2417 /* skip known-bad action frames and return them in the next handler */
2414 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) 2418 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
@@ -2421,7 +2425,10 @@ ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
2421 * it transmitted were processed or returned. 2425 * it transmitted were processed or returned.
2422 */ 2426 */
2423 2427
2424 if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq, 2428 if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
2429 sig = status->signal;
2430
2431 if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq, sig,
2425 rx->skb->data, rx->skb->len, 2432 rx->skb->data, rx->skb->len,
2426 GFP_ATOMIC)) { 2433 GFP_ATOMIC)) {
2427 if (rx->sta) 2434 if (rx->sta)
@@ -2486,14 +2493,9 @@ static ieee80211_rx_result debug_noinline
2486ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 2493ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2487{ 2494{
2488 struct ieee80211_sub_if_data *sdata = rx->sdata; 2495 struct ieee80211_sub_if_data *sdata = rx->sdata;
2489 ieee80211_rx_result rxs;
2490 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 2496 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
2491 __le16 stype; 2497 __le16 stype;
2492 2498
2493 rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb);
2494 if (rxs != RX_CONTINUE)
2495 return rxs;
2496
2497 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); 2499 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
2498 2500
2499 if (!ieee80211_vif_is_mesh(&sdata->vif) && 2501 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
@@ -2502,10 +2504,13 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2502 return RX_DROP_MONITOR; 2504 return RX_DROP_MONITOR;
2503 2505
2504 switch (stype) { 2506 switch (stype) {
2507 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2505 case cpu_to_le16(IEEE80211_STYPE_BEACON): 2508 case cpu_to_le16(IEEE80211_STYPE_BEACON):
2506 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): 2509 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
2507 /* process for all: mesh, mlme, ibss */ 2510 /* process for all: mesh, mlme, ibss */
2508 break; 2511 break;
2512 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
2513 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
2509 case cpu_to_le16(IEEE80211_STYPE_DEAUTH): 2514 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2510 case cpu_to_le16(IEEE80211_STYPE_DISASSOC): 2515 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
2511 if (is_multicast_ether_addr(mgmt->da) && 2516 if (is_multicast_ether_addr(mgmt->da) &&
@@ -2517,7 +2522,6 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2517 return RX_DROP_MONITOR; 2522 return RX_DROP_MONITOR;
2518 break; 2523 break;
2519 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): 2524 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
2520 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2521 /* process only for ibss */ 2525 /* process only for ibss */
2522 if (sdata->vif.type != NL80211_IFTYPE_ADHOC) 2526 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
2523 return RX_DROP_MONITOR; 2527 return RX_DROP_MONITOR;
@@ -2542,16 +2546,10 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2542{ 2546{
2543 struct ieee80211_sub_if_data *sdata; 2547 struct ieee80211_sub_if_data *sdata;
2544 struct ieee80211_local *local = rx->local; 2548 struct ieee80211_local *local = rx->local;
2545 struct ieee80211_rtap_hdr {
2546 struct ieee80211_radiotap_header hdr;
2547 u8 flags;
2548 u8 rate_or_pad;
2549 __le16 chan_freq;
2550 __le16 chan_flags;
2551 } __packed *rthdr;
2552 struct sk_buff *skb = rx->skb, *skb2; 2549 struct sk_buff *skb = rx->skb, *skb2;
2553 struct net_device *prev_dev = NULL; 2550 struct net_device *prev_dev = NULL;
2554 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2551 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2552 int needed_headroom;
2555 2553
2556 /* 2554 /*
2557 * If cooked monitor has been processed already, then 2555 * If cooked monitor has been processed already, then
@@ -2565,30 +2563,15 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2565 if (!local->cooked_mntrs) 2563 if (!local->cooked_mntrs)
2566 goto out_free_skb; 2564 goto out_free_skb;
2567 2565
2568 if (skb_headroom(skb) < sizeof(*rthdr) && 2566 /* room for the radiotap header based on driver features */
2569 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC)) 2567 needed_headroom = ieee80211_rx_radiotap_len(local, status);
2570 goto out_free_skb;
2571
2572 rthdr = (void *)skb_push(skb, sizeof(*rthdr));
2573 memset(rthdr, 0, sizeof(*rthdr));
2574 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
2575 rthdr->hdr.it_present =
2576 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
2577 (1 << IEEE80211_RADIOTAP_CHANNEL));
2578 2568
2579 if (rate) { 2569 if (skb_headroom(skb) < needed_headroom &&
2580 rthdr->rate_or_pad = rate->bitrate / 5; 2570 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC))
2581 rthdr->hdr.it_present |= 2571 goto out_free_skb;
2582 cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
2583 }
2584 rthdr->chan_freq = cpu_to_le16(status->freq);
2585 2572
2586 if (status->band == IEEE80211_BAND_5GHZ) 2573 /* prepend radiotap information */
2587 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM | 2574 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom);
2588 IEEE80211_CHAN_5GHZ);
2589 else
2590 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
2591 IEEE80211_CHAN_2GHZ);
2592 2575
2593 skb_set_mac_header(skb, 0); 2576 skb_set_mac_header(skb, 0);
2594 skb->ip_summed = CHECKSUM_UNNECESSARY; 2577 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2956,7 +2939,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2956 if (ieee80211_is_data(fc)) { 2939 if (ieee80211_is_data(fc)) {
2957 prev_sta = NULL; 2940 prev_sta = NULL;
2958 2941
2959 for_each_sta_info_rx(local, hdr->addr2, sta, tmp) { 2942 for_each_sta_info(local, hdr->addr2, sta, tmp) {
2960 if (!prev_sta) { 2943 if (!prev_sta) {
2961 prev_sta = sta; 2944 prev_sta = sta;
2962 continue; 2945 continue;
@@ -3000,7 +2983,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
3000 continue; 2983 continue;
3001 } 2984 }
3002 2985
3003 rx.sta = sta_info_get_bss_rx(prev, hdr->addr2); 2986 rx.sta = sta_info_get_bss(prev, hdr->addr2);
3004 rx.sdata = prev; 2987 rx.sdata = prev;
3005 ieee80211_prepare_and_rx_handle(&rx, skb, false); 2988 ieee80211_prepare_and_rx_handle(&rx, skb, false);
3006 2989
@@ -3008,7 +2991,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
3008 } 2991 }
3009 2992
3010 if (prev) { 2993 if (prev) {
3011 rx.sta = sta_info_get_bss_rx(prev, hdr->addr2); 2994 rx.sta = sta_info_get_bss(prev, hdr->addr2);
3012 rx.sdata = prev; 2995 rx.sdata = prev;
3013 2996
3014 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 2997 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 9270771702fe..33cd16901378 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -13,6 +13,7 @@
13 */ 13 */
14 14
15#include <linux/if_arp.h> 15#include <linux/if_arp.h>
16#include <linux/etherdevice.h>
16#include <linux/rtnetlink.h> 17#include <linux/rtnetlink.h>
17#include <linux/pm_qos.h> 18#include <linux/pm_qos.h>
18#include <net/sch_generic.h> 19#include <net/sch_generic.h>
@@ -103,16 +104,35 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
103 cbss->free_priv = ieee80211_rx_bss_free; 104 cbss->free_priv = ieee80211_rx_bss_free;
104 bss = (void *)cbss->priv; 105 bss = (void *)cbss->priv;
105 106
107 if (elems->parse_error) {
108 if (beacon)
109 bss->corrupt_data |= IEEE80211_BSS_CORRUPT_BEACON;
110 else
111 bss->corrupt_data |= IEEE80211_BSS_CORRUPT_PROBE_RESP;
112 } else {
113 if (beacon)
114 bss->corrupt_data &= ~IEEE80211_BSS_CORRUPT_BEACON;
115 else
116 bss->corrupt_data &= ~IEEE80211_BSS_CORRUPT_PROBE_RESP;
117 }
118
106 /* save the ERP value so that it is available at association time */ 119 /* save the ERP value so that it is available at association time */
107 if (elems->erp_info && elems->erp_info_len >= 1) { 120 if (elems->erp_info && elems->erp_info_len >= 1 &&
121 (!elems->parse_error ||
122 !(bss->valid_data & IEEE80211_BSS_VALID_ERP))) {
108 bss->erp_value = elems->erp_info[0]; 123 bss->erp_value = elems->erp_info[0];
109 bss->has_erp_value = true; 124 bss->has_erp_value = true;
125 if (!elems->parse_error)
126 bss->valid_data |= IEEE80211_BSS_VALID_ERP;
110 } 127 }
111 128
112 if (elems->tim) { 129 if (elems->tim && (!elems->parse_error ||
130 !(bss->valid_data & IEEE80211_BSS_VALID_DTIM))) {
113 struct ieee80211_tim_ie *tim_ie = 131 struct ieee80211_tim_ie *tim_ie =
114 (struct ieee80211_tim_ie *)elems->tim; 132 (struct ieee80211_tim_ie *)elems->tim;
115 bss->dtim_period = tim_ie->dtim_period; 133 bss->dtim_period = tim_ie->dtim_period;
134 if (!elems->parse_error)
135 bss->valid_data |= IEEE80211_BSS_VALID_DTIM;
116 } 136 }
117 137
118 /* If the beacon had no TIM IE, or it was invalid, use 1 */ 138 /* If the beacon had no TIM IE, or it was invalid, use 1 */
@@ -120,26 +140,38 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
120 bss->dtim_period = 1; 140 bss->dtim_period = 1;
121 141
122 /* replace old supported rates if we get new values */ 142 /* replace old supported rates if we get new values */
123 srlen = 0; 143 if (!elems->parse_error ||
124 if (elems->supp_rates) { 144 !(bss->valid_data & IEEE80211_BSS_VALID_RATES)) {
125 clen = IEEE80211_MAX_SUPP_RATES; 145 srlen = 0;
126 if (clen > elems->supp_rates_len) 146 if (elems->supp_rates) {
127 clen = elems->supp_rates_len; 147 clen = IEEE80211_MAX_SUPP_RATES;
128 memcpy(bss->supp_rates, elems->supp_rates, clen); 148 if (clen > elems->supp_rates_len)
129 srlen += clen; 149 clen = elems->supp_rates_len;
130 } 150 memcpy(bss->supp_rates, elems->supp_rates, clen);
131 if (elems->ext_supp_rates) { 151 srlen += clen;
132 clen = IEEE80211_MAX_SUPP_RATES - srlen; 152 }
133 if (clen > elems->ext_supp_rates_len) 153 if (elems->ext_supp_rates) {
134 clen = elems->ext_supp_rates_len; 154 clen = IEEE80211_MAX_SUPP_RATES - srlen;
135 memcpy(bss->supp_rates + srlen, elems->ext_supp_rates, clen); 155 if (clen > elems->ext_supp_rates_len)
136 srlen += clen; 156 clen = elems->ext_supp_rates_len;
157 memcpy(bss->supp_rates + srlen, elems->ext_supp_rates,
158 clen);
159 srlen += clen;
160 }
161 if (srlen) {
162 bss->supp_rates_len = srlen;
163 if (!elems->parse_error)
164 bss->valid_data |= IEEE80211_BSS_VALID_RATES;
165 }
137 } 166 }
138 if (srlen)
139 bss->supp_rates_len = srlen;
140 167
141 bss->wmm_used = elems->wmm_param || elems->wmm_info; 168 if (!elems->parse_error ||
142 bss->uapsd_supported = is_uapsd_supported(elems); 169 !(bss->valid_data & IEEE80211_BSS_VALID_WMM)) {
170 bss->wmm_used = elems->wmm_param || elems->wmm_info;
171 bss->uapsd_supported = is_uapsd_supported(elems);
172 if (!elems->parse_error)
173 bss->valid_data |= IEEE80211_BSS_VALID_WMM;
174 }
143 175
144 if (!beacon) 176 if (!beacon)
145 bss->last_probe_resp = jiffies; 177 bss->last_probe_resp = jiffies;
@@ -176,7 +208,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
176 presp = ieee80211_is_probe_resp(fc); 208 presp = ieee80211_is_probe_resp(fc);
177 if (presp) { 209 if (presp) {
178 /* ignore ProbeResp to foreign address */ 210 /* ignore ProbeResp to foreign address */
179 if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN)) 211 if (compare_ether_addr(mgmt->da, sdata->vif.addr))
180 return RX_DROP_MONITOR; 212 return RX_DROP_MONITOR;
181 213
182 presp = true; 214 presp = true;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index ff11f6bf8266..38137cb5f6f0 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/etherdevice.h>
12#include <linux/netdevice.h> 13#include <linux/netdevice.h>
13#include <linux/types.h> 14#include <linux/types.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
@@ -100,27 +101,8 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
100 sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)], 101 sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
101 lockdep_is_held(&local->sta_mtx)); 102 lockdep_is_held(&local->sta_mtx));
102 while (sta) { 103 while (sta) {
103 if (sta->sdata == sdata && !sta->dummy &&
104 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
105 break;
106 sta = rcu_dereference_check(sta->hnext,
107 lockdep_is_held(&local->sta_mtx));
108 }
109 return sta;
110}
111
112/* get a station info entry even if it is a dummy station*/
113struct sta_info *sta_info_get_rx(struct ieee80211_sub_if_data *sdata,
114 const u8 *addr)
115{
116 struct ieee80211_local *local = sdata->local;
117 struct sta_info *sta;
118
119 sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
120 lockdep_is_held(&local->sta_mtx));
121 while (sta) {
122 if (sta->sdata == sdata && 104 if (sta->sdata == sdata &&
123 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) 105 compare_ether_addr(sta->sta.addr, addr) == 0)
124 break; 106 break;
125 sta = rcu_dereference_check(sta->hnext, 107 sta = rcu_dereference_check(sta->hnext,
126 lockdep_is_held(&local->sta_mtx)); 108 lockdep_is_held(&local->sta_mtx));
@@ -143,31 +125,7 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
143 while (sta) { 125 while (sta) {
144 if ((sta->sdata == sdata || 126 if ((sta->sdata == sdata ||
145 (sta->sdata->bss && sta->sdata->bss == sdata->bss)) && 127 (sta->sdata->bss && sta->sdata->bss == sdata->bss)) &&
146 !sta->dummy && 128 compare_ether_addr(sta->sta.addr, addr) == 0)
147 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
148 break;
149 sta = rcu_dereference_check(sta->hnext,
150 lockdep_is_held(&local->sta_mtx));
151 }
152 return sta;
153}
154
155/*
156 * Get sta info either from the specified interface
157 * or from one of its vlans (including dummy stations)
158 */
159struct sta_info *sta_info_get_bss_rx(struct ieee80211_sub_if_data *sdata,
160 const u8 *addr)
161{
162 struct ieee80211_local *local = sdata->local;
163 struct sta_info *sta;
164
165 sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
166 lockdep_is_held(&local->sta_mtx));
167 while (sta) {
168 if ((sta->sdata == sdata ||
169 (sta->sdata->bss && sta->sdata->bss == sdata->bss)) &&
170 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
171 break; 129 break;
172 sta = rcu_dereference_check(sta->hnext, 130 sta = rcu_dereference_check(sta->hnext,
173 lockdep_is_held(&local->sta_mtx)); 131 lockdep_is_held(&local->sta_mtx));
@@ -208,10 +166,8 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
208 */ 166 */
209void sta_info_free(struct ieee80211_local *local, struct sta_info *sta) 167void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
210{ 168{
211 if (sta->rate_ctrl) { 169 if (sta->rate_ctrl)
212 rate_control_free_sta(sta); 170 rate_control_free_sta(sta);
213 rate_control_put(sta->rate_ctrl);
214 }
215 171
216#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 172#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
217 wiphy_debug(local->hw.wiphy, "Destroyed STA %pM\n", sta->sta.addr); 173 wiphy_debug(local->hw.wiphy, "Destroyed STA %pM\n", sta->sta.addr);
@@ -264,13 +220,11 @@ static int sta_prepare_rate_control(struct ieee80211_local *local,
264 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) 220 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
265 return 0; 221 return 0;
266 222
267 sta->rate_ctrl = rate_control_get(local->rate_ctrl); 223 sta->rate_ctrl = local->rate_ctrl;
268 sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl, 224 sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl,
269 &sta->sta, gfp); 225 &sta->sta, gfp);
270 if (!sta->rate_ctrl_priv) { 226 if (!sta->rate_ctrl_priv)
271 rate_control_put(sta->rate_ctrl);
272 return -ENOMEM; 227 return -ENOMEM;
273 }
274 228
275 return 0; 229 return 0;
276} 230}
@@ -297,6 +251,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
297 sta->sdata = sdata; 251 sta->sdata = sdata;
298 sta->last_rx = jiffies; 252 sta->last_rx = jiffies;
299 253
254 sta->sta_state = IEEE80211_STA_NONE;
255
300 do_posix_clock_monotonic_gettime(&uptime); 256 do_posix_clock_monotonic_gettime(&uptime);
301 sta->last_connected = uptime.tv_sec; 257 sta->last_connected = uptime.tv_sec;
302 ewma_init(&sta->avg_signal, 1024, 8); 258 ewma_init(&sta->avg_signal, 1024, 8);
@@ -353,6 +309,43 @@ static int sta_info_insert_check(struct sta_info *sta)
353 return 0; 309 return 0;
354} 310}
355 311
312static int sta_info_insert_drv_state(struct ieee80211_local *local,
313 struct ieee80211_sub_if_data *sdata,
314 struct sta_info *sta)
315{
316 enum ieee80211_sta_state state;
317 int err = 0;
318
319 for (state = IEEE80211_STA_NOTEXIST; state < sta->sta_state; state++) {
320 err = drv_sta_state(local, sdata, sta, state, state + 1);
321 if (err)
322 break;
323 }
324
325 if (!err) {
326 /*
327 * Drivers using legacy sta_add/sta_remove callbacks only
328 * get uploaded set to true after sta_add is called.
329 */
330 if (!local->ops->sta_add)
331 sta->uploaded = true;
332 return 0;
333 }
334
335 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
336 printk(KERN_DEBUG
337 "%s: failed to move IBSS STA %pM to state %d (%d) - keeping it anyway.\n",
338 sdata->name, sta->sta.addr, state + 1, err);
339 err = 0;
340 }
341
342 /* unwind on error */
343 for (; state > IEEE80211_STA_NOTEXIST; state--)
344 WARN_ON(drv_sta_state(local, sdata, sta, state, state - 1));
345
346 return err;
347}
348
356/* 349/*
357 * should be called with sta_mtx locked 350 * should be called with sta_mtx locked
358 * this function replaces the mutex lock 351 * this function replaces the mutex lock
@@ -362,70 +355,43 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
362{ 355{
363 struct ieee80211_local *local = sta->local; 356 struct ieee80211_local *local = sta->local;
364 struct ieee80211_sub_if_data *sdata = sta->sdata; 357 struct ieee80211_sub_if_data *sdata = sta->sdata;
365 struct sta_info *exist_sta; 358 struct station_info sinfo;
366 bool dummy_reinsert = false;
367 int err = 0; 359 int err = 0;
368 360
369 lockdep_assert_held(&local->sta_mtx); 361 lockdep_assert_held(&local->sta_mtx);
370 362
371 /* 363 /* check if STA exists already */
372 * check if STA exists already. 364 if (sta_info_get_bss(sdata, sta->sta.addr)) {
373 * only accept a scenario of a second call to sta_info_insert_finish 365 err = -EEXIST;
374 * with a dummy station entry that was inserted earlier 366 goto out_err;
375 * in that case - assume that the dummy station flag should
376 * be removed.
377 */
378 exist_sta = sta_info_get_bss_rx(sdata, sta->sta.addr);
379 if (exist_sta) {
380 if (exist_sta == sta && sta->dummy) {
381 dummy_reinsert = true;
382 } else {
383 err = -EEXIST;
384 goto out_err;
385 }
386 } 367 }
387 368
388 if (!sta->dummy || dummy_reinsert) { 369 /* notify driver */
389 /* notify driver */ 370 err = sta_info_insert_drv_state(local, sdata, sta);
390 err = drv_sta_add(local, sdata, &sta->sta); 371 if (err)
391 if (err) { 372 goto out_err;
392 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
393 goto out_err;
394 printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to "
395 "driver (%d) - keeping it anyway.\n",
396 sdata->name, sta->sta.addr, err);
397 } else
398 sta->uploaded = true;
399 }
400 373
401 if (!dummy_reinsert) { 374 local->num_sta++;
402 local->num_sta++; 375 local->sta_generation++;
403 local->sta_generation++; 376 smp_mb();
404 smp_mb();
405 377
406 /* make the station visible */ 378 /* make the station visible */
407 sta_info_hash_add(local, sta); 379 sta_info_hash_add(local, sta);
408 380
409 list_add(&sta->list, &local->sta_list); 381 list_add(&sta->list, &local->sta_list);
410 } else {
411 sta->dummy = false;
412 }
413 382
414 if (!sta->dummy) { 383 set_sta_flag(sta, WLAN_STA_INSERTED);
415 struct station_info sinfo;
416 384
417 ieee80211_sta_debugfs_add(sta); 385 ieee80211_sta_debugfs_add(sta);
418 rate_control_add_sta_debugfs(sta); 386 rate_control_add_sta_debugfs(sta);
419 387
420 memset(&sinfo, 0, sizeof(sinfo)); 388 memset(&sinfo, 0, sizeof(sinfo));
421 sinfo.filled = 0; 389 sinfo.filled = 0;
422 sinfo.generation = local->sta_generation; 390 sinfo.generation = local->sta_generation;
423 cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL); 391 cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
424 }
425 392
426#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 393#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
427 wiphy_debug(local->hw.wiphy, "Inserted %sSTA %pM\n", 394 wiphy_debug(local->hw.wiphy, "Inserted STA %pM\n", sta->sta.addr);
428 sta->dummy ? "dummy " : "", sta->sta.addr);
429#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 395#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
430 396
431 /* move reference to rcu-protected */ 397 /* move reference to rcu-protected */
@@ -477,25 +443,6 @@ int sta_info_insert(struct sta_info *sta)
477 return err; 443 return err;
478} 444}
479 445
480/* Caller must hold sta->local->sta_mtx */
481int sta_info_reinsert(struct sta_info *sta)
482{
483 struct ieee80211_local *local = sta->local;
484 int err = 0;
485
486 err = sta_info_insert_check(sta);
487 if (err) {
488 mutex_unlock(&local->sta_mtx);
489 return err;
490 }
491
492 might_sleep();
493
494 err = sta_info_insert_finish(sta);
495 rcu_read_unlock();
496 return err;
497}
498
499static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid) 446static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid)
500{ 447{
501 /* 448 /*
@@ -711,7 +658,7 @@ static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
711 return have_buffered; 658 return have_buffered;
712} 659}
713 660
714static int __must_check __sta_info_destroy(struct sta_info *sta) 661int __must_check __sta_info_destroy(struct sta_info *sta)
715{ 662{
716 struct ieee80211_local *local; 663 struct ieee80211_local *local;
717 struct ieee80211_sub_if_data *sdata; 664 struct ieee80211_sub_if_data *sdata;
@@ -726,6 +673,8 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
726 local = sta->local; 673 local = sta->local;
727 sdata = sta->sdata; 674 sdata = sta->sdata;
728 675
676 lockdep_assert_held(&local->sta_mtx);
677
729 /* 678 /*
730 * Before removing the station from the driver and 679 * Before removing the station from the driver and
731 * rate control, it might still start new aggregation 680 * rate control, it might still start new aggregation
@@ -750,33 +699,24 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
750 699
751 sta->dead = true; 700 sta->dead = true;
752 701
753 if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
754 test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
755 BUG_ON(!sdata->bss);
756
757 clear_sta_flag(sta, WLAN_STA_PS_STA);
758 clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
759
760 atomic_dec(&sdata->bss->num_sta_ps);
761 sta_info_recalc_tim(sta);
762 }
763
764 local->num_sta--; 702 local->num_sta--;
765 local->sta_generation++; 703 local->sta_generation++;
766 704
767 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 705 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
768 RCU_INIT_POINTER(sdata->u.vlan.sta, NULL); 706 RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
769 707
770 while (sta->sta_state > IEEE80211_STA_NONE) 708 while (sta->sta_state > IEEE80211_STA_NONE) {
771 sta_info_move_state(sta, sta->sta_state - 1); 709 ret = sta_info_move_state(sta, sta->sta_state - 1);
710 if (ret) {
711 WARN_ON_ONCE(1);
712 break;
713 }
714 }
772 715
773 if (sta->uploaded) { 716 if (sta->uploaded) {
774 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 717 ret = drv_sta_state(local, sdata, sta, IEEE80211_STA_NONE,
775 sdata = container_of(sdata->bss, 718 IEEE80211_STA_NOTEXIST);
776 struct ieee80211_sub_if_data, 719 WARN_ON_ONCE(ret != 0);
777 u.ap);
778 drv_sta_remove(local, sdata, &sta->sta);
779 sdata = sta->sdata;
780 } 720 }
781 721
782 /* 722 /*
@@ -787,6 +727,15 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
787 */ 727 */
788 synchronize_rcu(); 728 synchronize_rcu();
789 729
730 if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
731 BUG_ON(!sdata->bss);
732
733 clear_sta_flag(sta, WLAN_STA_PS_STA);
734
735 atomic_dec(&sdata->bss->num_sta_ps);
736 sta_info_recalc_tim(sta);
737 }
738
790 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 739 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
791 local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]); 740 local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
792 __skb_queue_purge(&sta->ps_tx_buf[ac]); 741 __skb_queue_purge(&sta->ps_tx_buf[ac]);
@@ -815,35 +764,20 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
815 } 764 }
816#endif 765#endif
817 766
818 /* There could be some memory leaks because of ampdu tx pending queue 767 /*
819 * not being freed before destroying the station info. 768 * Destroy aggregation state here. It would be nice to wait for the
820 * 769 * driver to finish aggregation stop and then clean up, but for now
821 * Make sure that such queues are purged before freeing the station 770 * drivers have to handle aggregation stop being requested, followed
822 * info. 771 * directly by station destruction.
823 * TODO: We have to somehow postpone the full destruction
824 * until the aggregation stop completes. Refer
825 * http://thread.gmane.org/gmane.linux.kernel.wireless.general/81936
826 */ 772 */
827
828 mutex_lock(&sta->ampdu_mlme.mtx);
829
830 for (i = 0; i < STA_TID_NUM; i++) { 773 for (i = 0; i < STA_TID_NUM; i++) {
831 tid_tx = rcu_dereference_protected_tid_tx(sta, i); 774 tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]);
832 if (!tid_tx) 775 if (!tid_tx)
833 continue; 776 continue;
834 if (skb_queue_len(&tid_tx->pending)) { 777 __skb_queue_purge(&tid_tx->pending);
835#ifdef CONFIG_MAC80211_HT_DEBUG 778 kfree(tid_tx);
836 wiphy_debug(local->hw.wiphy, "TX A-MPDU purging %d "
837 "packets for tid=%d\n",
838 skb_queue_len(&tid_tx->pending), i);
839#endif /* CONFIG_MAC80211_HT_DEBUG */
840 __skb_queue_purge(&tid_tx->pending);
841 }
842 kfree_rcu(tid_tx, rcu_head);
843 } 779 }
844 780
845 mutex_unlock(&sta->ampdu_mlme.mtx);
846
847 sta_info_free(local, sta); 781 sta_info_free(local, sta);
848 782
849 return 0; 783 return 0;
@@ -855,7 +789,7 @@ int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr)
855 int ret; 789 int ret;
856 790
857 mutex_lock(&sdata->local->sta_mtx); 791 mutex_lock(&sdata->local->sta_mtx);
858 sta = sta_info_get_rx(sdata, addr); 792 sta = sta_info_get(sdata, addr);
859 ret = __sta_info_destroy(sta); 793 ret = __sta_info_destroy(sta);
860 mutex_unlock(&sdata->local->sta_mtx); 794 mutex_unlock(&sdata->local->sta_mtx);
861 795
@@ -869,7 +803,7 @@ int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
869 int ret; 803 int ret;
870 804
871 mutex_lock(&sdata->local->sta_mtx); 805 mutex_lock(&sdata->local->sta_mtx);
872 sta = sta_info_get_bss_rx(sdata, addr); 806 sta = sta_info_get_bss(sdata, addr);
873 ret = __sta_info_destroy(sta); 807 ret = __sta_info_destroy(sta);
874 mutex_unlock(&sdata->local->sta_mtx); 808 mutex_unlock(&sdata->local->sta_mtx);
875 809
@@ -932,8 +866,10 @@ int sta_info_flush(struct ieee80211_local *local,
932 866
933 mutex_lock(&local->sta_mtx); 867 mutex_lock(&local->sta_mtx);
934 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { 868 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
935 if (!sdata || sdata == sta->sdata) 869 if (!sdata || sdata == sta->sdata) {
936 WARN_ON(__sta_info_destroy(sta)); 870 WARN_ON(__sta_info_destroy(sta));
871 ret++;
872 }
937 } 873 }
938 mutex_unlock(&local->sta_mtx); 874 mutex_unlock(&local->sta_mtx);
939 875
@@ -1009,9 +945,11 @@ EXPORT_SYMBOL(ieee80211_find_sta);
1009static void clear_sta_ps_flags(void *_sta) 945static void clear_sta_ps_flags(void *_sta)
1010{ 946{
1011 struct sta_info *sta = _sta; 947 struct sta_info *sta = _sta;
948 struct ieee80211_sub_if_data *sdata = sta->sdata;
1012 949
1013 clear_sta_flag(sta, WLAN_STA_PS_DRIVER); 950 clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
1014 clear_sta_flag(sta, WLAN_STA_PS_STA); 951 if (test_and_clear_sta_flag(sta, WLAN_STA_PS_STA))
952 atomic_dec(&sdata->bss->num_sta_ps);
1015} 953}
1016 954
1017/* powersave support code */ 955/* powersave support code */
@@ -1113,7 +1051,7 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
1113 * exchange. Also set EOSP to indicate this packet 1051 * exchange. Also set EOSP to indicate this packet
1114 * ends the poll/service period. 1052 * ends the poll/service period.
1115 */ 1053 */
1116 info->flags |= IEEE80211_TX_CTL_POLL_RESPONSE | 1054 info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER |
1117 IEEE80211_TX_STATUS_EOSP | 1055 IEEE80211_TX_STATUS_EOSP |
1118 IEEE80211_TX_CTL_REQ_TX_STATUS; 1056 IEEE80211_TX_CTL_REQ_TX_STATUS;
1119 1057
@@ -1240,7 +1178,7 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
1240 * STA may still remain is PS mode after this frame 1178 * STA may still remain is PS mode after this frame
1241 * exchange. 1179 * exchange.
1242 */ 1180 */
1243 info->flags |= IEEE80211_TX_CTL_POLL_RESPONSE; 1181 info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
1244 1182
1245 /* 1183 /*
1246 * Use MoreData flag to indicate whether there are 1184 * Use MoreData flag to indicate whether there are
@@ -1410,28 +1348,68 @@ void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta,
1410} 1348}
1411EXPORT_SYMBOL(ieee80211_sta_set_buffered); 1349EXPORT_SYMBOL(ieee80211_sta_set_buffered);
1412 1350
1413int sta_info_move_state_checked(struct sta_info *sta, 1351int sta_info_move_state(struct sta_info *sta,
1414 enum ieee80211_sta_state new_state) 1352 enum ieee80211_sta_state new_state)
1415{ 1353{
1416 might_sleep(); 1354 might_sleep();
1417 1355
1418 if (sta->sta_state == new_state) 1356 if (sta->sta_state == new_state)
1419 return 0; 1357 return 0;
1420 1358
1359 /* check allowed transitions first */
1360
1361 switch (new_state) {
1362 case IEEE80211_STA_NONE:
1363 if (sta->sta_state != IEEE80211_STA_AUTH)
1364 return -EINVAL;
1365 break;
1366 case IEEE80211_STA_AUTH:
1367 if (sta->sta_state != IEEE80211_STA_NONE &&
1368 sta->sta_state != IEEE80211_STA_ASSOC)
1369 return -EINVAL;
1370 break;
1371 case IEEE80211_STA_ASSOC:
1372 if (sta->sta_state != IEEE80211_STA_AUTH &&
1373 sta->sta_state != IEEE80211_STA_AUTHORIZED)
1374 return -EINVAL;
1375 break;
1376 case IEEE80211_STA_AUTHORIZED:
1377 if (sta->sta_state != IEEE80211_STA_ASSOC)
1378 return -EINVAL;
1379 break;
1380 default:
1381 WARN(1, "invalid state %d", new_state);
1382 return -EINVAL;
1383 }
1384
1385#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1386 printk(KERN_DEBUG "%s: moving STA %pM to state %d\n",
1387 sta->sdata->name, sta->sta.addr, new_state);
1388#endif
1389
1390 /*
1391 * notify the driver before the actual changes so it can
1392 * fail the transition
1393 */
1394 if (test_sta_flag(sta, WLAN_STA_INSERTED)) {
1395 int err = drv_sta_state(sta->local, sta->sdata, sta,
1396 sta->sta_state, new_state);
1397 if (err)
1398 return err;
1399 }
1400
1401 /* reflect the change in all state variables */
1402
1421 switch (new_state) { 1403 switch (new_state) {
1422 case IEEE80211_STA_NONE: 1404 case IEEE80211_STA_NONE:
1423 if (sta->sta_state == IEEE80211_STA_AUTH) 1405 if (sta->sta_state == IEEE80211_STA_AUTH)
1424 clear_bit(WLAN_STA_AUTH, &sta->_flags); 1406 clear_bit(WLAN_STA_AUTH, &sta->_flags);
1425 else
1426 return -EINVAL;
1427 break; 1407 break;
1428 case IEEE80211_STA_AUTH: 1408 case IEEE80211_STA_AUTH:
1429 if (sta->sta_state == IEEE80211_STA_NONE) 1409 if (sta->sta_state == IEEE80211_STA_NONE)
1430 set_bit(WLAN_STA_AUTH, &sta->_flags); 1410 set_bit(WLAN_STA_AUTH, &sta->_flags);
1431 else if (sta->sta_state == IEEE80211_STA_ASSOC) 1411 else if (sta->sta_state == IEEE80211_STA_ASSOC)
1432 clear_bit(WLAN_STA_ASSOC, &sta->_flags); 1412 clear_bit(WLAN_STA_ASSOC, &sta->_flags);
1433 else
1434 return -EINVAL;
1435 break; 1413 break;
1436 case IEEE80211_STA_ASSOC: 1414 case IEEE80211_STA_ASSOC:
1437 if (sta->sta_state == IEEE80211_STA_AUTH) { 1415 if (sta->sta_state == IEEE80211_STA_AUTH) {
@@ -1440,24 +1418,19 @@ int sta_info_move_state_checked(struct sta_info *sta,
1440 if (sta->sdata->vif.type == NL80211_IFTYPE_AP) 1418 if (sta->sdata->vif.type == NL80211_IFTYPE_AP)
1441 atomic_dec(&sta->sdata->u.ap.num_sta_authorized); 1419 atomic_dec(&sta->sdata->u.ap.num_sta_authorized);
1442 clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags); 1420 clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
1443 } else 1421 }
1444 return -EINVAL;
1445 break; 1422 break;
1446 case IEEE80211_STA_AUTHORIZED: 1423 case IEEE80211_STA_AUTHORIZED:
1447 if (sta->sta_state == IEEE80211_STA_ASSOC) { 1424 if (sta->sta_state == IEEE80211_STA_ASSOC) {
1448 if (sta->sdata->vif.type == NL80211_IFTYPE_AP) 1425 if (sta->sdata->vif.type == NL80211_IFTYPE_AP)
1449 atomic_inc(&sta->sdata->u.ap.num_sta_authorized); 1426 atomic_inc(&sta->sdata->u.ap.num_sta_authorized);
1450 set_bit(WLAN_STA_AUTHORIZED, &sta->_flags); 1427 set_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
1451 } else 1428 }
1452 return -EINVAL;
1453 break; 1429 break;
1454 default: 1430 default:
1455 WARN(1, "invalid state %d", new_state); 1431 break;
1456 return -EINVAL;
1457 } 1432 }
1458 1433
1459 printk(KERN_DEBUG "%s: moving STA %pM to state %d\n",
1460 sta->sdata->name, sta->sta.addr, new_state);
1461 sta->sta_state = new_state; 1434 sta->sta_state = new_state;
1462 1435
1463 return 0; 1436 return 0;
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index bfed851d0d36..ab0576827baf 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -14,6 +14,7 @@
14#include <linux/if_ether.h> 14#include <linux/if_ether.h>
15#include <linux/workqueue.h> 15#include <linux/workqueue.h>
16#include <linux/average.h> 16#include <linux/average.h>
17#include <linux/etherdevice.h>
17#include "key.h" 18#include "key.h"
18 19
19/** 20/**
@@ -52,6 +53,7 @@
52 * @WLAN_STA_SP: Station is in a service period, so don't try to 53 * @WLAN_STA_SP: Station is in a service period, so don't try to
53 * reply to other uAPSD trigger frames or PS-Poll. 54 * reply to other uAPSD trigger frames or PS-Poll.
54 * @WLAN_STA_4ADDR_EVENT: 4-addr event was already sent for this frame. 55 * @WLAN_STA_4ADDR_EVENT: 4-addr event was already sent for this frame.
56 * @WLAN_STA_INSERTED: This station is inserted into the hash table.
55 * @WLAN_STA_RATE_CONTROL: rate control was initialized for this station. 57 * @WLAN_STA_RATE_CONTROL: rate control was initialized for this station.
56 */ 58 */
57enum ieee80211_sta_info_flags { 59enum ieee80211_sta_info_flags {
@@ -72,17 +74,10 @@ enum ieee80211_sta_info_flags {
72 WLAN_STA_UAPSD, 74 WLAN_STA_UAPSD,
73 WLAN_STA_SP, 75 WLAN_STA_SP,
74 WLAN_STA_4ADDR_EVENT, 76 WLAN_STA_4ADDR_EVENT,
77 WLAN_STA_INSERTED,
75 WLAN_STA_RATE_CONTROL, 78 WLAN_STA_RATE_CONTROL,
76}; 79};
77 80
78enum ieee80211_sta_state {
79 /* NOTE: These need to be ordered correctly! */
80 IEEE80211_STA_NONE,
81 IEEE80211_STA_AUTH,
82 IEEE80211_STA_ASSOC,
83 IEEE80211_STA_AUTHORIZED,
84};
85
86#define STA_TID_NUM 16 81#define STA_TID_NUM 16
87#define ADDBA_RESP_INTERVAL HZ 82#define ADDBA_RESP_INTERVAL HZ
88#define HT_AGG_MAX_RETRIES 15 83#define HT_AGG_MAX_RETRIES 15
@@ -273,8 +268,6 @@ struct sta_ampdu_mlme {
273 * @dead: set to true when sta is unlinked 268 * @dead: set to true when sta is unlinked
274 * @uploaded: set to true when sta is uploaded to the driver 269 * @uploaded: set to true when sta is uploaded to the driver
275 * @lost_packets: number of consecutive lost packets 270 * @lost_packets: number of consecutive lost packets
276 * @dummy: indicate a dummy station created for receiving
277 * EAP frames before association
278 * @sta: station information we share with the driver 271 * @sta: station information we share with the driver
279 * @sta_state: duplicates information about station state (for debug) 272 * @sta_state: duplicates information about station state (for debug)
280 * @beacon_loss_count: number of times beacon loss has triggered 273 * @beacon_loss_count: number of times beacon loss has triggered
@@ -372,9 +365,6 @@ struct sta_info {
372 unsigned int lost_packets; 365 unsigned int lost_packets;
373 unsigned int beacon_loss_count; 366 unsigned int beacon_loss_count;
374 367
375 /* should be right in front of sta to be in the same cache line */
376 bool dummy;
377
378 /* keep last! */ 368 /* keep last! */
379 struct ieee80211_sta sta; 369 struct ieee80211_sta sta;
380}; 370};
@@ -429,13 +419,17 @@ static inline int test_and_set_sta_flag(struct sta_info *sta,
429 return test_and_set_bit(flag, &sta->_flags); 419 return test_and_set_bit(flag, &sta->_flags);
430} 420}
431 421
432int sta_info_move_state_checked(struct sta_info *sta, 422int sta_info_move_state(struct sta_info *sta,
433 enum ieee80211_sta_state new_state); 423 enum ieee80211_sta_state new_state);
434 424
435static inline void sta_info_move_state(struct sta_info *sta, 425static inline void sta_info_pre_move_state(struct sta_info *sta,
436 enum ieee80211_sta_state new_state) 426 enum ieee80211_sta_state new_state)
437{ 427{
438 int ret = sta_info_move_state_checked(sta, new_state); 428 int ret;
429
430 WARN_ON_ONCE(test_sta_flag(sta, WLAN_STA_INSERTED));
431
432 ret = sta_info_move_state(sta, new_state);
439 WARN_ON_ONCE(ret); 433 WARN_ON_ONCE(ret);
440} 434}
441 435
@@ -472,15 +466,9 @@ rcu_dereference_protected_tid_tx(struct sta_info *sta, int tid)
472struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata, 466struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
473 const u8 *addr); 467 const u8 *addr);
474 468
475struct sta_info *sta_info_get_rx(struct ieee80211_sub_if_data *sdata,
476 const u8 *addr);
477
478struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata, 469struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
479 const u8 *addr); 470 const u8 *addr);
480 471
481struct sta_info *sta_info_get_bss_rx(struct ieee80211_sub_if_data *sdata,
482 const u8 *addr);
483
484static inline 472static inline
485void for_each_sta_info_type_check(struct ieee80211_local *local, 473void for_each_sta_info_type_check(struct ieee80211_local *local,
486 const u8 *addr, 474 const u8 *addr,
@@ -489,23 +477,7 @@ void for_each_sta_info_type_check(struct ieee80211_local *local,
489{ 477{
490} 478}
491 479
492#define for_each_sta_info(local, _addr, _sta, nxt) \ 480#define for_each_sta_info(local, _addr, _sta, nxt) \
493 for ( /* initialise loop */ \
494 _sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\
495 nxt = _sta ? rcu_dereference(_sta->hnext) : NULL; \
496 /* typecheck */ \
497 for_each_sta_info_type_check(local, (_addr), _sta, nxt),\
498 /* continue condition */ \
499 _sta; \
500 /* advance loop */ \
501 _sta = nxt, \
502 nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \
503 ) \
504 /* run code only if address matches and it's not a dummy sta */ \
505 if (memcmp(_sta->sta.addr, (_addr), ETH_ALEN) == 0 && \
506 !_sta->dummy)
507
508#define for_each_sta_info_rx(local, _addr, _sta, nxt) \
509 for ( /* initialise loop */ \ 481 for ( /* initialise loop */ \
510 _sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\ 482 _sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\
511 nxt = _sta ? rcu_dereference(_sta->hnext) : NULL; \ 483 nxt = _sta ? rcu_dereference(_sta->hnext) : NULL; \
@@ -518,7 +490,7 @@ void for_each_sta_info_type_check(struct ieee80211_local *local,
518 nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \ 490 nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \
519 ) \ 491 ) \
520 /* compare address and run code only if it matches */ \ 492 /* compare address and run code only if it matches */ \
521 if (memcmp(_sta->sta.addr, (_addr), ETH_ALEN) == 0) 493 if (compare_ether_addr(_sta->sta.addr, (_addr)) == 0)
522 494
523/* 495/*
524 * Get STA info by index, BROKEN! 496 * Get STA info by index, BROKEN!
@@ -544,8 +516,8 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta);
544 */ 516 */
545int sta_info_insert(struct sta_info *sta); 517int sta_info_insert(struct sta_info *sta);
546int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU); 518int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU);
547int sta_info_reinsert(struct sta_info *sta);
548 519
520int __must_check __sta_info_destroy(struct sta_info *sta);
549int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, 521int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata,
550 const u8 *addr); 522 const u8 *addr);
551int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata, 523int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
@@ -557,6 +529,9 @@ void sta_info_init(struct ieee80211_local *local);
557void sta_info_stop(struct ieee80211_local *local); 529void sta_info_stop(struct ieee80211_local *local);
558int sta_info_flush(struct ieee80211_local *local, 530int sta_info_flush(struct ieee80211_local *local,
559 struct ieee80211_sub_if_data *sdata); 531 struct ieee80211_sub_if_data *sdata);
532void sta_set_rate_info_tx(struct sta_info *sta,
533 const struct ieee80211_tx_rate *rate,
534 struct rate_info *rinfo);
560void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, 535void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
561 unsigned long exp_time); 536 unsigned long exp_time);
562 537
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 30c265c98f73..5f8f89e89d6b 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -10,7 +10,9 @@
10 */ 10 */
11 11
12#include <linux/export.h> 12#include <linux/export.h>
13#include <linux/etherdevice.h>
13#include <net/mac80211.h> 14#include <net/mac80211.h>
15#include <asm/unaligned.h>
14#include "ieee80211_i.h" 16#include "ieee80211_i.h"
15#include "rate.h" 17#include "rate.h"
16#include "mesh.h" 18#include "mesh.h"
@@ -350,7 +352,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
350 bool send_to_cooked; 352 bool send_to_cooked;
351 bool acked; 353 bool acked;
352 struct ieee80211_bar *bar; 354 struct ieee80211_bar *bar;
353 u16 tid;
354 int rtap_len; 355 int rtap_len;
355 356
356 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { 357 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
@@ -377,7 +378,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
377 378
378 for_each_sta_info(local, hdr->addr1, sta, tmp) { 379 for_each_sta_info(local, hdr->addr1, sta, tmp) {
379 /* skip wrong virtual interface */ 380 /* skip wrong virtual interface */
380 if (memcmp(hdr->addr2, sta->sdata->vif.addr, ETH_ALEN)) 381 if (compare_ether_addr(hdr->addr2, sta->sdata->vif.addr))
381 continue; 382 continue;
382 383
383 if (info->flags & IEEE80211_TX_STATUS_EOSP) 384 if (info->flags & IEEE80211_TX_STATUS_EOSP)
@@ -412,7 +413,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
412 } 413 }
413 414
414 if (!acked && ieee80211_is_back_req(fc)) { 415 if (!acked && ieee80211_is_back_req(fc)) {
415 u16 control; 416 u16 tid, control;
416 417
417 /* 418 /*
418 * BAR failed, store the last SSN and retry sending 419 * BAR failed, store the last SSN and retry sending
@@ -516,7 +517,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
516 517
517 if (ieee80211_is_nullfunc(hdr->frame_control) || 518 if (ieee80211_is_nullfunc(hdr->frame_control) ||
518 ieee80211_is_qos_nullfunc(hdr->frame_control)) { 519 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
519 bool acked = info->flags & IEEE80211_TX_STAT_ACK; 520 acked = info->flags & IEEE80211_TX_STAT_ACK;
521
520 cfg80211_probe_status(skb->dev, hdr->addr1, 522 cfg80211_probe_status(skb->dev, hdr->addr1,
521 cookie, acked, GFP_ATOMIC); 523 cookie, acked, GFP_ATOMIC);
522 } else { 524 } else {
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index e05667cd5e76..782a60198df4 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -226,12 +226,12 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
226 * have correct qos tag for some reason, due the network or the 226 * have correct qos tag for some reason, due the network or the
227 * peer application. 227 * peer application.
228 * 228 *
229 * Note: local->uapsd_queues access is racy here. If the value is 229 * Note: ifmgd->uapsd_queues access is racy here. If the value is
230 * changed via debugfs, user needs to reassociate manually to have 230 * changed via debugfs, user needs to reassociate manually to have
231 * everything in sync. 231 * everything in sync.
232 */ 232 */
233 if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) 233 if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
234 && (local->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) 234 && (ifmgd->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
235 && skb_get_queue_mapping(tx->skb) == 0) 235 && skb_get_queue_mapping(tx->skb) == 0)
236 return TX_CONTINUE; 236 return TX_CONTINUE;
237 237
@@ -448,18 +448,23 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
448 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 448 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
449 struct ieee80211_local *local = tx->local; 449 struct ieee80211_local *local = tx->local;
450 450
451 if (unlikely(!sta || 451 if (unlikely(!sta))
452 ieee80211_is_probe_resp(hdr->frame_control) ||
453 ieee80211_is_auth(hdr->frame_control) ||
454 ieee80211_is_assoc_resp(hdr->frame_control) ||
455 ieee80211_is_reassoc_resp(hdr->frame_control)))
456 return TX_CONTINUE; 452 return TX_CONTINUE;
457 453
458 if (unlikely((test_sta_flag(sta, WLAN_STA_PS_STA) || 454 if (unlikely((test_sta_flag(sta, WLAN_STA_PS_STA) ||
459 test_sta_flag(sta, WLAN_STA_PS_DRIVER)) && 455 test_sta_flag(sta, WLAN_STA_PS_DRIVER)) &&
460 !(info->flags & IEEE80211_TX_CTL_POLL_RESPONSE))) { 456 !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
461 int ac = skb_get_queue_mapping(tx->skb); 457 int ac = skb_get_queue_mapping(tx->skb);
462 458
459 /* only deauth, disassoc and action are bufferable MMPDUs */
460 if (ieee80211_is_mgmt(hdr->frame_control) &&
461 !ieee80211_is_deauth(hdr->frame_control) &&
462 !ieee80211_is_disassoc(hdr->frame_control) &&
463 !ieee80211_is_action(hdr->frame_control)) {
464 info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
465 return TX_CONTINUE;
466 }
467
463#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 468#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
464 printk(KERN_DEBUG "STA %pM aid %d: PS buffer for AC %d\n", 469 printk(KERN_DEBUG "STA %pM aid %d: PS buffer for AC %d\n",
465 sta->sta.addr, sta->sta.aid, ac); 470 sta->sta.addr, sta->sta.aid, ac);
@@ -625,7 +630,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
625 tx->local->hw.wiphy->frag_threshold); 630 tx->local->hw.wiphy->frag_threshold);
626 631
627 /* set up the tx rate control struct we give the RC algo */ 632 /* set up the tx rate control struct we give the RC algo */
628 txrc.hw = local_to_hw(tx->local); 633 txrc.hw = &tx->local->hw;
629 txrc.sband = sband; 634 txrc.sband = sband;
630 txrc.bss_conf = &tx->sdata->vif.bss_conf; 635 txrc.bss_conf = &tx->sdata->vif.bss_conf;
631 txrc.skb = tx->skb; 636 txrc.skb = tx->skb;
@@ -635,6 +640,9 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
635 txrc.max_rate_idx = -1; 640 txrc.max_rate_idx = -1;
636 else 641 else
637 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1; 642 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
643 memcpy(txrc.rate_idx_mcs_mask,
644 tx->sdata->rc_rateidx_mcs_mask[tx->channel->band],
645 sizeof(txrc.rate_idx_mcs_mask));
638 txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP || 646 txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
639 tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT || 647 tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
640 tx->sdata->vif.type == NL80211_IFTYPE_ADHOC); 648 tx->sdata->vif.type == NL80211_IFTYPE_ADHOC);
@@ -1057,6 +1065,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
1057{ 1065{
1058 bool queued = false; 1066 bool queued = false;
1059 bool reset_agg_timer = false; 1067 bool reset_agg_timer = false;
1068 struct sk_buff *purge_skb = NULL;
1060 1069
1061 if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) { 1070 if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
1062 info->flags |= IEEE80211_TX_CTL_AMPDU; 1071 info->flags |= IEEE80211_TX_CTL_AMPDU;
@@ -1098,8 +1107,13 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
1098 info->control.vif = &tx->sdata->vif; 1107 info->control.vif = &tx->sdata->vif;
1099 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 1108 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1100 __skb_queue_tail(&tid_tx->pending, skb); 1109 __skb_queue_tail(&tid_tx->pending, skb);
1110 if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)
1111 purge_skb = __skb_dequeue(&tid_tx->pending);
1101 } 1112 }
1102 spin_unlock(&tx->sta->lock); 1113 spin_unlock(&tx->sta->lock);
1114
1115 if (purge_skb)
1116 dev_kfree_skb(purge_skb);
1103 } 1117 }
1104 1118
1105 /* reset session timer */ 1119 /* reset session timer */
@@ -2203,7 +2217,8 @@ void ieee80211_tx_pending(unsigned long data)
2203 2217
2204/* functions for drivers to get certain frames */ 2218/* functions for drivers to get certain frames */
2205 2219
2206static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss, 2220static void ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
2221 struct ieee80211_if_ap *bss,
2207 struct sk_buff *skb, 2222 struct sk_buff *skb,
2208 struct beacon_data *beacon) 2223 struct beacon_data *beacon)
2209{ 2224{
@@ -2220,7 +2235,7 @@ static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss,
2220 IEEE80211_MAX_AID+1); 2235 IEEE80211_MAX_AID+1);
2221 2236
2222 if (bss->dtim_count == 0) 2237 if (bss->dtim_count == 0)
2223 bss->dtim_count = beacon->dtim_period - 1; 2238 bss->dtim_count = sdata->vif.bss_conf.dtim_period - 1;
2224 else 2239 else
2225 bss->dtim_count--; 2240 bss->dtim_count--;
2226 2241
@@ -2228,7 +2243,7 @@ static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss,
2228 *pos++ = WLAN_EID_TIM; 2243 *pos++ = WLAN_EID_TIM;
2229 *pos++ = 4; 2244 *pos++ = 4;
2230 *pos++ = bss->dtim_count; 2245 *pos++ = bss->dtim_count;
2231 *pos++ = beacon->dtim_period; 2246 *pos++ = sdata->vif.bss_conf.dtim_period;
2232 2247
2233 if (bss->dtim_count == 0 && !skb_queue_empty(&bss->ps_bc_buf)) 2248 if (bss->dtim_count == 0 && !skb_queue_empty(&bss->ps_bc_buf))
2234 aid0 = 1; 2249 aid0 = 1;
@@ -2321,12 +2336,14 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2321 * of the tim bitmap in mac80211 and the driver. 2336 * of the tim bitmap in mac80211 and the driver.
2322 */ 2337 */
2323 if (local->tim_in_locked_section) { 2338 if (local->tim_in_locked_section) {
2324 ieee80211_beacon_add_tim(ap, skb, beacon); 2339 ieee80211_beacon_add_tim(sdata, ap, skb,
2340 beacon);
2325 } else { 2341 } else {
2326 unsigned long flags; 2342 unsigned long flags;
2327 2343
2328 spin_lock_irqsave(&local->tim_lock, flags); 2344 spin_lock_irqsave(&local->tim_lock, flags);
2329 ieee80211_beacon_add_tim(ap, skb, beacon); 2345 ieee80211_beacon_add_tim(sdata, ap, skb,
2346 beacon);
2330 spin_unlock_irqrestore(&local->tim_lock, flags); 2347 spin_unlock_irqrestore(&local->tim_lock, flags);
2331 } 2348 }
2332 2349
@@ -2431,6 +2448,8 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2431 txrc.max_rate_idx = -1; 2448 txrc.max_rate_idx = -1;
2432 else 2449 else
2433 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1; 2450 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
2451 memcpy(txrc.rate_idx_mcs_mask, sdata->rc_rateidx_mcs_mask[band],
2452 sizeof(txrc.rate_idx_mcs_mask));
2434 txrc.bss = true; 2453 txrc.bss = true;
2435 rate_control_get_rate(sdata, NULL, &txrc); 2454 rate_control_get_rate(sdata, NULL, &txrc);
2436 2455
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 9919892575f4..32f7a3b3d43c 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -572,24 +572,40 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
572 size_t left = len; 572 size_t left = len;
573 u8 *pos = start; 573 u8 *pos = start;
574 bool calc_crc = filter != 0; 574 bool calc_crc = filter != 0;
575 DECLARE_BITMAP(seen_elems, 256);
575 576
577 bitmap_zero(seen_elems, 256);
576 memset(elems, 0, sizeof(*elems)); 578 memset(elems, 0, sizeof(*elems));
577 elems->ie_start = start; 579 elems->ie_start = start;
578 elems->total_len = len; 580 elems->total_len = len;
579 581
580 while (left >= 2) { 582 while (left >= 2) {
581 u8 id, elen; 583 u8 id, elen;
584 bool elem_parse_failed;
582 585
583 id = *pos++; 586 id = *pos++;
584 elen = *pos++; 587 elen = *pos++;
585 left -= 2; 588 left -= 2;
586 589
587 if (elen > left) 590 if (elen > left) {
591 elems->parse_error = true;
588 break; 592 break;
593 }
594
595 if (id != WLAN_EID_VENDOR_SPECIFIC &&
596 id != WLAN_EID_QUIET &&
597 test_bit(id, seen_elems)) {
598 elems->parse_error = true;
599 left -= elen;
600 pos += elen;
601 continue;
602 }
589 603
590 if (calc_crc && id < 64 && (filter & (1ULL << id))) 604 if (calc_crc && id < 64 && (filter & (1ULL << id)))
591 crc = crc32_be(crc, pos - 2, elen + 2); 605 crc = crc32_be(crc, pos - 2, elen + 2);
592 606
607 elem_parse_failed = false;
608
593 switch (id) { 609 switch (id) {
594 case WLAN_EID_SSID: 610 case WLAN_EID_SSID:
595 elems->ssid = pos; 611 elems->ssid = pos;
@@ -615,7 +631,8 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
615 if (elen >= sizeof(struct ieee80211_tim_ie)) { 631 if (elen >= sizeof(struct ieee80211_tim_ie)) {
616 elems->tim = (void *)pos; 632 elems->tim = (void *)pos;
617 elems->tim_len = elen; 633 elems->tim_len = elen;
618 } 634 } else
635 elem_parse_failed = true;
619 break; 636 break;
620 case WLAN_EID_IBSS_PARAMS: 637 case WLAN_EID_IBSS_PARAMS:
621 elems->ibss_params = pos; 638 elems->ibss_params = pos;
@@ -664,10 +681,14 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
664 case WLAN_EID_HT_CAPABILITY: 681 case WLAN_EID_HT_CAPABILITY:
665 if (elen >= sizeof(struct ieee80211_ht_cap)) 682 if (elen >= sizeof(struct ieee80211_ht_cap))
666 elems->ht_cap_elem = (void *)pos; 683 elems->ht_cap_elem = (void *)pos;
684 else
685 elem_parse_failed = true;
667 break; 686 break;
668 case WLAN_EID_HT_INFORMATION: 687 case WLAN_EID_HT_INFORMATION:
669 if (elen >= sizeof(struct ieee80211_ht_info)) 688 if (elen >= sizeof(struct ieee80211_ht_info))
670 elems->ht_info_elem = (void *)pos; 689 elems->ht_info_elem = (void *)pos;
690 else
691 elem_parse_failed = true;
671 break; 692 break;
672 case WLAN_EID_MESH_ID: 693 case WLAN_EID_MESH_ID:
673 elems->mesh_id = pos; 694 elems->mesh_id = pos;
@@ -676,6 +697,8 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
676 case WLAN_EID_MESH_CONFIG: 697 case WLAN_EID_MESH_CONFIG:
677 if (elen >= sizeof(struct ieee80211_meshconf_ie)) 698 if (elen >= sizeof(struct ieee80211_meshconf_ie))
678 elems->mesh_config = (void *)pos; 699 elems->mesh_config = (void *)pos;
700 else
701 elem_parse_failed = true;
679 break; 702 break;
680 case WLAN_EID_PEER_MGMT: 703 case WLAN_EID_PEER_MGMT:
681 elems->peering = pos; 704 elems->peering = pos;
@@ -696,6 +719,8 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
696 case WLAN_EID_RANN: 719 case WLAN_EID_RANN:
697 if (elen >= sizeof(struct ieee80211_rann_ie)) 720 if (elen >= sizeof(struct ieee80211_rann_ie))
698 elems->rann = (void *)pos; 721 elems->rann = (void *)pos;
722 else
723 elem_parse_failed = true;
699 break; 724 break;
700 case WLAN_EID_CHANNEL_SWITCH: 725 case WLAN_EID_CHANNEL_SWITCH:
701 elems->ch_switch_elem = pos; 726 elems->ch_switch_elem = pos;
@@ -724,10 +749,18 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
724 break; 749 break;
725 } 750 }
726 751
752 if (elem_parse_failed)
753 elems->parse_error = true;
754 else
755 set_bit(id, seen_elems);
756
727 left -= elen; 757 left -= elen;
728 pos += elen; 758 pos += elen;
729 } 759 }
730 760
761 if (left != 0)
762 elems->parse_error = true;
763
731 return crc; 764 return crc;
732} 765}
733 766
@@ -737,7 +770,8 @@ void ieee802_11_parse_elems(u8 *start, size_t len,
737 ieee802_11_parse_elems_crc(start, len, elems, 0, 0); 770 ieee802_11_parse_elems_crc(start, len, elems, 0, 0);
738} 771}
739 772
740void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata) 773void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
774 bool bss_notify)
741{ 775{
742 struct ieee80211_local *local = sdata->local; 776 struct ieee80211_local *local = sdata->local;
743 struct ieee80211_tx_queue_params qparam; 777 struct ieee80211_tx_queue_params qparam;
@@ -753,7 +787,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
753 use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) && 787 use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) &&
754 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE); 788 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);
755 789
756 for (queue = 0; queue < local_to_hw(local)->queues; queue++) { 790 for (queue = 0; queue < local->hw.queues; queue++) {
757 /* Set defaults according to 802.11-2007 Table 7-37 */ 791 /* Set defaults according to 802.11-2007 Table 7-37 */
758 aCWmax = 1023; 792 aCWmax = 1023;
759 if (use_11b) 793 if (use_11b)
@@ -807,7 +841,9 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
807 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) { 841 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
808 sdata->vif.bss_conf.qos = 842 sdata->vif.bss_conf.qos =
809 sdata->vif.type != NL80211_IFTYPE_STATION; 843 sdata->vif.type != NL80211_IFTYPE_STATION;
810 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_QOS); 844 if (bss_notify)
845 ieee80211_bss_info_change_notify(sdata,
846 BSS_CHANGED_QOS);
811 } 847 }
812} 848}
813 849
@@ -829,7 +865,7 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
829 else 865 else
830 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; 866 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
831 867
832 ieee80211_set_wmm_default(sdata); 868 ieee80211_set_wmm_default(sdata, true);
833} 869}
834 870
835u32 ieee80211_mandatory_rates(struct ieee80211_local *local, 871u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
@@ -862,8 +898,8 @@ u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
862 898
863void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, 899void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
864 u16 transaction, u16 auth_alg, 900 u16 transaction, u16 auth_alg,
865 u8 *extra, size_t extra_len, const u8 *bssid, 901 u8 *extra, size_t extra_len, const u8 *da,
866 const u8 *key, u8 key_len, u8 key_idx) 902 const u8 *bssid, const u8 *key, u8 key_len, u8 key_idx)
867{ 903{
868 struct ieee80211_local *local = sdata->local; 904 struct ieee80211_local *local = sdata->local;
869 struct sk_buff *skb; 905 struct sk_buff *skb;
@@ -881,7 +917,7 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
881 memset(mgmt, 0, 24 + 6); 917 memset(mgmt, 0, 24 + 6);
882 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 918 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
883 IEEE80211_STYPE_AUTH); 919 IEEE80211_STYPE_AUTH);
884 memcpy(mgmt->da, bssid, ETH_ALEN); 920 memcpy(mgmt->da, da, ETH_ALEN);
885 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); 921 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
886 memcpy(mgmt->bssid, bssid, ETH_ALEN); 922 memcpy(mgmt->bssid, bssid, ETH_ALEN);
887 mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg); 923 mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg);
@@ -1185,13 +1221,12 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1185 mutex_lock(&local->sta_mtx); 1221 mutex_lock(&local->sta_mtx);
1186 list_for_each_entry(sta, &local->sta_list, list) { 1222 list_for_each_entry(sta, &local->sta_list, list) {
1187 if (sta->uploaded) { 1223 if (sta->uploaded) {
1188 sdata = sta->sdata; 1224 enum ieee80211_sta_state state;
1189 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1190 sdata = container_of(sdata->bss,
1191 struct ieee80211_sub_if_data,
1192 u.ap);
1193 1225
1194 WARN_ON(drv_sta_add(local, sdata, &sta->sta)); 1226 for (state = IEEE80211_STA_NOTEXIST;
1227 state < sta->sta_state - 1; state++)
1228 WARN_ON(drv_sta_state(local, sta->sdata, sta,
1229 state, state + 1));
1195 } 1230 }
1196 } 1231 }
1197 mutex_unlock(&local->sta_mtx); 1232 mutex_unlock(&local->sta_mtx);
@@ -1272,6 +1307,21 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1272 ieee80211_recalc_ps(local, -1); 1307 ieee80211_recalc_ps(local, -1);
1273 1308
1274 /* 1309 /*
1310 * The sta might be in psm against the ap (e.g. because
1311 * this was the state before a hw restart), so we
1312 * explicitly send a null packet in order to make sure
1313 * it'll sync against the ap (and get out of psm).
1314 */
1315 if (!(local->hw.conf.flags & IEEE80211_CONF_PS)) {
1316 list_for_each_entry(sdata, &local->interfaces, list) {
1317 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1318 continue;
1319
1320 ieee80211_send_nullfunc(local, sdata, 0);
1321 }
1322 }
1323
1324 /*
1275 * Clear the WLAN_STA_BLOCK_BA flag so new aggregation 1325 * Clear the WLAN_STA_BLOCK_BA flag so new aggregation
1276 * sessions can be established after a resume. 1326 * sessions can be established after a resume.
1277 * 1327 *
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 68ad351479df..7aa31bbfaa3b 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -263,16 +263,14 @@ static int ieee80211_wep_decrypt(struct ieee80211_local *local,
263} 263}
264 264
265 265
266bool ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key) 266static bool ieee80211_wep_is_weak_iv(struct sk_buff *skb,
267 struct ieee80211_key *key)
267{ 268{
268 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 269 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
269 unsigned int hdrlen; 270 unsigned int hdrlen;
270 u8 *ivpos; 271 u8 *ivpos;
271 u32 iv; 272 u32 iv;
272 273
273 if (!ieee80211_has_protected(hdr->frame_control))
274 return false;
275
276 hdrlen = ieee80211_hdrlen(hdr->frame_control); 274 hdrlen = ieee80211_hdrlen(hdr->frame_control);
277 ivpos = skb->data + hdrlen; 275 ivpos = skb->data + hdrlen;
278 iv = (ivpos[0] << 16) | (ivpos[1] << 8) | ivpos[2]; 276 iv = (ivpos[0] << 16) | (ivpos[1] << 8) | ivpos[2];
@@ -286,18 +284,27 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
286 struct sk_buff *skb = rx->skb; 284 struct sk_buff *skb = rx->skb;
287 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 285 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
288 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 286 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
287 __le16 fc = hdr->frame_control;
289 288
290 if (!ieee80211_is_data(hdr->frame_control) && 289 if (!ieee80211_is_data(fc) && !ieee80211_is_auth(fc))
291 !ieee80211_is_auth(hdr->frame_control))
292 return RX_CONTINUE; 290 return RX_CONTINUE;
293 291
294 if (!(status->flag & RX_FLAG_DECRYPTED)) { 292 if (!(status->flag & RX_FLAG_DECRYPTED)) {
293 if (skb_linearize(rx->skb))
294 return RX_DROP_UNUSABLE;
295 if (rx->sta && ieee80211_wep_is_weak_iv(rx->skb, rx->key))
296 rx->sta->wep_weak_iv_count++;
295 if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) 297 if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key))
296 return RX_DROP_UNUSABLE; 298 return RX_DROP_UNUSABLE;
297 } else if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 299 } else if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
300 if (!pskb_may_pull(rx->skb, ieee80211_hdrlen(fc) + WEP_IV_LEN))
301 return RX_DROP_UNUSABLE;
302 if (rx->sta && ieee80211_wep_is_weak_iv(rx->skb, rx->key))
303 rx->sta->wep_weak_iv_count++;
298 ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); 304 ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key);
299 /* remove ICV */ 305 /* remove ICV */
300 skb_trim(rx->skb, rx->skb->len - WEP_ICV_LEN); 306 if (pskb_trim(rx->skb, rx->skb->len - WEP_ICV_LEN))
307 return RX_DROP_UNUSABLE;
301 } 308 }
302 309
303 return RX_CONTINUE; 310 return RX_CONTINUE;
diff --git a/net/mac80211/wep.h b/net/mac80211/wep.h
index 01e54840a628..9615749d1f65 100644
--- a/net/mac80211/wep.h
+++ b/net/mac80211/wep.h
@@ -25,7 +25,6 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local,
25 const u8 *key, int keylen, int keyidx); 25 const u8 *key, int keylen, int keyidx);
26int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key, 26int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
27 size_t klen, u8 *data, size_t data_len); 27 size_t klen, u8 *data, size_t data_len);
28bool ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key);
29 28
30ieee80211_rx_result 29ieee80211_rx_result
31ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx); 30ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx);
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index c6dd01a05291..c6e230efa049 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -27,16 +27,9 @@
27#include "rate.h" 27#include "rate.h"
28#include "driver-ops.h" 28#include "driver-ops.h"
29 29
30#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
31#define IEEE80211_AUTH_MAX_TRIES 3
32#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
33#define IEEE80211_ASSOC_MAX_TRIES 3
34
35enum work_action { 30enum work_action {
36 WORK_ACT_MISMATCH,
37 WORK_ACT_NONE, 31 WORK_ACT_NONE,
38 WORK_ACT_TIMEOUT, 32 WORK_ACT_TIMEOUT,
39 WORK_ACT_DONE,
40}; 33};
41 34
42 35
@@ -71,464 +64,6 @@ void free_work(struct ieee80211_work *wk)
71 kfree_rcu(wk, rcu_head); 64 kfree_rcu(wk, rcu_head);
72} 65}
73 66
74static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
75 struct ieee80211_supported_band *sband,
76 u32 *rates)
77{
78 int i, j, count;
79 *rates = 0;
80 count = 0;
81 for (i = 0; i < supp_rates_len; i++) {
82 int rate = (supp_rates[i] & 0x7F) * 5;
83
84 for (j = 0; j < sband->n_bitrates; j++)
85 if (sband->bitrates[j].bitrate == rate) {
86 *rates |= BIT(j);
87 count++;
88 break;
89 }
90 }
91
92 return count;
93}
94
95/* frame sending functions */
96
97static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
98 struct sk_buff *skb, const u8 *ht_info_ie,
99 struct ieee80211_supported_band *sband,
100 struct ieee80211_channel *channel,
101 enum ieee80211_smps_mode smps)
102{
103 struct ieee80211_ht_info *ht_info;
104 u8 *pos;
105 u32 flags = channel->flags;
106 u16 cap;
107 struct ieee80211_sta_ht_cap ht_cap;
108
109 BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap));
110
111 if (!sband->ht_cap.ht_supported)
112 return;
113
114 if (!ht_info_ie)
115 return;
116
117 if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info))
118 return;
119
120 memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
121 ieee80211_apply_htcap_overrides(sdata, &ht_cap);
122
123 ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2);
124
125 /* determine capability flags */
126 cap = ht_cap.cap;
127
128 switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
129 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
130 if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
131 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
132 cap &= ~IEEE80211_HT_CAP_SGI_40;
133 }
134 break;
135 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
136 if (flags & IEEE80211_CHAN_NO_HT40MINUS) {
137 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
138 cap &= ~IEEE80211_HT_CAP_SGI_40;
139 }
140 break;
141 }
142
143 /* set SM PS mode properly */
144 cap &= ~IEEE80211_HT_CAP_SM_PS;
145 switch (smps) {
146 case IEEE80211_SMPS_AUTOMATIC:
147 case IEEE80211_SMPS_NUM_MODES:
148 WARN_ON(1);
149 case IEEE80211_SMPS_OFF:
150 cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
151 IEEE80211_HT_CAP_SM_PS_SHIFT;
152 break;
153 case IEEE80211_SMPS_STATIC:
154 cap |= WLAN_HT_CAP_SM_PS_STATIC <<
155 IEEE80211_HT_CAP_SM_PS_SHIFT;
156 break;
157 case IEEE80211_SMPS_DYNAMIC:
158 cap |= WLAN_HT_CAP_SM_PS_DYNAMIC <<
159 IEEE80211_HT_CAP_SM_PS_SHIFT;
160 break;
161 }
162
163 /* reserve and fill IE */
164 pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
165 ieee80211_ie_build_ht_cap(pos, &ht_cap, cap);
166}
167
168static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
169 struct ieee80211_work *wk)
170{
171 struct ieee80211_local *local = sdata->local;
172 struct sk_buff *skb;
173 struct ieee80211_mgmt *mgmt;
174 u8 *pos, qos_info;
175 size_t offset = 0, noffset;
176 int i, count, rates_len, supp_rates_len;
177 u16 capab;
178 struct ieee80211_supported_band *sband;
179 u32 rates = 0;
180
181 sband = local->hw.wiphy->bands[wk->chan->band];
182
183 if (wk->assoc.supp_rates_len) {
184 /*
185 * Get all rates supported by the device and the AP as
186 * some APs don't like getting a superset of their rates
187 * in the association request (e.g. D-Link DAP 1353 in
188 * b-only mode)...
189 */
190 rates_len = ieee80211_compatible_rates(wk->assoc.supp_rates,
191 wk->assoc.supp_rates_len,
192 sband, &rates);
193 } else {
194 /*
195 * In case AP not provide any supported rates information
196 * before association, we send information element(s) with
197 * all rates that we support.
198 */
199 rates = ~0;
200 rates_len = sband->n_bitrates;
201 }
202
203 skb = alloc_skb(local->hw.extra_tx_headroom +
204 sizeof(*mgmt) + /* bit too much but doesn't matter */
205 2 + wk->assoc.ssid_len + /* SSID */
206 4 + rates_len + /* (extended) rates */
207 4 + /* power capability */
208 2 + 2 * sband->n_channels + /* supported channels */
209 2 + sizeof(struct ieee80211_ht_cap) + /* HT */
210 wk->ie_len + /* extra IEs */
211 9, /* WMM */
212 GFP_KERNEL);
213 if (!skb)
214 return;
215
216 skb_reserve(skb, local->hw.extra_tx_headroom);
217
218 capab = WLAN_CAPABILITY_ESS;
219
220 if (sband->band == IEEE80211_BAND_2GHZ) {
221 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
222 capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
223 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
224 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
225 }
226
227 if (wk->assoc.capability & WLAN_CAPABILITY_PRIVACY)
228 capab |= WLAN_CAPABILITY_PRIVACY;
229
230 if ((wk->assoc.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
231 (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
232 capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
233
234 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
235 memset(mgmt, 0, 24);
236 memcpy(mgmt->da, wk->filter_ta, ETH_ALEN);
237 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
238 memcpy(mgmt->bssid, wk->filter_ta, ETH_ALEN);
239
240 if (!is_zero_ether_addr(wk->assoc.prev_bssid)) {
241 skb_put(skb, 10);
242 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
243 IEEE80211_STYPE_REASSOC_REQ);
244 mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
245 mgmt->u.reassoc_req.listen_interval =
246 cpu_to_le16(local->hw.conf.listen_interval);
247 memcpy(mgmt->u.reassoc_req.current_ap, wk->assoc.prev_bssid,
248 ETH_ALEN);
249 } else {
250 skb_put(skb, 4);
251 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
252 IEEE80211_STYPE_ASSOC_REQ);
253 mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
254 mgmt->u.assoc_req.listen_interval =
255 cpu_to_le16(local->hw.conf.listen_interval);
256 }
257
258 /* SSID */
259 pos = skb_put(skb, 2 + wk->assoc.ssid_len);
260 *pos++ = WLAN_EID_SSID;
261 *pos++ = wk->assoc.ssid_len;
262 memcpy(pos, wk->assoc.ssid, wk->assoc.ssid_len);
263
264 /* add all rates which were marked to be used above */
265 supp_rates_len = rates_len;
266 if (supp_rates_len > 8)
267 supp_rates_len = 8;
268
269 pos = skb_put(skb, supp_rates_len + 2);
270 *pos++ = WLAN_EID_SUPP_RATES;
271 *pos++ = supp_rates_len;
272
273 count = 0;
274 for (i = 0; i < sband->n_bitrates; i++) {
275 if (BIT(i) & rates) {
276 int rate = sband->bitrates[i].bitrate;
277 *pos++ = (u8) (rate / 5);
278 if (++count == 8)
279 break;
280 }
281 }
282
283 if (rates_len > count) {
284 pos = skb_put(skb, rates_len - count + 2);
285 *pos++ = WLAN_EID_EXT_SUPP_RATES;
286 *pos++ = rates_len - count;
287
288 for (i++; i < sband->n_bitrates; i++) {
289 if (BIT(i) & rates) {
290 int rate = sband->bitrates[i].bitrate;
291 *pos++ = (u8) (rate / 5);
292 }
293 }
294 }
295
296 if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) {
297 /* 1. power capabilities */
298 pos = skb_put(skb, 4);
299 *pos++ = WLAN_EID_PWR_CAPABILITY;
300 *pos++ = 2;
301 *pos++ = 0; /* min tx power */
302 *pos++ = wk->chan->max_power; /* max tx power */
303
304 /* 2. supported channels */
305 /* TODO: get this in reg domain format */
306 pos = skb_put(skb, 2 * sband->n_channels + 2);
307 *pos++ = WLAN_EID_SUPPORTED_CHANNELS;
308 *pos++ = 2 * sband->n_channels;
309 for (i = 0; i < sband->n_channels; i++) {
310 *pos++ = ieee80211_frequency_to_channel(
311 sband->channels[i].center_freq);
312 *pos++ = 1; /* one channel in the subband*/
313 }
314 }
315
316 /* if present, add any custom IEs that go before HT */
317 if (wk->ie_len && wk->ie) {
318 static const u8 before_ht[] = {
319 WLAN_EID_SSID,
320 WLAN_EID_SUPP_RATES,
321 WLAN_EID_EXT_SUPP_RATES,
322 WLAN_EID_PWR_CAPABILITY,
323 WLAN_EID_SUPPORTED_CHANNELS,
324 WLAN_EID_RSN,
325 WLAN_EID_QOS_CAPA,
326 WLAN_EID_RRM_ENABLED_CAPABILITIES,
327 WLAN_EID_MOBILITY_DOMAIN,
328 WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
329 };
330 noffset = ieee80211_ie_split(wk->ie, wk->ie_len,
331 before_ht, ARRAY_SIZE(before_ht),
332 offset);
333 pos = skb_put(skb, noffset - offset);
334 memcpy(pos, wk->ie + offset, noffset - offset);
335 offset = noffset;
336 }
337
338 if (wk->assoc.use_11n && wk->assoc.wmm_used &&
339 local->hw.queues >= 4)
340 ieee80211_add_ht_ie(sdata, skb, wk->assoc.ht_information_ie,
341 sband, wk->chan, wk->assoc.smps);
342
343 /* if present, add any custom non-vendor IEs that go after HT */
344 if (wk->ie_len && wk->ie) {
345 noffset = ieee80211_ie_split_vendor(wk->ie, wk->ie_len,
346 offset);
347 pos = skb_put(skb, noffset - offset);
348 memcpy(pos, wk->ie + offset, noffset - offset);
349 offset = noffset;
350 }
351
352 if (wk->assoc.wmm_used && local->hw.queues >= 4) {
353 if (wk->assoc.uapsd_used) {
354 qos_info = local->uapsd_queues;
355 qos_info |= (local->uapsd_max_sp_len <<
356 IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT);
357 } else {
358 qos_info = 0;
359 }
360
361 pos = skb_put(skb, 9);
362 *pos++ = WLAN_EID_VENDOR_SPECIFIC;
363 *pos++ = 7; /* len */
364 *pos++ = 0x00; /* Microsoft OUI 00:50:F2 */
365 *pos++ = 0x50;
366 *pos++ = 0xf2;
367 *pos++ = 2; /* WME */
368 *pos++ = 0; /* WME info */
369 *pos++ = 1; /* WME ver */
370 *pos++ = qos_info;
371 }
372
373 /* add any remaining custom (i.e. vendor specific here) IEs */
374 if (wk->ie_len && wk->ie) {
375 noffset = wk->ie_len;
376 pos = skb_put(skb, noffset - offset);
377 memcpy(pos, wk->ie + offset, noffset - offset);
378 }
379
380 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
381 ieee80211_tx_skb(sdata, skb);
382}
383
384static void ieee80211_remove_auth_bss(struct ieee80211_local *local,
385 struct ieee80211_work *wk)
386{
387 struct cfg80211_bss *cbss;
388 u16 capa_val = WLAN_CAPABILITY_ESS;
389
390 if (wk->probe_auth.privacy)
391 capa_val |= WLAN_CAPABILITY_PRIVACY;
392
393 cbss = cfg80211_get_bss(local->hw.wiphy, wk->chan, wk->filter_ta,
394 wk->probe_auth.ssid, wk->probe_auth.ssid_len,
395 WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY,
396 capa_val);
397 if (!cbss)
398 return;
399
400 cfg80211_unlink_bss(local->hw.wiphy, cbss);
401 cfg80211_put_bss(cbss);
402}
403
404static enum work_action __must_check
405ieee80211_direct_probe(struct ieee80211_work *wk)
406{
407 struct ieee80211_sub_if_data *sdata = wk->sdata;
408 struct ieee80211_local *local = sdata->local;
409
410 if (!wk->probe_auth.synced) {
411 int ret = drv_tx_sync(local, sdata, wk->filter_ta,
412 IEEE80211_TX_SYNC_AUTH);
413 if (ret)
414 return WORK_ACT_TIMEOUT;
415 }
416 wk->probe_auth.synced = true;
417
418 wk->probe_auth.tries++;
419 if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) {
420 printk(KERN_DEBUG "%s: direct probe to %pM timed out\n",
421 sdata->name, wk->filter_ta);
422
423 /*
424 * Most likely AP is not in the range so remove the
425 * bss struct for that AP.
426 */
427 ieee80211_remove_auth_bss(local, wk);
428
429 return WORK_ACT_TIMEOUT;
430 }
431
432 printk(KERN_DEBUG "%s: direct probe to %pM (try %d/%i)\n",
433 sdata->name, wk->filter_ta, wk->probe_auth.tries,
434 IEEE80211_AUTH_MAX_TRIES);
435
436 /*
437 * Direct probe is sent to broadcast address as some APs
438 * will not answer to direct packet in unassociated state.
439 */
440 ieee80211_send_probe_req(sdata, NULL, wk->probe_auth.ssid,
441 wk->probe_auth.ssid_len, NULL, 0,
442 (u32) -1, true, false);
443
444 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
445 run_again(local, wk->timeout);
446
447 return WORK_ACT_NONE;
448}
449
450
451static enum work_action __must_check
452ieee80211_authenticate(struct ieee80211_work *wk)
453{
454 struct ieee80211_sub_if_data *sdata = wk->sdata;
455 struct ieee80211_local *local = sdata->local;
456
457 if (!wk->probe_auth.synced) {
458 int ret = drv_tx_sync(local, sdata, wk->filter_ta,
459 IEEE80211_TX_SYNC_AUTH);
460 if (ret)
461 return WORK_ACT_TIMEOUT;
462 }
463 wk->probe_auth.synced = true;
464
465 wk->probe_auth.tries++;
466 if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) {
467 printk(KERN_DEBUG "%s: authentication with %pM"
468 " timed out\n", sdata->name, wk->filter_ta);
469
470 /*
471 * Most likely AP is not in the range so remove the
472 * bss struct for that AP.
473 */
474 ieee80211_remove_auth_bss(local, wk);
475
476 return WORK_ACT_TIMEOUT;
477 }
478
479 printk(KERN_DEBUG "%s: authenticate with %pM (try %d)\n",
480 sdata->name, wk->filter_ta, wk->probe_auth.tries);
481
482 ieee80211_send_auth(sdata, 1, wk->probe_auth.algorithm, wk->ie,
483 wk->ie_len, wk->filter_ta, NULL, 0, 0);
484 wk->probe_auth.transaction = 2;
485
486 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
487 run_again(local, wk->timeout);
488
489 return WORK_ACT_NONE;
490}
491
492static enum work_action __must_check
493ieee80211_associate(struct ieee80211_work *wk)
494{
495 struct ieee80211_sub_if_data *sdata = wk->sdata;
496 struct ieee80211_local *local = sdata->local;
497
498 if (!wk->assoc.synced) {
499 int ret = drv_tx_sync(local, sdata, wk->filter_ta,
500 IEEE80211_TX_SYNC_ASSOC);
501 if (ret)
502 return WORK_ACT_TIMEOUT;
503 }
504 wk->assoc.synced = true;
505
506 wk->assoc.tries++;
507 if (wk->assoc.tries > IEEE80211_ASSOC_MAX_TRIES) {
508 printk(KERN_DEBUG "%s: association with %pM"
509 " timed out\n",
510 sdata->name, wk->filter_ta);
511
512 /*
513 * Most likely AP is not in the range so remove the
514 * bss struct for that AP.
515 */
516 if (wk->assoc.bss)
517 cfg80211_unlink_bss(local->hw.wiphy, wk->assoc.bss);
518
519 return WORK_ACT_TIMEOUT;
520 }
521
522 printk(KERN_DEBUG "%s: associate with %pM (try %d)\n",
523 sdata->name, wk->filter_ta, wk->assoc.tries);
524 ieee80211_send_assoc(sdata, wk);
525
526 wk->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
527 run_again(local, wk->timeout);
528
529 return WORK_ACT_NONE;
530}
531
532static enum work_action __must_check 67static enum work_action __must_check
533ieee80211_remain_on_channel_timeout(struct ieee80211_work *wk) 68ieee80211_remain_on_channel_timeout(struct ieee80211_work *wk)
534{ 69{
@@ -568,300 +103,6 @@ ieee80211_offchannel_tx(struct ieee80211_work *wk)
568 return WORK_ACT_TIMEOUT; 103 return WORK_ACT_TIMEOUT;
569} 104}
570 105
571static enum work_action __must_check
572ieee80211_assoc_beacon_wait(struct ieee80211_work *wk)
573{
574 if (wk->started)
575 return WORK_ACT_TIMEOUT;
576
577 /*
578 * Wait up to one beacon interval ...
579 * should this be more if we miss one?
580 */
581 printk(KERN_DEBUG "%s: waiting for beacon from %pM\n",
582 wk->sdata->name, wk->filter_ta);
583 wk->timeout = TU_TO_EXP_TIME(wk->assoc.bss->beacon_interval);
584 return WORK_ACT_NONE;
585}
586
587static void ieee80211_auth_challenge(struct ieee80211_work *wk,
588 struct ieee80211_mgmt *mgmt,
589 size_t len)
590{
591 struct ieee80211_sub_if_data *sdata = wk->sdata;
592 u8 *pos;
593 struct ieee802_11_elems elems;
594
595 pos = mgmt->u.auth.variable;
596 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
597 if (!elems.challenge)
598 return;
599 ieee80211_send_auth(sdata, 3, wk->probe_auth.algorithm,
600 elems.challenge - 2, elems.challenge_len + 2,
601 wk->filter_ta, wk->probe_auth.key,
602 wk->probe_auth.key_len, wk->probe_auth.key_idx);
603 wk->probe_auth.transaction = 4;
604}
605
606static enum work_action __must_check
607ieee80211_rx_mgmt_auth(struct ieee80211_work *wk,
608 struct ieee80211_mgmt *mgmt, size_t len)
609{
610 u16 auth_alg, auth_transaction, status_code;
611
612 if (wk->type != IEEE80211_WORK_AUTH)
613 return WORK_ACT_MISMATCH;
614
615 if (len < 24 + 6)
616 return WORK_ACT_NONE;
617
618 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
619 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
620 status_code = le16_to_cpu(mgmt->u.auth.status_code);
621
622 if (auth_alg != wk->probe_auth.algorithm ||
623 auth_transaction != wk->probe_auth.transaction)
624 return WORK_ACT_NONE;
625
626 if (status_code != WLAN_STATUS_SUCCESS) {
627 printk(KERN_DEBUG "%s: %pM denied authentication (status %d)\n",
628 wk->sdata->name, mgmt->sa, status_code);
629 return WORK_ACT_DONE;
630 }
631
632 switch (wk->probe_auth.algorithm) {
633 case WLAN_AUTH_OPEN:
634 case WLAN_AUTH_LEAP:
635 case WLAN_AUTH_FT:
636 break;
637 case WLAN_AUTH_SHARED_KEY:
638 if (wk->probe_auth.transaction != 4) {
639 ieee80211_auth_challenge(wk, mgmt, len);
640 /* need another frame */
641 return WORK_ACT_NONE;
642 }
643 break;
644 default:
645 WARN_ON(1);
646 return WORK_ACT_NONE;
647 }
648
649 printk(KERN_DEBUG "%s: authenticated\n", wk->sdata->name);
650 return WORK_ACT_DONE;
651}
652
653static enum work_action __must_check
654ieee80211_rx_mgmt_assoc_resp(struct ieee80211_work *wk,
655 struct ieee80211_mgmt *mgmt, size_t len,
656 bool reassoc)
657{
658 struct ieee80211_sub_if_data *sdata = wk->sdata;
659 struct ieee80211_local *local = sdata->local;
660 u16 capab_info, status_code, aid;
661 struct ieee802_11_elems elems;
662 u8 *pos;
663
664 if (wk->type != IEEE80211_WORK_ASSOC)
665 return WORK_ACT_MISMATCH;
666
667 /*
668 * AssocResp and ReassocResp have identical structure, so process both
669 * of them in this function.
670 */
671
672 if (len < 24 + 6)
673 return WORK_ACT_NONE;
674
675 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
676 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
677 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
678
679 printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x "
680 "status=%d aid=%d)\n",
681 sdata->name, reassoc ? "Rea" : "A", mgmt->sa,
682 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
683
684 pos = mgmt->u.assoc_resp.variable;
685 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
686
687 if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY &&
688 elems.timeout_int && elems.timeout_int_len == 5 &&
689 elems.timeout_int[0] == WLAN_TIMEOUT_ASSOC_COMEBACK) {
690 u32 tu, ms;
691 tu = get_unaligned_le32(elems.timeout_int + 1);
692 ms = tu * 1024 / 1000;
693 printk(KERN_DEBUG "%s: %pM rejected association temporarily; "
694 "comeback duration %u TU (%u ms)\n",
695 sdata->name, mgmt->sa, tu, ms);
696 wk->timeout = jiffies + msecs_to_jiffies(ms);
697 if (ms > IEEE80211_ASSOC_TIMEOUT)
698 run_again(local, wk->timeout);
699 return WORK_ACT_NONE;
700 }
701
702 if (status_code != WLAN_STATUS_SUCCESS)
703 printk(KERN_DEBUG "%s: %pM denied association (code=%d)\n",
704 sdata->name, mgmt->sa, status_code);
705 else
706 printk(KERN_DEBUG "%s: associated\n", sdata->name);
707
708 return WORK_ACT_DONE;
709}
710
711static enum work_action __must_check
712ieee80211_rx_mgmt_probe_resp(struct ieee80211_work *wk,
713 struct ieee80211_mgmt *mgmt, size_t len,
714 struct ieee80211_rx_status *rx_status)
715{
716 struct ieee80211_sub_if_data *sdata = wk->sdata;
717 struct ieee80211_local *local = sdata->local;
718 size_t baselen;
719
720 ASSERT_WORK_MTX(local);
721
722 if (wk->type != IEEE80211_WORK_DIRECT_PROBE)
723 return WORK_ACT_MISMATCH;
724
725 if (len < 24 + 12)
726 return WORK_ACT_NONE;
727
728 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
729 if (baselen > len)
730 return WORK_ACT_NONE;
731
732 printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name);
733 return WORK_ACT_DONE;
734}
735
736static enum work_action __must_check
737ieee80211_rx_mgmt_beacon(struct ieee80211_work *wk,
738 struct ieee80211_mgmt *mgmt, size_t len)
739{
740 struct ieee80211_sub_if_data *sdata = wk->sdata;
741 struct ieee80211_local *local = sdata->local;
742
743 ASSERT_WORK_MTX(local);
744
745 if (wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
746 return WORK_ACT_MISMATCH;
747
748 if (len < 24 + 12)
749 return WORK_ACT_NONE;
750
751 printk(KERN_DEBUG "%s: beacon received\n", sdata->name);
752 return WORK_ACT_DONE;
753}
754
755static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
756 struct sk_buff *skb)
757{
758 struct ieee80211_rx_status *rx_status;
759 struct ieee80211_mgmt *mgmt;
760 struct ieee80211_work *wk;
761 enum work_action rma = WORK_ACT_NONE;
762 u16 fc;
763
764 rx_status = (struct ieee80211_rx_status *) skb->cb;
765 mgmt = (struct ieee80211_mgmt *) skb->data;
766 fc = le16_to_cpu(mgmt->frame_control);
767
768 mutex_lock(&local->mtx);
769
770 list_for_each_entry(wk, &local->work_list, list) {
771 const u8 *bssid = NULL;
772
773 switch (wk->type) {
774 case IEEE80211_WORK_DIRECT_PROBE:
775 case IEEE80211_WORK_AUTH:
776 case IEEE80211_WORK_ASSOC:
777 case IEEE80211_WORK_ASSOC_BEACON_WAIT:
778 bssid = wk->filter_ta;
779 break;
780 default:
781 continue;
782 }
783
784 /*
785 * Before queuing, we already verified mgmt->sa,
786 * so this is needed just for matching.
787 */
788 if (compare_ether_addr(bssid, mgmt->bssid))
789 continue;
790
791 switch (fc & IEEE80211_FCTL_STYPE) {
792 case IEEE80211_STYPE_BEACON:
793 rma = ieee80211_rx_mgmt_beacon(wk, mgmt, skb->len);
794 break;
795 case IEEE80211_STYPE_PROBE_RESP:
796 rma = ieee80211_rx_mgmt_probe_resp(wk, mgmt, skb->len,
797 rx_status);
798 break;
799 case IEEE80211_STYPE_AUTH:
800 rma = ieee80211_rx_mgmt_auth(wk, mgmt, skb->len);
801 break;
802 case IEEE80211_STYPE_ASSOC_RESP:
803 rma = ieee80211_rx_mgmt_assoc_resp(wk, mgmt,
804 skb->len, false);
805 break;
806 case IEEE80211_STYPE_REASSOC_RESP:
807 rma = ieee80211_rx_mgmt_assoc_resp(wk, mgmt,
808 skb->len, true);
809 break;
810 default:
811 WARN_ON(1);
812 rma = WORK_ACT_NONE;
813 }
814
815 /*
816 * We've either received an unexpected frame, or we have
817 * multiple work items and need to match the frame to the
818 * right one.
819 */
820 if (rma == WORK_ACT_MISMATCH)
821 continue;
822
823 /*
824 * We've processed this frame for that work, so it can't
825 * belong to another work struct.
826 * NB: this is also required for correctness for 'rma'!
827 */
828 break;
829 }
830
831 switch (rma) {
832 case WORK_ACT_MISMATCH:
833 /* ignore this unmatched frame */
834 break;
835 case WORK_ACT_NONE:
836 break;
837 case WORK_ACT_DONE:
838 list_del_rcu(&wk->list);
839 break;
840 default:
841 WARN(1, "unexpected: %d", rma);
842 }
843
844 mutex_unlock(&local->mtx);
845
846 if (rma != WORK_ACT_DONE)
847 goto out;
848
849 switch (wk->done(wk, skb)) {
850 case WORK_DONE_DESTROY:
851 free_work(wk);
852 break;
853 case WORK_DONE_REQUEUE:
854 synchronize_rcu();
855 wk->started = false; /* restart */
856 mutex_lock(&local->mtx);
857 list_add_tail(&wk->list, &local->work_list);
858 mutex_unlock(&local->mtx);
859 }
860
861 out:
862 kfree_skb(skb);
863}
864
865static void ieee80211_work_timer(unsigned long data) 106static void ieee80211_work_timer(unsigned long data)
866{ 107{
867 struct ieee80211_local *local = (void *) data; 108 struct ieee80211_local *local = (void *) data;
@@ -876,7 +117,6 @@ static void ieee80211_work_work(struct work_struct *work)
876{ 117{
877 struct ieee80211_local *local = 118 struct ieee80211_local *local =
878 container_of(work, struct ieee80211_local, work_work); 119 container_of(work, struct ieee80211_local, work_work);
879 struct sk_buff *skb;
880 struct ieee80211_work *wk, *tmp; 120 struct ieee80211_work *wk, *tmp;
881 LIST_HEAD(free_work); 121 LIST_HEAD(free_work);
882 enum work_action rma; 122 enum work_action rma;
@@ -892,10 +132,6 @@ static void ieee80211_work_work(struct work_struct *work)
892 if (WARN(local->suspended, "work scheduled while going to suspend\n")) 132 if (WARN(local->suspended, "work scheduled while going to suspend\n"))
893 return; 133 return;
894 134
895 /* first process frames to avoid timing out while a frame is pending */
896 while ((skb = skb_dequeue(&local->work_skb_queue)))
897 ieee80211_work_rx_queued_mgmt(local, skb);
898
899 mutex_lock(&local->mtx); 135 mutex_lock(&local->mtx);
900 136
901 ieee80211_recalc_idle(local); 137 ieee80211_recalc_idle(local);
@@ -946,24 +182,12 @@ static void ieee80211_work_work(struct work_struct *work)
946 case IEEE80211_WORK_ABORT: 182 case IEEE80211_WORK_ABORT:
947 rma = WORK_ACT_TIMEOUT; 183 rma = WORK_ACT_TIMEOUT;
948 break; 184 break;
949 case IEEE80211_WORK_DIRECT_PROBE:
950 rma = ieee80211_direct_probe(wk);
951 break;
952 case IEEE80211_WORK_AUTH:
953 rma = ieee80211_authenticate(wk);
954 break;
955 case IEEE80211_WORK_ASSOC:
956 rma = ieee80211_associate(wk);
957 break;
958 case IEEE80211_WORK_REMAIN_ON_CHANNEL: 185 case IEEE80211_WORK_REMAIN_ON_CHANNEL:
959 rma = ieee80211_remain_on_channel_timeout(wk); 186 rma = ieee80211_remain_on_channel_timeout(wk);
960 break; 187 break;
961 case IEEE80211_WORK_OFFCHANNEL_TX: 188 case IEEE80211_WORK_OFFCHANNEL_TX:
962 rma = ieee80211_offchannel_tx(wk); 189 rma = ieee80211_offchannel_tx(wk);
963 break; 190 break;
964 case IEEE80211_WORK_ASSOC_BEACON_WAIT:
965 rma = ieee80211_assoc_beacon_wait(wk);
966 break;
967 } 191 }
968 192
969 wk->started = started; 193 wk->started = started;
@@ -1051,7 +275,6 @@ void ieee80211_work_init(struct ieee80211_local *local)
1051 setup_timer(&local->work_timer, ieee80211_work_timer, 275 setup_timer(&local->work_timer, ieee80211_work_timer,
1052 (unsigned long)local); 276 (unsigned long)local);
1053 INIT_WORK(&local->work_work, ieee80211_work_work); 277 INIT_WORK(&local->work_work, ieee80211_work_work);
1054 skb_queue_head_init(&local->work_skb_queue);
1055} 278}
1056 279
1057void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata) 280void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
@@ -1085,43 +308,6 @@ void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
1085 mutex_unlock(&local->mtx); 308 mutex_unlock(&local->mtx);
1086} 309}
1087 310
1088ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata,
1089 struct sk_buff *skb)
1090{
1091 struct ieee80211_local *local = sdata->local;
1092 struct ieee80211_mgmt *mgmt;
1093 struct ieee80211_work *wk;
1094 u16 fc;
1095
1096 if (skb->len < 24)
1097 return RX_DROP_MONITOR;
1098
1099 mgmt = (struct ieee80211_mgmt *) skb->data;
1100 fc = le16_to_cpu(mgmt->frame_control);
1101
1102 list_for_each_entry_rcu(wk, &local->work_list, list) {
1103 if (sdata != wk->sdata)
1104 continue;
1105 if (compare_ether_addr(wk->filter_ta, mgmt->sa))
1106 continue;
1107 if (compare_ether_addr(wk->filter_ta, mgmt->bssid))
1108 continue;
1109
1110 switch (fc & IEEE80211_FCTL_STYPE) {
1111 case IEEE80211_STYPE_AUTH:
1112 case IEEE80211_STYPE_PROBE_RESP:
1113 case IEEE80211_STYPE_ASSOC_RESP:
1114 case IEEE80211_STYPE_REASSOC_RESP:
1115 case IEEE80211_STYPE_BEACON:
1116 skb_queue_tail(&local->work_skb_queue, skb);
1117 ieee80211_queue_work(&local->hw, &local->work_work);
1118 return RX_QUEUED;
1119 }
1120 }
1121
1122 return RX_CONTINUE;
1123}
1124
1125static enum work_done_result ieee80211_remain_done(struct ieee80211_work *wk, 311static enum work_done_result ieee80211_remain_done(struct ieee80211_work *wk,
1126 struct sk_buff *skb) 312 struct sk_buff *skb)
1127{ 313{
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index b758350919ff..0ae23c60968c 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -138,6 +138,10 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
138 if (skb->len < hdrlen + MICHAEL_MIC_LEN) 138 if (skb->len < hdrlen + MICHAEL_MIC_LEN)
139 return RX_DROP_UNUSABLE; 139 return RX_DROP_UNUSABLE;
140 140
141 if (skb_linearize(rx->skb))
142 return RX_DROP_UNUSABLE;
143 hdr = (void *)skb->data;
144
141 data = skb->data + hdrlen; 145 data = skb->data + hdrlen;
142 data_len = skb->len - hdrlen - MICHAEL_MIC_LEN; 146 data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
143 key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; 147 key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
@@ -253,6 +257,11 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
253 if (!rx->sta || skb->len - hdrlen < 12) 257 if (!rx->sta || skb->len - hdrlen < 12)
254 return RX_DROP_UNUSABLE; 258 return RX_DROP_UNUSABLE;
255 259
260 /* it may be possible to optimize this a bit more */
261 if (skb_linearize(rx->skb))
262 return RX_DROP_UNUSABLE;
263 hdr = (void *)skb->data;
264
256 /* 265 /*
257 * Let TKIP code verify IV, but skip decryption. 266 * Let TKIP code verify IV, but skip decryption.
258 * In the case where hardware checks the IV as well, 267 * In the case where hardware checks the IV as well,
@@ -484,6 +493,14 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
484 if (!rx->sta || data_len < 0) 493 if (!rx->sta || data_len < 0)
485 return RX_DROP_UNUSABLE; 494 return RX_DROP_UNUSABLE;
486 495
496 if (status->flag & RX_FLAG_DECRYPTED) {
497 if (!pskb_may_pull(rx->skb, hdrlen + CCMP_HDR_LEN))
498 return RX_DROP_UNUSABLE;
499 } else {
500 if (skb_linearize(rx->skb))
501 return RX_DROP_UNUSABLE;
502 }
503
487 ccmp_hdr2pn(pn, skb->data + hdrlen); 504 ccmp_hdr2pn(pn, skb->data + hdrlen);
488 505
489 queue = rx->security_idx; 506 queue = rx->security_idx;
@@ -509,7 +526,8 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
509 memcpy(key->u.ccmp.rx_pn[queue], pn, CCMP_PN_LEN); 526 memcpy(key->u.ccmp.rx_pn[queue], pn, CCMP_PN_LEN);
510 527
511 /* Remove CCMP header and MIC */ 528 /* Remove CCMP header and MIC */
512 skb_trim(skb, skb->len - CCMP_MIC_LEN); 529 if (pskb_trim(skb, skb->len - CCMP_MIC_LEN))
530 return RX_DROP_UNUSABLE;
513 memmove(skb->data + CCMP_HDR_LEN, skb->data, hdrlen); 531 memmove(skb->data + CCMP_HDR_LEN, skb->data, hdrlen);
514 skb_pull(skb, CCMP_HDR_LEN); 532 skb_pull(skb, CCMP_HDR_LEN);
515 533
@@ -609,6 +627,8 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
609 if (!ieee80211_is_mgmt(hdr->frame_control)) 627 if (!ieee80211_is_mgmt(hdr->frame_control))
610 return RX_CONTINUE; 628 return RX_CONTINUE;
611 629
630 /* management frames are already linear */
631
612 if (skb->len < 24 + sizeof(*mmie)) 632 if (skb->len < 24 + sizeof(*mmie))
613 return RX_DROP_UNUSABLE; 633 return RX_DROP_UNUSABLE;
614 634
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index f8ac4ef0b794..0c6f67e8f2e5 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -103,6 +103,16 @@ config NF_CONNTRACK_EVENTS
103 103
104 If unsure, say `N'. 104 If unsure, say `N'.
105 105
106config NF_CONNTRACK_TIMEOUT
107 bool 'Connection tracking timeout'
108 depends on NETFILTER_ADVANCED
109 help
110 This option enables support for connection tracking timeout
111 extension. This allows you to attach timeout policies to flow
112 via the CT target.
113
114 If unsure, say `N'.
115
106config NF_CONNTRACK_TIMESTAMP 116config NF_CONNTRACK_TIMESTAMP
107 bool 'Connection tracking timestamping' 117 bool 'Connection tracking timestamping'
108 depends on NETFILTER_ADVANCED 118 depends on NETFILTER_ADVANCED
@@ -314,6 +324,17 @@ config NF_CT_NETLINK
314 help 324 help
315 This option enables support for a netlink-based userspace interface 325 This option enables support for a netlink-based userspace interface
316 326
327config NF_CT_NETLINK_TIMEOUT
328 tristate 'Connection tracking timeout tuning via Netlink'
329 select NETFILTER_NETLINK
330 depends on NETFILTER_ADVANCED
331 help
332 This option enables support for connection tracking timeout
333 fine-grain tuning. This allows you to attach specific timeout
334 policies to flows, instead of using the global timeout policy.
335
336 If unsure, say `N'.
337
317endif # NF_CONNTRACK 338endif # NF_CONNTRACK
318 339
319# transparent proxy support 340# transparent proxy support
@@ -524,6 +545,15 @@ config NETFILTER_XT_TARGET_LED
524 For more information on the LEDs available on your system, see 545 For more information on the LEDs available on your system, see
525 Documentation/leds/leds-class.txt 546 Documentation/leds/leds-class.txt
526 547
548config NETFILTER_XT_TARGET_LOG
549 tristate "LOG target support"
550 default m if NETFILTER_ADVANCED=n
551 help
552 This option adds a `LOG' target, which allows you to create rules in
553 any iptables table which records the packet header to the syslog.
554
555 To compile it as a module, choose M here. If unsure, say N.
556
527config NETFILTER_XT_TARGET_MARK 557config NETFILTER_XT_TARGET_MARK
528 tristate '"MARK" target support' 558 tristate '"MARK" target support'
529 depends on NETFILTER_ADVANCED 559 depends on NETFILTER_ADVANCED
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 40f4c3d636c5..ca3676586f51 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -1,6 +1,7 @@
1netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o 1netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o
2 2
3nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o 3nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o
4nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMEOUT) += nf_conntrack_timeout.o
4nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMESTAMP) += nf_conntrack_timestamp.o 5nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMESTAMP) += nf_conntrack_timestamp.o
5nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o 6nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
6 7
@@ -22,6 +23,7 @@ obj-$(CONFIG_NF_CT_PROTO_UDPLITE) += nf_conntrack_proto_udplite.o
22 23
23# netlink interface for nf_conntrack 24# netlink interface for nf_conntrack
24obj-$(CONFIG_NF_CT_NETLINK) += nf_conntrack_netlink.o 25obj-$(CONFIG_NF_CT_NETLINK) += nf_conntrack_netlink.o
26obj-$(CONFIG_NF_CT_NETLINK_TIMEOUT) += nfnetlink_cttimeout.o
25 27
26# connection tracking helpers 28# connection tracking helpers
27nf_conntrack_h323-objs := nf_conntrack_h323_main.o nf_conntrack_h323_asn1.o 29nf_conntrack_h323-objs := nf_conntrack_h323_main.o nf_conntrack_h323_asn1.o
@@ -58,6 +60,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
58obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o 60obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
59obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o 61obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
60obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o 62obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
63obj-$(CONFIG_NETFILTER_XT_TARGET_LOG) += xt_LOG.o
61obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o 64obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
62obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o 65obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
63obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o 66obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index e3e73997c3be..a72a4dff0031 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -442,7 +442,7 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
442 map->timeout = IPSET_NO_TIMEOUT; 442 map->timeout = IPSET_NO_TIMEOUT;
443 443
444 set->data = map; 444 set->data = map;
445 set->family = AF_INET; 445 set->family = NFPROTO_IPV4;
446 446
447 return true; 447 return true;
448} 448}
@@ -550,7 +550,7 @@ static struct ip_set_type bitmap_ip_type __read_mostly = {
550 .protocol = IPSET_PROTOCOL, 550 .protocol = IPSET_PROTOCOL,
551 .features = IPSET_TYPE_IP, 551 .features = IPSET_TYPE_IP,
552 .dimension = IPSET_DIM_ONE, 552 .dimension = IPSET_DIM_ONE,
553 .family = AF_INET, 553 .family = NFPROTO_IPV4,
554 .revision_min = 0, 554 .revision_min = 0,
555 .revision_max = 0, 555 .revision_max = 0,
556 .create = bitmap_ip_create, 556 .create = bitmap_ip_create,
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 56096f544978..81324c12c5be 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -543,7 +543,7 @@ init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
543 map->timeout = IPSET_NO_TIMEOUT; 543 map->timeout = IPSET_NO_TIMEOUT;
544 544
545 set->data = map; 545 set->data = map;
546 set->family = AF_INET; 546 set->family = NFPROTO_IPV4;
547 547
548 return true; 548 return true;
549} 549}
@@ -623,7 +623,7 @@ static struct ip_set_type bitmap_ipmac_type = {
623 .protocol = IPSET_PROTOCOL, 623 .protocol = IPSET_PROTOCOL,
624 .features = IPSET_TYPE_IP | IPSET_TYPE_MAC, 624 .features = IPSET_TYPE_IP | IPSET_TYPE_MAC,
625 .dimension = IPSET_DIM_TWO, 625 .dimension = IPSET_DIM_TWO,
626 .family = AF_INET, 626 .family = NFPROTO_IPV4,
627 .revision_min = 0, 627 .revision_min = 0,
628 .revision_max = 0, 628 .revision_max = 0,
629 .create = bitmap_ipmac_create, 629 .create = bitmap_ipmac_create,
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 29ba93bb94be..382ec28ba72e 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -422,7 +422,7 @@ init_map_port(struct ip_set *set, struct bitmap_port *map,
422 map->timeout = IPSET_NO_TIMEOUT; 422 map->timeout = IPSET_NO_TIMEOUT;
423 423
424 set->data = map; 424 set->data = map;
425 set->family = AF_UNSPEC; 425 set->family = NFPROTO_UNSPEC;
426 426
427 return true; 427 return true;
428} 428}
@@ -483,7 +483,7 @@ static struct ip_set_type bitmap_port_type = {
483 .protocol = IPSET_PROTOCOL, 483 .protocol = IPSET_PROTOCOL,
484 .features = IPSET_TYPE_PORT, 484 .features = IPSET_TYPE_PORT,
485 .dimension = IPSET_DIM_ONE, 485 .dimension = IPSET_DIM_ONE,
486 .family = AF_UNSPEC, 486 .family = NFPROTO_UNSPEC,
487 .revision_min = 0, 487 .revision_min = 0,
488 .revision_max = 0, 488 .revision_max = 0,
489 .create = bitmap_port_create, 489 .create = bitmap_port_create,
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 32dbf0fa89db..e6c1c9605a58 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -69,7 +69,7 @@ find_set_type(const char *name, u8 family, u8 revision)
69 69
70 list_for_each_entry_rcu(type, &ip_set_type_list, list) 70 list_for_each_entry_rcu(type, &ip_set_type_list, list)
71 if (STREQ(type->name, name) && 71 if (STREQ(type->name, name) &&
72 (type->family == family || type->family == AF_UNSPEC) && 72 (type->family == family || type->family == NFPROTO_UNSPEC) &&
73 revision >= type->revision_min && 73 revision >= type->revision_min &&
74 revision <= type->revision_max) 74 revision <= type->revision_max)
75 return type; 75 return type;
@@ -149,7 +149,7 @@ __find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max,
149 rcu_read_lock(); 149 rcu_read_lock();
150 list_for_each_entry_rcu(type, &ip_set_type_list, list) 150 list_for_each_entry_rcu(type, &ip_set_type_list, list)
151 if (STREQ(type->name, name) && 151 if (STREQ(type->name, name) &&
152 (type->family == family || type->family == AF_UNSPEC)) { 152 (type->family == family || type->family == NFPROTO_UNSPEC)) {
153 found = true; 153 found = true;
154 if (type->revision_min < *min) 154 if (type->revision_min < *min)
155 *min = type->revision_min; 155 *min = type->revision_min;
@@ -164,8 +164,8 @@ __find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max,
164 __find_set_type_minmax(name, family, min, max, true); 164 __find_set_type_minmax(name, family, min, max, true);
165} 165}
166 166
167#define family_name(f) ((f) == AF_INET ? "inet" : \ 167#define family_name(f) ((f) == NFPROTO_IPV4 ? "inet" : \
168 (f) == AF_INET6 ? "inet6" : "any") 168 (f) == NFPROTO_IPV6 ? "inet6" : "any")
169 169
170/* Register a set type structure. The type is identified by 170/* Register a set type structure. The type is identified by
171 * the unique triple of name, family and revision. 171 * the unique triple of name, family and revision.
@@ -354,7 +354,7 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
354 pr_debug("set %s, index %u\n", set->name, index); 354 pr_debug("set %s, index %u\n", set->name, index);
355 355
356 if (opt->dim < set->type->dimension || 356 if (opt->dim < set->type->dimension ||
357 !(opt->family == set->family || set->family == AF_UNSPEC)) 357 !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
358 return 0; 358 return 0;
359 359
360 read_lock_bh(&set->lock); 360 read_lock_bh(&set->lock);
@@ -387,7 +387,7 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
387 pr_debug("set %s, index %u\n", set->name, index); 387 pr_debug("set %s, index %u\n", set->name, index);
388 388
389 if (opt->dim < set->type->dimension || 389 if (opt->dim < set->type->dimension ||
390 !(opt->family == set->family || set->family == AF_UNSPEC)) 390 !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
391 return 0; 391 return 0;
392 392
393 write_lock_bh(&set->lock); 393 write_lock_bh(&set->lock);
@@ -410,7 +410,7 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
410 pr_debug("set %s, index %u\n", set->name, index); 410 pr_debug("set %s, index %u\n", set->name, index);
411 411
412 if (opt->dim < set->type->dimension || 412 if (opt->dim < set->type->dimension ||
413 !(opt->family == set->family || set->family == AF_UNSPEC)) 413 !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
414 return 0; 414 return 0;
415 415
416 write_lock_bh(&set->lock); 416 write_lock_bh(&set->lock);
@@ -575,7 +575,7 @@ start_msg(struct sk_buff *skb, u32 pid, u32 seq, unsigned int flags,
575 return NULL; 575 return NULL;
576 576
577 nfmsg = nlmsg_data(nlh); 577 nfmsg = nlmsg_data(nlh);
578 nfmsg->nfgen_family = AF_INET; 578 nfmsg->nfgen_family = NFPROTO_IPV4;
579 nfmsg->version = NFNETLINK_V0; 579 nfmsg->version = NFNETLINK_V0;
580 nfmsg->res_id = 0; 580 nfmsg->res_id = 0;
581 581
@@ -1162,9 +1162,13 @@ ip_set_dump(struct sock *ctnl, struct sk_buff *skb,
1162 if (unlikely(protocol_failed(attr))) 1162 if (unlikely(protocol_failed(attr)))
1163 return -IPSET_ERR_PROTOCOL; 1163 return -IPSET_ERR_PROTOCOL;
1164 1164
1165 return netlink_dump_start(ctnl, skb, nlh, 1165 {
1166 ip_set_dump_start, 1166 struct netlink_dump_control c = {
1167 ip_set_dump_done, 0); 1167 .dump = ip_set_dump_start,
1168 .done = ip_set_dump_done,
1169 };
1170 return netlink_dump_start(ctnl, skb, nlh, &c);
1171 }
1168} 1172}
1169 1173
1170/* Add, del and test */ 1174/* Add, del and test */
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
index 1f03556666f4..6fdf88ae2353 100644
--- a/net/netfilter/ipset/ip_set_getport.c
+++ b/net/netfilter/ipset/ip_set_getport.c
@@ -136,10 +136,10 @@ ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, __be16 *port)
136 u8 proto; 136 u8 proto;
137 137
138 switch (pf) { 138 switch (pf) {
139 case AF_INET: 139 case NFPROTO_IPV4:
140 ret = ip_set_get_ip4_port(skb, src, port, &proto); 140 ret = ip_set_get_ip4_port(skb, src, port, &proto);
141 break; 141 break;
142 case AF_INET6: 142 case NFPROTO_IPV6:
143 ret = ip_set_get_ip6_port(skb, src, port, &proto); 143 ret = ip_set_get_ip6_port(skb, src, port, &proto);
144 break; 144 break;
145 default: 145 default:
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index 4015fcaf87bc..5139dea6019e 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -366,11 +366,11 @@ hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
366 u8 netmask, hbits; 366 u8 netmask, hbits;
367 struct ip_set_hash *h; 367 struct ip_set_hash *h;
368 368
369 if (!(set->family == AF_INET || set->family == AF_INET6)) 369 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
370 return -IPSET_ERR_INVALID_FAMILY; 370 return -IPSET_ERR_INVALID_FAMILY;
371 netmask = set->family == AF_INET ? 32 : 128; 371 netmask = set->family == NFPROTO_IPV4 ? 32 : 128;
372 pr_debug("Create set %s with family %s\n", 372 pr_debug("Create set %s with family %s\n",
373 set->name, set->family == AF_INET ? "inet" : "inet6"); 373 set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6");
374 374
375 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || 375 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
376 !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) || 376 !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
@@ -389,8 +389,8 @@ hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
389 if (tb[IPSET_ATTR_NETMASK]) { 389 if (tb[IPSET_ATTR_NETMASK]) {
390 netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]); 390 netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
391 391
392 if ((set->family == AF_INET && netmask > 32) || 392 if ((set->family == NFPROTO_IPV4 && netmask > 32) ||
393 (set->family == AF_INET6 && netmask > 128) || 393 (set->family == NFPROTO_IPV6 && netmask > 128) ||
394 netmask == 0) 394 netmask == 0)
395 return -IPSET_ERR_INVALID_NETMASK; 395 return -IPSET_ERR_INVALID_NETMASK;
396 } 396 }
@@ -419,15 +419,15 @@ hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
419 if (tb[IPSET_ATTR_TIMEOUT]) { 419 if (tb[IPSET_ATTR_TIMEOUT]) {
420 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 420 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
421 421
422 set->variant = set->family == AF_INET 422 set->variant = set->family == NFPROTO_IPV4
423 ? &hash_ip4_tvariant : &hash_ip6_tvariant; 423 ? &hash_ip4_tvariant : &hash_ip6_tvariant;
424 424
425 if (set->family == AF_INET) 425 if (set->family == NFPROTO_IPV4)
426 hash_ip4_gc_init(set); 426 hash_ip4_gc_init(set);
427 else 427 else
428 hash_ip6_gc_init(set); 428 hash_ip6_gc_init(set);
429 } else { 429 } else {
430 set->variant = set->family == AF_INET 430 set->variant = set->family == NFPROTO_IPV4
431 ? &hash_ip4_variant : &hash_ip6_variant; 431 ? &hash_ip4_variant : &hash_ip6_variant;
432 } 432 }
433 433
@@ -443,7 +443,7 @@ static struct ip_set_type hash_ip_type __read_mostly = {
443 .protocol = IPSET_PROTOCOL, 443 .protocol = IPSET_PROTOCOL,
444 .features = IPSET_TYPE_IP, 444 .features = IPSET_TYPE_IP,
445 .dimension = IPSET_DIM_ONE, 445 .dimension = IPSET_DIM_ONE,
446 .family = AF_UNSPEC, 446 .family = NFPROTO_UNSPEC,
447 .revision_min = 0, 447 .revision_min = 0,
448 .revision_max = 0, 448 .revision_max = 0,
449 .create = hash_ip_create, 449 .create = hash_ip_create,
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 37d667e3f6f8..9c27e249c171 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -450,7 +450,7 @@ hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
450 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 450 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
451 u8 hbits; 451 u8 hbits;
452 452
453 if (!(set->family == AF_INET || set->family == AF_INET6)) 453 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
454 return -IPSET_ERR_INVALID_FAMILY; 454 return -IPSET_ERR_INVALID_FAMILY;
455 455
456 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || 456 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
@@ -490,15 +490,15 @@ hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
490 if (tb[IPSET_ATTR_TIMEOUT]) { 490 if (tb[IPSET_ATTR_TIMEOUT]) {
491 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 491 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
492 492
493 set->variant = set->family == AF_INET 493 set->variant = set->family == NFPROTO_IPV4
494 ? &hash_ipport4_tvariant : &hash_ipport6_tvariant; 494 ? &hash_ipport4_tvariant : &hash_ipport6_tvariant;
495 495
496 if (set->family == AF_INET) 496 if (set->family == NFPROTO_IPV4)
497 hash_ipport4_gc_init(set); 497 hash_ipport4_gc_init(set);
498 else 498 else
499 hash_ipport6_gc_init(set); 499 hash_ipport6_gc_init(set);
500 } else { 500 } else {
501 set->variant = set->family == AF_INET 501 set->variant = set->family == NFPROTO_IPV4
502 ? &hash_ipport4_variant : &hash_ipport6_variant; 502 ? &hash_ipport4_variant : &hash_ipport6_variant;
503 } 503 }
504 504
@@ -514,7 +514,7 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
514 .protocol = IPSET_PROTOCOL, 514 .protocol = IPSET_PROTOCOL,
515 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT, 515 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
516 .dimension = IPSET_DIM_TWO, 516 .dimension = IPSET_DIM_TWO,
517 .family = AF_UNSPEC, 517 .family = NFPROTO_UNSPEC,
518 .revision_min = 0, 518 .revision_min = 0,
519 .revision_max = 1, /* SCTP and UDPLITE support added */ 519 .revision_max = 1, /* SCTP and UDPLITE support added */
520 .create = hash_ipport_create, 520 .create = hash_ipport_create,
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index e69e2718fbe1..9134057c0728 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -468,7 +468,7 @@ hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
468 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 468 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
469 u8 hbits; 469 u8 hbits;
470 470
471 if (!(set->family == AF_INET || set->family == AF_INET6)) 471 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
472 return -IPSET_ERR_INVALID_FAMILY; 472 return -IPSET_ERR_INVALID_FAMILY;
473 473
474 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || 474 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
@@ -508,15 +508,15 @@ hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
508 if (tb[IPSET_ATTR_TIMEOUT]) { 508 if (tb[IPSET_ATTR_TIMEOUT]) {
509 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 509 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
510 510
511 set->variant = set->family == AF_INET 511 set->variant = set->family == NFPROTO_IPV4
512 ? &hash_ipportip4_tvariant : &hash_ipportip6_tvariant; 512 ? &hash_ipportip4_tvariant : &hash_ipportip6_tvariant;
513 513
514 if (set->family == AF_INET) 514 if (set->family == NFPROTO_IPV4)
515 hash_ipportip4_gc_init(set); 515 hash_ipportip4_gc_init(set);
516 else 516 else
517 hash_ipportip6_gc_init(set); 517 hash_ipportip6_gc_init(set);
518 } else { 518 } else {
519 set->variant = set->family == AF_INET 519 set->variant = set->family == NFPROTO_IPV4
520 ? &hash_ipportip4_variant : &hash_ipportip6_variant; 520 ? &hash_ipportip4_variant : &hash_ipportip6_variant;
521 } 521 }
522 522
@@ -532,7 +532,7 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
532 .protocol = IPSET_PROTOCOL, 532 .protocol = IPSET_PROTOCOL,
533 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2, 533 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
534 .dimension = IPSET_DIM_THREE, 534 .dimension = IPSET_DIM_THREE,
535 .family = AF_UNSPEC, 535 .family = NFPROTO_UNSPEC,
536 .revision_min = 0, 536 .revision_min = 0,
537 .revision_max = 1, /* SCTP and UDPLITE support added */ 537 .revision_max = 1, /* SCTP and UDPLITE support added */
538 .create = hash_ipportip_create, 538 .create = hash_ipportip_create,
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 64199b4e93c9..5d05e6969862 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -41,12 +41,19 @@ hash_ipportnet_same_set(const struct ip_set *a, const struct ip_set *b);
41 41
42/* The type variant functions: IPv4 */ 42/* The type variant functions: IPv4 */
43 43
44/* We squeeze the "nomatch" flag into cidr: we don't support cidr == 0
45 * However this way we have to store internally cidr - 1,
46 * dancing back and forth.
47 */
48#define IP_SET_HASH_WITH_NETS_PACKED
49
44/* Member elements without timeout */ 50/* Member elements without timeout */
45struct hash_ipportnet4_elem { 51struct hash_ipportnet4_elem {
46 __be32 ip; 52 __be32 ip;
47 __be32 ip2; 53 __be32 ip2;
48 __be16 port; 54 __be16 port;
49 u8 cidr; 55 u8 cidr:7;
56 u8 nomatch:1;
50 u8 proto; 57 u8 proto;
51}; 58};
52 59
@@ -55,7 +62,8 @@ struct hash_ipportnet4_telem {
55 __be32 ip; 62 __be32 ip;
56 __be32 ip2; 63 __be32 ip2;
57 __be16 port; 64 __be16 port;
58 u8 cidr; 65 u8 cidr:7;
66 u8 nomatch:1;
59 u8 proto; 67 u8 proto;
60 unsigned long timeout; 68 unsigned long timeout;
61}; 69};
@@ -86,10 +94,22 @@ hash_ipportnet4_data_copy(struct hash_ipportnet4_elem *dst,
86} 94}
87 95
88static inline void 96static inline void
97hash_ipportnet4_data_flags(struct hash_ipportnet4_elem *dst, u32 flags)
98{
99 dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
100}
101
102static inline bool
103hash_ipportnet4_data_match(const struct hash_ipportnet4_elem *elem)
104{
105 return !elem->nomatch;
106}
107
108static inline void
89hash_ipportnet4_data_netmask(struct hash_ipportnet4_elem *elem, u8 cidr) 109hash_ipportnet4_data_netmask(struct hash_ipportnet4_elem *elem, u8 cidr)
90{ 110{
91 elem->ip2 &= ip_set_netmask(cidr); 111 elem->ip2 &= ip_set_netmask(cidr);
92 elem->cidr = cidr; 112 elem->cidr = cidr - 1;
93} 113}
94 114
95static inline void 115static inline void
@@ -102,11 +122,15 @@ static bool
102hash_ipportnet4_data_list(struct sk_buff *skb, 122hash_ipportnet4_data_list(struct sk_buff *skb,
103 const struct hash_ipportnet4_elem *data) 123 const struct hash_ipportnet4_elem *data)
104{ 124{
125 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
126
105 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); 127 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
106 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2); 128 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
107 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 129 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
108 NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr); 130 NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
109 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 131 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
132 if (flags)
133 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
110 return 0; 134 return 0;
111 135
112nla_put_failure: 136nla_put_failure:
@@ -119,14 +143,17 @@ hash_ipportnet4_data_tlist(struct sk_buff *skb,
119{ 143{
120 const struct hash_ipportnet4_telem *tdata = 144 const struct hash_ipportnet4_telem *tdata =
121 (const struct hash_ipportnet4_telem *)data; 145 (const struct hash_ipportnet4_telem *)data;
146 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
122 147
123 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); 148 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
124 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2); 149 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
125 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); 150 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
126 NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr); 151 NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
127 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 152 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
128 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 153 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
129 htonl(ip_set_timeout_get(tdata->timeout))); 154 htonl(ip_set_timeout_get(tdata->timeout)));
155 if (flags)
156 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
130 157
131 return 0; 158 return 0;
132 159
@@ -158,13 +185,11 @@ hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
158 const struct ip_set_hash *h = set->data; 185 const struct ip_set_hash *h = set->data;
159 ipset_adtfn adtfn = set->variant->adt[adt]; 186 ipset_adtfn adtfn = set->variant->adt[adt];
160 struct hash_ipportnet4_elem data = { 187 struct hash_ipportnet4_elem data = {
161 .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK 188 .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
162 }; 189 };
163 190
164 if (data.cidr == 0)
165 return -EINVAL;
166 if (adt == IPSET_TEST) 191 if (adt == IPSET_TEST)
167 data.cidr = HOST_MASK; 192 data.cidr = HOST_MASK - 1;
168 193
169 if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, 194 if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
170 &data.port, &data.proto)) 195 &data.port, &data.proto))
@@ -172,7 +197,7 @@ hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
172 197
173 ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip); 198 ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip);
174 ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2); 199 ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2);
175 data.ip2 &= ip_set_netmask(data.cidr); 200 data.ip2 &= ip_set_netmask(data.cidr + 1);
176 201
177 return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); 202 return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
178} 203}
@@ -183,17 +208,19 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
183{ 208{
184 const struct ip_set_hash *h = set->data; 209 const struct ip_set_hash *h = set->data;
185 ipset_adtfn adtfn = set->variant->adt[adt]; 210 ipset_adtfn adtfn = set->variant->adt[adt];
186 struct hash_ipportnet4_elem data = { .cidr = HOST_MASK }; 211 struct hash_ipportnet4_elem data = { .cidr = HOST_MASK - 1 };
187 u32 ip, ip_to = 0, p = 0, port, port_to; 212 u32 ip, ip_to = 0, p = 0, port, port_to;
188 u32 ip2_from = 0, ip2_to, ip2_last, ip2; 213 u32 ip2_from = 0, ip2_to, ip2_last, ip2;
189 u32 timeout = h->timeout; 214 u32 timeout = h->timeout;
190 bool with_ports = false; 215 bool with_ports = false;
216 u8 cidr;
191 int ret; 217 int ret;
192 218
193 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || 219 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
194 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || 220 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
195 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || 221 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
196 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) 222 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
223 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
197 return -IPSET_ERR_PROTOCOL; 224 return -IPSET_ERR_PROTOCOL;
198 225
199 if (tb[IPSET_ATTR_LINENO]) 226 if (tb[IPSET_ATTR_LINENO])
@@ -208,9 +235,10 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
208 return ret; 235 return ret;
209 236
210 if (tb[IPSET_ATTR_CIDR2]) { 237 if (tb[IPSET_ATTR_CIDR2]) {
211 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]); 238 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
212 if (!data.cidr) 239 if (!cidr || cidr > HOST_MASK)
213 return -IPSET_ERR_INVALID_CIDR; 240 return -IPSET_ERR_INVALID_CIDR;
241 data.cidr = cidr - 1;
214 } 242 }
215 243
216 if (tb[IPSET_ATTR_PORT]) 244 if (tb[IPSET_ATTR_PORT])
@@ -236,12 +264,18 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
236 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 264 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
237 } 265 }
238 266
267 if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
268 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
269 if (cadt_flags & IPSET_FLAG_NOMATCH)
270 flags |= (cadt_flags << 16);
271 }
272
239 with_ports = with_ports && tb[IPSET_ATTR_PORT_TO]; 273 with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
240 if (adt == IPSET_TEST || 274 if (adt == IPSET_TEST ||
241 !(tb[IPSET_ATTR_CIDR] || tb[IPSET_ATTR_IP_TO] || with_ports || 275 !(tb[IPSET_ATTR_CIDR] || tb[IPSET_ATTR_IP_TO] || with_ports ||
242 tb[IPSET_ATTR_IP2_TO])) { 276 tb[IPSET_ATTR_IP2_TO])) {
243 data.ip = htonl(ip); 277 data.ip = htonl(ip);
244 data.ip2 = htonl(ip2_from & ip_set_hostmask(data.cidr)); 278 data.ip2 = htonl(ip2_from & ip_set_hostmask(data.cidr + 1));
245 ret = adtfn(set, &data, timeout, flags); 279 ret = adtfn(set, &data, timeout, flags);
246 return ip_set_eexist(ret, flags) ? 0 : ret; 280 return ip_set_eexist(ret, flags) ? 0 : ret;
247 } 281 }
@@ -275,7 +309,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
275 if (ip2_from + UINT_MAX == ip2_to) 309 if (ip2_from + UINT_MAX == ip2_to)
276 return -IPSET_ERR_HASH_RANGE; 310 return -IPSET_ERR_HASH_RANGE;
277 } else { 311 } else {
278 ip_set_mask_from_to(ip2_from, ip2_to, data.cidr); 312 ip_set_mask_from_to(ip2_from, ip2_to, data.cidr + 1);
279 } 313 }
280 314
281 if (retried) 315 if (retried)
@@ -290,7 +324,8 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
290 while (!after(ip2, ip2_to)) { 324 while (!after(ip2, ip2_to)) {
291 data.ip2 = htonl(ip2); 325 data.ip2 = htonl(ip2);
292 ip2_last = ip_set_range_to_cidr(ip2, ip2_to, 326 ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
293 &data.cidr); 327 &cidr);
328 data.cidr = cidr - 1;
294 ret = adtfn(set, &data, timeout, flags); 329 ret = adtfn(set, &data, timeout, flags);
295 330
296 if (ret && !ip_set_eexist(ret, flags)) 331 if (ret && !ip_set_eexist(ret, flags))
@@ -321,7 +356,8 @@ struct hash_ipportnet6_elem {
321 union nf_inet_addr ip; 356 union nf_inet_addr ip;
322 union nf_inet_addr ip2; 357 union nf_inet_addr ip2;
323 __be16 port; 358 __be16 port;
324 u8 cidr; 359 u8 cidr:7;
360 u8 nomatch:1;
325 u8 proto; 361 u8 proto;
326}; 362};
327 363
@@ -329,7 +365,8 @@ struct hash_ipportnet6_telem {
329 union nf_inet_addr ip; 365 union nf_inet_addr ip;
330 union nf_inet_addr ip2; 366 union nf_inet_addr ip2;
331 __be16 port; 367 __be16 port;
332 u8 cidr; 368 u8 cidr:7;
369 u8 nomatch:1;
333 u8 proto; 370 u8 proto;
334 unsigned long timeout; 371 unsigned long timeout;
335}; 372};
@@ -360,6 +397,18 @@ hash_ipportnet6_data_copy(struct hash_ipportnet6_elem *dst,
360} 397}
361 398
362static inline void 399static inline void
400hash_ipportnet6_data_flags(struct hash_ipportnet6_elem *dst, u32 flags)
401{
402 dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
403}
404
405static inline bool
406hash_ipportnet6_data_match(const struct hash_ipportnet6_elem *elem)
407{
408 return !elem->nomatch;
409}
410
411static inline void
363hash_ipportnet6_data_zero_out(struct hash_ipportnet6_elem *elem) 412hash_ipportnet6_data_zero_out(struct hash_ipportnet6_elem *elem)
364{ 413{
365 elem->proto = 0; 414 elem->proto = 0;
@@ -378,18 +427,22 @@ static inline void
378hash_ipportnet6_data_netmask(struct hash_ipportnet6_elem *elem, u8 cidr) 427hash_ipportnet6_data_netmask(struct hash_ipportnet6_elem *elem, u8 cidr)
379{ 428{
380 ip6_netmask(&elem->ip2, cidr); 429 ip6_netmask(&elem->ip2, cidr);
381 elem->cidr = cidr; 430 elem->cidr = cidr - 1;
382} 431}
383 432
384static bool 433static bool
385hash_ipportnet6_data_list(struct sk_buff *skb, 434hash_ipportnet6_data_list(struct sk_buff *skb,
386 const struct hash_ipportnet6_elem *data) 435 const struct hash_ipportnet6_elem *data)
387{ 436{
437 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
438
388 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); 439 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
389 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); 440 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
390 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 441 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
391 NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr); 442 NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
392 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 443 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
444 if (flags)
445 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
393 return 0; 446 return 0;
394 447
395nla_put_failure: 448nla_put_failure:
@@ -402,14 +455,17 @@ hash_ipportnet6_data_tlist(struct sk_buff *skb,
402{ 455{
403 const struct hash_ipportnet6_telem *e = 456 const struct hash_ipportnet6_telem *e =
404 (const struct hash_ipportnet6_telem *)data; 457 (const struct hash_ipportnet6_telem *)data;
458 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
405 459
406 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); 460 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
407 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); 461 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
408 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 462 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
409 NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr); 463 NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
410 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 464 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
411 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 465 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
412 htonl(ip_set_timeout_get(e->timeout))); 466 htonl(ip_set_timeout_get(e->timeout)));
467 if (flags)
468 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
413 return 0; 469 return 0;
414 470
415nla_put_failure: 471nla_put_failure:
@@ -438,13 +494,11 @@ hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
438 const struct ip_set_hash *h = set->data; 494 const struct ip_set_hash *h = set->data;
439 ipset_adtfn adtfn = set->variant->adt[adt]; 495 ipset_adtfn adtfn = set->variant->adt[adt];
440 struct hash_ipportnet6_elem data = { 496 struct hash_ipportnet6_elem data = {
441 .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK 497 .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
442 }; 498 };
443 499
444 if (data.cidr == 0)
445 return -EINVAL;
446 if (adt == IPSET_TEST) 500 if (adt == IPSET_TEST)
447 data.cidr = HOST_MASK; 501 data.cidr = HOST_MASK - 1;
448 502
449 if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, 503 if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
450 &data.port, &data.proto)) 504 &data.port, &data.proto))
@@ -452,7 +506,7 @@ hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
452 506
453 ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6); 507 ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
454 ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2.in6); 508 ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
455 ip6_netmask(&data.ip2, data.cidr); 509 ip6_netmask(&data.ip2, data.cidr + 1);
456 510
457 return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); 511 return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
458} 512}
@@ -463,16 +517,18 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
463{ 517{
464 const struct ip_set_hash *h = set->data; 518 const struct ip_set_hash *h = set->data;
465 ipset_adtfn adtfn = set->variant->adt[adt]; 519 ipset_adtfn adtfn = set->variant->adt[adt];
466 struct hash_ipportnet6_elem data = { .cidr = HOST_MASK }; 520 struct hash_ipportnet6_elem data = { .cidr = HOST_MASK - 1 };
467 u32 port, port_to; 521 u32 port, port_to;
468 u32 timeout = h->timeout; 522 u32 timeout = h->timeout;
469 bool with_ports = false; 523 bool with_ports = false;
524 u8 cidr;
470 int ret; 525 int ret;
471 526
472 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || 527 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
473 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || 528 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
474 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || 529 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
475 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || 530 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
531 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
476 tb[IPSET_ATTR_IP_TO] || 532 tb[IPSET_ATTR_IP_TO] ||
477 tb[IPSET_ATTR_CIDR])) 533 tb[IPSET_ATTR_CIDR]))
478 return -IPSET_ERR_PROTOCOL; 534 return -IPSET_ERR_PROTOCOL;
@@ -490,13 +546,14 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
490 if (ret) 546 if (ret)
491 return ret; 547 return ret;
492 548
493 if (tb[IPSET_ATTR_CIDR2]) 549 if (tb[IPSET_ATTR_CIDR2]) {
494 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]); 550 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
495 551 if (!cidr || cidr > HOST_MASK)
496 if (!data.cidr) 552 return -IPSET_ERR_INVALID_CIDR;
497 return -IPSET_ERR_INVALID_CIDR; 553 data.cidr = cidr - 1;
554 }
498 555
499 ip6_netmask(&data.ip2, data.cidr); 556 ip6_netmask(&data.ip2, data.cidr + 1);
500 557
501 if (tb[IPSET_ATTR_PORT]) 558 if (tb[IPSET_ATTR_PORT])
502 data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); 559 data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
@@ -521,6 +578,12 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
521 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 578 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
522 } 579 }
523 580
581 if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
582 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
583 if (cadt_flags & IPSET_FLAG_NOMATCH)
584 flags |= (cadt_flags << 16);
585 }
586
524 if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { 587 if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
525 ret = adtfn(set, &data, timeout, flags); 588 ret = adtfn(set, &data, timeout, flags);
526 return ip_set_eexist(ret, flags) ? 0 : ret; 589 return ip_set_eexist(ret, flags) ? 0 : ret;
@@ -554,7 +617,7 @@ hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
554 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 617 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
555 u8 hbits; 618 u8 hbits;
556 619
557 if (!(set->family == AF_INET || set->family == AF_INET6)) 620 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
558 return -IPSET_ERR_INVALID_FAMILY; 621 return -IPSET_ERR_INVALID_FAMILY;
559 622
560 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || 623 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
@@ -573,7 +636,7 @@ hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
573 636
574 h = kzalloc(sizeof(*h) 637 h = kzalloc(sizeof(*h)
575 + sizeof(struct ip_set_hash_nets) 638 + sizeof(struct ip_set_hash_nets)
576 * (set->family == AF_INET ? 32 : 128), GFP_KERNEL); 639 * (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL);
577 if (!h) 640 if (!h)
578 return -ENOMEM; 641 return -ENOMEM;
579 642
@@ -596,16 +659,16 @@ hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
596 if (tb[IPSET_ATTR_TIMEOUT]) { 659 if (tb[IPSET_ATTR_TIMEOUT]) {
597 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 660 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
598 661
599 set->variant = set->family == AF_INET 662 set->variant = set->family == NFPROTO_IPV4
600 ? &hash_ipportnet4_tvariant 663 ? &hash_ipportnet4_tvariant
601 : &hash_ipportnet6_tvariant; 664 : &hash_ipportnet6_tvariant;
602 665
603 if (set->family == AF_INET) 666 if (set->family == NFPROTO_IPV4)
604 hash_ipportnet4_gc_init(set); 667 hash_ipportnet4_gc_init(set);
605 else 668 else
606 hash_ipportnet6_gc_init(set); 669 hash_ipportnet6_gc_init(set);
607 } else { 670 } else {
608 set->variant = set->family == AF_INET 671 set->variant = set->family == NFPROTO_IPV4
609 ? &hash_ipportnet4_variant : &hash_ipportnet6_variant; 672 ? &hash_ipportnet4_variant : &hash_ipportnet6_variant;
610 } 673 }
611 674
@@ -621,10 +684,11 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
621 .protocol = IPSET_PROTOCOL, 684 .protocol = IPSET_PROTOCOL,
622 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2, 685 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
623 .dimension = IPSET_DIM_THREE, 686 .dimension = IPSET_DIM_THREE,
624 .family = AF_UNSPEC, 687 .family = NFPROTO_UNSPEC,
625 .revision_min = 0, 688 .revision_min = 0,
626 /* 1 SCTP and UDPLITE support added */ 689 /* 1 SCTP and UDPLITE support added */
627 .revision_max = 2, /* Range as input support for IPv4 added */ 690 /* 2 Range as input support for IPv4 added */
691 .revision_max = 3, /* nomatch flag support added */
628 .create = hash_ipportnet_create, 692 .create = hash_ipportnet_create,
629 .create_policy = { 693 .create_policy = {
630 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 694 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
@@ -643,6 +707,7 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
643 [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, 707 [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
644 [IPSET_ATTR_CIDR2] = { .type = NLA_U8 }, 708 [IPSET_ATTR_CIDR2] = { .type = NLA_U8 },
645 [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, 709 [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
710 [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
646 [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, 711 [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
647 [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, 712 [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
648 }, 713 },
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 28988196775e..7c3d945517cf 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -43,7 +43,7 @@ hash_net_same_set(const struct ip_set *a, const struct ip_set *b);
43struct hash_net4_elem { 43struct hash_net4_elem {
44 __be32 ip; 44 __be32 ip;
45 u16 padding0; 45 u16 padding0;
46 u8 padding1; 46 u8 nomatch;
47 u8 cidr; 47 u8 cidr;
48}; 48};
49 49
@@ -51,7 +51,7 @@ struct hash_net4_elem {
51struct hash_net4_telem { 51struct hash_net4_telem {
52 __be32 ip; 52 __be32 ip;
53 u16 padding0; 53 u16 padding0;
54 u8 padding1; 54 u8 nomatch;
55 u8 cidr; 55 u8 cidr;
56 unsigned long timeout; 56 unsigned long timeout;
57}; 57};
@@ -61,7 +61,8 @@ hash_net4_data_equal(const struct hash_net4_elem *ip1,
61 const struct hash_net4_elem *ip2, 61 const struct hash_net4_elem *ip2,
62 u32 *multi) 62 u32 *multi)
63{ 63{
64 return ip1->ip == ip2->ip && ip1->cidr == ip2->cidr; 64 return ip1->ip == ip2->ip &&
65 ip1->cidr == ip2->cidr;
65} 66}
66 67
67static inline bool 68static inline bool
@@ -76,6 +77,19 @@ hash_net4_data_copy(struct hash_net4_elem *dst,
76{ 77{
77 dst->ip = src->ip; 78 dst->ip = src->ip;
78 dst->cidr = src->cidr; 79 dst->cidr = src->cidr;
80 dst->nomatch = src->nomatch;
81}
82
83static inline void
84hash_net4_data_flags(struct hash_net4_elem *dst, u32 flags)
85{
86 dst->nomatch = flags & IPSET_FLAG_NOMATCH;
87}
88
89static inline bool
90hash_net4_data_match(const struct hash_net4_elem *elem)
91{
92 return !elem->nomatch;
79} 93}
80 94
81static inline void 95static inline void
@@ -95,8 +109,12 @@ hash_net4_data_zero_out(struct hash_net4_elem *elem)
95static bool 109static bool
96hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data) 110hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data)
97{ 111{
112 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
113
98 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); 114 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
99 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 115 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
116 if (flags)
117 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
100 return 0; 118 return 0;
101 119
102nla_put_failure: 120nla_put_failure:
@@ -108,11 +126,14 @@ hash_net4_data_tlist(struct sk_buff *skb, const struct hash_net4_elem *data)
108{ 126{
109 const struct hash_net4_telem *tdata = 127 const struct hash_net4_telem *tdata =
110 (const struct hash_net4_telem *)data; 128 (const struct hash_net4_telem *)data;
129 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
111 130
112 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); 131 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
113 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, tdata->cidr); 132 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, tdata->cidr);
114 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 133 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
115 htonl(ip_set_timeout_get(tdata->timeout))); 134 htonl(ip_set_timeout_get(tdata->timeout)));
135 if (flags)
136 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
116 137
117 return 0; 138 return 0;
118 139
@@ -167,7 +188,8 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
167 int ret; 188 int ret;
168 189
169 if (unlikely(!tb[IPSET_ATTR_IP] || 190 if (unlikely(!tb[IPSET_ATTR_IP] ||
170 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) 191 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
192 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
171 return -IPSET_ERR_PROTOCOL; 193 return -IPSET_ERR_PROTOCOL;
172 194
173 if (tb[IPSET_ATTR_LINENO]) 195 if (tb[IPSET_ATTR_LINENO])
@@ -179,7 +201,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
179 201
180 if (tb[IPSET_ATTR_CIDR]) { 202 if (tb[IPSET_ATTR_CIDR]) {
181 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); 203 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
182 if (!data.cidr) 204 if (!data.cidr || data.cidr > HOST_MASK)
183 return -IPSET_ERR_INVALID_CIDR; 205 return -IPSET_ERR_INVALID_CIDR;
184 } 206 }
185 207
@@ -189,6 +211,12 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
189 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 211 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
190 } 212 }
191 213
214 if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
215 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
216 if (cadt_flags & IPSET_FLAG_NOMATCH)
217 flags |= (cadt_flags << 16);
218 }
219
192 if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) { 220 if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
193 data.ip = htonl(ip & ip_set_hostmask(data.cidr)); 221 data.ip = htonl(ip & ip_set_hostmask(data.cidr));
194 ret = adtfn(set, &data, timeout, flags); 222 ret = adtfn(set, &data, timeout, flags);
@@ -236,14 +264,14 @@ hash_net_same_set(const struct ip_set *a, const struct ip_set *b)
236struct hash_net6_elem { 264struct hash_net6_elem {
237 union nf_inet_addr ip; 265 union nf_inet_addr ip;
238 u16 padding0; 266 u16 padding0;
239 u8 padding1; 267 u8 nomatch;
240 u8 cidr; 268 u8 cidr;
241}; 269};
242 270
243struct hash_net6_telem { 271struct hash_net6_telem {
244 union nf_inet_addr ip; 272 union nf_inet_addr ip;
245 u16 padding0; 273 u16 padding0;
246 u8 padding1; 274 u8 nomatch;
247 u8 cidr; 275 u8 cidr;
248 unsigned long timeout; 276 unsigned long timeout;
249}; 277};
@@ -269,6 +297,19 @@ hash_net6_data_copy(struct hash_net6_elem *dst,
269{ 297{
270 dst->ip.in6 = src->ip.in6; 298 dst->ip.in6 = src->ip.in6;
271 dst->cidr = src->cidr; 299 dst->cidr = src->cidr;
300 dst->nomatch = src->nomatch;
301}
302
303static inline void
304hash_net6_data_flags(struct hash_net6_elem *dst, u32 flags)
305{
306 dst->nomatch = flags & IPSET_FLAG_NOMATCH;
307}
308
309static inline bool
310hash_net6_data_match(const struct hash_net6_elem *elem)
311{
312 return !elem->nomatch;
272} 313}
273 314
274static inline void 315static inline void
@@ -296,8 +337,12 @@ hash_net6_data_netmask(struct hash_net6_elem *elem, u8 cidr)
296static bool 337static bool
297hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data) 338hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data)
298{ 339{
340 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
341
299 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); 342 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
300 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 343 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
344 if (flags)
345 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
301 return 0; 346 return 0;
302 347
303nla_put_failure: 348nla_put_failure:
@@ -309,11 +354,14 @@ hash_net6_data_tlist(struct sk_buff *skb, const struct hash_net6_elem *data)
309{ 354{
310 const struct hash_net6_telem *e = 355 const struct hash_net6_telem *e =
311 (const struct hash_net6_telem *)data; 356 (const struct hash_net6_telem *)data;
357 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
312 358
313 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); 359 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
314 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, e->cidr); 360 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, e->cidr);
315 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 361 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
316 htonl(ip_set_timeout_get(e->timeout))); 362 htonl(ip_set_timeout_get(e->timeout)));
363 if (flags)
364 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
317 return 0; 365 return 0;
318 366
319nla_put_failure: 367nla_put_failure:
@@ -366,7 +414,8 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
366 int ret; 414 int ret;
367 415
368 if (unlikely(!tb[IPSET_ATTR_IP] || 416 if (unlikely(!tb[IPSET_ATTR_IP] ||
369 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) 417 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
418 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
370 return -IPSET_ERR_PROTOCOL; 419 return -IPSET_ERR_PROTOCOL;
371 if (unlikely(tb[IPSET_ATTR_IP_TO])) 420 if (unlikely(tb[IPSET_ATTR_IP_TO]))
372 return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; 421 return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
@@ -381,7 +430,7 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
381 if (tb[IPSET_ATTR_CIDR]) 430 if (tb[IPSET_ATTR_CIDR])
382 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); 431 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
383 432
384 if (!data.cidr) 433 if (!data.cidr || data.cidr > HOST_MASK)
385 return -IPSET_ERR_INVALID_CIDR; 434 return -IPSET_ERR_INVALID_CIDR;
386 435
387 ip6_netmask(&data.ip, data.cidr); 436 ip6_netmask(&data.ip, data.cidr);
@@ -392,6 +441,12 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
392 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 441 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
393 } 442 }
394 443
444 if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
445 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
446 if (cadt_flags & IPSET_FLAG_NOMATCH)
447 flags |= (cadt_flags << 16);
448 }
449
395 ret = adtfn(set, &data, timeout, flags); 450 ret = adtfn(set, &data, timeout, flags);
396 451
397 return ip_set_eexist(ret, flags) ? 0 : ret; 452 return ip_set_eexist(ret, flags) ? 0 : ret;
@@ -406,7 +461,7 @@ hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
406 struct ip_set_hash *h; 461 struct ip_set_hash *h;
407 u8 hbits; 462 u8 hbits;
408 463
409 if (!(set->family == AF_INET || set->family == AF_INET6)) 464 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
410 return -IPSET_ERR_INVALID_FAMILY; 465 return -IPSET_ERR_INVALID_FAMILY;
411 466
412 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || 467 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
@@ -425,7 +480,7 @@ hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
425 480
426 h = kzalloc(sizeof(*h) 481 h = kzalloc(sizeof(*h)
427 + sizeof(struct ip_set_hash_nets) 482 + sizeof(struct ip_set_hash_nets)
428 * (set->family == AF_INET ? 32 : 128), GFP_KERNEL); 483 * (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL);
429 if (!h) 484 if (!h)
430 return -ENOMEM; 485 return -ENOMEM;
431 486
@@ -448,15 +503,15 @@ hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
448 if (tb[IPSET_ATTR_TIMEOUT]) { 503 if (tb[IPSET_ATTR_TIMEOUT]) {
449 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 504 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
450 505
451 set->variant = set->family == AF_INET 506 set->variant = set->family == NFPROTO_IPV4
452 ? &hash_net4_tvariant : &hash_net6_tvariant; 507 ? &hash_net4_tvariant : &hash_net6_tvariant;
453 508
454 if (set->family == AF_INET) 509 if (set->family == NFPROTO_IPV4)
455 hash_net4_gc_init(set); 510 hash_net4_gc_init(set);
456 else 511 else
457 hash_net6_gc_init(set); 512 hash_net6_gc_init(set);
458 } else { 513 } else {
459 set->variant = set->family == AF_INET 514 set->variant = set->family == NFPROTO_IPV4
460 ? &hash_net4_variant : &hash_net6_variant; 515 ? &hash_net4_variant : &hash_net6_variant;
461 } 516 }
462 517
@@ -472,9 +527,10 @@ static struct ip_set_type hash_net_type __read_mostly = {
472 .protocol = IPSET_PROTOCOL, 527 .protocol = IPSET_PROTOCOL,
473 .features = IPSET_TYPE_IP, 528 .features = IPSET_TYPE_IP,
474 .dimension = IPSET_DIM_ONE, 529 .dimension = IPSET_DIM_ONE,
475 .family = AF_UNSPEC, 530 .family = NFPROTO_UNSPEC,
476 .revision_min = 0, 531 .revision_min = 0,
477 .revision_max = 1, /* Range as input support for IPv4 added */ 532 /* = 1 Range as input support for IPv4 added */
533 .revision_max = 2, /* nomatch flag support added */
478 .create = hash_net_create, 534 .create = hash_net_create,
479 .create_policy = { 535 .create_policy = {
480 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 536 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
@@ -488,6 +544,7 @@ static struct ip_set_type hash_net_type __read_mostly = {
488 [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, 544 [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
489 [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, 545 [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
490 [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, 546 [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
547 [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
491 }, 548 },
492 .me = THIS_MODULE, 549 .me = THIS_MODULE,
493}; 550};
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index e13095deb50d..f24037ff4322 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -163,7 +163,8 @@ struct hash_netiface4_elem_hashed {
163 __be32 ip; 163 __be32 ip;
164 u8 physdev; 164 u8 physdev;
165 u8 cidr; 165 u8 cidr;
166 u16 padding; 166 u8 nomatch;
167 u8 padding;
167}; 168};
168 169
169#define HKEY_DATALEN sizeof(struct hash_netiface4_elem_hashed) 170#define HKEY_DATALEN sizeof(struct hash_netiface4_elem_hashed)
@@ -173,7 +174,8 @@ struct hash_netiface4_elem {
173 __be32 ip; 174 __be32 ip;
174 u8 physdev; 175 u8 physdev;
175 u8 cidr; 176 u8 cidr;
176 u16 padding; 177 u8 nomatch;
178 u8 padding;
177 const char *iface; 179 const char *iface;
178}; 180};
179 181
@@ -182,7 +184,8 @@ struct hash_netiface4_telem {
182 __be32 ip; 184 __be32 ip;
183 u8 physdev; 185 u8 physdev;
184 u8 cidr; 186 u8 cidr;
185 u16 padding; 187 u8 nomatch;
188 u8 padding;
186 const char *iface; 189 const char *iface;
187 unsigned long timeout; 190 unsigned long timeout;
188}; 191};
@@ -207,11 +210,25 @@ hash_netiface4_data_isnull(const struct hash_netiface4_elem *elem)
207 210
208static inline void 211static inline void
209hash_netiface4_data_copy(struct hash_netiface4_elem *dst, 212hash_netiface4_data_copy(struct hash_netiface4_elem *dst,
210 const struct hash_netiface4_elem *src) { 213 const struct hash_netiface4_elem *src)
214{
211 dst->ip = src->ip; 215 dst->ip = src->ip;
212 dst->cidr = src->cidr; 216 dst->cidr = src->cidr;
213 dst->physdev = src->physdev; 217 dst->physdev = src->physdev;
214 dst->iface = src->iface; 218 dst->iface = src->iface;
219 dst->nomatch = src->nomatch;
220}
221
222static inline void
223hash_netiface4_data_flags(struct hash_netiface4_elem *dst, u32 flags)
224{
225 dst->nomatch = flags & IPSET_FLAG_NOMATCH;
226}
227
228static inline bool
229hash_netiface4_data_match(const struct hash_netiface4_elem *elem)
230{
231 return !elem->nomatch;
215} 232}
216 233
217static inline void 234static inline void
@@ -233,11 +250,13 @@ hash_netiface4_data_list(struct sk_buff *skb,
233{ 250{
234 u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0; 251 u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
235 252
253 if (data->nomatch)
254 flags |= IPSET_FLAG_NOMATCH;
236 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); 255 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
237 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 256 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
238 NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); 257 NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
239 if (flags) 258 if (flags)
240 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags); 259 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
241 return 0; 260 return 0;
242 261
243nla_put_failure: 262nla_put_failure:
@@ -252,11 +271,13 @@ hash_netiface4_data_tlist(struct sk_buff *skb,
252 (const struct hash_netiface4_telem *)data; 271 (const struct hash_netiface4_telem *)data;
253 u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0; 272 u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
254 273
274 if (data->nomatch)
275 flags |= IPSET_FLAG_NOMATCH;
255 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); 276 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
256 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 277 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
257 NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); 278 NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
258 if (flags) 279 if (flags)
259 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags); 280 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
260 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 281 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
261 htonl(ip_set_timeout_get(tdata->timeout))); 282 htonl(ip_set_timeout_get(tdata->timeout)));
262 283
@@ -361,7 +382,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
361 382
362 if (tb[IPSET_ATTR_CIDR]) { 383 if (tb[IPSET_ATTR_CIDR]) {
363 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); 384 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
364 if (!data.cidr) 385 if (!data.cidr || data.cidr > HOST_MASK)
365 return -IPSET_ERR_INVALID_CIDR; 386 return -IPSET_ERR_INVALID_CIDR;
366 } 387 }
367 388
@@ -387,6 +408,8 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
387 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); 408 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
388 if (cadt_flags & IPSET_FLAG_PHYSDEV) 409 if (cadt_flags & IPSET_FLAG_PHYSDEV)
389 data.physdev = 1; 410 data.physdev = 1;
411 if (adt == IPSET_ADD && (cadt_flags & IPSET_FLAG_NOMATCH))
412 flags |= (cadt_flags << 16);
390 } 413 }
391 414
392 if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) { 415 if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
@@ -440,7 +463,8 @@ struct hash_netiface6_elem_hashed {
440 union nf_inet_addr ip; 463 union nf_inet_addr ip;
441 u8 physdev; 464 u8 physdev;
442 u8 cidr; 465 u8 cidr;
443 u16 padding; 466 u8 nomatch;
467 u8 padding;
444}; 468};
445 469
446#define HKEY_DATALEN sizeof(struct hash_netiface6_elem_hashed) 470#define HKEY_DATALEN sizeof(struct hash_netiface6_elem_hashed)
@@ -449,7 +473,8 @@ struct hash_netiface6_elem {
449 union nf_inet_addr ip; 473 union nf_inet_addr ip;
450 u8 physdev; 474 u8 physdev;
451 u8 cidr; 475 u8 cidr;
452 u16 padding; 476 u8 nomatch;
477 u8 padding;
453 const char *iface; 478 const char *iface;
454}; 479};
455 480
@@ -457,7 +482,8 @@ struct hash_netiface6_telem {
457 union nf_inet_addr ip; 482 union nf_inet_addr ip;
458 u8 physdev; 483 u8 physdev;
459 u8 cidr; 484 u8 cidr;
460 u16 padding; 485 u8 nomatch;
486 u8 padding;
461 const char *iface; 487 const char *iface;
462 unsigned long timeout; 488 unsigned long timeout;
463}; 489};
@@ -488,8 +514,21 @@ hash_netiface6_data_copy(struct hash_netiface6_elem *dst,
488} 514}
489 515
490static inline void 516static inline void
517hash_netiface6_data_flags(struct hash_netiface6_elem *dst, u32 flags)
518{
519 dst->nomatch = flags & IPSET_FLAG_NOMATCH;
520}
521
522static inline bool
523hash_netiface6_data_match(const struct hash_netiface6_elem *elem)
524{
525 return !elem->nomatch;
526}
527
528static inline void
491hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem) 529hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem)
492{ 530{
531 elem->cidr = 0;
493} 532}
494 533
495static inline void 534static inline void
@@ -514,11 +553,13 @@ hash_netiface6_data_list(struct sk_buff *skb,
514{ 553{
515 u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0; 554 u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
516 555
556 if (data->nomatch)
557 flags |= IPSET_FLAG_NOMATCH;
517 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); 558 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
518 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 559 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
519 NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); 560 NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
520 if (flags) 561 if (flags)
521 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags); 562 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
522 return 0; 563 return 0;
523 564
524nla_put_failure: 565nla_put_failure:
@@ -533,11 +574,13 @@ hash_netiface6_data_tlist(struct sk_buff *skb,
533 (const struct hash_netiface6_telem *)data; 574 (const struct hash_netiface6_telem *)data;
534 u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0; 575 u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
535 576
577 if (data->nomatch)
578 flags |= IPSET_FLAG_NOMATCH;
536 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); 579 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
537 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 580 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
538 NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); 581 NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
539 if (flags) 582 if (flags)
540 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags); 583 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
541 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 584 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
542 htonl(ip_set_timeout_get(e->timeout))); 585 htonl(ip_set_timeout_get(e->timeout)));
543 return 0; 586 return 0;
@@ -636,7 +679,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
636 679
637 if (tb[IPSET_ATTR_CIDR]) 680 if (tb[IPSET_ATTR_CIDR])
638 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); 681 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
639 if (!data.cidr) 682 if (!data.cidr || data.cidr > HOST_MASK)
640 return -IPSET_ERR_INVALID_CIDR; 683 return -IPSET_ERR_INVALID_CIDR;
641 ip6_netmask(&data.ip, data.cidr); 684 ip6_netmask(&data.ip, data.cidr);
642 685
@@ -662,6 +705,8 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
662 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); 705 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
663 if (cadt_flags & IPSET_FLAG_PHYSDEV) 706 if (cadt_flags & IPSET_FLAG_PHYSDEV)
664 data.physdev = 1; 707 data.physdev = 1;
708 if (adt == IPSET_ADD && (cadt_flags & IPSET_FLAG_NOMATCH))
709 flags |= (cadt_flags << 16);
665 } 710 }
666 711
667 ret = adtfn(set, &data, timeout, flags); 712 ret = adtfn(set, &data, timeout, flags);
@@ -678,7 +723,7 @@ hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
678 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 723 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
679 u8 hbits; 724 u8 hbits;
680 725
681 if (!(set->family == AF_INET || set->family == AF_INET6)) 726 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
682 return -IPSET_ERR_INVALID_FAMILY; 727 return -IPSET_ERR_INVALID_FAMILY;
683 728
684 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || 729 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
@@ -697,7 +742,7 @@ hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
697 742
698 h = kzalloc(sizeof(*h) 743 h = kzalloc(sizeof(*h)
699 + sizeof(struct ip_set_hash_nets) 744 + sizeof(struct ip_set_hash_nets)
700 * (set->family == AF_INET ? 32 : 128), GFP_KERNEL); 745 * (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL);
701 if (!h) 746 if (!h)
702 return -ENOMEM; 747 return -ENOMEM;
703 748
@@ -722,15 +767,15 @@ hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
722 if (tb[IPSET_ATTR_TIMEOUT]) { 767 if (tb[IPSET_ATTR_TIMEOUT]) {
723 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 768 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
724 769
725 set->variant = set->family == AF_INET 770 set->variant = set->family == NFPROTO_IPV4
726 ? &hash_netiface4_tvariant : &hash_netiface6_tvariant; 771 ? &hash_netiface4_tvariant : &hash_netiface6_tvariant;
727 772
728 if (set->family == AF_INET) 773 if (set->family == NFPROTO_IPV4)
729 hash_netiface4_gc_init(set); 774 hash_netiface4_gc_init(set);
730 else 775 else
731 hash_netiface6_gc_init(set); 776 hash_netiface6_gc_init(set);
732 } else { 777 } else {
733 set->variant = set->family == AF_INET 778 set->variant = set->family == NFPROTO_IPV4
734 ? &hash_netiface4_variant : &hash_netiface6_variant; 779 ? &hash_netiface4_variant : &hash_netiface6_variant;
735 } 780 }
736 781
@@ -746,8 +791,9 @@ static struct ip_set_type hash_netiface_type __read_mostly = {
746 .protocol = IPSET_PROTOCOL, 791 .protocol = IPSET_PROTOCOL,
747 .features = IPSET_TYPE_IP | IPSET_TYPE_IFACE, 792 .features = IPSET_TYPE_IP | IPSET_TYPE_IFACE,
748 .dimension = IPSET_DIM_TWO, 793 .dimension = IPSET_DIM_TWO,
749 .family = AF_UNSPEC, 794 .family = NFPROTO_UNSPEC,
750 .revision_min = 0, 795 .revision_min = 0,
796 .revision_max = 1, /* nomatch flag support added */
751 .create = hash_netiface_create, 797 .create = hash_netiface_create,
752 .create_policy = { 798 .create_policy = {
753 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 799 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index 8f9de7207ec9..ce2e77100b64 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -40,12 +40,19 @@ hash_netport_same_set(const struct ip_set *a, const struct ip_set *b);
40 40
41/* The type variant functions: IPv4 */ 41/* The type variant functions: IPv4 */
42 42
43/* We squeeze the "nomatch" flag into cidr: we don't support cidr == 0
44 * However this way we have to store internally cidr - 1,
45 * dancing back and forth.
46 */
47#define IP_SET_HASH_WITH_NETS_PACKED
48
43/* Member elements without timeout */ 49/* Member elements without timeout */
44struct hash_netport4_elem { 50struct hash_netport4_elem {
45 __be32 ip; 51 __be32 ip;
46 __be16 port; 52 __be16 port;
47 u8 proto; 53 u8 proto;
48 u8 cidr; 54 u8 cidr:7;
55 u8 nomatch:1;
49}; 56};
50 57
51/* Member elements with timeout support */ 58/* Member elements with timeout support */
@@ -53,7 +60,8 @@ struct hash_netport4_telem {
53 __be32 ip; 60 __be32 ip;
54 __be16 port; 61 __be16 port;
55 u8 proto; 62 u8 proto;
56 u8 cidr; 63 u8 cidr:7;
64 u8 nomatch:1;
57 unsigned long timeout; 65 unsigned long timeout;
58}; 66};
59 67
@@ -82,13 +90,26 @@ hash_netport4_data_copy(struct hash_netport4_elem *dst,
82 dst->port = src->port; 90 dst->port = src->port;
83 dst->proto = src->proto; 91 dst->proto = src->proto;
84 dst->cidr = src->cidr; 92 dst->cidr = src->cidr;
93 dst->nomatch = src->nomatch;
94}
95
96static inline void
97hash_netport4_data_flags(struct hash_netport4_elem *dst, u32 flags)
98{
99 dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
100}
101
102static inline bool
103hash_netport4_data_match(const struct hash_netport4_elem *elem)
104{
105 return !elem->nomatch;
85} 106}
86 107
87static inline void 108static inline void
88hash_netport4_data_netmask(struct hash_netport4_elem *elem, u8 cidr) 109hash_netport4_data_netmask(struct hash_netport4_elem *elem, u8 cidr)
89{ 110{
90 elem->ip &= ip_set_netmask(cidr); 111 elem->ip &= ip_set_netmask(cidr);
91 elem->cidr = cidr; 112 elem->cidr = cidr - 1;
92} 113}
93 114
94static inline void 115static inline void
@@ -101,10 +122,14 @@ static bool
101hash_netport4_data_list(struct sk_buff *skb, 122hash_netport4_data_list(struct sk_buff *skb,
102 const struct hash_netport4_elem *data) 123 const struct hash_netport4_elem *data)
103{ 124{
125 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
126
104 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); 127 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
105 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 128 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
106 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 129 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
107 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 130 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
131 if (flags)
132 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
108 return 0; 133 return 0;
109 134
110nla_put_failure: 135nla_put_failure:
@@ -117,13 +142,16 @@ hash_netport4_data_tlist(struct sk_buff *skb,
117{ 142{
118 const struct hash_netport4_telem *tdata = 143 const struct hash_netport4_telem *tdata =
119 (const struct hash_netport4_telem *)data; 144 (const struct hash_netport4_telem *)data;
145 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
120 146
121 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); 147 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
122 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); 148 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
123 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 149 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
124 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 150 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
125 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 151 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
126 htonl(ip_set_timeout_get(tdata->timeout))); 152 htonl(ip_set_timeout_get(tdata->timeout)));
153 if (flags)
154 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
127 155
128 return 0; 156 return 0;
129 157
@@ -154,20 +182,18 @@ hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
154 const struct ip_set_hash *h = set->data; 182 const struct ip_set_hash *h = set->data;
155 ipset_adtfn adtfn = set->variant->adt[adt]; 183 ipset_adtfn adtfn = set->variant->adt[adt];
156 struct hash_netport4_elem data = { 184 struct hash_netport4_elem data = {
157 .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK 185 .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
158 }; 186 };
159 187
160 if (data.cidr == 0)
161 return -EINVAL;
162 if (adt == IPSET_TEST) 188 if (adt == IPSET_TEST)
163 data.cidr = HOST_MASK; 189 data.cidr = HOST_MASK - 1;
164 190
165 if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, 191 if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
166 &data.port, &data.proto)) 192 &data.port, &data.proto))
167 return -EINVAL; 193 return -EINVAL;
168 194
169 ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip); 195 ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip);
170 data.ip &= ip_set_netmask(data.cidr); 196 data.ip &= ip_set_netmask(data.cidr + 1);
171 197
172 return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); 198 return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
173} 199}
@@ -178,16 +204,18 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
178{ 204{
179 const struct ip_set_hash *h = set->data; 205 const struct ip_set_hash *h = set->data;
180 ipset_adtfn adtfn = set->variant->adt[adt]; 206 ipset_adtfn adtfn = set->variant->adt[adt];
181 struct hash_netport4_elem data = { .cidr = HOST_MASK }; 207 struct hash_netport4_elem data = { .cidr = HOST_MASK - 1 };
182 u32 port, port_to, p = 0, ip = 0, ip_to, last; 208 u32 port, port_to, p = 0, ip = 0, ip_to, last;
183 u32 timeout = h->timeout; 209 u32 timeout = h->timeout;
184 bool with_ports = false; 210 bool with_ports = false;
211 u8 cidr;
185 int ret; 212 int ret;
186 213
187 if (unlikely(!tb[IPSET_ATTR_IP] || 214 if (unlikely(!tb[IPSET_ATTR_IP] ||
188 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || 215 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
189 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || 216 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
190 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) 217 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
218 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
191 return -IPSET_ERR_PROTOCOL; 219 return -IPSET_ERR_PROTOCOL;
192 220
193 if (tb[IPSET_ATTR_LINENO]) 221 if (tb[IPSET_ATTR_LINENO])
@@ -198,9 +226,10 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
198 return ret; 226 return ret;
199 227
200 if (tb[IPSET_ATTR_CIDR]) { 228 if (tb[IPSET_ATTR_CIDR]) {
201 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); 229 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
202 if (!data.cidr) 230 if (!cidr || cidr > HOST_MASK)
203 return -IPSET_ERR_INVALID_CIDR; 231 return -IPSET_ERR_INVALID_CIDR;
232 data.cidr = cidr - 1;
204 } 233 }
205 234
206 if (tb[IPSET_ATTR_PORT]) 235 if (tb[IPSET_ATTR_PORT])
@@ -227,8 +256,15 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
227 } 256 }
228 257
229 with_ports = with_ports && tb[IPSET_ATTR_PORT_TO]; 258 with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
259
260 if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
261 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
262 if (cadt_flags & IPSET_FLAG_NOMATCH)
263 flags |= (cadt_flags << 16);
264 }
265
230 if (adt == IPSET_TEST || !(with_ports || tb[IPSET_ATTR_IP_TO])) { 266 if (adt == IPSET_TEST || !(with_ports || tb[IPSET_ATTR_IP_TO])) {
231 data.ip = htonl(ip & ip_set_hostmask(data.cidr)); 267 data.ip = htonl(ip & ip_set_hostmask(data.cidr + 1));
232 ret = adtfn(set, &data, timeout, flags); 268 ret = adtfn(set, &data, timeout, flags);
233 return ip_set_eexist(ret, flags) ? 0 : ret; 269 return ip_set_eexist(ret, flags) ? 0 : ret;
234 } 270 }
@@ -248,14 +284,15 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
248 if (ip + UINT_MAX == ip_to) 284 if (ip + UINT_MAX == ip_to)
249 return -IPSET_ERR_HASH_RANGE; 285 return -IPSET_ERR_HASH_RANGE;
250 } else { 286 } else {
251 ip_set_mask_from_to(ip, ip_to, data.cidr); 287 ip_set_mask_from_to(ip, ip_to, data.cidr + 1);
252 } 288 }
253 289
254 if (retried) 290 if (retried)
255 ip = h->next.ip; 291 ip = h->next.ip;
256 while (!after(ip, ip_to)) { 292 while (!after(ip, ip_to)) {
257 data.ip = htonl(ip); 293 data.ip = htonl(ip);
258 last = ip_set_range_to_cidr(ip, ip_to, &data.cidr); 294 last = ip_set_range_to_cidr(ip, ip_to, &cidr);
295 data.cidr = cidr - 1;
259 p = retried && ip == h->next.ip ? h->next.port : port; 296 p = retried && ip == h->next.ip ? h->next.port : port;
260 for (; p <= port_to; p++) { 297 for (; p <= port_to; p++) {
261 data.port = htons(p); 298 data.port = htons(p);
@@ -288,14 +325,16 @@ struct hash_netport6_elem {
288 union nf_inet_addr ip; 325 union nf_inet_addr ip;
289 __be16 port; 326 __be16 port;
290 u8 proto; 327 u8 proto;
291 u8 cidr; 328 u8 cidr:7;
329 u8 nomatch:1;
292}; 330};
293 331
294struct hash_netport6_telem { 332struct hash_netport6_telem {
295 union nf_inet_addr ip; 333 union nf_inet_addr ip;
296 __be16 port; 334 __be16 port;
297 u8 proto; 335 u8 proto;
298 u8 cidr; 336 u8 cidr:7;
337 u8 nomatch:1;
299 unsigned long timeout; 338 unsigned long timeout;
300}; 339};
301 340
@@ -324,6 +363,18 @@ hash_netport6_data_copy(struct hash_netport6_elem *dst,
324} 363}
325 364
326static inline void 365static inline void
366hash_netport6_data_flags(struct hash_netport6_elem *dst, u32 flags)
367{
368 dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
369}
370
371static inline bool
372hash_netport6_data_match(const struct hash_netport6_elem *elem)
373{
374 return !elem->nomatch;
375}
376
377static inline void
327hash_netport6_data_zero_out(struct hash_netport6_elem *elem) 378hash_netport6_data_zero_out(struct hash_netport6_elem *elem)
328{ 379{
329 elem->proto = 0; 380 elem->proto = 0;
@@ -342,17 +393,21 @@ static inline void
342hash_netport6_data_netmask(struct hash_netport6_elem *elem, u8 cidr) 393hash_netport6_data_netmask(struct hash_netport6_elem *elem, u8 cidr)
343{ 394{
344 ip6_netmask(&elem->ip, cidr); 395 ip6_netmask(&elem->ip, cidr);
345 elem->cidr = cidr; 396 elem->cidr = cidr - 1;
346} 397}
347 398
348static bool 399static bool
349hash_netport6_data_list(struct sk_buff *skb, 400hash_netport6_data_list(struct sk_buff *skb,
350 const struct hash_netport6_elem *data) 401 const struct hash_netport6_elem *data)
351{ 402{
403 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
404
352 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); 405 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
353 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 406 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
354 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 407 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
355 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 408 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
409 if (flags)
410 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
356 return 0; 411 return 0;
357 412
358nla_put_failure: 413nla_put_failure:
@@ -365,13 +420,16 @@ hash_netport6_data_tlist(struct sk_buff *skb,
365{ 420{
366 const struct hash_netport6_telem *e = 421 const struct hash_netport6_telem *e =
367 (const struct hash_netport6_telem *)data; 422 (const struct hash_netport6_telem *)data;
423 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
368 424
369 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); 425 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
370 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 426 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
371 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 427 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
372 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 428 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
373 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 429 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
374 htonl(ip_set_timeout_get(e->timeout))); 430 htonl(ip_set_timeout_get(e->timeout)));
431 if (flags)
432 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
375 return 0; 433 return 0;
376 434
377nla_put_failure: 435nla_put_failure:
@@ -400,20 +458,18 @@ hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
400 const struct ip_set_hash *h = set->data; 458 const struct ip_set_hash *h = set->data;
401 ipset_adtfn adtfn = set->variant->adt[adt]; 459 ipset_adtfn adtfn = set->variant->adt[adt];
402 struct hash_netport6_elem data = { 460 struct hash_netport6_elem data = {
403 .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK 461 .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1,
404 }; 462 };
405 463
406 if (data.cidr == 0)
407 return -EINVAL;
408 if (adt == IPSET_TEST) 464 if (adt == IPSET_TEST)
409 data.cidr = HOST_MASK; 465 data.cidr = HOST_MASK - 1;
410 466
411 if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, 467 if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
412 &data.port, &data.proto)) 468 &data.port, &data.proto))
413 return -EINVAL; 469 return -EINVAL;
414 470
415 ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6); 471 ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
416 ip6_netmask(&data.ip, data.cidr); 472 ip6_netmask(&data.ip, data.cidr + 1);
417 473
418 return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); 474 return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
419} 475}
@@ -424,16 +480,18 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
424{ 480{
425 const struct ip_set_hash *h = set->data; 481 const struct ip_set_hash *h = set->data;
426 ipset_adtfn adtfn = set->variant->adt[adt]; 482 ipset_adtfn adtfn = set->variant->adt[adt];
427 struct hash_netport6_elem data = { .cidr = HOST_MASK }; 483 struct hash_netport6_elem data = { .cidr = HOST_MASK - 1 };
428 u32 port, port_to; 484 u32 port, port_to;
429 u32 timeout = h->timeout; 485 u32 timeout = h->timeout;
430 bool with_ports = false; 486 bool with_ports = false;
487 u8 cidr;
431 int ret; 488 int ret;
432 489
433 if (unlikely(!tb[IPSET_ATTR_IP] || 490 if (unlikely(!tb[IPSET_ATTR_IP] ||
434 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || 491 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
435 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || 492 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
436 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) 493 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
494 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
437 return -IPSET_ERR_PROTOCOL; 495 return -IPSET_ERR_PROTOCOL;
438 if (unlikely(tb[IPSET_ATTR_IP_TO])) 496 if (unlikely(tb[IPSET_ATTR_IP_TO]))
439 return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; 497 return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
@@ -445,11 +503,13 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
445 if (ret) 503 if (ret)
446 return ret; 504 return ret;
447 505
448 if (tb[IPSET_ATTR_CIDR]) 506 if (tb[IPSET_ATTR_CIDR]) {
449 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); 507 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
450 if (!data.cidr) 508 if (!cidr || cidr > HOST_MASK)
451 return -IPSET_ERR_INVALID_CIDR; 509 return -IPSET_ERR_INVALID_CIDR;
452 ip6_netmask(&data.ip, data.cidr); 510 data.cidr = cidr - 1;
511 }
512 ip6_netmask(&data.ip, data.cidr + 1);
453 513
454 if (tb[IPSET_ATTR_PORT]) 514 if (tb[IPSET_ATTR_PORT])
455 data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); 515 data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
@@ -474,6 +534,12 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
474 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 534 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
475 } 535 }
476 536
537 if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
538 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
539 if (cadt_flags & IPSET_FLAG_NOMATCH)
540 flags |= (cadt_flags << 16);
541 }
542
477 if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { 543 if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
478 ret = adtfn(set, &data, timeout, flags); 544 ret = adtfn(set, &data, timeout, flags);
479 return ip_set_eexist(ret, flags) ? 0 : ret; 545 return ip_set_eexist(ret, flags) ? 0 : ret;
@@ -507,7 +573,7 @@ hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
507 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 573 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
508 u8 hbits; 574 u8 hbits;
509 575
510 if (!(set->family == AF_INET || set->family == AF_INET6)) 576 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
511 return -IPSET_ERR_INVALID_FAMILY; 577 return -IPSET_ERR_INVALID_FAMILY;
512 578
513 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || 579 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
@@ -526,7 +592,7 @@ hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
526 592
527 h = kzalloc(sizeof(*h) 593 h = kzalloc(sizeof(*h)
528 + sizeof(struct ip_set_hash_nets) 594 + sizeof(struct ip_set_hash_nets)
529 * (set->family == AF_INET ? 32 : 128), GFP_KERNEL); 595 * (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL);
530 if (!h) 596 if (!h)
531 return -ENOMEM; 597 return -ENOMEM;
532 598
@@ -549,15 +615,15 @@ hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
549 if (tb[IPSET_ATTR_TIMEOUT]) { 615 if (tb[IPSET_ATTR_TIMEOUT]) {
550 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 616 h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
551 617
552 set->variant = set->family == AF_INET 618 set->variant = set->family == NFPROTO_IPV4
553 ? &hash_netport4_tvariant : &hash_netport6_tvariant; 619 ? &hash_netport4_tvariant : &hash_netport6_tvariant;
554 620
555 if (set->family == AF_INET) 621 if (set->family == NFPROTO_IPV4)
556 hash_netport4_gc_init(set); 622 hash_netport4_gc_init(set);
557 else 623 else
558 hash_netport6_gc_init(set); 624 hash_netport6_gc_init(set);
559 } else { 625 } else {
560 set->variant = set->family == AF_INET 626 set->variant = set->family == NFPROTO_IPV4
561 ? &hash_netport4_variant : &hash_netport6_variant; 627 ? &hash_netport4_variant : &hash_netport6_variant;
562 } 628 }
563 629
@@ -573,10 +639,11 @@ static struct ip_set_type hash_netport_type __read_mostly = {
573 .protocol = IPSET_PROTOCOL, 639 .protocol = IPSET_PROTOCOL,
574 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT, 640 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
575 .dimension = IPSET_DIM_TWO, 641 .dimension = IPSET_DIM_TWO,
576 .family = AF_UNSPEC, 642 .family = NFPROTO_UNSPEC,
577 .revision_min = 0, 643 .revision_min = 0,
578 /* 1 SCTP and UDPLITE support added */ 644 /* 1 SCTP and UDPLITE support added */
579 .revision_max = 2, /* Range as input support for IPv4 added */ 645 /* 2, Range as input support for IPv4 added */
646 .revision_max = 3, /* nomatch flag support added */
580 .create = hash_netport_create, 647 .create = hash_netport_create,
581 .create_policy = { 648 .create_policy = {
582 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 649 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
@@ -595,6 +662,7 @@ static struct ip_set_type hash_netport_type __read_mostly = {
595 [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, 662 [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
596 [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, 663 [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
597 [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, 664 [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
665 [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
598 }, 666 },
599 .me = THIS_MODULE, 667 .me = THIS_MODULE,
600}; 668};
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 4d10819d462e..7e095f9005f0 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -575,7 +575,7 @@ static struct ip_set_type list_set_type __read_mostly = {
575 .protocol = IPSET_PROTOCOL, 575 .protocol = IPSET_PROTOCOL,
576 .features = IPSET_TYPE_NAME | IPSET_DUMP_LAST, 576 .features = IPSET_TYPE_NAME | IPSET_DUMP_LAST,
577 .dimension = IPSET_DIM_ONE, 577 .dimension = IPSET_DIM_ONE,
578 .family = AF_UNSPEC, 578 .family = NFPROTO_UNSPEC,
579 .revision_min = 0, 579 .revision_min = 0,
580 .revision_max = 0, 580 .revision_max = 0,
581 .create = list_set_create, 581 .create = list_set_create,
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index fa4b82c8ae80..7b48035826ee 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -44,6 +44,7 @@
44#include <net/netfilter/nf_conntrack_ecache.h> 44#include <net/netfilter/nf_conntrack_ecache.h>
45#include <net/netfilter/nf_conntrack_zones.h> 45#include <net/netfilter/nf_conntrack_zones.h>
46#include <net/netfilter/nf_conntrack_timestamp.h> 46#include <net/netfilter/nf_conntrack_timestamp.h>
47#include <net/netfilter/nf_conntrack_timeout.h>
47#include <net/netfilter/nf_nat.h> 48#include <net/netfilter/nf_nat.h>
48#include <net/netfilter/nf_nat_core.h> 49#include <net/netfilter/nf_nat_core.h>
49 50
@@ -767,7 +768,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
767 struct nf_conntrack_l3proto *l3proto, 768 struct nf_conntrack_l3proto *l3proto,
768 struct nf_conntrack_l4proto *l4proto, 769 struct nf_conntrack_l4proto *l4proto,
769 struct sk_buff *skb, 770 struct sk_buff *skb,
770 unsigned int dataoff, u32 hash) 771 unsigned int dataoff, u32 hash,
772 unsigned int *timeouts)
771{ 773{
772 struct nf_conn *ct; 774 struct nf_conn *ct;
773 struct nf_conn_help *help; 775 struct nf_conn_help *help;
@@ -786,7 +788,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
786 if (IS_ERR(ct)) 788 if (IS_ERR(ct))
787 return (struct nf_conntrack_tuple_hash *)ct; 789 return (struct nf_conntrack_tuple_hash *)ct;
788 790
789 if (!l4proto->new(ct, skb, dataoff)) { 791 if (!l4proto->new(ct, skb, dataoff, timeouts)) {
790 nf_conntrack_free(ct); 792 nf_conntrack_free(ct);
791 pr_debug("init conntrack: can't track with proto module\n"); 793 pr_debug("init conntrack: can't track with proto module\n");
792 return NULL; 794 return NULL;
@@ -852,7 +854,8 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
852 struct nf_conntrack_l3proto *l3proto, 854 struct nf_conntrack_l3proto *l3proto,
853 struct nf_conntrack_l4proto *l4proto, 855 struct nf_conntrack_l4proto *l4proto,
854 int *set_reply, 856 int *set_reply,
855 enum ip_conntrack_info *ctinfo) 857 enum ip_conntrack_info *ctinfo,
858 unsigned int *timeouts)
856{ 859{
857 struct nf_conntrack_tuple tuple; 860 struct nf_conntrack_tuple tuple;
858 struct nf_conntrack_tuple_hash *h; 861 struct nf_conntrack_tuple_hash *h;
@@ -872,7 +875,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
872 h = __nf_conntrack_find_get(net, zone, &tuple, hash); 875 h = __nf_conntrack_find_get(net, zone, &tuple, hash);
873 if (!h) { 876 if (!h) {
874 h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto, 877 h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
875 skb, dataoff, hash); 878 skb, dataoff, hash, timeouts);
876 if (!h) 879 if (!h)
877 return NULL; 880 return NULL;
878 if (IS_ERR(h)) 881 if (IS_ERR(h))
@@ -913,6 +916,8 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
913 enum ip_conntrack_info ctinfo; 916 enum ip_conntrack_info ctinfo;
914 struct nf_conntrack_l3proto *l3proto; 917 struct nf_conntrack_l3proto *l3proto;
915 struct nf_conntrack_l4proto *l4proto; 918 struct nf_conntrack_l4proto *l4proto;
919 struct nf_conn_timeout *timeout_ext;
920 unsigned int *timeouts;
916 unsigned int dataoff; 921 unsigned int dataoff;
917 u_int8_t protonum; 922 u_int8_t protonum;
918 int set_reply = 0; 923 int set_reply = 0;
@@ -959,8 +964,19 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
959 goto out; 964 goto out;
960 } 965 }
961 966
967 /* Decide what timeout policy we want to apply to this flow. */
968 if (tmpl) {
969 timeout_ext = nf_ct_timeout_find(tmpl);
970 if (timeout_ext)
971 timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
972 else
973 timeouts = l4proto->get_timeouts(net);
974 } else
975 timeouts = l4proto->get_timeouts(net);
976
962 ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, 977 ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
963 l3proto, l4proto, &set_reply, &ctinfo); 978 l3proto, l4proto, &set_reply, &ctinfo,
979 timeouts);
964 if (!ct) { 980 if (!ct) {
965 /* Not valid part of a connection */ 981 /* Not valid part of a connection */
966 NF_CT_STAT_INC_ATOMIC(net, invalid); 982 NF_CT_STAT_INC_ATOMIC(net, invalid);
@@ -977,7 +993,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
977 993
978 NF_CT_ASSERT(skb->nfct); 994 NF_CT_ASSERT(skb->nfct);
979 995
980 ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum); 996 ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts);
981 if (ret <= 0) { 997 if (ret <= 0) {
982 /* Invalid: inverse of the return code tells 998 /* Invalid: inverse of the return code tells
983 * the netfilter core what to do */ 999 * the netfilter core what to do */
@@ -1331,6 +1347,7 @@ static void nf_conntrack_cleanup_net(struct net *net)
1331 } 1347 }
1332 1348
1333 nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size); 1349 nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
1350 nf_conntrack_timeout_fini(net);
1334 nf_conntrack_ecache_fini(net); 1351 nf_conntrack_ecache_fini(net);
1335 nf_conntrack_tstamp_fini(net); 1352 nf_conntrack_tstamp_fini(net);
1336 nf_conntrack_acct_fini(net); 1353 nf_conntrack_acct_fini(net);
@@ -1562,9 +1579,14 @@ static int nf_conntrack_init_net(struct net *net)
1562 ret = nf_conntrack_ecache_init(net); 1579 ret = nf_conntrack_ecache_init(net);
1563 if (ret < 0) 1580 if (ret < 0)
1564 goto err_ecache; 1581 goto err_ecache;
1582 ret = nf_conntrack_timeout_init(net);
1583 if (ret < 0)
1584 goto err_timeout;
1565 1585
1566 return 0; 1586 return 0;
1567 1587
1588err_timeout:
1589 nf_conntrack_timeout_fini(net);
1568err_ecache: 1590err_ecache:
1569 nf_conntrack_tstamp_fini(net); 1591 nf_conntrack_tstamp_fini(net);
1570err_tstamp: 1592err_tstamp:
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 14af6329bdda..5bd3047ddeec 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -32,9 +32,11 @@ static DEFINE_MUTEX(nf_ct_ecache_mutex);
32void nf_ct_deliver_cached_events(struct nf_conn *ct) 32void nf_ct_deliver_cached_events(struct nf_conn *ct)
33{ 33{
34 struct net *net = nf_ct_net(ct); 34 struct net *net = nf_ct_net(ct);
35 unsigned long events; 35 unsigned long events, missed;
36 struct nf_ct_event_notifier *notify; 36 struct nf_ct_event_notifier *notify;
37 struct nf_conntrack_ecache *e; 37 struct nf_conntrack_ecache *e;
38 struct nf_ct_event item;
39 int ret;
38 40
39 rcu_read_lock(); 41 rcu_read_lock();
40 notify = rcu_dereference(net->ct.nf_conntrack_event_cb); 42 notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
@@ -47,31 +49,32 @@ void nf_ct_deliver_cached_events(struct nf_conn *ct)
47 49
48 events = xchg(&e->cache, 0); 50 events = xchg(&e->cache, 0);
49 51
50 if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct) && events) { 52 if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct) || !events)
51 struct nf_ct_event item = { 53 goto out_unlock;
52 .ct = ct, 54
53 .pid = 0, 55 /* We make a copy of the missed event cache without taking
54 .report = 0 56 * the lock, thus we may send missed events twice. However,
55 }; 57 * this does not harm and it happens very rarely. */
56 int ret; 58 missed = e->missed;
57 /* We make a copy of the missed event cache without taking 59
58 * the lock, thus we may send missed events twice. However, 60 if (!((events | missed) & e->ctmask))
59 * this does not harm and it happens very rarely. */ 61 goto out_unlock;
60 unsigned long missed = e->missed; 62
61 63 item.ct = ct;
62 if (!((events | missed) & e->ctmask)) 64 item.pid = 0;
63 goto out_unlock; 65 item.report = 0;
64 66
65 ret = notify->fcn(events | missed, &item); 67 ret = notify->fcn(events | missed, &item);
66 if (unlikely(ret < 0 || missed)) { 68
67 spin_lock_bh(&ct->lock); 69 if (likely(ret >= 0 && !missed))
68 if (ret < 0) 70 goto out_unlock;
69 e->missed |= events; 71
70 else 72 spin_lock_bh(&ct->lock);
71 e->missed &= ~missed; 73 if (ret < 0)
72 spin_unlock_bh(&ct->lock); 74 e->missed |= events;
73 } 75 else
74 } 76 e->missed &= ~missed;
77 spin_unlock_bh(&ct->lock);
75 78
76out_unlock: 79out_unlock:
77 rcu_read_unlock(); 80 rcu_read_unlock();
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index bbe23baa19b6..436b7cb79ba4 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -181,6 +181,60 @@ void nf_ct_helper_destroy(struct nf_conn *ct)
181 } 181 }
182} 182}
183 183
184static LIST_HEAD(nf_ct_helper_expectfn_list);
185
186void nf_ct_helper_expectfn_register(struct nf_ct_helper_expectfn *n)
187{
188 spin_lock_bh(&nf_conntrack_lock);
189 list_add_rcu(&n->head, &nf_ct_helper_expectfn_list);
190 spin_unlock_bh(&nf_conntrack_lock);
191}
192EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_register);
193
194void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n)
195{
196 spin_lock_bh(&nf_conntrack_lock);
197 list_del_rcu(&n->head);
198 spin_unlock_bh(&nf_conntrack_lock);
199}
200EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_unregister);
201
202struct nf_ct_helper_expectfn *
203nf_ct_helper_expectfn_find_by_name(const char *name)
204{
205 struct nf_ct_helper_expectfn *cur;
206 bool found = false;
207
208 rcu_read_lock();
209 list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
210 if (!strcmp(cur->name, name)) {
211 found = true;
212 break;
213 }
214 }
215 rcu_read_unlock();
216 return found ? cur : NULL;
217}
218EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_name);
219
220struct nf_ct_helper_expectfn *
221nf_ct_helper_expectfn_find_by_symbol(const void *symbol)
222{
223 struct nf_ct_helper_expectfn *cur;
224 bool found = false;
225
226 rcu_read_lock();
227 list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
228 if (cur->expectfn == symbol) {
229 found = true;
230 break;
231 }
232 }
233 rcu_read_unlock();
234 return found ? cur : NULL;
235}
236EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol);
237
184int nf_conntrack_helper_register(struct nf_conntrack_helper *me) 238int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
185{ 239{
186 unsigned int h = helper_hash(&me->tuple); 240 unsigned int h = helper_hash(&me->tuple);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index b49da6c925b3..ca7e8354e4f8 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -110,15 +110,16 @@ ctnetlink_dump_tuples(struct sk_buff *skb,
110 struct nf_conntrack_l3proto *l3proto; 110 struct nf_conntrack_l3proto *l3proto;
111 struct nf_conntrack_l4proto *l4proto; 111 struct nf_conntrack_l4proto *l4proto;
112 112
113 rcu_read_lock();
113 l3proto = __nf_ct_l3proto_find(tuple->src.l3num); 114 l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
114 ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto); 115 ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto);
115 116
116 if (unlikely(ret < 0)) 117 if (ret >= 0) {
117 return ret; 118 l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
118 119 tuple->dst.protonum);
119 l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum); 120 ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
120 ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto); 121 }
121 122 rcu_read_unlock();
122 return ret; 123 return ret;
123} 124}
124 125
@@ -691,9 +692,18 @@ static int ctnetlink_done(struct netlink_callback *cb)
691{ 692{
692 if (cb->args[1]) 693 if (cb->args[1])
693 nf_ct_put((struct nf_conn *)cb->args[1]); 694 nf_ct_put((struct nf_conn *)cb->args[1]);
695 if (cb->data)
696 kfree(cb->data);
694 return 0; 697 return 0;
695} 698}
696 699
700struct ctnetlink_dump_filter {
701 struct {
702 u_int32_t val;
703 u_int32_t mask;
704 } mark;
705};
706
697static int 707static int
698ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) 708ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
699{ 709{
@@ -703,6 +713,10 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
703 struct hlist_nulls_node *n; 713 struct hlist_nulls_node *n;
704 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); 714 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
705 u_int8_t l3proto = nfmsg->nfgen_family; 715 u_int8_t l3proto = nfmsg->nfgen_family;
716 int res;
717#ifdef CONFIG_NF_CONNTRACK_MARK
718 const struct ctnetlink_dump_filter *filter = cb->data;
719#endif
706 720
707 spin_lock_bh(&nf_conntrack_lock); 721 spin_lock_bh(&nf_conntrack_lock);
708 last = (struct nf_conn *)cb->args[1]; 722 last = (struct nf_conn *)cb->args[1];
@@ -723,11 +737,20 @@ restart:
723 continue; 737 continue;
724 cb->args[1] = 0; 738 cb->args[1] = 0;
725 } 739 }
726 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, 740#ifdef CONFIG_NF_CONNTRACK_MARK
727 cb->nlh->nlmsg_seq, 741 if (filter && !((ct->mark & filter->mark.mask) ==
728 NFNL_MSG_TYPE( 742 filter->mark.val)) {
729 cb->nlh->nlmsg_type), 743 continue;
730 ct) < 0) { 744 }
745#endif
746 rcu_read_lock();
747 res =
748 ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
749 cb->nlh->nlmsg_seq,
750 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
751 ct);
752 rcu_read_unlock();
753 if (res < 0) {
731 nf_conntrack_get(&ct->ct_general); 754 nf_conntrack_get(&ct->ct_general);
732 cb->args[1] = (unsigned long)ct; 755 cb->args[1] = (unsigned long)ct;
733 goto out; 756 goto out;
@@ -894,6 +917,7 @@ static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
894 [CTA_NAT_DST] = { .type = NLA_NESTED }, 917 [CTA_NAT_DST] = { .type = NLA_NESTED },
895 [CTA_TUPLE_MASTER] = { .type = NLA_NESTED }, 918 [CTA_TUPLE_MASTER] = { .type = NLA_NESTED },
896 [CTA_ZONE] = { .type = NLA_U16 }, 919 [CTA_ZONE] = { .type = NLA_U16 },
920 [CTA_MARK_MASK] = { .type = NLA_U32 },
897}; 921};
898 922
899static int 923static int
@@ -978,9 +1002,28 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
978 u16 zone; 1002 u16 zone;
979 int err; 1003 int err;
980 1004
981 if (nlh->nlmsg_flags & NLM_F_DUMP) 1005 if (nlh->nlmsg_flags & NLM_F_DUMP) {
982 return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table, 1006 struct netlink_dump_control c = {
983 ctnetlink_done, 0); 1007 .dump = ctnetlink_dump_table,
1008 .done = ctnetlink_done,
1009 };
1010#ifdef CONFIG_NF_CONNTRACK_MARK
1011 if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
1012 struct ctnetlink_dump_filter *filter;
1013
1014 filter = kzalloc(sizeof(struct ctnetlink_dump_filter),
1015 GFP_ATOMIC);
1016 if (filter == NULL)
1017 return -ENOMEM;
1018
1019 filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
1020 filter->mark.mask =
1021 ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
1022 c.data = filter;
1023 }
1024#endif
1025 return netlink_dump_start(ctnl, skb, nlh, &c);
1026 }
984 1027
985 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone); 1028 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
986 if (err < 0) 1029 if (err < 0)
@@ -1610,14 +1653,16 @@ ctnetlink_exp_dump_mask(struct sk_buff *skb,
1610 if (!nest_parms) 1653 if (!nest_parms)
1611 goto nla_put_failure; 1654 goto nla_put_failure;
1612 1655
1656 rcu_read_lock();
1613 l3proto = __nf_ct_l3proto_find(tuple->src.l3num); 1657 l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
1614 ret = ctnetlink_dump_tuples_ip(skb, &m, l3proto); 1658 ret = ctnetlink_dump_tuples_ip(skb, &m, l3proto);
1615 1659 if (ret >= 0) {
1616 if (unlikely(ret < 0)) 1660 l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
1617 goto nla_put_failure; 1661 tuple->dst.protonum);
1618
1619 l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
1620 ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto); 1662 ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
1663 }
1664 rcu_read_unlock();
1665
1621 if (unlikely(ret < 0)) 1666 if (unlikely(ret < 0))
1622 goto nla_put_failure; 1667 goto nla_put_failure;
1623 1668
@@ -1636,6 +1681,11 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
1636 struct nf_conn *master = exp->master; 1681 struct nf_conn *master = exp->master;
1637 long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ; 1682 long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
1638 struct nf_conn_help *help; 1683 struct nf_conn_help *help;
1684#ifdef CONFIG_NF_NAT_NEEDED
1685 struct nlattr *nest_parms;
1686 struct nf_conntrack_tuple nat_tuple = {};
1687#endif
1688 struct nf_ct_helper_expectfn *expfn;
1639 1689
1640 if (timeout < 0) 1690 if (timeout < 0)
1641 timeout = 0; 1691 timeout = 0;
@@ -1649,9 +1699,29 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
1649 CTA_EXPECT_MASTER) < 0) 1699 CTA_EXPECT_MASTER) < 0)
1650 goto nla_put_failure; 1700 goto nla_put_failure;
1651 1701
1702#ifdef CONFIG_NF_NAT_NEEDED
1703 if (exp->saved_ip || exp->saved_proto.all) {
1704 nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT | NLA_F_NESTED);
1705 if (!nest_parms)
1706 goto nla_put_failure;
1707
1708 NLA_PUT_BE32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir));
1709
1710 nat_tuple.src.l3num = nf_ct_l3num(master);
1711 nat_tuple.src.u3.ip = exp->saved_ip;
1712 nat_tuple.dst.protonum = nf_ct_protonum(master);
1713 nat_tuple.src.u = exp->saved_proto;
1714
1715 if (ctnetlink_exp_dump_tuple(skb, &nat_tuple,
1716 CTA_EXPECT_NAT_TUPLE) < 0)
1717 goto nla_put_failure;
1718 nla_nest_end(skb, nest_parms);
1719 }
1720#endif
1652 NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)); 1721 NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout));
1653 NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)); 1722 NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp));
1654 NLA_PUT_BE32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)); 1723 NLA_PUT_BE32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags));
1724 NLA_PUT_BE32(skb, CTA_EXPECT_CLASS, htonl(exp->class));
1655 help = nfct_help(master); 1725 help = nfct_help(master);
1656 if (help) { 1726 if (help) {
1657 struct nf_conntrack_helper *helper; 1727 struct nf_conntrack_helper *helper;
@@ -1660,6 +1730,9 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
1660 if (helper) 1730 if (helper)
1661 NLA_PUT_STRING(skb, CTA_EXPECT_HELP_NAME, helper->name); 1731 NLA_PUT_STRING(skb, CTA_EXPECT_HELP_NAME, helper->name);
1662 } 1732 }
1733 expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn);
1734 if (expfn != NULL)
1735 NLA_PUT_STRING(skb, CTA_EXPECT_FN, expfn->name);
1663 1736
1664 return 0; 1737 return 0;
1665 1738
@@ -1817,6 +1890,9 @@ static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
1817 [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING }, 1890 [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING },
1818 [CTA_EXPECT_ZONE] = { .type = NLA_U16 }, 1891 [CTA_EXPECT_ZONE] = { .type = NLA_U16 },
1819 [CTA_EXPECT_FLAGS] = { .type = NLA_U32 }, 1892 [CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
1893 [CTA_EXPECT_CLASS] = { .type = NLA_U32 },
1894 [CTA_EXPECT_NAT] = { .type = NLA_NESTED },
1895 [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING },
1820}; 1896};
1821 1897
1822static int 1898static int
@@ -1834,9 +1910,11 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
1834 int err; 1910 int err;
1835 1911
1836 if (nlh->nlmsg_flags & NLM_F_DUMP) { 1912 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1837 return netlink_dump_start(ctnl, skb, nlh, 1913 struct netlink_dump_control c = {
1838 ctnetlink_exp_dump_table, 1914 .dump = ctnetlink_exp_dump_table,
1839 ctnetlink_exp_done, 0); 1915 .done = ctnetlink_exp_done,
1916 };
1917 return netlink_dump_start(ctnl, skb, nlh, &c);
1840 } 1918 }
1841 1919
1842 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); 1920 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
@@ -1990,6 +2068,41 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
1990 return -EOPNOTSUPP; 2068 return -EOPNOTSUPP;
1991} 2069}
1992 2070
2071static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
2072 [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
2073 [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
2074};
2075
2076static int
2077ctnetlink_parse_expect_nat(const struct nlattr *attr,
2078 struct nf_conntrack_expect *exp,
2079 u_int8_t u3)
2080{
2081#ifdef CONFIG_NF_NAT_NEEDED
2082 struct nlattr *tb[CTA_EXPECT_NAT_MAX+1];
2083 struct nf_conntrack_tuple nat_tuple = {};
2084 int err;
2085
2086 nla_parse_nested(tb, CTA_EXPECT_NAT_MAX, attr, exp_nat_nla_policy);
2087
2088 if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE])
2089 return -EINVAL;
2090
2091 err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
2092 &nat_tuple, CTA_EXPECT_NAT_TUPLE, u3);
2093 if (err < 0)
2094 return err;
2095
2096 exp->saved_ip = nat_tuple.src.u3.ip;
2097 exp->saved_proto = nat_tuple.src.u;
2098 exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR]));
2099
2100 return 0;
2101#else
2102 return -EOPNOTSUPP;
2103#endif
2104}
2105
1993static int 2106static int
1994ctnetlink_create_expect(struct net *net, u16 zone, 2107ctnetlink_create_expect(struct net *net, u16 zone,
1995 const struct nlattr * const cda[], 2108 const struct nlattr * const cda[],
@@ -2001,6 +2114,8 @@ ctnetlink_create_expect(struct net *net, u16 zone,
2001 struct nf_conntrack_expect *exp; 2114 struct nf_conntrack_expect *exp;
2002 struct nf_conn *ct; 2115 struct nf_conn *ct;
2003 struct nf_conn_help *help; 2116 struct nf_conn_help *help;
2117 struct nf_conntrack_helper *helper = NULL;
2118 u_int32_t class = 0;
2004 int err = 0; 2119 int err = 0;
2005 2120
2006 /* caller guarantees that those three CTA_EXPECT_* exist */ 2121 /* caller guarantees that those three CTA_EXPECT_* exist */
@@ -2019,6 +2134,40 @@ ctnetlink_create_expect(struct net *net, u16 zone,
2019 if (!h) 2134 if (!h)
2020 return -ENOENT; 2135 return -ENOENT;
2021 ct = nf_ct_tuplehash_to_ctrack(h); 2136 ct = nf_ct_tuplehash_to_ctrack(h);
2137
2138 /* Look for helper of this expectation */
2139 if (cda[CTA_EXPECT_HELP_NAME]) {
2140 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2141
2142 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
2143 nf_ct_protonum(ct));
2144 if (helper == NULL) {
2145#ifdef CONFIG_MODULES
2146 if (request_module("nfct-helper-%s", helpname) < 0) {
2147 err = -EOPNOTSUPP;
2148 goto out;
2149 }
2150
2151 helper = __nf_conntrack_helper_find(helpname,
2152 nf_ct_l3num(ct),
2153 nf_ct_protonum(ct));
2154 if (helper) {
2155 err = -EAGAIN;
2156 goto out;
2157 }
2158#endif
2159 err = -EOPNOTSUPP;
2160 goto out;
2161 }
2162 }
2163
2164 if (cda[CTA_EXPECT_CLASS] && helper) {
2165 class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
2166 if (class > helper->expect_class_max) {
2167 err = -EINVAL;
2168 goto out;
2169 }
2170 }
2022 exp = nf_ct_expect_alloc(ct); 2171 exp = nf_ct_expect_alloc(ct);
2023 if (!exp) { 2172 if (!exp) {
2024 err = -ENOMEM; 2173 err = -ENOMEM;
@@ -2045,18 +2194,35 @@ ctnetlink_create_expect(struct net *net, u16 zone,
2045 } else 2194 } else
2046 exp->flags = 0; 2195 exp->flags = 0;
2047 } 2196 }
2197 if (cda[CTA_EXPECT_FN]) {
2198 const char *name = nla_data(cda[CTA_EXPECT_FN]);
2199 struct nf_ct_helper_expectfn *expfn;
2200
2201 expfn = nf_ct_helper_expectfn_find_by_name(name);
2202 if (expfn == NULL) {
2203 err = -EINVAL;
2204 goto err_out;
2205 }
2206 exp->expectfn = expfn->expectfn;
2207 } else
2208 exp->expectfn = NULL;
2048 2209
2049 exp->class = 0; 2210 exp->class = class;
2050 exp->expectfn = NULL;
2051 exp->master = ct; 2211 exp->master = ct;
2052 exp->helper = NULL; 2212 exp->helper = helper;
2053 memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple)); 2213 memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple));
2054 memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3)); 2214 memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3));
2055 exp->mask.src.u.all = mask.src.u.all; 2215 exp->mask.src.u.all = mask.src.u.all;
2056 2216
2217 if (cda[CTA_EXPECT_NAT]) {
2218 err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
2219 exp, u3);
2220 if (err < 0)
2221 goto err_out;
2222 }
2057 err = nf_ct_expect_related_report(exp, pid, report); 2223 err = nf_ct_expect_related_report(exp, pid, report);
2224err_out:
2058 nf_ct_expect_put(exp); 2225 nf_ct_expect_put(exp);
2059
2060out: 2226out:
2061 nf_ct_put(nf_ct_tuplehash_to_ctrack(h)); 2227 nf_ct_put(nf_ct_tuplehash_to_ctrack(h));
2062 return err; 2228 return err;
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index d6dde6dc09e6..24fdce256cb0 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -423,7 +423,7 @@ static bool dccp_invert_tuple(struct nf_conntrack_tuple *inv,
423} 423}
424 424
425static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb, 425static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
426 unsigned int dataoff) 426 unsigned int dataoff, unsigned int *timeouts)
427{ 427{
428 struct net *net = nf_ct_net(ct); 428 struct net *net = nf_ct_net(ct);
429 struct dccp_net *dn; 429 struct dccp_net *dn;
@@ -472,12 +472,17 @@ static u64 dccp_ack_seq(const struct dccp_hdr *dh)
472 ntohl(dhack->dccph_ack_nr_low); 472 ntohl(dhack->dccph_ack_nr_low);
473} 473}
474 474
475static unsigned int *dccp_get_timeouts(struct net *net)
476{
477 return dccp_pernet(net)->dccp_timeout;
478}
479
475static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb, 480static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
476 unsigned int dataoff, enum ip_conntrack_info ctinfo, 481 unsigned int dataoff, enum ip_conntrack_info ctinfo,
477 u_int8_t pf, unsigned int hooknum) 482 u_int8_t pf, unsigned int hooknum,
483 unsigned int *timeouts)
478{ 484{
479 struct net *net = nf_ct_net(ct); 485 struct net *net = nf_ct_net(ct);
480 struct dccp_net *dn;
481 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 486 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
482 struct dccp_hdr _dh, *dh; 487 struct dccp_hdr _dh, *dh;
483 u_int8_t type, old_state, new_state; 488 u_int8_t type, old_state, new_state;
@@ -559,8 +564,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
559 if (new_state != old_state) 564 if (new_state != old_state)
560 nf_conntrack_event_cache(IPCT_PROTOINFO, ct); 565 nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
561 566
562 dn = dccp_pernet(net); 567 nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
563 nf_ct_refresh_acct(ct, ctinfo, skb, dn->dccp_timeout[new_state]);
564 568
565 return NF_ACCEPT; 569 return NF_ACCEPT;
566} 570}
@@ -702,8 +706,60 @@ static int dccp_nlattr_size(void)
702 return nla_total_size(0) /* CTA_PROTOINFO_DCCP */ 706 return nla_total_size(0) /* CTA_PROTOINFO_DCCP */
703 + nla_policy_len(dccp_nla_policy, CTA_PROTOINFO_DCCP_MAX + 1); 707 + nla_policy_len(dccp_nla_policy, CTA_PROTOINFO_DCCP_MAX + 1);
704} 708}
709
705#endif 710#endif
706 711
712#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
713
714#include <linux/netfilter/nfnetlink.h>
715#include <linux/netfilter/nfnetlink_cttimeout.h>
716
717static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
718{
719 struct dccp_net *dn = dccp_pernet(&init_net);
720 unsigned int *timeouts = data;
721 int i;
722
723 /* set default DCCP timeouts. */
724 for (i=0; i<CT_DCCP_MAX; i++)
725 timeouts[i] = dn->dccp_timeout[i];
726
727 /* there's a 1:1 mapping between attributes and protocol states. */
728 for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++) {
729 if (tb[i]) {
730 timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
731 }
732 }
733 return 0;
734}
735
736static int
737dccp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
738{
739 const unsigned int *timeouts = data;
740 int i;
741
742 for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++)
743 NLA_PUT_BE32(skb, i, htonl(timeouts[i] / HZ));
744
745 return 0;
746
747nla_put_failure:
748 return -ENOSPC;
749}
750
751static const struct nla_policy
752dccp_timeout_nla_policy[CTA_TIMEOUT_DCCP_MAX+1] = {
753 [CTA_TIMEOUT_DCCP_REQUEST] = { .type = NLA_U32 },
754 [CTA_TIMEOUT_DCCP_RESPOND] = { .type = NLA_U32 },
755 [CTA_TIMEOUT_DCCP_PARTOPEN] = { .type = NLA_U32 },
756 [CTA_TIMEOUT_DCCP_OPEN] = { .type = NLA_U32 },
757 [CTA_TIMEOUT_DCCP_CLOSEREQ] = { .type = NLA_U32 },
758 [CTA_TIMEOUT_DCCP_CLOSING] = { .type = NLA_U32 },
759 [CTA_TIMEOUT_DCCP_TIMEWAIT] = { .type = NLA_U32 },
760};
761#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
762
707#ifdef CONFIG_SYSCTL 763#ifdef CONFIG_SYSCTL
708/* template, data assigned later */ 764/* template, data assigned later */
709static struct ctl_table dccp_sysctl_table[] = { 765static struct ctl_table dccp_sysctl_table[] = {
@@ -767,6 +823,7 @@ static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
767 .invert_tuple = dccp_invert_tuple, 823 .invert_tuple = dccp_invert_tuple,
768 .new = dccp_new, 824 .new = dccp_new,
769 .packet = dccp_packet, 825 .packet = dccp_packet,
826 .get_timeouts = dccp_get_timeouts,
770 .error = dccp_error, 827 .error = dccp_error,
771 .print_tuple = dccp_print_tuple, 828 .print_tuple = dccp_print_tuple,
772 .print_conntrack = dccp_print_conntrack, 829 .print_conntrack = dccp_print_conntrack,
@@ -779,6 +836,15 @@ static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
779 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 836 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
780 .nla_policy = nf_ct_port_nla_policy, 837 .nla_policy = nf_ct_port_nla_policy,
781#endif 838#endif
839#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
840 .ctnl_timeout = {
841 .nlattr_to_obj = dccp_timeout_nlattr_to_obj,
842 .obj_to_nlattr = dccp_timeout_obj_to_nlattr,
843 .nlattr_max = CTA_TIMEOUT_DCCP_MAX,
844 .obj_size = sizeof(unsigned int) * CT_DCCP_MAX,
845 .nla_policy = dccp_timeout_nla_policy,
846 },
847#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
782}; 848};
783 849
784static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = { 850static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
@@ -789,6 +855,7 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
789 .invert_tuple = dccp_invert_tuple, 855 .invert_tuple = dccp_invert_tuple,
790 .new = dccp_new, 856 .new = dccp_new,
791 .packet = dccp_packet, 857 .packet = dccp_packet,
858 .get_timeouts = dccp_get_timeouts,
792 .error = dccp_error, 859 .error = dccp_error,
793 .print_tuple = dccp_print_tuple, 860 .print_tuple = dccp_print_tuple,
794 .print_conntrack = dccp_print_conntrack, 861 .print_conntrack = dccp_print_conntrack,
@@ -801,6 +868,15 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
801 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 868 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
802 .nla_policy = nf_ct_port_nla_policy, 869 .nla_policy = nf_ct_port_nla_policy,
803#endif 870#endif
871#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
872 .ctnl_timeout = {
873 .nlattr_to_obj = dccp_timeout_nlattr_to_obj,
874 .obj_to_nlattr = dccp_timeout_obj_to_nlattr,
875 .nlattr_max = CTA_TIMEOUT_DCCP_MAX,
876 .obj_size = sizeof(unsigned int) * CT_DCCP_MAX,
877 .nla_policy = dccp_timeout_nla_policy,
878 },
879#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
804}; 880};
805 881
806static __net_init int dccp_net_init(struct net *net) 882static __net_init int dccp_net_init(struct net *net)
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index e2091d0c7a2f..835e24c58f0d 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -40,25 +40,70 @@ static int generic_print_tuple(struct seq_file *s,
40 return 0; 40 return 0;
41} 41}
42 42
43static unsigned int *generic_get_timeouts(struct net *net)
44{
45 return &nf_ct_generic_timeout;
46}
47
43/* Returns verdict for packet, or -1 for invalid. */ 48/* Returns verdict for packet, or -1 for invalid. */
44static int packet(struct nf_conn *ct, 49static int generic_packet(struct nf_conn *ct,
45 const struct sk_buff *skb, 50 const struct sk_buff *skb,
46 unsigned int dataoff, 51 unsigned int dataoff,
47 enum ip_conntrack_info ctinfo, 52 enum ip_conntrack_info ctinfo,
48 u_int8_t pf, 53 u_int8_t pf,
49 unsigned int hooknum) 54 unsigned int hooknum,
55 unsigned int *timeout)
50{ 56{
51 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_generic_timeout); 57 nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
52 return NF_ACCEPT; 58 return NF_ACCEPT;
53} 59}
54 60
55/* Called when a new connection for this protocol found. */ 61/* Called when a new connection for this protocol found. */
56static bool new(struct nf_conn *ct, const struct sk_buff *skb, 62static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
57 unsigned int dataoff) 63 unsigned int dataoff, unsigned int *timeouts)
58{ 64{
59 return true; 65 return true;
60} 66}
61 67
68#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
69
70#include <linux/netfilter/nfnetlink.h>
71#include <linux/netfilter/nfnetlink_cttimeout.h>
72
73static int generic_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
74{
75 unsigned int *timeout = data;
76
77 if (tb[CTA_TIMEOUT_GENERIC_TIMEOUT])
78 *timeout =
79 ntohl(nla_get_be32(tb[CTA_TIMEOUT_GENERIC_TIMEOUT])) * HZ;
80 else {
81 /* Set default generic timeout. */
82 *timeout = nf_ct_generic_timeout;
83 }
84
85 return 0;
86}
87
88static int
89generic_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
90{
91 const unsigned int *timeout = data;
92
93 NLA_PUT_BE32(skb, CTA_TIMEOUT_GENERIC_TIMEOUT, htonl(*timeout / HZ));
94
95 return 0;
96
97nla_put_failure:
98 return -ENOSPC;
99}
100
101static const struct nla_policy
102generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = {
103 [CTA_TIMEOUT_GENERIC_TIMEOUT] = { .type = NLA_U32 },
104};
105#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
106
62#ifdef CONFIG_SYSCTL 107#ifdef CONFIG_SYSCTL
63static struct ctl_table_header *generic_sysctl_header; 108static struct ctl_table_header *generic_sysctl_header;
64static struct ctl_table generic_sysctl_table[] = { 109static struct ctl_table generic_sysctl_table[] = {
@@ -93,8 +138,18 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_generic __read_mostly =
93 .pkt_to_tuple = generic_pkt_to_tuple, 138 .pkt_to_tuple = generic_pkt_to_tuple,
94 .invert_tuple = generic_invert_tuple, 139 .invert_tuple = generic_invert_tuple,
95 .print_tuple = generic_print_tuple, 140 .print_tuple = generic_print_tuple,
96 .packet = packet, 141 .packet = generic_packet,
97 .new = new, 142 .get_timeouts = generic_get_timeouts,
143 .new = generic_new,
144#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
145 .ctnl_timeout = {
146 .nlattr_to_obj = generic_timeout_nlattr_to_obj,
147 .obj_to_nlattr = generic_timeout_obj_to_nlattr,
148 .nlattr_max = CTA_TIMEOUT_GENERIC_MAX,
149 .obj_size = sizeof(unsigned int),
150 .nla_policy = generic_timeout_nla_policy,
151 },
152#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
98#ifdef CONFIG_SYSCTL 153#ifdef CONFIG_SYSCTL
99 .ctl_table_header = &generic_sysctl_header, 154 .ctl_table_header = &generic_sysctl_header,
100 .ctl_table = generic_sysctl_table, 155 .ctl_table = generic_sysctl_table,
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index f0338791b822..659648c4b14a 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -41,8 +41,16 @@
41#include <linux/netfilter/nf_conntrack_proto_gre.h> 41#include <linux/netfilter/nf_conntrack_proto_gre.h>
42#include <linux/netfilter/nf_conntrack_pptp.h> 42#include <linux/netfilter/nf_conntrack_pptp.h>
43 43
44#define GRE_TIMEOUT (30 * HZ) 44enum grep_conntrack {
45#define GRE_STREAM_TIMEOUT (180 * HZ) 45 GRE_CT_UNREPLIED,
46 GRE_CT_REPLIED,
47 GRE_CT_MAX
48};
49
50static unsigned int gre_timeouts[GRE_CT_MAX] = {
51 [GRE_CT_UNREPLIED] = 30*HZ,
52 [GRE_CT_REPLIED] = 180*HZ,
53};
46 54
47static int proto_gre_net_id __read_mostly; 55static int proto_gre_net_id __read_mostly;
48struct netns_proto_gre { 56struct netns_proto_gre {
@@ -227,13 +235,19 @@ static int gre_print_conntrack(struct seq_file *s, struct nf_conn *ct)
227 (ct->proto.gre.stream_timeout / HZ)); 235 (ct->proto.gre.stream_timeout / HZ));
228} 236}
229 237
238static unsigned int *gre_get_timeouts(struct net *net)
239{
240 return gre_timeouts;
241}
242
230/* Returns verdict for packet, and may modify conntrack */ 243/* Returns verdict for packet, and may modify conntrack */
231static int gre_packet(struct nf_conn *ct, 244static int gre_packet(struct nf_conn *ct,
232 const struct sk_buff *skb, 245 const struct sk_buff *skb,
233 unsigned int dataoff, 246 unsigned int dataoff,
234 enum ip_conntrack_info ctinfo, 247 enum ip_conntrack_info ctinfo,
235 u_int8_t pf, 248 u_int8_t pf,
236 unsigned int hooknum) 249 unsigned int hooknum,
250 unsigned int *timeouts)
237{ 251{
238 /* If we've seen traffic both ways, this is a GRE connection. 252 /* If we've seen traffic both ways, this is a GRE connection.
239 * Extend timeout. */ 253 * Extend timeout. */
@@ -252,15 +266,15 @@ static int gre_packet(struct nf_conn *ct,
252 266
253/* Called when a new connection for this protocol found. */ 267/* Called when a new connection for this protocol found. */
254static bool gre_new(struct nf_conn *ct, const struct sk_buff *skb, 268static bool gre_new(struct nf_conn *ct, const struct sk_buff *skb,
255 unsigned int dataoff) 269 unsigned int dataoff, unsigned int *timeouts)
256{ 270{
257 pr_debug(": "); 271 pr_debug(": ");
258 nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 272 nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
259 273
260 /* initialize to sane value. Ideally a conntrack helper 274 /* initialize to sane value. Ideally a conntrack helper
261 * (e.g. in case of pptp) is increasing them */ 275 * (e.g. in case of pptp) is increasing them */
262 ct->proto.gre.stream_timeout = GRE_STREAM_TIMEOUT; 276 ct->proto.gre.stream_timeout = timeouts[GRE_CT_REPLIED];
263 ct->proto.gre.timeout = GRE_TIMEOUT; 277 ct->proto.gre.timeout = timeouts[GRE_CT_UNREPLIED];
264 278
265 return true; 279 return true;
266} 280}
@@ -278,6 +292,52 @@ static void gre_destroy(struct nf_conn *ct)
278 nf_ct_gre_keymap_destroy(master); 292 nf_ct_gre_keymap_destroy(master);
279} 293}
280 294
295#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
296
297#include <linux/netfilter/nfnetlink.h>
298#include <linux/netfilter/nfnetlink_cttimeout.h>
299
300static int gre_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
301{
302 unsigned int *timeouts = data;
303
304 /* set default timeouts for GRE. */
305 timeouts[GRE_CT_UNREPLIED] = gre_timeouts[GRE_CT_UNREPLIED];
306 timeouts[GRE_CT_REPLIED] = gre_timeouts[GRE_CT_REPLIED];
307
308 if (tb[CTA_TIMEOUT_GRE_UNREPLIED]) {
309 timeouts[GRE_CT_UNREPLIED] =
310 ntohl(nla_get_be32(tb[CTA_TIMEOUT_GRE_UNREPLIED])) * HZ;
311 }
312 if (tb[CTA_TIMEOUT_GRE_REPLIED]) {
313 timeouts[GRE_CT_REPLIED] =
314 ntohl(nla_get_be32(tb[CTA_TIMEOUT_GRE_REPLIED])) * HZ;
315 }
316 return 0;
317}
318
319static int
320gre_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
321{
322 const unsigned int *timeouts = data;
323
324 NLA_PUT_BE32(skb, CTA_TIMEOUT_GRE_UNREPLIED,
325 htonl(timeouts[GRE_CT_UNREPLIED] / HZ));
326 NLA_PUT_BE32(skb, CTA_TIMEOUT_GRE_REPLIED,
327 htonl(timeouts[GRE_CT_REPLIED] / HZ));
328 return 0;
329
330nla_put_failure:
331 return -ENOSPC;
332}
333
334static const struct nla_policy
335gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = {
336 [CTA_TIMEOUT_GRE_UNREPLIED] = { .type = NLA_U32 },
337 [CTA_TIMEOUT_GRE_REPLIED] = { .type = NLA_U32 },
338};
339#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
340
281/* protocol helper struct */ 341/* protocol helper struct */
282static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = { 342static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
283 .l3proto = AF_INET, 343 .l3proto = AF_INET,
@@ -287,6 +347,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
287 .invert_tuple = gre_invert_tuple, 347 .invert_tuple = gre_invert_tuple,
288 .print_tuple = gre_print_tuple, 348 .print_tuple = gre_print_tuple,
289 .print_conntrack = gre_print_conntrack, 349 .print_conntrack = gre_print_conntrack,
350 .get_timeouts = gre_get_timeouts,
290 .packet = gre_packet, 351 .packet = gre_packet,
291 .new = gre_new, 352 .new = gre_new,
292 .destroy = gre_destroy, 353 .destroy = gre_destroy,
@@ -297,6 +358,15 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
297 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 358 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
298 .nla_policy = nf_ct_port_nla_policy, 359 .nla_policy = nf_ct_port_nla_policy,
299#endif 360#endif
361#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
362 .ctnl_timeout = {
363 .nlattr_to_obj = gre_timeout_nlattr_to_obj,
364 .obj_to_nlattr = gre_timeout_obj_to_nlattr,
365 .nlattr_max = CTA_TIMEOUT_GRE_MAX,
366 .obj_size = sizeof(unsigned int) * GRE_CT_MAX,
367 .nla_policy = gre_timeout_nla_policy,
368 },
369#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
300}; 370};
301 371
302static int proto_gre_net_init(struct net *net) 372static int proto_gre_net_init(struct net *net)
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index afa69136061a..72b5088592dc 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -279,13 +279,19 @@ static int sctp_new_state(enum ip_conntrack_dir dir,
279 return sctp_conntracks[dir][i][cur_state]; 279 return sctp_conntracks[dir][i][cur_state];
280} 280}
281 281
282static unsigned int *sctp_get_timeouts(struct net *net)
283{
284 return sctp_timeouts;
285}
286
282/* Returns verdict for packet, or -NF_ACCEPT for invalid. */ 287/* Returns verdict for packet, or -NF_ACCEPT for invalid. */
283static int sctp_packet(struct nf_conn *ct, 288static int sctp_packet(struct nf_conn *ct,
284 const struct sk_buff *skb, 289 const struct sk_buff *skb,
285 unsigned int dataoff, 290 unsigned int dataoff,
286 enum ip_conntrack_info ctinfo, 291 enum ip_conntrack_info ctinfo,
287 u_int8_t pf, 292 u_int8_t pf,
288 unsigned int hooknum) 293 unsigned int hooknum,
294 unsigned int *timeouts)
289{ 295{
290 enum sctp_conntrack new_state, old_state; 296 enum sctp_conntrack new_state, old_state;
291 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 297 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
@@ -370,7 +376,7 @@ static int sctp_packet(struct nf_conn *ct,
370 } 376 }
371 spin_unlock_bh(&ct->lock); 377 spin_unlock_bh(&ct->lock);
372 378
373 nf_ct_refresh_acct(ct, ctinfo, skb, sctp_timeouts[new_state]); 379 nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
374 380
375 if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED && 381 if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED &&
376 dir == IP_CT_DIR_REPLY && 382 dir == IP_CT_DIR_REPLY &&
@@ -390,7 +396,7 @@ out:
390 396
391/* Called when a new connection for this protocol found. */ 397/* Called when a new connection for this protocol found. */
392static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb, 398static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
393 unsigned int dataoff) 399 unsigned int dataoff, unsigned int *timeouts)
394{ 400{
395 enum sctp_conntrack new_state; 401 enum sctp_conntrack new_state;
396 const struct sctphdr *sh; 402 const struct sctphdr *sh;
@@ -543,6 +549,57 @@ static int sctp_nlattr_size(void)
543} 549}
544#endif 550#endif
545 551
552#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
553
554#include <linux/netfilter/nfnetlink.h>
555#include <linux/netfilter/nfnetlink_cttimeout.h>
556
557static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
558{
559 unsigned int *timeouts = data;
560 int i;
561
562 /* set default SCTP timeouts. */
563 for (i=0; i<SCTP_CONNTRACK_MAX; i++)
564 timeouts[i] = sctp_timeouts[i];
565
566 /* there's a 1:1 mapping between attributes and protocol states. */
567 for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++) {
568 if (tb[i]) {
569 timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
570 }
571 }
572 return 0;
573}
574
575static int
576sctp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
577{
578 const unsigned int *timeouts = data;
579 int i;
580
581 for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++)
582 NLA_PUT_BE32(skb, i, htonl(timeouts[i] / HZ));
583
584 return 0;
585
586nla_put_failure:
587 return -ENOSPC;
588}
589
590static const struct nla_policy
591sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = {
592 [CTA_TIMEOUT_SCTP_CLOSED] = { .type = NLA_U32 },
593 [CTA_TIMEOUT_SCTP_COOKIE_WAIT] = { .type = NLA_U32 },
594 [CTA_TIMEOUT_SCTP_COOKIE_ECHOED] = { .type = NLA_U32 },
595 [CTA_TIMEOUT_SCTP_ESTABLISHED] = { .type = NLA_U32 },
596 [CTA_TIMEOUT_SCTP_SHUTDOWN_SENT] = { .type = NLA_U32 },
597 [CTA_TIMEOUT_SCTP_SHUTDOWN_RECD] = { .type = NLA_U32 },
598 [CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT] = { .type = NLA_U32 },
599};
600#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
601
602
546#ifdef CONFIG_SYSCTL 603#ifdef CONFIG_SYSCTL
547static unsigned int sctp_sysctl_table_users; 604static unsigned int sctp_sysctl_table_users;
548static struct ctl_table_header *sctp_sysctl_header; 605static struct ctl_table_header *sctp_sysctl_header;
@@ -664,6 +721,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
664 .print_tuple = sctp_print_tuple, 721 .print_tuple = sctp_print_tuple,
665 .print_conntrack = sctp_print_conntrack, 722 .print_conntrack = sctp_print_conntrack,
666 .packet = sctp_packet, 723 .packet = sctp_packet,
724 .get_timeouts = sctp_get_timeouts,
667 .new = sctp_new, 725 .new = sctp_new,
668 .me = THIS_MODULE, 726 .me = THIS_MODULE,
669#if IS_ENABLED(CONFIG_NF_CT_NETLINK) 727#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -675,6 +733,15 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
675 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 733 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
676 .nla_policy = nf_ct_port_nla_policy, 734 .nla_policy = nf_ct_port_nla_policy,
677#endif 735#endif
736#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
737 .ctnl_timeout = {
738 .nlattr_to_obj = sctp_timeout_nlattr_to_obj,
739 .obj_to_nlattr = sctp_timeout_obj_to_nlattr,
740 .nlattr_max = CTA_TIMEOUT_SCTP_MAX,
741 .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
742 .nla_policy = sctp_timeout_nla_policy,
743 },
744#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
678#ifdef CONFIG_SYSCTL 745#ifdef CONFIG_SYSCTL
679 .ctl_table_users = &sctp_sysctl_table_users, 746 .ctl_table_users = &sctp_sysctl_table_users,
680 .ctl_table_header = &sctp_sysctl_header, 747 .ctl_table_header = &sctp_sysctl_header,
@@ -694,6 +761,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
694 .print_tuple = sctp_print_tuple, 761 .print_tuple = sctp_print_tuple,
695 .print_conntrack = sctp_print_conntrack, 762 .print_conntrack = sctp_print_conntrack,
696 .packet = sctp_packet, 763 .packet = sctp_packet,
764 .get_timeouts = sctp_get_timeouts,
697 .new = sctp_new, 765 .new = sctp_new,
698 .me = THIS_MODULE, 766 .me = THIS_MODULE,
699#if IS_ENABLED(CONFIG_NF_CT_NETLINK) 767#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -704,6 +772,15 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
704 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, 772 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
705 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 773 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
706 .nla_policy = nf_ct_port_nla_policy, 774 .nla_policy = nf_ct_port_nla_policy,
775#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
776 .ctnl_timeout = {
777 .nlattr_to_obj = sctp_timeout_nlattr_to_obj,
778 .obj_to_nlattr = sctp_timeout_obj_to_nlattr,
779 .nlattr_max = CTA_TIMEOUT_SCTP_MAX,
780 .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
781 .nla_policy = sctp_timeout_nla_policy,
782 },
783#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
707#endif 784#endif
708#ifdef CONFIG_SYSCTL 785#ifdef CONFIG_SYSCTL
709 .ctl_table_users = &sctp_sysctl_table_users, 786 .ctl_table_users = &sctp_sysctl_table_users,
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 97b9f3ebf28c..361eade62a09 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -64,13 +64,7 @@ static const char *const tcp_conntrack_names[] = {
64#define HOURS * 60 MINS 64#define HOURS * 60 MINS
65#define DAYS * 24 HOURS 65#define DAYS * 24 HOURS
66 66
67/* RFC1122 says the R2 limit should be at least 100 seconds. 67static unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] __read_mostly = {
68 Linux uses 15 packets as limit, which corresponds
69 to ~13-30min depending on RTO. */
70static unsigned int nf_ct_tcp_timeout_max_retrans __read_mostly = 5 MINS;
71static unsigned int nf_ct_tcp_timeout_unacknowledged __read_mostly = 5 MINS;
72
73static unsigned int tcp_timeouts[TCP_CONNTRACK_MAX] __read_mostly = {
74 [TCP_CONNTRACK_SYN_SENT] = 2 MINS, 68 [TCP_CONNTRACK_SYN_SENT] = 2 MINS,
75 [TCP_CONNTRACK_SYN_RECV] = 60 SECS, 69 [TCP_CONNTRACK_SYN_RECV] = 60 SECS,
76 [TCP_CONNTRACK_ESTABLISHED] = 5 DAYS, 70 [TCP_CONNTRACK_ESTABLISHED] = 5 DAYS,
@@ -80,6 +74,11 @@ static unsigned int tcp_timeouts[TCP_CONNTRACK_MAX] __read_mostly = {
80 [TCP_CONNTRACK_TIME_WAIT] = 2 MINS, 74 [TCP_CONNTRACK_TIME_WAIT] = 2 MINS,
81 [TCP_CONNTRACK_CLOSE] = 10 SECS, 75 [TCP_CONNTRACK_CLOSE] = 10 SECS,
82 [TCP_CONNTRACK_SYN_SENT2] = 2 MINS, 76 [TCP_CONNTRACK_SYN_SENT2] = 2 MINS,
77/* RFC1122 says the R2 limit should be at least 100 seconds.
78 Linux uses 15 packets as limit, which corresponds
79 to ~13-30min depending on RTO. */
80 [TCP_CONNTRACK_RETRANS] = 5 MINS,
81 [TCP_CONNTRACK_UNACK] = 5 MINS,
83}; 82};
84 83
85#define sNO TCP_CONNTRACK_NONE 84#define sNO TCP_CONNTRACK_NONE
@@ -814,13 +813,19 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl,
814 return NF_ACCEPT; 813 return NF_ACCEPT;
815} 814}
816 815
816static unsigned int *tcp_get_timeouts(struct net *net)
817{
818 return tcp_timeouts;
819}
820
817/* Returns verdict for packet, or -1 for invalid. */ 821/* Returns verdict for packet, or -1 for invalid. */
818static int tcp_packet(struct nf_conn *ct, 822static int tcp_packet(struct nf_conn *ct,
819 const struct sk_buff *skb, 823 const struct sk_buff *skb,
820 unsigned int dataoff, 824 unsigned int dataoff,
821 enum ip_conntrack_info ctinfo, 825 enum ip_conntrack_info ctinfo,
822 u_int8_t pf, 826 u_int8_t pf,
823 unsigned int hooknum) 827 unsigned int hooknum,
828 unsigned int *timeouts)
824{ 829{
825 struct net *net = nf_ct_net(ct); 830 struct net *net = nf_ct_net(ct);
826 struct nf_conntrack_tuple *tuple; 831 struct nf_conntrack_tuple *tuple;
@@ -1015,14 +1020,14 @@ static int tcp_packet(struct nf_conn *ct,
1015 ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT; 1020 ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
1016 1021
1017 if (ct->proto.tcp.retrans >= nf_ct_tcp_max_retrans && 1022 if (ct->proto.tcp.retrans >= nf_ct_tcp_max_retrans &&
1018 tcp_timeouts[new_state] > nf_ct_tcp_timeout_max_retrans) 1023 timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
1019 timeout = nf_ct_tcp_timeout_max_retrans; 1024 timeout = timeouts[TCP_CONNTRACK_RETRANS];
1020 else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) & 1025 else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
1021 IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED && 1026 IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
1022 tcp_timeouts[new_state] > nf_ct_tcp_timeout_unacknowledged) 1027 timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
1023 timeout = nf_ct_tcp_timeout_unacknowledged; 1028 timeout = timeouts[TCP_CONNTRACK_UNACK];
1024 else 1029 else
1025 timeout = tcp_timeouts[new_state]; 1030 timeout = timeouts[new_state];
1026 spin_unlock_bh(&ct->lock); 1031 spin_unlock_bh(&ct->lock);
1027 1032
1028 if (new_state != old_state) 1033 if (new_state != old_state)
@@ -1054,7 +1059,7 @@ static int tcp_packet(struct nf_conn *ct,
1054 1059
1055/* Called when a new connection for this protocol found. */ 1060/* Called when a new connection for this protocol found. */
1056static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb, 1061static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
1057 unsigned int dataoff) 1062 unsigned int dataoff, unsigned int *timeouts)
1058{ 1063{
1059 enum tcp_conntrack new_state; 1064 enum tcp_conntrack new_state;
1060 const struct tcphdr *th; 1065 const struct tcphdr *th;
@@ -1239,6 +1244,113 @@ static int tcp_nlattr_tuple_size(void)
1239} 1244}
1240#endif 1245#endif
1241 1246
1247#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
1248
1249#include <linux/netfilter/nfnetlink.h>
1250#include <linux/netfilter/nfnetlink_cttimeout.h>
1251
1252static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
1253{
1254 unsigned int *timeouts = data;
1255 int i;
1256
1257 /* set default TCP timeouts. */
1258 for (i=0; i<TCP_CONNTRACK_TIMEOUT_MAX; i++)
1259 timeouts[i] = tcp_timeouts[i];
1260
1261 if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) {
1262 timeouts[TCP_CONNTRACK_SYN_SENT] =
1263 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ;
1264 }
1265 if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
1266 timeouts[TCP_CONNTRACK_SYN_RECV] =
1267 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ;
1268 }
1269 if (tb[CTA_TIMEOUT_TCP_ESTABLISHED]) {
1270 timeouts[TCP_CONNTRACK_ESTABLISHED] =
1271 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_ESTABLISHED]))*HZ;
1272 }
1273 if (tb[CTA_TIMEOUT_TCP_FIN_WAIT]) {
1274 timeouts[TCP_CONNTRACK_FIN_WAIT] =
1275 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_FIN_WAIT]))*HZ;
1276 }
1277 if (tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]) {
1278 timeouts[TCP_CONNTRACK_CLOSE_WAIT] =
1279 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]))*HZ;
1280 }
1281 if (tb[CTA_TIMEOUT_TCP_LAST_ACK]) {
1282 timeouts[TCP_CONNTRACK_LAST_ACK] =
1283 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_LAST_ACK]))*HZ;
1284 }
1285 if (tb[CTA_TIMEOUT_TCP_TIME_WAIT]) {
1286 timeouts[TCP_CONNTRACK_TIME_WAIT] =
1287 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_TIME_WAIT]))*HZ;
1288 }
1289 if (tb[CTA_TIMEOUT_TCP_CLOSE]) {
1290 timeouts[TCP_CONNTRACK_CLOSE] =
1291 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE]))*HZ;
1292 }
1293 if (tb[CTA_TIMEOUT_TCP_SYN_SENT2]) {
1294 timeouts[TCP_CONNTRACK_SYN_SENT2] =
1295 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT2]))*HZ;
1296 }
1297 if (tb[CTA_TIMEOUT_TCP_RETRANS]) {
1298 timeouts[TCP_CONNTRACK_RETRANS] =
1299 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_RETRANS]))*HZ;
1300 }
1301 if (tb[CTA_TIMEOUT_TCP_UNACK]) {
1302 timeouts[TCP_CONNTRACK_UNACK] =
1303 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ;
1304 }
1305 return 0;
1306}
1307
1308static int
1309tcp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
1310{
1311 const unsigned int *timeouts = data;
1312
1313 NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
1314 htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ));
1315 NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
1316 htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ));
1317 NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
1318 htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ));
1319 NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
1320 htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ));
1321 NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
1322 htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ));
1323 NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
1324 htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ));
1325 NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
1326 htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ));
1327 NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_CLOSE,
1328 htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ));
1329 NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
1330 htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ));
1331 NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_RETRANS,
1332 htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ));
1333 NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_UNACK,
1334 htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ));
1335 return 0;
1336
1337nla_put_failure:
1338 return -ENOSPC;
1339}
1340
1341static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
1342 [CTA_TIMEOUT_TCP_SYN_SENT] = { .type = NLA_U32 },
1343 [CTA_TIMEOUT_TCP_SYN_RECV] = { .type = NLA_U32 },
1344 [CTA_TIMEOUT_TCP_ESTABLISHED] = { .type = NLA_U32 },
1345 [CTA_TIMEOUT_TCP_FIN_WAIT] = { .type = NLA_U32 },
1346 [CTA_TIMEOUT_TCP_CLOSE_WAIT] = { .type = NLA_U32 },
1347 [CTA_TIMEOUT_TCP_LAST_ACK] = { .type = NLA_U32 },
1348 [CTA_TIMEOUT_TCP_TIME_WAIT] = { .type = NLA_U32 },
1349 [CTA_TIMEOUT_TCP_CLOSE] = { .type = NLA_U32 },
1350 [CTA_TIMEOUT_TCP_SYN_SENT2] = { .type = NLA_U32 },
1351};
1352#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
1353
1242#ifdef CONFIG_SYSCTL 1354#ifdef CONFIG_SYSCTL
1243static unsigned int tcp_sysctl_table_users; 1355static unsigned int tcp_sysctl_table_users;
1244static struct ctl_table_header *tcp_sysctl_header; 1356static struct ctl_table_header *tcp_sysctl_header;
@@ -1301,14 +1413,14 @@ static struct ctl_table tcp_sysctl_table[] = {
1301 }, 1413 },
1302 { 1414 {
1303 .procname = "nf_conntrack_tcp_timeout_max_retrans", 1415 .procname = "nf_conntrack_tcp_timeout_max_retrans",
1304 .data = &nf_ct_tcp_timeout_max_retrans, 1416 .data = &tcp_timeouts[TCP_CONNTRACK_RETRANS],
1305 .maxlen = sizeof(unsigned int), 1417 .maxlen = sizeof(unsigned int),
1306 .mode = 0644, 1418 .mode = 0644,
1307 .proc_handler = proc_dointvec_jiffies, 1419 .proc_handler = proc_dointvec_jiffies,
1308 }, 1420 },
1309 { 1421 {
1310 .procname = "nf_conntrack_tcp_timeout_unacknowledged", 1422 .procname = "nf_conntrack_tcp_timeout_unacknowledged",
1311 .data = &nf_ct_tcp_timeout_unacknowledged, 1423 .data = &tcp_timeouts[TCP_CONNTRACK_UNACK],
1312 .maxlen = sizeof(unsigned int), 1424 .maxlen = sizeof(unsigned int),
1313 .mode = 0644, 1425 .mode = 0644,
1314 .proc_handler = proc_dointvec_jiffies, 1426 .proc_handler = proc_dointvec_jiffies,
@@ -1404,7 +1516,7 @@ static struct ctl_table tcp_compat_sysctl_table[] = {
1404 }, 1516 },
1405 { 1517 {
1406 .procname = "ip_conntrack_tcp_timeout_max_retrans", 1518 .procname = "ip_conntrack_tcp_timeout_max_retrans",
1407 .data = &nf_ct_tcp_timeout_max_retrans, 1519 .data = &tcp_timeouts[TCP_CONNTRACK_RETRANS],
1408 .maxlen = sizeof(unsigned int), 1520 .maxlen = sizeof(unsigned int),
1409 .mode = 0644, 1521 .mode = 0644,
1410 .proc_handler = proc_dointvec_jiffies, 1522 .proc_handler = proc_dointvec_jiffies,
@@ -1445,6 +1557,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
1445 .print_tuple = tcp_print_tuple, 1557 .print_tuple = tcp_print_tuple,
1446 .print_conntrack = tcp_print_conntrack, 1558 .print_conntrack = tcp_print_conntrack,
1447 .packet = tcp_packet, 1559 .packet = tcp_packet,
1560 .get_timeouts = tcp_get_timeouts,
1448 .new = tcp_new, 1561 .new = tcp_new,
1449 .error = tcp_error, 1562 .error = tcp_error,
1450#if IS_ENABLED(CONFIG_NF_CT_NETLINK) 1563#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -1456,6 +1569,16 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
1456 .nlattr_tuple_size = tcp_nlattr_tuple_size, 1569 .nlattr_tuple_size = tcp_nlattr_tuple_size,
1457 .nla_policy = nf_ct_port_nla_policy, 1570 .nla_policy = nf_ct_port_nla_policy,
1458#endif 1571#endif
1572#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
1573 .ctnl_timeout = {
1574 .nlattr_to_obj = tcp_timeout_nlattr_to_obj,
1575 .obj_to_nlattr = tcp_timeout_obj_to_nlattr,
1576 .nlattr_max = CTA_TIMEOUT_TCP_MAX,
1577 .obj_size = sizeof(unsigned int) *
1578 TCP_CONNTRACK_TIMEOUT_MAX,
1579 .nla_policy = tcp_timeout_nla_policy,
1580 },
1581#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
1459#ifdef CONFIG_SYSCTL 1582#ifdef CONFIG_SYSCTL
1460 .ctl_table_users = &tcp_sysctl_table_users, 1583 .ctl_table_users = &tcp_sysctl_table_users,
1461 .ctl_table_header = &tcp_sysctl_header, 1584 .ctl_table_header = &tcp_sysctl_header,
@@ -1477,6 +1600,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
1477 .print_tuple = tcp_print_tuple, 1600 .print_tuple = tcp_print_tuple,
1478 .print_conntrack = tcp_print_conntrack, 1601 .print_conntrack = tcp_print_conntrack,
1479 .packet = tcp_packet, 1602 .packet = tcp_packet,
1603 .get_timeouts = tcp_get_timeouts,
1480 .new = tcp_new, 1604 .new = tcp_new,
1481 .error = tcp_error, 1605 .error = tcp_error,
1482#if IS_ENABLED(CONFIG_NF_CT_NETLINK) 1606#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -1488,6 +1612,16 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
1488 .nlattr_tuple_size = tcp_nlattr_tuple_size, 1612 .nlattr_tuple_size = tcp_nlattr_tuple_size,
1489 .nla_policy = nf_ct_port_nla_policy, 1613 .nla_policy = nf_ct_port_nla_policy,
1490#endif 1614#endif
1615#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
1616 .ctnl_timeout = {
1617 .nlattr_to_obj = tcp_timeout_nlattr_to_obj,
1618 .obj_to_nlattr = tcp_timeout_obj_to_nlattr,
1619 .nlattr_max = CTA_TIMEOUT_TCP_MAX,
1620 .obj_size = sizeof(unsigned int) *
1621 TCP_CONNTRACK_TIMEOUT_MAX,
1622 .nla_policy = tcp_timeout_nla_policy,
1623 },
1624#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
1491#ifdef CONFIG_SYSCTL 1625#ifdef CONFIG_SYSCTL
1492 .ctl_table_users = &tcp_sysctl_table_users, 1626 .ctl_table_users = &tcp_sysctl_table_users,
1493 .ctl_table_header = &tcp_sysctl_header, 1627 .ctl_table_header = &tcp_sysctl_header,
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 5f35757fbff0..a9073dc1548d 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -25,8 +25,16 @@
25#include <net/netfilter/ipv4/nf_conntrack_ipv4.h> 25#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
26#include <net/netfilter/ipv6/nf_conntrack_ipv6.h> 26#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
27 27
28static unsigned int nf_ct_udp_timeout __read_mostly = 30*HZ; 28enum udp_conntrack {
29static unsigned int nf_ct_udp_timeout_stream __read_mostly = 180*HZ; 29 UDP_CT_UNREPLIED,
30 UDP_CT_REPLIED,
31 UDP_CT_MAX
32};
33
34static unsigned int udp_timeouts[UDP_CT_MAX] = {
35 [UDP_CT_UNREPLIED] = 30*HZ,
36 [UDP_CT_REPLIED] = 180*HZ,
37};
30 38
31static bool udp_pkt_to_tuple(const struct sk_buff *skb, 39static bool udp_pkt_to_tuple(const struct sk_buff *skb,
32 unsigned int dataoff, 40 unsigned int dataoff,
@@ -63,30 +71,38 @@ static int udp_print_tuple(struct seq_file *s,
63 ntohs(tuple->dst.u.udp.port)); 71 ntohs(tuple->dst.u.udp.port));
64} 72}
65 73
74static unsigned int *udp_get_timeouts(struct net *net)
75{
76 return udp_timeouts;
77}
78
66/* Returns verdict for packet, and may modify conntracktype */ 79/* Returns verdict for packet, and may modify conntracktype */
67static int udp_packet(struct nf_conn *ct, 80static int udp_packet(struct nf_conn *ct,
68 const struct sk_buff *skb, 81 const struct sk_buff *skb,
69 unsigned int dataoff, 82 unsigned int dataoff,
70 enum ip_conntrack_info ctinfo, 83 enum ip_conntrack_info ctinfo,
71 u_int8_t pf, 84 u_int8_t pf,
72 unsigned int hooknum) 85 unsigned int hooknum,
86 unsigned int *timeouts)
73{ 87{
74 /* If we've seen traffic both ways, this is some kind of UDP 88 /* If we've seen traffic both ways, this is some kind of UDP
75 stream. Extend timeout. */ 89 stream. Extend timeout. */
76 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { 90 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
77 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout_stream); 91 nf_ct_refresh_acct(ct, ctinfo, skb,
92 timeouts[UDP_CT_REPLIED]);
78 /* Also, more likely to be important, and not a probe */ 93 /* Also, more likely to be important, and not a probe */
79 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) 94 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
80 nf_conntrack_event_cache(IPCT_ASSURED, ct); 95 nf_conntrack_event_cache(IPCT_ASSURED, ct);
81 } else 96 } else {
82 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout); 97 nf_ct_refresh_acct(ct, ctinfo, skb,
83 98 timeouts[UDP_CT_UNREPLIED]);
99 }
84 return NF_ACCEPT; 100 return NF_ACCEPT;
85} 101}
86 102
87/* Called when a new connection for this protocol found. */ 103/* Called when a new connection for this protocol found. */
88static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb, 104static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb,
89 unsigned int dataoff) 105 unsigned int dataoff, unsigned int *timeouts)
90{ 106{
91 return true; 107 return true;
92} 108}
@@ -136,20 +152,66 @@ static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
136 return NF_ACCEPT; 152 return NF_ACCEPT;
137} 153}
138 154
155#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
156
157#include <linux/netfilter/nfnetlink.h>
158#include <linux/netfilter/nfnetlink_cttimeout.h>
159
160static int udp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
161{
162 unsigned int *timeouts = data;
163
164 /* set default timeouts for UDP. */
165 timeouts[UDP_CT_UNREPLIED] = udp_timeouts[UDP_CT_UNREPLIED];
166 timeouts[UDP_CT_REPLIED] = udp_timeouts[UDP_CT_REPLIED];
167
168 if (tb[CTA_TIMEOUT_UDP_UNREPLIED]) {
169 timeouts[UDP_CT_UNREPLIED] =
170 ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDP_UNREPLIED])) * HZ;
171 }
172 if (tb[CTA_TIMEOUT_UDP_REPLIED]) {
173 timeouts[UDP_CT_REPLIED] =
174 ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDP_REPLIED])) * HZ;
175 }
176 return 0;
177}
178
179static int
180udp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
181{
182 const unsigned int *timeouts = data;
183
184 NLA_PUT_BE32(skb, CTA_TIMEOUT_UDP_UNREPLIED,
185 htonl(timeouts[UDP_CT_UNREPLIED] / HZ));
186 NLA_PUT_BE32(skb, CTA_TIMEOUT_UDP_REPLIED,
187 htonl(timeouts[UDP_CT_REPLIED] / HZ));
188 return 0;
189
190nla_put_failure:
191 return -ENOSPC;
192}
193
194static const struct nla_policy
195udp_timeout_nla_policy[CTA_TIMEOUT_UDP_MAX+1] = {
196 [CTA_TIMEOUT_UDP_UNREPLIED] = { .type = NLA_U32 },
197 [CTA_TIMEOUT_UDP_REPLIED] = { .type = NLA_U32 },
198};
199#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
200
139#ifdef CONFIG_SYSCTL 201#ifdef CONFIG_SYSCTL
140static unsigned int udp_sysctl_table_users; 202static unsigned int udp_sysctl_table_users;
141static struct ctl_table_header *udp_sysctl_header; 203static struct ctl_table_header *udp_sysctl_header;
142static struct ctl_table udp_sysctl_table[] = { 204static struct ctl_table udp_sysctl_table[] = {
143 { 205 {
144 .procname = "nf_conntrack_udp_timeout", 206 .procname = "nf_conntrack_udp_timeout",
145 .data = &nf_ct_udp_timeout, 207 .data = &udp_timeouts[UDP_CT_UNREPLIED],
146 .maxlen = sizeof(unsigned int), 208 .maxlen = sizeof(unsigned int),
147 .mode = 0644, 209 .mode = 0644,
148 .proc_handler = proc_dointvec_jiffies, 210 .proc_handler = proc_dointvec_jiffies,
149 }, 211 },
150 { 212 {
151 .procname = "nf_conntrack_udp_timeout_stream", 213 .procname = "nf_conntrack_udp_timeout_stream",
152 .data = &nf_ct_udp_timeout_stream, 214 .data = &udp_timeouts[UDP_CT_REPLIED],
153 .maxlen = sizeof(unsigned int), 215 .maxlen = sizeof(unsigned int),
154 .mode = 0644, 216 .mode = 0644,
155 .proc_handler = proc_dointvec_jiffies, 217 .proc_handler = proc_dointvec_jiffies,
@@ -160,14 +222,14 @@ static struct ctl_table udp_sysctl_table[] = {
160static struct ctl_table udp_compat_sysctl_table[] = { 222static struct ctl_table udp_compat_sysctl_table[] = {
161 { 223 {
162 .procname = "ip_conntrack_udp_timeout", 224 .procname = "ip_conntrack_udp_timeout",
163 .data = &nf_ct_udp_timeout, 225 .data = &udp_timeouts[UDP_CT_UNREPLIED],
164 .maxlen = sizeof(unsigned int), 226 .maxlen = sizeof(unsigned int),
165 .mode = 0644, 227 .mode = 0644,
166 .proc_handler = proc_dointvec_jiffies, 228 .proc_handler = proc_dointvec_jiffies,
167 }, 229 },
168 { 230 {
169 .procname = "ip_conntrack_udp_timeout_stream", 231 .procname = "ip_conntrack_udp_timeout_stream",
170 .data = &nf_ct_udp_timeout_stream, 232 .data = &udp_timeouts[UDP_CT_REPLIED],
171 .maxlen = sizeof(unsigned int), 233 .maxlen = sizeof(unsigned int),
172 .mode = 0644, 234 .mode = 0644,
173 .proc_handler = proc_dointvec_jiffies, 235 .proc_handler = proc_dointvec_jiffies,
@@ -186,6 +248,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
186 .invert_tuple = udp_invert_tuple, 248 .invert_tuple = udp_invert_tuple,
187 .print_tuple = udp_print_tuple, 249 .print_tuple = udp_print_tuple,
188 .packet = udp_packet, 250 .packet = udp_packet,
251 .get_timeouts = udp_get_timeouts,
189 .new = udp_new, 252 .new = udp_new,
190 .error = udp_error, 253 .error = udp_error,
191#if IS_ENABLED(CONFIG_NF_CT_NETLINK) 254#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -194,6 +257,15 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
194 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, 257 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
195 .nla_policy = nf_ct_port_nla_policy, 258 .nla_policy = nf_ct_port_nla_policy,
196#endif 259#endif
260#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
261 .ctnl_timeout = {
262 .nlattr_to_obj = udp_timeout_nlattr_to_obj,
263 .obj_to_nlattr = udp_timeout_obj_to_nlattr,
264 .nlattr_max = CTA_TIMEOUT_UDP_MAX,
265 .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
266 .nla_policy = udp_timeout_nla_policy,
267 },
268#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
197#ifdef CONFIG_SYSCTL 269#ifdef CONFIG_SYSCTL
198 .ctl_table_users = &udp_sysctl_table_users, 270 .ctl_table_users = &udp_sysctl_table_users,
199 .ctl_table_header = &udp_sysctl_header, 271 .ctl_table_header = &udp_sysctl_header,
@@ -214,6 +286,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
214 .invert_tuple = udp_invert_tuple, 286 .invert_tuple = udp_invert_tuple,
215 .print_tuple = udp_print_tuple, 287 .print_tuple = udp_print_tuple,
216 .packet = udp_packet, 288 .packet = udp_packet,
289 .get_timeouts = udp_get_timeouts,
217 .new = udp_new, 290 .new = udp_new,
218 .error = udp_error, 291 .error = udp_error,
219#if IS_ENABLED(CONFIG_NF_CT_NETLINK) 292#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -222,6 +295,15 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
222 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, 295 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
223 .nla_policy = nf_ct_port_nla_policy, 296 .nla_policy = nf_ct_port_nla_policy,
224#endif 297#endif
298#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
299 .ctnl_timeout = {
300 .nlattr_to_obj = udp_timeout_nlattr_to_obj,
301 .obj_to_nlattr = udp_timeout_obj_to_nlattr,
302 .nlattr_max = CTA_TIMEOUT_UDP_MAX,
303 .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
304 .nla_policy = udp_timeout_nla_policy,
305 },
306#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
225#ifdef CONFIG_SYSCTL 307#ifdef CONFIG_SYSCTL
226 .ctl_table_users = &udp_sysctl_table_users, 308 .ctl_table_users = &udp_sysctl_table_users,
227 .ctl_table_header = &udp_sysctl_header, 309 .ctl_table_header = &udp_sysctl_header,
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index f52ca1181013..e0606392cda0 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -24,8 +24,16 @@
24#include <net/netfilter/nf_conntrack_ecache.h> 24#include <net/netfilter/nf_conntrack_ecache.h>
25#include <net/netfilter/nf_log.h> 25#include <net/netfilter/nf_log.h>
26 26
27static unsigned int nf_ct_udplite_timeout __read_mostly = 30*HZ; 27enum udplite_conntrack {
28static unsigned int nf_ct_udplite_timeout_stream __read_mostly = 180*HZ; 28 UDPLITE_CT_UNREPLIED,
29 UDPLITE_CT_REPLIED,
30 UDPLITE_CT_MAX
31};
32
33static unsigned int udplite_timeouts[UDPLITE_CT_MAX] = {
34 [UDPLITE_CT_UNREPLIED] = 30*HZ,
35 [UDPLITE_CT_REPLIED] = 180*HZ,
36};
29 37
30static bool udplite_pkt_to_tuple(const struct sk_buff *skb, 38static bool udplite_pkt_to_tuple(const struct sk_buff *skb,
31 unsigned int dataoff, 39 unsigned int dataoff,
@@ -60,31 +68,38 @@ static int udplite_print_tuple(struct seq_file *s,
60 ntohs(tuple->dst.u.udp.port)); 68 ntohs(tuple->dst.u.udp.port));
61} 69}
62 70
71static unsigned int *udplite_get_timeouts(struct net *net)
72{
73 return udplite_timeouts;
74}
75
63/* Returns verdict for packet, and may modify conntracktype */ 76/* Returns verdict for packet, and may modify conntracktype */
64static int udplite_packet(struct nf_conn *ct, 77static int udplite_packet(struct nf_conn *ct,
65 const struct sk_buff *skb, 78 const struct sk_buff *skb,
66 unsigned int dataoff, 79 unsigned int dataoff,
67 enum ip_conntrack_info ctinfo, 80 enum ip_conntrack_info ctinfo,
68 u_int8_t pf, 81 u_int8_t pf,
69 unsigned int hooknum) 82 unsigned int hooknum,
83 unsigned int *timeouts)
70{ 84{
71 /* If we've seen traffic both ways, this is some kind of UDP 85 /* If we've seen traffic both ways, this is some kind of UDP
72 stream. Extend timeout. */ 86 stream. Extend timeout. */
73 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { 87 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
74 nf_ct_refresh_acct(ct, ctinfo, skb, 88 nf_ct_refresh_acct(ct, ctinfo, skb,
75 nf_ct_udplite_timeout_stream); 89 timeouts[UDPLITE_CT_REPLIED]);
76 /* Also, more likely to be important, and not a probe */ 90 /* Also, more likely to be important, and not a probe */
77 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) 91 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
78 nf_conntrack_event_cache(IPCT_ASSURED, ct); 92 nf_conntrack_event_cache(IPCT_ASSURED, ct);
79 } else 93 } else {
80 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udplite_timeout); 94 nf_ct_refresh_acct(ct, ctinfo, skb,
81 95 timeouts[UDPLITE_CT_UNREPLIED]);
96 }
82 return NF_ACCEPT; 97 return NF_ACCEPT;
83} 98}
84 99
85/* Called when a new connection for this protocol found. */ 100/* Called when a new connection for this protocol found. */
86static bool udplite_new(struct nf_conn *ct, const struct sk_buff *skb, 101static bool udplite_new(struct nf_conn *ct, const struct sk_buff *skb,
87 unsigned int dataoff) 102 unsigned int dataoff, unsigned int *timeouts)
88{ 103{
89 return true; 104 return true;
90} 105}
@@ -141,20 +156,66 @@ static int udplite_error(struct net *net, struct nf_conn *tmpl,
141 return NF_ACCEPT; 156 return NF_ACCEPT;
142} 157}
143 158
159#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
160
161#include <linux/netfilter/nfnetlink.h>
162#include <linux/netfilter/nfnetlink_cttimeout.h>
163
164static int udplite_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
165{
166 unsigned int *timeouts = data;
167
168 /* set default timeouts for UDPlite. */
169 timeouts[UDPLITE_CT_UNREPLIED] = udplite_timeouts[UDPLITE_CT_UNREPLIED];
170 timeouts[UDPLITE_CT_REPLIED] = udplite_timeouts[UDPLITE_CT_REPLIED];
171
172 if (tb[CTA_TIMEOUT_UDPLITE_UNREPLIED]) {
173 timeouts[UDPLITE_CT_UNREPLIED] =
174 ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_UNREPLIED])) * HZ;
175 }
176 if (tb[CTA_TIMEOUT_UDPLITE_REPLIED]) {
177 timeouts[UDPLITE_CT_REPLIED] =
178 ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_REPLIED])) * HZ;
179 }
180 return 0;
181}
182
183static int
184udplite_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
185{
186 const unsigned int *timeouts = data;
187
188 NLA_PUT_BE32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED,
189 htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ));
190 NLA_PUT_BE32(skb, CTA_TIMEOUT_UDPLITE_REPLIED,
191 htonl(timeouts[UDPLITE_CT_REPLIED] / HZ));
192 return 0;
193
194nla_put_failure:
195 return -ENOSPC;
196}
197
198static const struct nla_policy
199udplite_timeout_nla_policy[CTA_TIMEOUT_UDPLITE_MAX+1] = {
200 [CTA_TIMEOUT_UDPLITE_UNREPLIED] = { .type = NLA_U32 },
201 [CTA_TIMEOUT_UDPLITE_REPLIED] = { .type = NLA_U32 },
202};
203#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
204
144#ifdef CONFIG_SYSCTL 205#ifdef CONFIG_SYSCTL
145static unsigned int udplite_sysctl_table_users; 206static unsigned int udplite_sysctl_table_users;
146static struct ctl_table_header *udplite_sysctl_header; 207static struct ctl_table_header *udplite_sysctl_header;
147static struct ctl_table udplite_sysctl_table[] = { 208static struct ctl_table udplite_sysctl_table[] = {
148 { 209 {
149 .procname = "nf_conntrack_udplite_timeout", 210 .procname = "nf_conntrack_udplite_timeout",
150 .data = &nf_ct_udplite_timeout, 211 .data = &udplite_timeouts[UDPLITE_CT_UNREPLIED],
151 .maxlen = sizeof(unsigned int), 212 .maxlen = sizeof(unsigned int),
152 .mode = 0644, 213 .mode = 0644,
153 .proc_handler = proc_dointvec_jiffies, 214 .proc_handler = proc_dointvec_jiffies,
154 }, 215 },
155 { 216 {
156 .procname = "nf_conntrack_udplite_timeout_stream", 217 .procname = "nf_conntrack_udplite_timeout_stream",
157 .data = &nf_ct_udplite_timeout_stream, 218 .data = &udplite_timeouts[UDPLITE_CT_REPLIED],
158 .maxlen = sizeof(unsigned int), 219 .maxlen = sizeof(unsigned int),
159 .mode = 0644, 220 .mode = 0644,
160 .proc_handler = proc_dointvec_jiffies, 221 .proc_handler = proc_dointvec_jiffies,
@@ -172,6 +233,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
172 .invert_tuple = udplite_invert_tuple, 233 .invert_tuple = udplite_invert_tuple,
173 .print_tuple = udplite_print_tuple, 234 .print_tuple = udplite_print_tuple,
174 .packet = udplite_packet, 235 .packet = udplite_packet,
236 .get_timeouts = udplite_get_timeouts,
175 .new = udplite_new, 237 .new = udplite_new,
176 .error = udplite_error, 238 .error = udplite_error,
177#if IS_ENABLED(CONFIG_NF_CT_NETLINK) 239#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -180,6 +242,16 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
180 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 242 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
181 .nla_policy = nf_ct_port_nla_policy, 243 .nla_policy = nf_ct_port_nla_policy,
182#endif 244#endif
245#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
246 .ctnl_timeout = {
247 .nlattr_to_obj = udplite_timeout_nlattr_to_obj,
248 .obj_to_nlattr = udplite_timeout_obj_to_nlattr,
249 .nlattr_max = CTA_TIMEOUT_UDPLITE_MAX,
250 .obj_size = sizeof(unsigned int) *
251 CTA_TIMEOUT_UDPLITE_MAX,
252 .nla_policy = udplite_timeout_nla_policy,
253 },
254#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
183#ifdef CONFIG_SYSCTL 255#ifdef CONFIG_SYSCTL
184 .ctl_table_users = &udplite_sysctl_table_users, 256 .ctl_table_users = &udplite_sysctl_table_users,
185 .ctl_table_header = &udplite_sysctl_header, 257 .ctl_table_header = &udplite_sysctl_header,
@@ -196,6 +268,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
196 .invert_tuple = udplite_invert_tuple, 268 .invert_tuple = udplite_invert_tuple,
197 .print_tuple = udplite_print_tuple, 269 .print_tuple = udplite_print_tuple,
198 .packet = udplite_packet, 270 .packet = udplite_packet,
271 .get_timeouts = udplite_get_timeouts,
199 .new = udplite_new, 272 .new = udplite_new,
200 .error = udplite_error, 273 .error = udplite_error,
201#if IS_ENABLED(CONFIG_NF_CT_NETLINK) 274#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -204,6 +277,16 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
204 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 277 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
205 .nla_policy = nf_ct_port_nla_policy, 278 .nla_policy = nf_ct_port_nla_policy,
206#endif 279#endif
280#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
281 .ctnl_timeout = {
282 .nlattr_to_obj = udplite_timeout_nlattr_to_obj,
283 .obj_to_nlattr = udplite_timeout_obj_to_nlattr,
284 .nlattr_max = CTA_TIMEOUT_UDPLITE_MAX,
285 .obj_size = sizeof(unsigned int) *
286 CTA_TIMEOUT_UDPLITE_MAX,
287 .nla_policy = udplite_timeout_nla_policy,
288 },
289#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
207#ifdef CONFIG_SYSCTL 290#ifdef CONFIG_SYSCTL
208 .ctl_table_users = &udplite_sysctl_table_users, 291 .ctl_table_users = &udplite_sysctl_table_users,
209 .ctl_table_header = &udplite_sysctl_header, 292 .ctl_table_header = &udplite_sysctl_header,
diff --git a/net/netfilter/nf_conntrack_timeout.c b/net/netfilter/nf_conntrack_timeout.c
new file mode 100644
index 000000000000..a878ce5b252c
--- /dev/null
+++ b/net/netfilter/nf_conntrack_timeout.c
@@ -0,0 +1,60 @@
1/*
2 * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org>
3 * (C) 2012 by Vyatta Inc. <http://www.vyatta.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation (or any later at your option).
8 */
9
10#include <linux/types.h>
11#include <linux/netfilter.h>
12#include <linux/skbuff.h>
13#include <linux/vmalloc.h>
14#include <linux/stddef.h>
15#include <linux/err.h>
16#include <linux/percpu.h>
17#include <linux/kernel.h>
18#include <linux/netdevice.h>
19#include <linux/slab.h>
20#include <linux/export.h>
21
22#include <net/netfilter/nf_conntrack.h>
23#include <net/netfilter/nf_conntrack_core.h>
24#include <net/netfilter/nf_conntrack_extend.h>
25#include <net/netfilter/nf_conntrack_timeout.h>
26
27struct ctnl_timeout *
28(*nf_ct_timeout_find_get_hook)(const char *name) __read_mostly;
29EXPORT_SYMBOL_GPL(nf_ct_timeout_find_get_hook);
30
31void (*nf_ct_timeout_put_hook)(struct ctnl_timeout *timeout) __read_mostly;
32EXPORT_SYMBOL_GPL(nf_ct_timeout_put_hook);
33
34static struct nf_ct_ext_type timeout_extend __read_mostly = {
35 .len = sizeof(struct nf_conn_timeout),
36 .align = __alignof__(struct nf_conn_timeout),
37 .id = NF_CT_EXT_TIMEOUT,
38};
39
40int nf_conntrack_timeout_init(struct net *net)
41{
42 int ret = 0;
43
44 if (net_eq(net, &init_net)) {
45 ret = nf_ct_extend_register(&timeout_extend);
46 if (ret < 0) {
47 printk(KERN_ERR "nf_ct_timeout: Unable to register "
48 "timeout extension.\n");
49 return ret;
50 }
51 }
52
53 return 0;
54}
55
56void nf_conntrack_timeout_fini(struct net *net)
57{
58 if (net_eq(net, &init_net))
59 nf_ct_extend_unregister(&timeout_extend);
60}
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index 11ba013e47f6..3eb348bfc4fb 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -171,8 +171,10 @@ nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
171 char *acct_name; 171 char *acct_name;
172 172
173 if (nlh->nlmsg_flags & NLM_F_DUMP) { 173 if (nlh->nlmsg_flags & NLM_F_DUMP) {
174 return netlink_dump_start(nfnl, skb, nlh, nfnl_acct_dump, 174 struct netlink_dump_control c = {
175 NULL, 0); 175 .dump = nfnl_acct_dump,
176 };
177 return netlink_dump_start(nfnl, skb, nlh, &c);
176 } 178 }
177 179
178 if (!tb[NFACCT_NAME]) 180 if (!tb[NFACCT_NAME])
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
new file mode 100644
index 000000000000..fec29a43de4d
--- /dev/null
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -0,0 +1,429 @@
1/*
2 * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org>
3 * (C) 2012 by Vyatta Inc. <http://www.vyatta.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation (or any later at your option).
8 */
9#include <linux/init.h>
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/rculist.h>
13#include <linux/rculist_nulls.h>
14#include <linux/types.h>
15#include <linux/timer.h>
16#include <linux/security.h>
17#include <linux/skbuff.h>
18#include <linux/errno.h>
19#include <linux/netlink.h>
20#include <linux/spinlock.h>
21#include <linux/interrupt.h>
22#include <linux/slab.h>
23
24#include <linux/netfilter.h>
25#include <net/netlink.h>
26#include <net/sock.h>
27#include <net/netfilter/nf_conntrack.h>
28#include <net/netfilter/nf_conntrack_core.h>
29#include <net/netfilter/nf_conntrack_l3proto.h>
30#include <net/netfilter/nf_conntrack_l4proto.h>
31#include <net/netfilter/nf_conntrack_tuple.h>
32#include <net/netfilter/nf_conntrack_timeout.h>
33
34#include <linux/netfilter/nfnetlink.h>
35#include <linux/netfilter/nfnetlink_cttimeout.h>
36
37MODULE_LICENSE("GPL");
38MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
39MODULE_DESCRIPTION("cttimeout: Extended Netfilter Connection Tracking timeout tuning");
40
41static LIST_HEAD(cttimeout_list);
42
43static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = {
44 [CTA_TIMEOUT_NAME] = { .type = NLA_NUL_STRING },
45 [CTA_TIMEOUT_L3PROTO] = { .type = NLA_U16 },
46 [CTA_TIMEOUT_L4PROTO] = { .type = NLA_U8 },
47 [CTA_TIMEOUT_DATA] = { .type = NLA_NESTED },
48};
49
50static int
51ctnl_timeout_parse_policy(struct ctnl_timeout *timeout,
52 struct nf_conntrack_l4proto *l4proto,
53 const struct nlattr *attr)
54{
55 int ret = 0;
56
57 if (likely(l4proto->ctnl_timeout.nlattr_to_obj)) {
58 struct nlattr *tb[l4proto->ctnl_timeout.nlattr_max+1];
59
60 nla_parse_nested(tb, l4proto->ctnl_timeout.nlattr_max,
61 attr, l4proto->ctnl_timeout.nla_policy);
62
63 ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, &timeout->data);
64 }
65 return ret;
66}
67
68static int
69cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
70 const struct nlmsghdr *nlh,
71 const struct nlattr * const cda[])
72{
73 __u16 l3num;
74 __u8 l4num;
75 struct nf_conntrack_l4proto *l4proto;
76 struct ctnl_timeout *timeout, *matching = NULL;
77 char *name;
78 int ret;
79
80 if (!cda[CTA_TIMEOUT_NAME] ||
81 !cda[CTA_TIMEOUT_L3PROTO] ||
82 !cda[CTA_TIMEOUT_L4PROTO] ||
83 !cda[CTA_TIMEOUT_DATA])
84 return -EINVAL;
85
86 name = nla_data(cda[CTA_TIMEOUT_NAME]);
87 l3num = ntohs(nla_get_be16(cda[CTA_TIMEOUT_L3PROTO]));
88 l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
89
90 list_for_each_entry(timeout, &cttimeout_list, head) {
91 if (strncmp(timeout->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
92 continue;
93
94 if (nlh->nlmsg_flags & NLM_F_EXCL)
95 return -EEXIST;
96
97 matching = timeout;
98 break;
99 }
100
101 l4proto = __nf_ct_l4proto_find(l3num, l4num);
102
103 /* This protocol is not supportted, skip. */
104 if (l4proto->l4proto != l4num)
105 return -EOPNOTSUPP;
106
107 if (matching) {
108 if (nlh->nlmsg_flags & NLM_F_REPLACE) {
109 /* You cannot replace one timeout policy by another of
110 * different kind, sorry.
111 */
112 if (matching->l3num != l3num ||
113 matching->l4num != l4num)
114 return -EINVAL;
115
116 ret = ctnl_timeout_parse_policy(matching, l4proto,
117 cda[CTA_TIMEOUT_DATA]);
118 return ret;
119 }
120 return -EBUSY;
121 }
122
123 timeout = kzalloc(sizeof(struct ctnl_timeout) +
124 l4proto->ctnl_timeout.obj_size, GFP_KERNEL);
125 if (timeout == NULL)
126 return -ENOMEM;
127
128 ret = ctnl_timeout_parse_policy(timeout, l4proto,
129 cda[CTA_TIMEOUT_DATA]);
130 if (ret < 0)
131 goto err;
132
133 strcpy(timeout->name, nla_data(cda[CTA_TIMEOUT_NAME]));
134 timeout->l3num = l3num;
135 timeout->l4num = l4num;
136 atomic_set(&timeout->refcnt, 1);
137 list_add_tail_rcu(&timeout->head, &cttimeout_list);
138
139 return 0;
140err:
141 kfree(timeout);
142 return ret;
143}
144
145static int
146ctnl_timeout_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
147 int event, struct ctnl_timeout *timeout)
148{
149 struct nlmsghdr *nlh;
150 struct nfgenmsg *nfmsg;
151 unsigned int flags = pid ? NLM_F_MULTI : 0;
152 struct nf_conntrack_l4proto *l4proto;
153
154 event |= NFNL_SUBSYS_CTNETLINK_TIMEOUT << 8;
155 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
156 if (nlh == NULL)
157 goto nlmsg_failure;
158
159 nfmsg = nlmsg_data(nlh);
160 nfmsg->nfgen_family = AF_UNSPEC;
161 nfmsg->version = NFNETLINK_V0;
162 nfmsg->res_id = 0;
163
164 NLA_PUT_STRING(skb, CTA_TIMEOUT_NAME, timeout->name);
165 NLA_PUT_BE16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num));
166 NLA_PUT_U8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4num);
167 NLA_PUT_BE32(skb, CTA_TIMEOUT_USE,
168 htonl(atomic_read(&timeout->refcnt)));
169
170 l4proto = __nf_ct_l4proto_find(timeout->l3num, timeout->l4num);
171
172 /* If the timeout object does not match the layer 4 protocol tracker,
173 * then skip dumping the data part since we don't know how to
174 * interpret it. This may happen for UPDlite, SCTP and DCCP since
175 * you can unload the module.
176 */
177 if (timeout->l4num != l4proto->l4proto)
178 goto out;
179
180 if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) {
181 struct nlattr *nest_parms;
182 int ret;
183
184 nest_parms = nla_nest_start(skb,
185 CTA_TIMEOUT_DATA | NLA_F_NESTED);
186 if (!nest_parms)
187 goto nla_put_failure;
188
189 ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, &timeout->data);
190 if (ret < 0)
191 goto nla_put_failure;
192
193 nla_nest_end(skb, nest_parms);
194 }
195out:
196 nlmsg_end(skb, nlh);
197 return skb->len;
198
199nlmsg_failure:
200nla_put_failure:
201 nlmsg_cancel(skb, nlh);
202 return -1;
203}
204
205static int
206ctnl_timeout_dump(struct sk_buff *skb, struct netlink_callback *cb)
207{
208 struct ctnl_timeout *cur, *last;
209
210 if (cb->args[2])
211 return 0;
212
213 last = (struct ctnl_timeout *)cb->args[1];
214 if (cb->args[1])
215 cb->args[1] = 0;
216
217 rcu_read_lock();
218 list_for_each_entry_rcu(cur, &cttimeout_list, head) {
219 if (last && cur != last)
220 continue;
221
222 if (ctnl_timeout_fill_info(skb, NETLINK_CB(cb->skb).pid,
223 cb->nlh->nlmsg_seq,
224 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
225 IPCTNL_MSG_TIMEOUT_NEW, cur) < 0) {
226 cb->args[1] = (unsigned long)cur;
227 break;
228 }
229 }
230 if (!cb->args[1])
231 cb->args[2] = 1;
232 rcu_read_unlock();
233 return skb->len;
234}
235
236static int
237cttimeout_get_timeout(struct sock *ctnl, struct sk_buff *skb,
238 const struct nlmsghdr *nlh,
239 const struct nlattr * const cda[])
240{
241 int ret = -ENOENT;
242 char *name;
243 struct ctnl_timeout *cur;
244
245 if (nlh->nlmsg_flags & NLM_F_DUMP) {
246 struct netlink_dump_control c = {
247 .dump = ctnl_timeout_dump,
248 };
249 return netlink_dump_start(ctnl, skb, nlh, &c);
250 }
251
252 if (!cda[CTA_TIMEOUT_NAME])
253 return -EINVAL;
254 name = nla_data(cda[CTA_TIMEOUT_NAME]);
255
256 list_for_each_entry(cur, &cttimeout_list, head) {
257 struct sk_buff *skb2;
258
259 if (strncmp(cur->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
260 continue;
261
262 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
263 if (skb2 == NULL) {
264 ret = -ENOMEM;
265 break;
266 }
267
268 ret = ctnl_timeout_fill_info(skb2, NETLINK_CB(skb).pid,
269 nlh->nlmsg_seq,
270 NFNL_MSG_TYPE(nlh->nlmsg_type),
271 IPCTNL_MSG_TIMEOUT_NEW, cur);
272 if (ret <= 0) {
273 kfree_skb(skb2);
274 break;
275 }
276 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid,
277 MSG_DONTWAIT);
278 if (ret > 0)
279 ret = 0;
280
281 /* this avoids a loop in nfnetlink. */
282 return ret == -EAGAIN ? -ENOBUFS : ret;
283 }
284 return ret;
285}
286
287/* try to delete object, fail if it is still in use. */
288static int ctnl_timeout_try_del(struct ctnl_timeout *timeout)
289{
290 int ret = 0;
291
292 /* we want to avoid races with nf_ct_timeout_find_get. */
293 if (atomic_dec_and_test(&timeout->refcnt)) {
294 /* We are protected by nfnl mutex. */
295 list_del_rcu(&timeout->head);
296 kfree_rcu(timeout, rcu_head);
297 } else {
298 /* still in use, restore reference counter. */
299 atomic_inc(&timeout->refcnt);
300 ret = -EBUSY;
301 }
302 return ret;
303}
304
305static int
306cttimeout_del_timeout(struct sock *ctnl, struct sk_buff *skb,
307 const struct nlmsghdr *nlh,
308 const struct nlattr * const cda[])
309{
310 char *name;
311 struct ctnl_timeout *cur;
312 int ret = -ENOENT;
313
314 if (!cda[CTA_TIMEOUT_NAME]) {
315 list_for_each_entry(cur, &cttimeout_list, head)
316 ctnl_timeout_try_del(cur);
317
318 return 0;
319 }
320 name = nla_data(cda[CTA_TIMEOUT_NAME]);
321
322 list_for_each_entry(cur, &cttimeout_list, head) {
323 if (strncmp(cur->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
324 continue;
325
326 ret = ctnl_timeout_try_del(cur);
327 if (ret < 0)
328 return ret;
329
330 break;
331 }
332 return ret;
333}
334
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
/* Hook implementation behind nf_ct_timeout_find_get_hook: look up a
 * policy by name and take a reference on both the object and this module.
 * Returns NULL if no policy matches or the object is being deleted.
 */
static struct ctnl_timeout *ctnl_timeout_find_get(const char *name)
{
	struct ctnl_timeout *cur, *found = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(cur, &cttimeout_list, head) {
		if (strncmp(cur->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
			continue;

		if (!try_module_get(THIS_MODULE))
			break;

		/* Refuse to resurrect an object whose refcnt already hit
		 * zero under a concurrent delete. */
		if (!atomic_inc_not_zero(&cur->refcnt)) {
			module_put(THIS_MODULE);
			break;
		}

		found = cur;
		break;
	}
	rcu_read_unlock();
	return found;
}

/* Hook implementation behind nf_ct_timeout_put_hook: release a reference
 * taken by ctnl_timeout_find_get(). */
static void ctnl_timeout_put(struct ctnl_timeout *timeout)
{
	atomic_dec(&timeout->refcnt);
	module_put(THIS_MODULE);
}
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
366
367static const struct nfnl_callback cttimeout_cb[IPCTNL_MSG_TIMEOUT_MAX] = {
368 [IPCTNL_MSG_TIMEOUT_NEW] = { .call = cttimeout_new_timeout,
369 .attr_count = CTA_TIMEOUT_MAX,
370 .policy = cttimeout_nla_policy },
371 [IPCTNL_MSG_TIMEOUT_GET] = { .call = cttimeout_get_timeout,
372 .attr_count = CTA_TIMEOUT_MAX,
373 .policy = cttimeout_nla_policy },
374 [IPCTNL_MSG_TIMEOUT_DELETE] = { .call = cttimeout_del_timeout,
375 .attr_count = CTA_TIMEOUT_MAX,
376 .policy = cttimeout_nla_policy },
377};
378
379static const struct nfnetlink_subsystem cttimeout_subsys = {
380 .name = "conntrack_timeout",
381 .subsys_id = NFNL_SUBSYS_CTNETLINK_TIMEOUT,
382 .cb_count = IPCTNL_MSG_TIMEOUT_MAX,
383 .cb = cttimeout_cb,
384};
385
386MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_TIMEOUT);
387
388static int __init cttimeout_init(void)
389{
390 int ret;
391
392 ret = nfnetlink_subsys_register(&cttimeout_subsys);
393 if (ret < 0) {
394 pr_err("cttimeout_init: cannot register cttimeout with "
395 "nfnetlink.\n");
396 goto err_out;
397 }
398#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
399 RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, ctnl_timeout_find_get);
400 RCU_INIT_POINTER(nf_ct_timeout_put_hook, ctnl_timeout_put);
401#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
402 return 0;
403
404err_out:
405 return ret;
406}
407
408static void __exit cttimeout_exit(void)
409{
410 struct ctnl_timeout *cur, *tmp;
411
412 pr_info("cttimeout: unregistering from nfnetlink.\n");
413
414 nfnetlink_subsys_unregister(&cttimeout_subsys);
415 list_for_each_entry_safe(cur, tmp, &cttimeout_list, head) {
416 list_del_rcu(&cur->head);
417 /* We are sure that our objects have no clients at this point,
418 * it's safe to release them all without checking refcnt.
419 */
420 kfree_rcu(cur, rcu_head);
421 }
422#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
423 RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL);
424 RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL);
425#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
426}
427
428module_init(cttimeout_init);
429module_exit(cttimeout_exit);
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 0221d10de75a..b873445df444 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -16,10 +16,11 @@
16#include <net/netfilter/nf_conntrack.h> 16#include <net/netfilter/nf_conntrack.h>
17#include <net/netfilter/nf_conntrack_helper.h> 17#include <net/netfilter/nf_conntrack_helper.h>
18#include <net/netfilter/nf_conntrack_ecache.h> 18#include <net/netfilter/nf_conntrack_ecache.h>
19#include <net/netfilter/nf_conntrack_timeout.h>
19#include <net/netfilter/nf_conntrack_zones.h> 20#include <net/netfilter/nf_conntrack_zones.h>
20 21
21static unsigned int xt_ct_target(struct sk_buff *skb, 22static unsigned int xt_ct_target_v0(struct sk_buff *skb,
22 const struct xt_action_param *par) 23 const struct xt_action_param *par)
23{ 24{
24 const struct xt_ct_target_info *info = par->targinfo; 25 const struct xt_ct_target_info *info = par->targinfo;
25 struct nf_conn *ct = info->ct; 26 struct nf_conn *ct = info->ct;
@@ -35,6 +36,23 @@ static unsigned int xt_ct_target(struct sk_buff *skb,
35 return XT_CONTINUE; 36 return XT_CONTINUE;
36} 37}
37 38
39static unsigned int xt_ct_target_v1(struct sk_buff *skb,
40 const struct xt_action_param *par)
41{
42 const struct xt_ct_target_info_v1 *info = par->targinfo;
43 struct nf_conn *ct = info->ct;
44
45 /* Previously seen (loopback)? Ignore. */
46 if (skb->nfct != NULL)
47 return XT_CONTINUE;
48
49 atomic_inc(&ct->ct_general.use);
50 skb->nfct = &ct->ct_general;
51 skb->nfctinfo = IP_CT_NEW;
52
53 return XT_CONTINUE;
54}
55
38static u8 xt_ct_find_proto(const struct xt_tgchk_param *par) 56static u8 xt_ct_find_proto(const struct xt_tgchk_param *par)
39{ 57{
40 if (par->family == NFPROTO_IPV4) { 58 if (par->family == NFPROTO_IPV4) {
@@ -53,7 +71,7 @@ static u8 xt_ct_find_proto(const struct xt_tgchk_param *par)
53 return 0; 71 return 0;
54} 72}
55 73
56static int xt_ct_tg_check(const struct xt_tgchk_param *par) 74static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
57{ 75{
58 struct xt_ct_target_info *info = par->targinfo; 76 struct xt_ct_target_info *info = par->targinfo;
59 struct nf_conntrack_tuple t; 77 struct nf_conntrack_tuple t;
@@ -130,7 +148,137 @@ err1:
130 return ret; 148 return ret;
131} 149}
132 150
133static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par) 151static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
152{
153 struct xt_ct_target_info_v1 *info = par->targinfo;
154 struct nf_conntrack_tuple t;
155 struct nf_conn_help *help;
156 struct nf_conn *ct;
157 int ret = 0;
158 u8 proto;
159
160 if (info->flags & ~XT_CT_NOTRACK)
161 return -EINVAL;
162
163 if (info->flags & XT_CT_NOTRACK) {
164 ct = nf_ct_untracked_get();
165 atomic_inc(&ct->ct_general.use);
166 goto out;
167 }
168
169#ifndef CONFIG_NF_CONNTRACK_ZONES
170 if (info->zone)
171 goto err1;
172#endif
173
174 ret = nf_ct_l3proto_try_module_get(par->family);
175 if (ret < 0)
176 goto err1;
177
178 memset(&t, 0, sizeof(t));
179 ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL);
180 ret = PTR_ERR(ct);
181 if (IS_ERR(ct))
182 goto err2;
183
184 ret = 0;
185 if ((info->ct_events || info->exp_events) &&
186 !nf_ct_ecache_ext_add(ct, info->ct_events, info->exp_events,
187 GFP_KERNEL))
188 goto err3;
189
190 if (info->helper[0]) {
191 ret = -ENOENT;
192 proto = xt_ct_find_proto(par);
193 if (!proto) {
194 pr_info("You must specify a L4 protocol, "
195 "and not use inversions on it.\n");
196 goto err3;
197 }
198
199 ret = -ENOMEM;
200 help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
201 if (help == NULL)
202 goto err3;
203
204 ret = -ENOENT;
205 help->helper = nf_conntrack_helper_try_module_get(info->helper,
206 par->family,
207 proto);
208 if (help->helper == NULL) {
209 pr_info("No such helper \"%s\"\n", info->helper);
210 goto err3;
211 }
212 }
213
214#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
215 if (info->timeout) {
216 typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
217 struct ctnl_timeout *timeout;
218 struct nf_conn_timeout *timeout_ext;
219
220 timeout_find_get =
221 rcu_dereference(nf_ct_timeout_find_get_hook);
222
223 if (timeout_find_get) {
224 const struct ipt_entry *e = par->entryinfo;
225
226 if (e->ip.invflags & IPT_INV_PROTO) {
227 ret = -EINVAL;
228 pr_info("You cannot use inversion on "
229 "L4 protocol\n");
230 goto err3;
231 }
232 timeout = timeout_find_get(info->timeout);
233 if (timeout == NULL) {
234 ret = -ENOENT;
235 pr_info("No such timeout policy \"%s\"\n",
236 info->timeout);
237 goto err3;
238 }
239 if (timeout->l3num != par->family) {
240 ret = -EINVAL;
241 pr_info("Timeout policy `%s' can only be "
242 "used by L3 protocol number %d\n",
243 info->timeout, timeout->l3num);
244 goto err3;
245 }
246 if (timeout->l4num != e->ip.proto) {
247 ret = -EINVAL;
248 pr_info("Timeout policy `%s' can only be "
249 "used by L4 protocol number %d\n",
250 info->timeout, timeout->l4num);
251 goto err3;
252 }
253 timeout_ext = nf_ct_timeout_ext_add(ct, timeout,
254 GFP_KERNEL);
255 if (timeout_ext == NULL) {
256 ret = -ENOMEM;
257 goto err3;
258 }
259 } else {
260 ret = -ENOENT;
261 pr_info("Timeout policy base is empty\n");
262 goto err3;
263 }
264 }
265#endif
266
267 __set_bit(IPS_TEMPLATE_BIT, &ct->status);
268 __set_bit(IPS_CONFIRMED_BIT, &ct->status);
269out:
270 info->ct = ct;
271 return 0;
272
273err3:
274 nf_conntrack_free(ct);
275err2:
276 nf_ct_l3proto_module_put(par->family);
277err1:
278 return ret;
279}
280
281static void xt_ct_tg_destroy_v0(const struct xt_tgdtor_param *par)
134{ 282{
135 struct xt_ct_target_info *info = par->targinfo; 283 struct xt_ct_target_info *info = par->targinfo;
136 struct nf_conn *ct = info->ct; 284 struct nf_conn *ct = info->ct;
@@ -146,25 +294,67 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par)
146 nf_ct_put(info->ct); 294 nf_ct_put(info->ct);
147} 295}
148 296
149static struct xt_target xt_ct_tg __read_mostly = { 297static void xt_ct_tg_destroy_v1(const struct xt_tgdtor_param *par)
150 .name = "CT", 298{
151 .family = NFPROTO_UNSPEC, 299 struct xt_ct_target_info_v1 *info = par->targinfo;
152 .targetsize = sizeof(struct xt_ct_target_info), 300 struct nf_conn *ct = info->ct;
153 .checkentry = xt_ct_tg_check, 301 struct nf_conn_help *help;
154 .destroy = xt_ct_tg_destroy, 302#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
155 .target = xt_ct_target, 303 struct nf_conn_timeout *timeout_ext;
156 .table = "raw", 304 typeof(nf_ct_timeout_put_hook) timeout_put;
157 .me = THIS_MODULE, 305#endif
306 if (!nf_ct_is_untracked(ct)) {
307 help = nfct_help(ct);
308 if (help)
309 module_put(help->helper->me);
310
311 nf_ct_l3proto_module_put(par->family);
312
313#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
314 timeout_put = rcu_dereference(nf_ct_timeout_put_hook);
315
316 if (timeout_put) {
317 timeout_ext = nf_ct_timeout_find(ct);
318 if (timeout_ext)
319 timeout_put(timeout_ext->timeout);
320 }
321#endif
322 }
323 nf_ct_put(info->ct);
324}
325
326static struct xt_target xt_ct_tg_reg[] __read_mostly = {
327 {
328 .name = "CT",
329 .family = NFPROTO_UNSPEC,
330 .targetsize = sizeof(struct xt_ct_target_info),
331 .checkentry = xt_ct_tg_check_v0,
332 .destroy = xt_ct_tg_destroy_v0,
333 .target = xt_ct_target_v0,
334 .table = "raw",
335 .me = THIS_MODULE,
336 },
337 {
338 .name = "CT",
339 .family = NFPROTO_UNSPEC,
340 .revision = 1,
341 .targetsize = sizeof(struct xt_ct_target_info_v1),
342 .checkentry = xt_ct_tg_check_v1,
343 .destroy = xt_ct_tg_destroy_v1,
344 .target = xt_ct_target_v1,
345 .table = "raw",
346 .me = THIS_MODULE,
347 },
158}; 348};
159 349
160static int __init xt_ct_tg_init(void) 350static int __init xt_ct_tg_init(void)
161{ 351{
162 return xt_register_target(&xt_ct_tg); 352 return xt_register_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg));
163} 353}
164 354
165static void __exit xt_ct_tg_exit(void) 355static void __exit xt_ct_tg_exit(void)
166{ 356{
167 xt_unregister_target(&xt_ct_tg); 357 xt_unregister_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg));
168} 358}
169 359
170module_init(xt_ct_tg_init); 360module_init(xt_ct_tg_init);
diff --git a/net/netfilter/xt_LOG.c b/net/netfilter/xt_LOG.c
new file mode 100644
index 000000000000..f99f8dee238b
--- /dev/null
+++ b/net/netfilter/xt_LOG.c
@@ -0,0 +1,925 @@
1/*
2 * This is a module which is used for logging packets.
3 */
4
5/* (C) 1999-2001 Paul `Rusty' Russell
6 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14#include <linux/module.h>
15#include <linux/spinlock.h>
16#include <linux/skbuff.h>
17#include <linux/if_arp.h>
18#include <linux/ip.h>
19#include <net/ipv6.h>
20#include <net/icmp.h>
21#include <net/udp.h>
22#include <net/tcp.h>
23#include <net/route.h>
24
25#include <linux/netfilter.h>
26#include <linux/netfilter/x_tables.h>
27#include <linux/netfilter/xt_LOG.h>
28#include <linux/netfilter_ipv6/ip6_tables.h>
29#include <net/netfilter/nf_log.h>
30#include <net/netfilter/xt_log.h>
31
32static struct nf_loginfo default_loginfo = {
33 .type = NF_LOG_TYPE_LOG,
34 .u = {
35 .log = {
36 .level = 5,
37 .logflags = NF_LOG_MASK,
38 },
39 },
40};
41
42static int dump_udp_header(struct sbuff *m, const struct sk_buff *skb,
43 u8 proto, int fragment, unsigned int offset)
44{
45 struct udphdr _udph;
46 const struct udphdr *uh;
47
48 if (proto == IPPROTO_UDP)
49 /* Max length: 10 "PROTO=UDP " */
50 sb_add(m, "PROTO=UDP ");
51 else /* Max length: 14 "PROTO=UDPLITE " */
52 sb_add(m, "PROTO=UDPLITE ");
53
54 if (fragment)
55 goto out;
56
57 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
58 uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
59 if (uh == NULL) {
60 sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
61
62 return 1;
63 }
64
65 /* Max length: 20 "SPT=65535 DPT=65535 " */
66 sb_add(m, "SPT=%u DPT=%u LEN=%u ", ntohs(uh->source), ntohs(uh->dest),
67 ntohs(uh->len));
68
69out:
70 return 0;
71}
72
73static int dump_tcp_header(struct sbuff *m, const struct sk_buff *skb,
74 u8 proto, int fragment, unsigned int offset,
75 unsigned int logflags)
76{
77 struct tcphdr _tcph;
78 const struct tcphdr *th;
79
80 /* Max length: 10 "PROTO=TCP " */
81 sb_add(m, "PROTO=TCP ");
82
83 if (fragment)
84 return 0;
85
86 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
87 th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
88 if (th == NULL) {
89 sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
90 return 1;
91 }
92
93 /* Max length: 20 "SPT=65535 DPT=65535 " */
94 sb_add(m, "SPT=%u DPT=%u ", ntohs(th->source), ntohs(th->dest));
95 /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
96 if (logflags & XT_LOG_TCPSEQ)
97 sb_add(m, "SEQ=%u ACK=%u ", ntohl(th->seq), ntohl(th->ack_seq));
98
99 /* Max length: 13 "WINDOW=65535 " */
100 sb_add(m, "WINDOW=%u ", ntohs(th->window));
101 /* Max length: 9 "RES=0x3C " */
102 sb_add(m, "RES=0x%02x ", (u_int8_t)(ntohl(tcp_flag_word(th) &
103 TCP_RESERVED_BITS) >> 22));
104 /* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */
105 if (th->cwr)
106 sb_add(m, "CWR ");
107 if (th->ece)
108 sb_add(m, "ECE ");
109 if (th->urg)
110 sb_add(m, "URG ");
111 if (th->ack)
112 sb_add(m, "ACK ");
113 if (th->psh)
114 sb_add(m, "PSH ");
115 if (th->rst)
116 sb_add(m, "RST ");
117 if (th->syn)
118 sb_add(m, "SYN ");
119 if (th->fin)
120 sb_add(m, "FIN ");
121 /* Max length: 11 "URGP=65535 " */
122 sb_add(m, "URGP=%u ", ntohs(th->urg_ptr));
123
124 if ((logflags & XT_LOG_TCPOPT) && th->doff*4 > sizeof(struct tcphdr)) {
125 u_int8_t _opt[60 - sizeof(struct tcphdr)];
126 const u_int8_t *op;
127 unsigned int i;
128 unsigned int optsize = th->doff*4 - sizeof(struct tcphdr);
129
130 op = skb_header_pointer(skb, offset + sizeof(struct tcphdr),
131 optsize, _opt);
132 if (op == NULL) {
133 sb_add(m, "OPT (TRUNCATED)");
134 return 1;
135 }
136
137 /* Max length: 127 "OPT (" 15*4*2chars ") " */
138 sb_add(m, "OPT (");
139 for (i = 0; i < optsize; i++)
140 sb_add(m, "%02X", op[i]);
141
142 sb_add(m, ") ");
143 }
144
145 return 0;
146}
147
148/* One level of recursion won't kill us */
149static void dump_ipv4_packet(struct sbuff *m,
150 const struct nf_loginfo *info,
151 const struct sk_buff *skb,
152 unsigned int iphoff)
153{
154 struct iphdr _iph;
155 const struct iphdr *ih;
156 unsigned int logflags;
157
158 if (info->type == NF_LOG_TYPE_LOG)
159 logflags = info->u.log.logflags;
160 else
161 logflags = NF_LOG_MASK;
162
163 ih = skb_header_pointer(skb, iphoff, sizeof(_iph), &_iph);
164 if (ih == NULL) {
165 sb_add(m, "TRUNCATED");
166 return;
167 }
168
169 /* Important fields:
170 * TOS, len, DF/MF, fragment offset, TTL, src, dst, options. */
171 /* Max length: 40 "SRC=255.255.255.255 DST=255.255.255.255 " */
172 sb_add(m, "SRC=%pI4 DST=%pI4 ",
173 &ih->saddr, &ih->daddr);
174
175 /* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */
176 sb_add(m, "LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ",
177 ntohs(ih->tot_len), ih->tos & IPTOS_TOS_MASK,
178 ih->tos & IPTOS_PREC_MASK, ih->ttl, ntohs(ih->id));
179
180 /* Max length: 6 "CE DF MF " */
181 if (ntohs(ih->frag_off) & IP_CE)
182 sb_add(m, "CE ");
183 if (ntohs(ih->frag_off) & IP_DF)
184 sb_add(m, "DF ");
185 if (ntohs(ih->frag_off) & IP_MF)
186 sb_add(m, "MF ");
187
188 /* Max length: 11 "FRAG:65535 " */
189 if (ntohs(ih->frag_off) & IP_OFFSET)
190 sb_add(m, "FRAG:%u ", ntohs(ih->frag_off) & IP_OFFSET);
191
192 if ((logflags & XT_LOG_IPOPT) &&
193 ih->ihl * 4 > sizeof(struct iphdr)) {
194 const unsigned char *op;
195 unsigned char _opt[4 * 15 - sizeof(struct iphdr)];
196 unsigned int i, optsize;
197
198 optsize = ih->ihl * 4 - sizeof(struct iphdr);
199 op = skb_header_pointer(skb, iphoff+sizeof(_iph),
200 optsize, _opt);
201 if (op == NULL) {
202 sb_add(m, "TRUNCATED");
203 return;
204 }
205
206 /* Max length: 127 "OPT (" 15*4*2chars ") " */
207 sb_add(m, "OPT (");
208 for (i = 0; i < optsize; i++)
209 sb_add(m, "%02X", op[i]);
210 sb_add(m, ") ");
211 }
212
213 switch (ih->protocol) {
214 case IPPROTO_TCP:
215 if (dump_tcp_header(m, skb, ih->protocol,
216 ntohs(ih->frag_off) & IP_OFFSET,
217 iphoff+ih->ihl*4, logflags))
218 return;
219 break;
220 case IPPROTO_UDP:
221 case IPPROTO_UDPLITE:
222 if (dump_udp_header(m, skb, ih->protocol,
223 ntohs(ih->frag_off) & IP_OFFSET,
224 iphoff+ih->ihl*4))
225 return;
226 break;
227 case IPPROTO_ICMP: {
228 struct icmphdr _icmph;
229 const struct icmphdr *ich;
230 static const size_t required_len[NR_ICMP_TYPES+1]
231 = { [ICMP_ECHOREPLY] = 4,
232 [ICMP_DEST_UNREACH]
233 = 8 + sizeof(struct iphdr),
234 [ICMP_SOURCE_QUENCH]
235 = 8 + sizeof(struct iphdr),
236 [ICMP_REDIRECT]
237 = 8 + sizeof(struct iphdr),
238 [ICMP_ECHO] = 4,
239 [ICMP_TIME_EXCEEDED]
240 = 8 + sizeof(struct iphdr),
241 [ICMP_PARAMETERPROB]
242 = 8 + sizeof(struct iphdr),
243 [ICMP_TIMESTAMP] = 20,
244 [ICMP_TIMESTAMPREPLY] = 20,
245 [ICMP_ADDRESS] = 12,
246 [ICMP_ADDRESSREPLY] = 12 };
247
248 /* Max length: 11 "PROTO=ICMP " */
249 sb_add(m, "PROTO=ICMP ");
250
251 if (ntohs(ih->frag_off) & IP_OFFSET)
252 break;
253
254 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
255 ich = skb_header_pointer(skb, iphoff + ih->ihl * 4,
256 sizeof(_icmph), &_icmph);
257 if (ich == NULL) {
258 sb_add(m, "INCOMPLETE [%u bytes] ",
259 skb->len - iphoff - ih->ihl*4);
260 break;
261 }
262
263 /* Max length: 18 "TYPE=255 CODE=255 " */
264 sb_add(m, "TYPE=%u CODE=%u ", ich->type, ich->code);
265
266 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
267 if (ich->type <= NR_ICMP_TYPES &&
268 required_len[ich->type] &&
269 skb->len-iphoff-ih->ihl*4 < required_len[ich->type]) {
270 sb_add(m, "INCOMPLETE [%u bytes] ",
271 skb->len - iphoff - ih->ihl*4);
272 break;
273 }
274
275 switch (ich->type) {
276 case ICMP_ECHOREPLY:
277 case ICMP_ECHO:
278 /* Max length: 19 "ID=65535 SEQ=65535 " */
279 sb_add(m, "ID=%u SEQ=%u ",
280 ntohs(ich->un.echo.id),
281 ntohs(ich->un.echo.sequence));
282 break;
283
284 case ICMP_PARAMETERPROB:
285 /* Max length: 14 "PARAMETER=255 " */
286 sb_add(m, "PARAMETER=%u ",
287 ntohl(ich->un.gateway) >> 24);
288 break;
289 case ICMP_REDIRECT:
290 /* Max length: 24 "GATEWAY=255.255.255.255 " */
291 sb_add(m, "GATEWAY=%pI4 ", &ich->un.gateway);
292 /* Fall through */
293 case ICMP_DEST_UNREACH:
294 case ICMP_SOURCE_QUENCH:
295 case ICMP_TIME_EXCEEDED:
296 /* Max length: 3+maxlen */
297 if (!iphoff) { /* Only recurse once. */
298 sb_add(m, "[");
299 dump_ipv4_packet(m, info, skb,
300 iphoff + ih->ihl*4+sizeof(_icmph));
301 sb_add(m, "] ");
302 }
303
304 /* Max length: 10 "MTU=65535 " */
305 if (ich->type == ICMP_DEST_UNREACH &&
306 ich->code == ICMP_FRAG_NEEDED)
307 sb_add(m, "MTU=%u ", ntohs(ich->un.frag.mtu));
308 }
309 break;
310 }
311 /* Max Length */
312 case IPPROTO_AH: {
313 struct ip_auth_hdr _ahdr;
314 const struct ip_auth_hdr *ah;
315
316 if (ntohs(ih->frag_off) & IP_OFFSET)
317 break;
318
319 /* Max length: 9 "PROTO=AH " */
320 sb_add(m, "PROTO=AH ");
321
322 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
323 ah = skb_header_pointer(skb, iphoff+ih->ihl*4,
324 sizeof(_ahdr), &_ahdr);
325 if (ah == NULL) {
326 sb_add(m, "INCOMPLETE [%u bytes] ",
327 skb->len - iphoff - ih->ihl*4);
328 break;
329 }
330
331 /* Length: 15 "SPI=0xF1234567 " */
332 sb_add(m, "SPI=0x%x ", ntohl(ah->spi));
333 break;
334 }
335 case IPPROTO_ESP: {
336 struct ip_esp_hdr _esph;
337 const struct ip_esp_hdr *eh;
338
339 /* Max length: 10 "PROTO=ESP " */
340 sb_add(m, "PROTO=ESP ");
341
342 if (ntohs(ih->frag_off) & IP_OFFSET)
343 break;
344
345 /* Max length: 25 "INCOMPLETE [65535 bytes] " */
346 eh = skb_header_pointer(skb, iphoff+ih->ihl*4,
347 sizeof(_esph), &_esph);
348 if (eh == NULL) {
349 sb_add(m, "INCOMPLETE [%u bytes] ",
350 skb->len - iphoff - ih->ihl*4);
351 break;
352 }
353
354 /* Length: 15 "SPI=0xF1234567 " */
355 sb_add(m, "SPI=0x%x ", ntohl(eh->spi));
356 break;
357 }
358 /* Max length: 10 "PROTO 255 " */
359 default:
360 sb_add(m, "PROTO=%u ", ih->protocol);
361 }
362
363 /* Max length: 15 "UID=4294967295 " */
364 if ((logflags & XT_LOG_UID) && !iphoff && skb->sk) {
365 read_lock_bh(&skb->sk->sk_callback_lock);
366 if (skb->sk->sk_socket && skb->sk->sk_socket->file)
367 sb_add(m, "UID=%u GID=%u ",
368 skb->sk->sk_socket->file->f_cred->fsuid,
369 skb->sk->sk_socket->file->f_cred->fsgid);
370 read_unlock_bh(&skb->sk->sk_callback_lock);
371 }
372
373 /* Max length: 16 "MARK=0xFFFFFFFF " */
374 if (!iphoff && skb->mark)
375 sb_add(m, "MARK=0x%x ", skb->mark);
376
377 /* Proto Max log string length */
378 /* IP: 40+46+6+11+127 = 230 */
379 /* TCP: 10+max(25,20+30+13+9+32+11+127) = 252 */
380 /* UDP: 10+max(25,20) = 35 */
381 /* UDPLITE: 14+max(25,20) = 39 */
382 /* ICMP: 11+max(25, 18+25+max(19,14,24+3+n+10,3+n+10)) = 91+n */
383 /* ESP: 10+max(25)+15 = 50 */
384 /* AH: 9+max(25)+15 = 49 */
385 /* unknown: 10 */
386
387 /* (ICMP allows recursion one level deep) */
388 /* maxlen = IP + ICMP + IP + max(TCP,UDP,ICMP,unknown) */
389 /* maxlen = 230+ 91 + 230 + 252 = 803 */
390}
391
392static void dump_ipv4_mac_header(struct sbuff *m,
393 const struct nf_loginfo *info,
394 const struct sk_buff *skb)
395{
396 struct net_device *dev = skb->dev;
397 unsigned int logflags = 0;
398
399 if (info->type == NF_LOG_TYPE_LOG)
400 logflags = info->u.log.logflags;
401
402 if (!(logflags & XT_LOG_MACDECODE))
403 goto fallback;
404
405 switch (dev->type) {
406 case ARPHRD_ETHER:
407 sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
408 eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
409 ntohs(eth_hdr(skb)->h_proto));
410 return;
411 default:
412 break;
413 }
414
415fallback:
416 sb_add(m, "MAC=");
417 if (dev->hard_header_len &&
418 skb->mac_header != skb->network_header) {
419 const unsigned char *p = skb_mac_header(skb);
420 unsigned int i;
421
422 sb_add(m, "%02x", *p++);
423 for (i = 1; i < dev->hard_header_len; i++, p++)
424 sb_add(m, ":%02x", *p);
425 }
426 sb_add(m, " ");
427}
428
429static void
430log_packet_common(struct sbuff *m,
431 u_int8_t pf,
432 unsigned int hooknum,
433 const struct sk_buff *skb,
434 const struct net_device *in,
435 const struct net_device *out,
436 const struct nf_loginfo *loginfo,
437 const char *prefix)
438{
439 sb_add(m, "<%d>%sIN=%s OUT=%s ", loginfo->u.log.level,
440 prefix,
441 in ? in->name : "",
442 out ? out->name : "");
443#ifdef CONFIG_BRIDGE_NETFILTER
444 if (skb->nf_bridge) {
445 const struct net_device *physindev;
446 const struct net_device *physoutdev;
447
448 physindev = skb->nf_bridge->physindev;
449 if (physindev && in != physindev)
450 sb_add(m, "PHYSIN=%s ", physindev->name);
451 physoutdev = skb->nf_bridge->physoutdev;
452 if (physoutdev && out != physoutdev)
453 sb_add(m, "PHYSOUT=%s ", physoutdev->name);
454 }
455#endif
456}
457
458
459static void
460ipt_log_packet(u_int8_t pf,
461 unsigned int hooknum,
462 const struct sk_buff *skb,
463 const struct net_device *in,
464 const struct net_device *out,
465 const struct nf_loginfo *loginfo,
466 const char *prefix)
467{
468 struct sbuff *m = sb_open();
469
470 if (!loginfo)
471 loginfo = &default_loginfo;
472
473 log_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix);
474
475 if (in != NULL)
476 dump_ipv4_mac_header(m, loginfo, skb);
477
478 dump_ipv4_packet(m, loginfo, skb, 0);
479
480 sb_close(m);
481}
482
483#if IS_ENABLED(CONFIG_IPV6)
484/* One level of recursion won't kill us */
/*
 * Dump an IPv6 packet starting at offset ip6hoff into the log buffer:
 * addresses and base-header fields, then the extension-header chain,
 * then the transport header (TCP/UDP/ICMPv6/other).
 *
 * @recurse is non-zero on the outermost call only; ICMPv6 error
 * payloads are dumped once with recurse=0, which also suppresses the
 * UID/GID and enables the MARK line accordingly (see bottom).
 *
 * The "Max length" comments bound each sb_add() so the total line fits
 * the sbuff; keep them in sync with the format strings.
 */
static void dump_ipv6_packet(struct sbuff *m,
			     const struct nf_loginfo *info,
			     const struct sk_buff *skb, unsigned int ip6hoff,
			     int recurse)
{
	u_int8_t currenthdr;
	int fragment;
	struct ipv6hdr _ip6h;
	const struct ipv6hdr *ih;
	unsigned int ptr;
	unsigned int hdrlen = 0;
	unsigned int logflags;

	/* LOG rules supply their own flags; nf_log callers get everything. */
	if (info->type == NF_LOG_TYPE_LOG)
		logflags = info->u.log.logflags;
	else
		logflags = NF_LOG_MASK;

	ih = skb_header_pointer(skb, ip6hoff, sizeof(_ip6h), &_ip6h);
	if (ih == NULL) {
		sb_add(m, "TRUNCATED");
		return;
	}

	/* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000 " */
	sb_add(m, "SRC=%pI6 DST=%pI6 ", &ih->saddr, &ih->daddr);

	/* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */
	/* NOTE(review): %Zu is the old GNU spelling of %zu — consider %zu. */
	sb_add(m, "LEN=%Zu TC=%u HOPLIMIT=%u FLOWLBL=%u ",
	       ntohs(ih->payload_len) + sizeof(struct ipv6hdr),
	       (ntohl(*(__be32 *)ih) & 0x0ff00000) >> 20,
	       ih->hop_limit,
	       (ntohl(*(__be32 *)ih) & 0x000fffff));

	/* Walk the extension-header chain; ptr tracks the offset of the
	 * header currently being inspected.
	 */
	fragment = 0;
	ptr = ip6hoff + sizeof(struct ipv6hdr);
	currenthdr = ih->nexthdr;
	while (currenthdr != NEXTHDR_NONE && ip6t_ext_hdr(currenthdr)) {
		struct ipv6_opt_hdr _hdr;
		const struct ipv6_opt_hdr *hp;

		hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
		if (hp == NULL) {
			sb_add(m, "TRUNCATED");
			return;
		}

		/* Max length: 48 "OPT (...) " */
		if (logflags & XT_LOG_IPOPT)
			sb_add(m, "OPT ( ");

		switch (currenthdr) {
		case IPPROTO_FRAGMENT: {
			struct frag_hdr _fhdr;
			const struct frag_hdr *fh;

			sb_add(m, "FRAG:");
			fh = skb_header_pointer(skb, ptr, sizeof(_fhdr),
						&_fhdr);
			if (fh == NULL) {
				sb_add(m, "TRUNCATED ");
				return;
			}

			/* Max length: 6 "65535 " */
			sb_add(m, "%u ", ntohs(fh->frag_off) & 0xFFF8);

			/* Max length: 11 "INCOMPLETE " */
			if (fh->frag_off & htons(0x0001))
				sb_add(m, "INCOMPLETE ");

			sb_add(m, "ID:%08x ", ntohl(fh->identification));

			/* Non-zero offset: a non-first fragment, so no
			 * transport header is present in this packet.
			 */
			if (ntohs(fh->frag_off) & 0xFFF8)
				fragment = 1;

			hdrlen = 8;

			break;
		}
		case IPPROTO_DSTOPTS:
		case IPPROTO_ROUTING:
		case IPPROTO_HOPOPTS:
			if (fragment) {
				/* Close the still-open "OPT ( " paren. */
				if (logflags & XT_LOG_IPOPT)
					sb_add(m, ")");
				return;
			}
			hdrlen = ipv6_optlen(hp);
			break;
		/* Max Length */
		case IPPROTO_AH:
			if (logflags & XT_LOG_IPOPT) {
				struct ip_auth_hdr _ahdr;
				const struct ip_auth_hdr *ah;

				/* Max length: 3 "AH " */
				sb_add(m, "AH ");

				if (fragment) {
					sb_add(m, ")");
					return;
				}

				ah = skb_header_pointer(skb, ptr, sizeof(_ahdr),
							&_ahdr);
				if (ah == NULL) {
					/*
					 * Max length: 26 "INCOMPLETE [65535
					 * bytes] )"
					 */
					sb_add(m, "INCOMPLETE [%u bytes] )",
					       skb->len - ptr);
					return;
				}

				/* Length: 15 "SPI=0xF1234567 */
				sb_add(m, "SPI=0x%x ", ntohl(ah->spi));

			}

			/* AH hdrlen field counts 32-bit words minus 2. */
			hdrlen = (hp->hdrlen+2)<<2;
			break;
		case IPPROTO_ESP:
			if (logflags & XT_LOG_IPOPT) {
				struct ip_esp_hdr _esph;
				const struct ip_esp_hdr *eh;

				/* Max length: 4 "ESP " */
				sb_add(m, "ESP ");

				if (fragment) {
					sb_add(m, ")");
					return;
				}

				/*
				 * Max length: 26 "INCOMPLETE [65535 bytes] )"
				 */
				eh = skb_header_pointer(skb, ptr, sizeof(_esph),
							&_esph);
				if (eh == NULL) {
					sb_add(m, "INCOMPLETE [%u bytes] )",
					       skb->len - ptr);
					return;
				}

				/* Length: 16 "SPI=0xF1234567 )" */
				sb_add(m, "SPI=0x%x )", ntohl(eh->spi));

			}
			/* ESP payload is encrypted: stop parsing here. */
			return;
		default:
			/* Max length: 20 "Unknown Ext Hdr 255" */
			sb_add(m, "Unknown Ext Hdr %u", currenthdr);
			return;
		}
		if (logflags & XT_LOG_IPOPT)
			sb_add(m, ") ");

		currenthdr = hp->nexthdr;
		ptr += hdrlen;
	}

	/* Chain exhausted: currenthdr is the transport protocol, ptr its
	 * offset (meaningless payload if this was a later fragment).
	 */
	switch (currenthdr) {
	case IPPROTO_TCP:
		if (dump_tcp_header(m, skb, currenthdr, fragment, ptr,
		    logflags))
			return;
		break;
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		if (dump_udp_header(m, skb, currenthdr, fragment, ptr))
			return;
		break;
	case IPPROTO_ICMPV6: {
		struct icmp6hdr _icmp6h;
		const struct icmp6hdr *ic;

		/* Max length: 13 "PROTO=ICMPv6 " */
		sb_add(m, "PROTO=ICMPv6 ");

		if (fragment)
			break;

		/* Max length: 25 "INCOMPLETE [65535 bytes] " */
		ic = skb_header_pointer(skb, ptr, sizeof(_icmp6h), &_icmp6h);
		if (ic == NULL) {
			sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr);
			return;
		}

		/* Max length: 18 "TYPE=255 CODE=255 " */
		sb_add(m, "TYPE=%u CODE=%u ", ic->icmp6_type, ic->icmp6_code);

		switch (ic->icmp6_type) {
		case ICMPV6_ECHO_REQUEST:
		case ICMPV6_ECHO_REPLY:
			/* Max length: 19 "ID=65535 SEQ=65535 " */
			sb_add(m, "ID=%u SEQ=%u ",
				ntohs(ic->icmp6_identifier),
				ntohs(ic->icmp6_sequence));
			break;
		case ICMPV6_MGM_QUERY:
		case ICMPV6_MGM_REPORT:
		case ICMPV6_MGM_REDUCTION:
			break;

		case ICMPV6_PARAMPROB:
			/* Max length: 17 "POINTER=ffffffff " */
			sb_add(m, "POINTER=%08x ", ntohl(ic->icmp6_pointer));
			/* Fall through */
		case ICMPV6_DEST_UNREACH:
		case ICMPV6_PKT_TOOBIG:
		case ICMPV6_TIME_EXCEED:
			/* Max length: 3+maxlen */
			/* Dump the embedded offending packet, once. */
			if (recurse) {
				sb_add(m, "[");
				dump_ipv6_packet(m, info, skb,
						 ptr + sizeof(_icmp6h), 0);
				sb_add(m, "] ");
			}

			/* Max length: 10 "MTU=65535 " */
			if (ic->icmp6_type == ICMPV6_PKT_TOOBIG)
				sb_add(m, "MTU=%u ", ntohl(ic->icmp6_mtu));
		}
		break;
	}
	/* Max length: 10 "PROTO=255 " */
	default:
		sb_add(m, "PROTO=%u ", currenthdr);
	}

	/* Max length: 15 "UID=4294967295 " */
	if ((logflags & XT_LOG_UID) && recurse && skb->sk) {
		read_lock_bh(&skb->sk->sk_callback_lock);
		if (skb->sk->sk_socket && skb->sk->sk_socket->file)
			sb_add(m, "UID=%u GID=%u ",
				skb->sk->sk_socket->file->f_cred->fsuid,
				skb->sk->sk_socket->file->f_cred->fsgid);
		read_unlock_bh(&skb->sk->sk_callback_lock);
	}

	/* Max length: 16 "MARK=0xFFFFFFFF " */
	if (!recurse && skb->mark)
		sb_add(m, "MARK=0x%x ", skb->mark);
}
733
734static void dump_ipv6_mac_header(struct sbuff *m,
735 const struct nf_loginfo *info,
736 const struct sk_buff *skb)
737{
738 struct net_device *dev = skb->dev;
739 unsigned int logflags = 0;
740
741 if (info->type == NF_LOG_TYPE_LOG)
742 logflags = info->u.log.logflags;
743
744 if (!(logflags & XT_LOG_MACDECODE))
745 goto fallback;
746
747 switch (dev->type) {
748 case ARPHRD_ETHER:
749 sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
750 eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
751 ntohs(eth_hdr(skb)->h_proto));
752 return;
753 default:
754 break;
755 }
756
757fallback:
758 sb_add(m, "MAC=");
759 if (dev->hard_header_len &&
760 skb->mac_header != skb->network_header) {
761 const unsigned char *p = skb_mac_header(skb);
762 unsigned int len = dev->hard_header_len;
763 unsigned int i;
764
765 if (dev->type == ARPHRD_SIT) {
766 p -= ETH_HLEN;
767
768 if (p < skb->head)
769 p = NULL;
770 }
771
772 if (p != NULL) {
773 sb_add(m, "%02x", *p++);
774 for (i = 1; i < len; i++)
775 sb_add(m, ":%02x", *p++);
776 }
777 sb_add(m, " ");
778
779 if (dev->type == ARPHRD_SIT) {
780 const struct iphdr *iph =
781 (struct iphdr *)skb_mac_header(skb);
782 sb_add(m, "TUNNEL=%pI4->%pI4 ", &iph->saddr,
783 &iph->daddr);
784 }
785 } else
786 sb_add(m, " ");
787}
788
789static void
790ip6t_log_packet(u_int8_t pf,
791 unsigned int hooknum,
792 const struct sk_buff *skb,
793 const struct net_device *in,
794 const struct net_device *out,
795 const struct nf_loginfo *loginfo,
796 const char *prefix)
797{
798 struct sbuff *m = sb_open();
799
800 if (!loginfo)
801 loginfo = &default_loginfo;
802
803 log_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix);
804
805 if (in != NULL)
806 dump_ipv6_mac_header(m, loginfo, skb);
807
808 dump_ipv6_packet(m, loginfo, skb, skb_network_offset(skb), 1);
809
810 sb_close(m);
811}
812#endif
813
814static unsigned int
815log_tg(struct sk_buff *skb, const struct xt_action_param *par)
816{
817 const struct xt_log_info *loginfo = par->targinfo;
818 struct nf_loginfo li;
819
820 li.type = NF_LOG_TYPE_LOG;
821 li.u.log.level = loginfo->level;
822 li.u.log.logflags = loginfo->logflags;
823
824 if (par->family == NFPROTO_IPV4)
825 ipt_log_packet(NFPROTO_IPV4, par->hooknum, skb, par->in,
826 par->out, &li, loginfo->prefix);
827#if IS_ENABLED(CONFIG_IPV6)
828 else if (par->family == NFPROTO_IPV6)
829 ip6t_log_packet(NFPROTO_IPV6, par->hooknum, skb, par->in,
830 par->out, &li, loginfo->prefix);
831#endif
832 else
833 WARN_ON_ONCE(1);
834
835 return XT_CONTINUE;
836}
837
838static int log_tg_check(const struct xt_tgchk_param *par)
839{
840 const struct xt_log_info *loginfo = par->targinfo;
841
842 if (par->family != NFPROTO_IPV4 && par->family != NFPROTO_IPV6)
843 return -EINVAL;
844
845 if (loginfo->level >= 8) {
846 pr_debug("level %u >= 8\n", loginfo->level);
847 return -EINVAL;
848 }
849
850 if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') {
851 pr_debug("prefix is not null-terminated\n");
852 return -EINVAL;
853 }
854
855 return 0;
856}
857
/* One "LOG" target registration per supported family; both entries
 * share the same target function, targinfo size and check routine.
 */
static struct xt_target log_tg_regs[] __read_mostly = {
	{
		.name		= "LOG",
		.family		= NFPROTO_IPV4,
		.target		= log_tg,
		.targetsize	= sizeof(struct xt_log_info),
		.checkentry	= log_tg_check,
		.me		= THIS_MODULE,
	},
#if IS_ENABLED(CONFIG_IPV6)
	{
		.name		= "LOG",
		.family		= NFPROTO_IPV6,
		.target		= log_tg,
		.targetsize	= sizeof(struct xt_log_info),
		.checkentry	= log_tg_check,
		.me		= THIS_MODULE,
	},
#endif
};
878
/* nf_log backend so other nf_log_packet() users can emit IPv4 logs
 * through this module's formatter.
 */
static struct nf_logger ipt_log_logger __read_mostly = {
	.name		= "ipt_LOG",
	.logfn		= &ipt_log_packet,
	.me		= THIS_MODULE,
};
884
#if IS_ENABLED(CONFIG_IPV6)
/* nf_log backend for IPv6, mirroring ipt_log_logger above. */
static struct nf_logger ip6t_log_logger __read_mostly = {
	.name		= "ip6t_LOG",
	.logfn		= &ip6t_log_packet,
	.me		= THIS_MODULE,
};
#endif
892
/* Module init: register the xtables LOG targets, then install this
 * module as the nf_log backend for each family.
 */
static int __init log_tg_init(void)
{
	int ret;

	ret = xt_register_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs));
	if (ret < 0)
		return ret;

	/* NOTE(review): nf_log_register()'s return value is ignored here;
	 * if it can fail (e.g. a backend already bound for the family),
	 * the module still loads without becoming the logger — confirm
	 * this is the intended behavior.
	 */
	nf_log_register(NFPROTO_IPV4, &ipt_log_logger);
#if IS_ENABLED(CONFIG_IPV6)
	nf_log_register(NFPROTO_IPV6, &ip6t_log_logger);
#endif
	return 0;
}
907
/* Module exit: tear down in reverse order of log_tg_init() — unhook
 * the nf_log backends first, then drop the target registrations.
 */
static void __exit log_tg_exit(void)
{
	nf_log_unregister(&ipt_log_logger);
#if IS_ENABLED(CONFIG_IPV6)
	nf_log_unregister(&ip6t_log_logger);
#endif
	xt_unregister_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs));
}
916
module_init(log_tg_init);
module_exit(log_tg_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_AUTHOR("Jan Rekorajski <baggins@pld.org.pl>");
MODULE_DESCRIPTION("Xtables: IPv4/IPv6 packet logging");
/* Keep modprobe of the legacy per-family target names working. */
MODULE_ALIAS("ipt_LOG");
MODULE_ALIAS("ip6t_LOG");
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 629b06182f3f..32bb75324e76 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1645,6 +1645,24 @@ static void netlink_destroy_callback(struct netlink_callback *cb)
1645 kfree(cb); 1645 kfree(cb);
1646} 1646}
1647 1647
1648struct nlmsghdr *
1649__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
1650{
1651 struct nlmsghdr *nlh;
1652 int size = NLMSG_LENGTH(len);
1653
1654 nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
1655 nlh->nlmsg_type = type;
1656 nlh->nlmsg_len = size;
1657 nlh->nlmsg_flags = flags;
1658 nlh->nlmsg_pid = pid;
1659 nlh->nlmsg_seq = seq;
1660 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
1661 memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
1662 return nlh;
1663}
1664EXPORT_SYMBOL(__nlmsg_put);
1665
1648/* 1666/*
1649 * It looks a bit ugly. 1667 * It looks a bit ugly.
1650 * It would be better to create kernel thread. 1668 * It would be better to create kernel thread.
@@ -1718,10 +1736,7 @@ errout_skb:
1718 1736
1719int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, 1737int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1720 const struct nlmsghdr *nlh, 1738 const struct nlmsghdr *nlh,
1721 int (*dump)(struct sk_buff *skb, 1739 struct netlink_dump_control *control)
1722 struct netlink_callback *),
1723 int (*done)(struct netlink_callback *),
1724 u16 min_dump_alloc)
1725{ 1740{
1726 struct netlink_callback *cb; 1741 struct netlink_callback *cb;
1727 struct sock *sk; 1742 struct sock *sk;
@@ -1732,10 +1747,11 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1732 if (cb == NULL) 1747 if (cb == NULL)
1733 return -ENOBUFS; 1748 return -ENOBUFS;
1734 1749
1735 cb->dump = dump; 1750 cb->dump = control->dump;
1736 cb->done = done; 1751 cb->done = control->done;
1737 cb->nlh = nlh; 1752 cb->nlh = nlh;
1738 cb->min_dump_alloc = min_dump_alloc; 1753 cb->data = control->data;
1754 cb->min_dump_alloc = control->min_dump_alloc;
1739 atomic_inc(&skb->users); 1755 atomic_inc(&skb->users);
1740 cb->skb = skb; 1756 cb->skb = skb;
1741 1757
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index c29d2568c9e0..9f40441d7a7d 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -498,6 +498,37 @@ int genl_unregister_family(struct genl_family *family)
498} 498}
499EXPORT_SYMBOL(genl_unregister_family); 499EXPORT_SYMBOL(genl_unregister_family);
500 500
501/**
502 * genlmsg_put - Add generic netlink header to netlink message
503 * @skb: socket buffer holding the message
504 * @pid: netlink pid the message is addressed to
505 * @seq: sequence number (usually the one of the sender)
506 * @family: generic netlink family
507 * @flags netlink message flags
508 * @cmd: generic netlink command
509 *
510 * Returns pointer to user specific header
511 */
512void *genlmsg_put(struct sk_buff *skb, u32 pid, u32 seq,
513 struct genl_family *family, int flags, u8 cmd)
514{
515 struct nlmsghdr *nlh;
516 struct genlmsghdr *hdr;
517
518 nlh = nlmsg_put(skb, pid, seq, family->id, GENL_HDRLEN +
519 family->hdrsize, flags);
520 if (nlh == NULL)
521 return NULL;
522
523 hdr = nlmsg_data(nlh);
524 hdr->cmd = cmd;
525 hdr->version = family->version;
526 hdr->reserved = 0;
527
528 return (char *) hdr + GENL_HDRLEN;
529}
530EXPORT_SYMBOL(genlmsg_put);
531
501static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 532static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
502{ 533{
503 struct genl_ops *ops; 534 struct genl_ops *ops;
@@ -532,8 +563,13 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
532 return -EOPNOTSUPP; 563 return -EOPNOTSUPP;
533 564
534 genl_unlock(); 565 genl_unlock();
535 err = netlink_dump_start(net->genl_sock, skb, nlh, 566 {
536 ops->dumpit, ops->done, 0); 567 struct netlink_dump_control c = {
568 .dump = ops->dumpit,
569 .done = ops->done,
570 };
571 err = netlink_dump_start(net->genl_sock, skb, nlh, &c);
572 }
537 genl_lock(); 573 genl_lock();
538 return err; 574 return err;
539 } 575 }
diff --git a/net/nfc/af_nfc.c b/net/nfc/af_nfc.c
index da67756425ce..9d68441e2a5a 100644
--- a/net/nfc/af_nfc.c
+++ b/net/nfc/af_nfc.c
@@ -30,7 +30,7 @@ static DEFINE_RWLOCK(proto_tab_lock);
30static const struct nfc_protocol *proto_tab[NFC_SOCKPROTO_MAX]; 30static const struct nfc_protocol *proto_tab[NFC_SOCKPROTO_MAX];
31 31
32static int nfc_sock_create(struct net *net, struct socket *sock, int proto, 32static int nfc_sock_create(struct net *net, struct socket *sock, int proto,
33 int kern) 33 int kern)
34{ 34{
35 int rc = -EPROTONOSUPPORT; 35 int rc = -EPROTONOSUPPORT;
36 36
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 3ddf6e698df0..295d129864d2 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -181,13 +181,13 @@ error:
181 return rc; 181 return rc;
182} 182}
183 183
184int nfc_dep_link_up(struct nfc_dev *dev, int target_index, 184int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode)
185 u8 comm_mode, u8 rf_mode)
186{ 185{
187 int rc = 0; 186 int rc = 0;
187 u8 *gb;
188 size_t gb_len;
188 189
189 pr_debug("dev_name=%s comm:%d rf:%d\n", 190 pr_debug("dev_name=%s comm %d\n", dev_name(&dev->dev), comm_mode);
190 dev_name(&dev->dev), comm_mode, rf_mode);
191 191
192 if (!dev->ops->dep_link_up) 192 if (!dev->ops->dep_link_up)
193 return -EOPNOTSUPP; 193 return -EOPNOTSUPP;
@@ -204,7 +204,13 @@ int nfc_dep_link_up(struct nfc_dev *dev, int target_index,
204 goto error; 204 goto error;
205 } 205 }
206 206
207 rc = dev->ops->dep_link_up(dev, target_index, comm_mode, rf_mode); 207 gb = nfc_llcp_general_bytes(dev, &gb_len);
208 if (gb_len > NFC_MAX_GT_LEN) {
209 rc = -EINVAL;
210 goto error;
211 }
212
213 rc = dev->ops->dep_link_up(dev, target_index, comm_mode, gb, gb_len);
208 214
209error: 215error:
210 device_unlock(&dev->dev); 216 device_unlock(&dev->dev);
@@ -250,7 +256,7 @@ error:
250} 256}
251 257
252int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx, 258int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
253 u8 comm_mode, u8 rf_mode) 259 u8 comm_mode, u8 rf_mode)
254{ 260{
255 dev->dep_link_up = true; 261 dev->dep_link_up = true;
256 dev->dep_rf_mode = rf_mode; 262 dev->dep_rf_mode = rf_mode;
@@ -330,10 +336,8 @@ error:
330 * 336 *
331 * The user must wait for the callback before calling this function again. 337 * The user must wait for the callback before calling this function again.
332 */ 338 */
333int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, 339int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
334 struct sk_buff *skb, 340 data_exchange_cb_t cb, void *cb_context)
335 data_exchange_cb_t cb,
336 void *cb_context)
337{ 341{
338 int rc; 342 int rc;
339 343
@@ -357,8 +361,7 @@ error:
357 361
358int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gb, u8 gb_len) 362int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gb, u8 gb_len)
359{ 363{
360 pr_debug("dev_name=%s gb_len=%d\n", 364 pr_debug("dev_name=%s gb_len=%d\n", dev_name(&dev->dev), gb_len);
361 dev_name(&dev->dev), gb_len);
362 365
363 if (gb_len > NFC_MAX_GT_LEN) 366 if (gb_len > NFC_MAX_GT_LEN)
364 return -EINVAL; 367 return -EINVAL;
@@ -367,12 +370,6 @@ int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gb, u8 gb_len)
367} 370}
368EXPORT_SYMBOL(nfc_set_remote_general_bytes); 371EXPORT_SYMBOL(nfc_set_remote_general_bytes);
369 372
370u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, u8 *gt_len)
371{
372 return nfc_llcp_general_bytes(dev, gt_len);
373}
374EXPORT_SYMBOL(nfc_get_local_general_bytes);
375
376/** 373/**
377 * nfc_alloc_send_skb - allocate a skb for data exchange responses 374 * nfc_alloc_send_skb - allocate a skb for data exchange responses
378 * 375 *
@@ -380,8 +377,8 @@ EXPORT_SYMBOL(nfc_get_local_general_bytes);
380 * @gfp: gfp flags 377 * @gfp: gfp flags
381 */ 378 */
382struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk, 379struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk,
383 unsigned int flags, unsigned int size, 380 unsigned int flags, unsigned int size,
384 unsigned int *err) 381 unsigned int *err)
385{ 382{
386 struct sk_buff *skb; 383 struct sk_buff *skb;
387 unsigned int total_size; 384 unsigned int total_size;
@@ -428,25 +425,20 @@ EXPORT_SYMBOL(nfc_alloc_recv_skb);
428 * are found. After calling this function, the device driver must stop 425 * are found. After calling this function, the device driver must stop
429 * polling for targets. 426 * polling for targets.
430 */ 427 */
431int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets, 428int nfc_targets_found(struct nfc_dev *dev,
432 int n_targets) 429 struct nfc_target *targets, int n_targets)
433{ 430{
434 int i;
435
436 pr_debug("dev_name=%s n_targets=%d\n", dev_name(&dev->dev), n_targets); 431 pr_debug("dev_name=%s n_targets=%d\n", dev_name(&dev->dev), n_targets);
437 432
438 dev->polling = false; 433 dev->polling = false;
439 434
440 for (i = 0; i < n_targets; i++)
441 targets[i].idx = dev->target_idx++;
442
443 spin_lock_bh(&dev->targets_lock); 435 spin_lock_bh(&dev->targets_lock);
444 436
445 dev->targets_generation++; 437 dev->targets_generation++;
446 438
447 kfree(dev->targets); 439 kfree(dev->targets);
448 dev->targets = kmemdup(targets, n_targets * sizeof(struct nfc_target), 440 dev->targets = kmemdup(targets, n_targets * sizeof(struct nfc_target),
449 GFP_ATOMIC); 441 GFP_ATOMIC);
450 442
451 if (!dev->targets) { 443 if (!dev->targets) {
452 dev->n_targets = 0; 444 dev->n_targets = 0;
@@ -506,15 +498,14 @@ struct nfc_dev *nfc_get_device(unsigned idx)
506 * @supported_protocols: NFC protocols supported by the device 498 * @supported_protocols: NFC protocols supported by the device
507 */ 499 */
508struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, 500struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
509 u32 supported_protocols, 501 u32 supported_protocols,
510 int tx_headroom, 502 int tx_headroom, int tx_tailroom)
511 int tx_tailroom)
512{ 503{
513 static atomic_t dev_no = ATOMIC_INIT(0); 504 static atomic_t dev_no = ATOMIC_INIT(0);
514 struct nfc_dev *dev; 505 struct nfc_dev *dev;
515 506
516 if (!ops->start_poll || !ops->stop_poll || !ops->activate_target || 507 if (!ops->start_poll || !ops->stop_poll || !ops->activate_target ||
517 !ops->deactivate_target || !ops->data_exchange) 508 !ops->deactivate_target || !ops->data_exchange)
518 return NULL; 509 return NULL;
519 510
520 if (!supported_protocols) 511 if (!supported_protocols)
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
index 151f2ef429c4..7b76eb7192f3 100644
--- a/net/nfc/llcp/commands.c
+++ b/net/nfc/llcp/commands.c
@@ -118,7 +118,7 @@ u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length)
118} 118}
119 119
120int nfc_llcp_parse_tlv(struct nfc_llcp_local *local, 120int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
121 u8 *tlv_array, u16 tlv_array_len) 121 u8 *tlv_array, u16 tlv_array_len)
122{ 122{
123 u8 *tlv = tlv_array, type, length, offset = 0; 123 u8 *tlv = tlv_array, type, length, offset = 0;
124 124
@@ -152,6 +152,8 @@ int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
152 case LLCP_TLV_RW: 152 case LLCP_TLV_RW:
153 local->remote_rw = llcp_tlv_rw(tlv); 153 local->remote_rw = llcp_tlv_rw(tlv);
154 break; 154 break;
155 case LLCP_TLV_SN:
156 break;
155 default: 157 default:
156 pr_err("Invalid gt tlv value 0x%x\n", type); 158 pr_err("Invalid gt tlv value 0x%x\n", type);
157 break; 159 break;
@@ -162,15 +164,15 @@ int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
162 } 164 }
163 165
164 pr_debug("version 0x%x miu %d lto %d opt 0x%x wks 0x%x rw %d\n", 166 pr_debug("version 0x%x miu %d lto %d opt 0x%x wks 0x%x rw %d\n",
165 local->remote_version, local->remote_miu, 167 local->remote_version, local->remote_miu,
166 local->remote_lto, local->remote_opt, 168 local->remote_lto, local->remote_opt,
167 local->remote_wks, local->remote_rw); 169 local->remote_wks, local->remote_rw);
168 170
169 return 0; 171 return 0;
170} 172}
171 173
172static struct sk_buff *llcp_add_header(struct sk_buff *pdu, 174static struct sk_buff *llcp_add_header(struct sk_buff *pdu,
173 u8 dsap, u8 ssap, u8 ptype) 175 u8 dsap, u8 ssap, u8 ptype)
174{ 176{
175 u8 header[2]; 177 u8 header[2];
176 178
@@ -186,7 +188,8 @@ static struct sk_buff *llcp_add_header(struct sk_buff *pdu,
186 return pdu; 188 return pdu;
187} 189}
188 190
189static struct sk_buff *llcp_add_tlv(struct sk_buff *pdu, u8 *tlv, u8 tlv_length) 191static struct sk_buff *llcp_add_tlv(struct sk_buff *pdu, u8 *tlv,
192 u8 tlv_length)
190{ 193{
191 /* XXX Add an skb length check */ 194 /* XXX Add an skb length check */
192 195
@@ -199,7 +202,7 @@ static struct sk_buff *llcp_add_tlv(struct sk_buff *pdu, u8 *tlv, u8 tlv_length)
199} 202}
200 203
201static struct sk_buff *llcp_allocate_pdu(struct nfc_llcp_sock *sock, 204static struct sk_buff *llcp_allocate_pdu(struct nfc_llcp_sock *sock,
202 u8 cmd, u16 size) 205 u8 cmd, u16 size)
203{ 206{
204 struct sk_buff *skb; 207 struct sk_buff *skb;
205 int err; 208 int err;
@@ -208,7 +211,7 @@ static struct sk_buff *llcp_allocate_pdu(struct nfc_llcp_sock *sock,
208 return NULL; 211 return NULL;
209 212
210 skb = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT, 213 skb = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
211 size + LLCP_HEADER_SIZE, &err); 214 size + LLCP_HEADER_SIZE, &err);
212 if (skb == NULL) { 215 if (skb == NULL) {
213 pr_err("Could not allocate PDU\n"); 216 pr_err("Could not allocate PDU\n");
214 return NULL; 217 return NULL;
@@ -276,7 +279,7 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
276 skb = llcp_add_header(skb, 0, 0, LLCP_PDU_SYMM); 279 skb = llcp_add_header(skb, 0, 0, LLCP_PDU_SYMM);
277 280
278 return nfc_data_exchange(dev, local->target_idx, skb, 281 return nfc_data_exchange(dev, local->target_idx, skb,
279 nfc_llcp_recv, local); 282 nfc_llcp_recv, local);
280} 283}
281 284
282int nfc_llcp_send_connect(struct nfc_llcp_sock *sock) 285int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
@@ -284,6 +287,9 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
284 struct nfc_llcp_local *local; 287 struct nfc_llcp_local *local;
285 struct sk_buff *skb; 288 struct sk_buff *skb;
286 u8 *service_name_tlv = NULL, service_name_tlv_length; 289 u8 *service_name_tlv = NULL, service_name_tlv_length;
290 u8 *miux_tlv = NULL, miux_tlv_length;
291 u8 *rw_tlv = NULL, rw_tlv_length, rw;
292 __be16 miux;
287 int err; 293 int err;
288 u16 size = 0; 294 u16 size = 0;
289 295
@@ -295,12 +301,21 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
295 301
296 if (sock->service_name != NULL) { 302 if (sock->service_name != NULL) {
297 service_name_tlv = nfc_llcp_build_tlv(LLCP_TLV_SN, 303 service_name_tlv = nfc_llcp_build_tlv(LLCP_TLV_SN,
298 sock->service_name, 304 sock->service_name,
299 sock->service_name_len, 305 sock->service_name_len,
300 &service_name_tlv_length); 306 &service_name_tlv_length);
301 size += service_name_tlv_length; 307 size += service_name_tlv_length;
302 } 308 }
303 309
310 miux = cpu_to_be16(LLCP_MAX_MIUX);
311 miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
312 &miux_tlv_length);
313 size += miux_tlv_length;
314
315 rw = LLCP_MAX_RW;
316 rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
317 size += rw_tlv_length;
318
304 pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len); 319 pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len);
305 320
306 skb = llcp_allocate_pdu(sock, LLCP_PDU_CONNECT, size); 321 skb = llcp_allocate_pdu(sock, LLCP_PDU_CONNECT, size);
@@ -311,7 +326,10 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
311 326
312 if (service_name_tlv != NULL) 327 if (service_name_tlv != NULL)
313 skb = llcp_add_tlv(skb, service_name_tlv, 328 skb = llcp_add_tlv(skb, service_name_tlv,
314 service_name_tlv_length); 329 service_name_tlv_length);
330
331 skb = llcp_add_tlv(skb, miux_tlv, miux_tlv_length);
332 skb = llcp_add_tlv(skb, rw_tlv, rw_tlv_length);
315 333
316 skb_queue_tail(&local->tx_queue, skb); 334 skb_queue_tail(&local->tx_queue, skb);
317 335
@@ -321,6 +339,8 @@ error_tlv:
321 pr_err("error %d\n", err); 339 pr_err("error %d\n", err);
322 340
323 kfree(service_name_tlv); 341 kfree(service_name_tlv);
342 kfree(miux_tlv);
343 kfree(rw_tlv);
324 344
325 return err; 345 return err;
326} 346}
@@ -329,6 +349,11 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)
329{ 349{
330 struct nfc_llcp_local *local; 350 struct nfc_llcp_local *local;
331 struct sk_buff *skb; 351 struct sk_buff *skb;
352 u8 *miux_tlv = NULL, miux_tlv_length;
353 u8 *rw_tlv = NULL, rw_tlv_length, rw;
354 __be16 miux;
355 int err;
356 u16 size = 0;
332 357
333 pr_debug("Sending CC\n"); 358 pr_debug("Sending CC\n");
334 359
@@ -336,13 +361,35 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)
336 if (local == NULL) 361 if (local == NULL)
337 return -ENODEV; 362 return -ENODEV;
338 363
339 skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, 0); 364 miux = cpu_to_be16(LLCP_MAX_MIUX);
340 if (skb == NULL) 365 miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
341 return -ENOMEM; 366 &miux_tlv_length);
367 size += miux_tlv_length;
368
369 rw = LLCP_MAX_RW;
370 rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
371 size += rw_tlv_length;
372
373 skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size);
374 if (skb == NULL) {
375 err = -ENOMEM;
376 goto error_tlv;
377 }
378
379 skb = llcp_add_tlv(skb, miux_tlv, miux_tlv_length);
380 skb = llcp_add_tlv(skb, rw_tlv, rw_tlv_length);
342 381
343 skb_queue_tail(&local->tx_queue, skb); 382 skb_queue_tail(&local->tx_queue, skb);
344 383
345 return 0; 384 return 0;
385
386error_tlv:
387 pr_err("error %d\n", err);
388
389 kfree(miux_tlv);
390 kfree(rw_tlv);
391
392 return err;
346} 393}
347 394
348int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason) 395int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason)
@@ -397,3 +444,87 @@ int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock)
397 444
398 return 0; 445 return 0;
399} 446}
447
448int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
449 struct msghdr *msg, size_t len)
450{
451 struct sk_buff *pdu;
452 struct sock *sk = &sock->sk;
453 struct nfc_llcp_local *local;
454 size_t frag_len = 0, remaining_len;
455 u8 *msg_data, *msg_ptr;
456
457 pr_debug("Send I frame len %zd\n", len);
458
459 local = sock->local;
460 if (local == NULL)
461 return -ENODEV;
462
463 msg_data = kzalloc(len, GFP_KERNEL);
464 if (msg_data == NULL)
465 return -ENOMEM;
466
467 if (memcpy_fromiovec(msg_data, msg->msg_iov, len)) {
468 kfree(msg_data);
469 return -EFAULT;
470 }
471
472 remaining_len = len;
473 msg_ptr = msg_data;
474
475 while (remaining_len > 0) {
476
477 frag_len = min_t(u16, local->remote_miu, remaining_len);
478
479 pr_debug("Fragment %zd bytes remaining %zd",
480 frag_len, remaining_len);
481
482 pdu = llcp_allocate_pdu(sock, LLCP_PDU_I,
483 frag_len + LLCP_SEQUENCE_SIZE);
484 if (pdu == NULL)
485 return -ENOMEM;
486
487 skb_put(pdu, LLCP_SEQUENCE_SIZE);
488
489 memcpy(skb_put(pdu, frag_len), msg_ptr, frag_len);
490
491 skb_queue_head(&sock->tx_queue, pdu);
492
493 lock_sock(sk);
494
495 nfc_llcp_queue_i_frames(sock);
496
497 release_sock(sk);
498
499 remaining_len -= frag_len;
500 msg_ptr += len;
501 }
502
503 kfree(msg_data);
504
505 return 0;
506}
507
508int nfc_llcp_send_rr(struct nfc_llcp_sock *sock)
509{
510 struct sk_buff *skb;
511 struct nfc_llcp_local *local;
512
513 pr_debug("Send rr nr %d\n", sock->recv_n);
514
515 local = sock->local;
516 if (local == NULL)
517 return -ENODEV;
518
519 skb = llcp_allocate_pdu(sock, LLCP_PDU_RR, LLCP_SEQUENCE_SIZE);
520 if (skb == NULL)
521 return -ENOMEM;
522
523 skb_put(skb, LLCP_SEQUENCE_SIZE);
524
525 skb->data[2] = sock->recv_n % 16;
526
527 skb_queue_head(&local->tx_queue, skb);
528
529 return 0;
530}
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index 1d32680807d6..17a578f641f1 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -37,7 +37,6 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local)
37 struct sock *sk, *parent_sk; 37 struct sock *sk, *parent_sk;
38 int i; 38 int i;
39 39
40
41 mutex_lock(&local->socket_lock); 40 mutex_lock(&local->socket_lock);
42 41
43 for (i = 0; i < LLCP_MAX_SAP; i++) { 42 for (i = 0; i < LLCP_MAX_SAP; i++) {
@@ -47,7 +46,7 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local)
47 46
48 /* Release all child sockets */ 47 /* Release all child sockets */
49 list_for_each_entry_safe(s, n, &parent->list, list) { 48 list_for_each_entry_safe(s, n, &parent->list, list) {
50 list_del(&s->list); 49 list_del_init(&s->list);
51 sk = &s->sk; 50 sk = &s->sk;
52 51
53 lock_sock(sk); 52 lock_sock(sk);
@@ -56,9 +55,12 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local)
56 nfc_put_device(s->dev); 55 nfc_put_device(s->dev);
57 56
58 sk->sk_state = LLCP_CLOSED; 57 sk->sk_state = LLCP_CLOSED;
59 sock_set_flag(sk, SOCK_DEAD);
60 58
61 release_sock(sk); 59 release_sock(sk);
60
61 sock_orphan(sk);
62
63 s->local = NULL;
62 } 64 }
63 65
64 parent_sk = &parent->sk; 66 parent_sk = &parent->sk;
@@ -70,18 +72,19 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local)
70 struct sock *accept_sk; 72 struct sock *accept_sk;
71 73
72 list_for_each_entry_safe(lsk, n, &parent->accept_queue, 74 list_for_each_entry_safe(lsk, n, &parent->accept_queue,
73 accept_queue) { 75 accept_queue) {
74 accept_sk = &lsk->sk; 76 accept_sk = &lsk->sk;
75 lock_sock(accept_sk); 77 lock_sock(accept_sk);
76 78
77 nfc_llcp_accept_unlink(accept_sk); 79 nfc_llcp_accept_unlink(accept_sk);
78 80
79 accept_sk->sk_state = LLCP_CLOSED; 81 accept_sk->sk_state = LLCP_CLOSED;
80 sock_set_flag(accept_sk, SOCK_DEAD);
81 82
82 release_sock(accept_sk); 83 release_sock(accept_sk);
83 84
84 sock_orphan(accept_sk); 85 sock_orphan(accept_sk);
86
87 lsk->local = NULL;
85 } 88 }
86 } 89 }
87 90
@@ -89,18 +92,32 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local)
89 nfc_put_device(parent->dev); 92 nfc_put_device(parent->dev);
90 93
91 parent_sk->sk_state = LLCP_CLOSED; 94 parent_sk->sk_state = LLCP_CLOSED;
92 sock_set_flag(parent_sk, SOCK_DEAD);
93 95
94 release_sock(parent_sk); 96 release_sock(parent_sk);
97
98 sock_orphan(parent_sk);
99
100 parent->local = NULL;
95 } 101 }
96 102
97 mutex_unlock(&local->socket_lock); 103 mutex_unlock(&local->socket_lock);
98} 104}
99 105
106static void nfc_llcp_clear_sdp(struct nfc_llcp_local *local)
107{
108 mutex_lock(&local->sdp_lock);
109
110 local->local_wks = 0;
111 local->local_sdp = 0;
112 local->local_sap = 0;
113
114 mutex_unlock(&local->sdp_lock);
115}
116
100static void nfc_llcp_timeout_work(struct work_struct *work) 117static void nfc_llcp_timeout_work(struct work_struct *work)
101{ 118{
102 struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local, 119 struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
103 timeout_work); 120 timeout_work);
104 121
105 nfc_dep_link_down(local->dev); 122 nfc_dep_link_down(local->dev);
106} 123}
@@ -146,7 +163,7 @@ static int nfc_llcp_wks_sap(char *service_name, size_t service_name_len)
146 163
147 num_wks = ARRAY_SIZE(wks); 164 num_wks = ARRAY_SIZE(wks);
148 165
149 for (sap = 0 ; sap < num_wks; sap++) { 166 for (sap = 0; sap < num_wks; sap++) {
150 if (wks[sap] == NULL) 167 if (wks[sap] == NULL)
151 continue; 168 continue;
152 169
@@ -158,13 +175,13 @@ static int nfc_llcp_wks_sap(char *service_name, size_t service_name_len)
158} 175}
159 176
160u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local, 177u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
161 struct nfc_llcp_sock *sock) 178 struct nfc_llcp_sock *sock)
162{ 179{
163 mutex_lock(&local->sdp_lock); 180 mutex_lock(&local->sdp_lock);
164 181
165 if (sock->service_name != NULL && sock->service_name_len > 0) { 182 if (sock->service_name != NULL && sock->service_name_len > 0) {
166 int ssap = nfc_llcp_wks_sap(sock->service_name, 183 int ssap = nfc_llcp_wks_sap(sock->service_name,
167 sock->service_name_len); 184 sock->service_name_len);
168 185
169 if (ssap > 0) { 186 if (ssap > 0) {
170 pr_debug("WKS %d\n", ssap); 187 pr_debug("WKS %d\n", ssap);
@@ -176,7 +193,7 @@ u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
176 return LLCP_SAP_MAX; 193 return LLCP_SAP_MAX;
177 } 194 }
178 195
179 set_bit(BIT(ssap), &local->local_wks); 196 set_bit(ssap, &local->local_wks);
180 mutex_unlock(&local->sdp_lock); 197 mutex_unlock(&local->sdp_lock);
181 198
182 return ssap; 199 return ssap;
@@ -195,25 +212,25 @@ u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
195 212
196 pr_debug("SDP ssap %d\n", LLCP_WKS_NUM_SAP + ssap); 213 pr_debug("SDP ssap %d\n", LLCP_WKS_NUM_SAP + ssap);
197 214
198 set_bit(BIT(ssap), &local->local_sdp); 215 set_bit(ssap, &local->local_sdp);
199 mutex_unlock(&local->sdp_lock); 216 mutex_unlock(&local->sdp_lock);
200 217
201 return LLCP_WKS_NUM_SAP + ssap; 218 return LLCP_WKS_NUM_SAP + ssap;
202 219
203 } else if (sock->ssap != 0) { 220 } else if (sock->ssap != 0) {
204 if (sock->ssap < LLCP_WKS_NUM_SAP) { 221 if (sock->ssap < LLCP_WKS_NUM_SAP) {
205 if (!(local->local_wks & BIT(sock->ssap))) { 222 if (!test_bit(sock->ssap, &local->local_wks)) {
206 set_bit(BIT(sock->ssap), &local->local_wks); 223 set_bit(sock->ssap, &local->local_wks);
207 mutex_unlock(&local->sdp_lock); 224 mutex_unlock(&local->sdp_lock);
208 225
209 return sock->ssap; 226 return sock->ssap;
210 } 227 }
211 228
212 } else if (sock->ssap < LLCP_SDP_NUM_SAP) { 229 } else if (sock->ssap < LLCP_SDP_NUM_SAP) {
213 if (!(local->local_sdp & 230 if (!test_bit(sock->ssap - LLCP_WKS_NUM_SAP,
214 BIT(sock->ssap - LLCP_WKS_NUM_SAP))) { 231 &local->local_sdp)) {
215 set_bit(BIT(sock->ssap - LLCP_WKS_NUM_SAP), 232 set_bit(sock->ssap - LLCP_WKS_NUM_SAP,
216 &local->local_sdp); 233 &local->local_sdp);
217 mutex_unlock(&local->sdp_lock); 234 mutex_unlock(&local->sdp_lock);
218 235
219 return sock->ssap; 236 return sock->ssap;
@@ -238,7 +255,7 @@ u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local)
238 return LLCP_SAP_MAX; 255 return LLCP_SAP_MAX;
239 } 256 }
240 257
241 set_bit(BIT(local_ssap), &local->local_sap); 258 set_bit(local_ssap, &local->local_sap);
242 259
243 mutex_unlock(&local->sdp_lock); 260 mutex_unlock(&local->sdp_lock);
244 261
@@ -265,12 +282,12 @@ void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap)
265 282
266 mutex_lock(&local->sdp_lock); 283 mutex_lock(&local->sdp_lock);
267 284
268 clear_bit(1 << local_ssap, sdp); 285 clear_bit(local_ssap, sdp);
269 286
270 mutex_unlock(&local->sdp_lock); 287 mutex_unlock(&local->sdp_lock);
271} 288}
272 289
273u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, u8 *general_bytes_len) 290u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len)
274{ 291{
275 struct nfc_llcp_local *local; 292 struct nfc_llcp_local *local;
276 293
@@ -294,7 +311,7 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
294 311
295 version = LLCP_VERSION_11; 312 version = LLCP_VERSION_11;
296 version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version, 313 version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version,
297 1, &version_length); 314 1, &version_length);
298 gb_len += version_length; 315 gb_len += version_length;
299 316
300 /* 1500 ms */ 317 /* 1500 ms */
@@ -304,7 +321,7 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
304 321
305 pr_debug("Local wks 0x%lx\n", local->local_wks); 322 pr_debug("Local wks 0x%lx\n", local->local_wks);
306 wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&local->local_wks, 2, 323 wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&local->local_wks, 2,
307 &wks_length); 324 &wks_length);
308 gb_len += wks_length; 325 gb_len += wks_length;
309 326
310 gb_len += ARRAY_SIZE(llcp_magic); 327 gb_len += ARRAY_SIZE(llcp_magic);
@@ -349,8 +366,7 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len)
349 memcpy(local->remote_gb, gb, gb_len); 366 memcpy(local->remote_gb, gb, gb_len);
350 local->remote_gb_len = gb_len; 367 local->remote_gb_len = gb_len;
351 368
352 if (local->remote_gb == NULL || 369 if (local->remote_gb == NULL || local->remote_gb_len == 0)
353 local->remote_gb_len == 0)
354 return -ENODEV; 370 return -ENODEV;
355 371
356 if (memcmp(local->remote_gb, llcp_magic, 3)) { 372 if (memcmp(local->remote_gb, llcp_magic, 3)) {
@@ -359,26 +375,27 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len)
359 } 375 }
360 376
361 return nfc_llcp_parse_tlv(local, 377 return nfc_llcp_parse_tlv(local,
362 &local->remote_gb[3], local->remote_gb_len - 3); 378 &local->remote_gb[3],
379 local->remote_gb_len - 3);
363} 380}
364 381
365static void nfc_llcp_tx_work(struct work_struct *work) 382static void nfc_llcp_tx_work(struct work_struct *work)
366{ 383{
367 struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local, 384 struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
368 tx_work); 385 tx_work);
369 struct sk_buff *skb; 386 struct sk_buff *skb;
370 387
371 skb = skb_dequeue(&local->tx_queue); 388 skb = skb_dequeue(&local->tx_queue);
372 if (skb != NULL) { 389 if (skb != NULL) {
373 pr_debug("Sending pending skb\n"); 390 pr_debug("Sending pending skb\n");
374 nfc_data_exchange(local->dev, local->target_idx, 391 nfc_data_exchange(local->dev, local->target_idx,
375 skb, nfc_llcp_recv, local); 392 skb, nfc_llcp_recv, local);
376 } else { 393 } else {
377 nfc_llcp_send_symm(local->dev); 394 nfc_llcp_send_symm(local->dev);
378 } 395 }
379 396
380 mod_timer(&local->link_timer, 397 mod_timer(&local->link_timer,
381 jiffies + msecs_to_jiffies(local->remote_lto)); 398 jiffies + msecs_to_jiffies(local->remote_lto));
382} 399}
383 400
384static u8 nfc_llcp_dsap(struct sk_buff *pdu) 401static u8 nfc_llcp_dsap(struct sk_buff *pdu)
@@ -408,13 +425,13 @@ static u8 nfc_llcp_nr(struct sk_buff *pdu)
408 425
409static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu) 426static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu)
410{ 427{
411 pdu->data[2] = (sock->send_n << 4) | ((sock->recv_n - 1) % 16); 428 pdu->data[2] = (sock->send_n << 4) | (sock->recv_n % 16);
412 sock->send_n = (sock->send_n + 1) % 16; 429 sock->send_n = (sock->send_n + 1) % 16;
413 sock->recv_ack_n = (sock->recv_n - 1) % 16; 430 sock->recv_ack_n = (sock->recv_n - 1) % 16;
414} 431}
415 432
416static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local, 433static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
417 u8 ssap, u8 dsap) 434 u8 ssap, u8 dsap)
418{ 435{
419 struct nfc_llcp_sock *sock, *llcp_sock, *n; 436 struct nfc_llcp_sock *sock, *llcp_sock, *n;
420 437
@@ -438,7 +455,7 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
438 455
439 list_for_each_entry_safe(llcp_sock, n, &sock->list, list) { 456 list_for_each_entry_safe(llcp_sock, n, &sock->list, list) {
440 pr_debug("llcp_sock %p sk %p dsap %d\n", llcp_sock, 457 pr_debug("llcp_sock %p sk %p dsap %d\n", llcp_sock,
441 &llcp_sock->sk, llcp_sock->dsap); 458 &llcp_sock->sk, llcp_sock->dsap);
442 if (llcp_sock->dsap == dsap) { 459 if (llcp_sock->dsap == dsap) {
443 sock_hold(&llcp_sock->sk); 460 sock_hold(&llcp_sock->sk);
444 mutex_unlock(&local->socket_lock); 461 mutex_unlock(&local->socket_lock);
@@ -482,7 +499,7 @@ static u8 *nfc_llcp_connect_sn(struct sk_buff *skb, size_t *sn_len)
482} 499}
483 500
484static void nfc_llcp_recv_connect(struct nfc_llcp_local *local, 501static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
485 struct sk_buff *skb) 502 struct sk_buff *skb)
486{ 503{
487 struct sock *new_sk, *parent; 504 struct sock *new_sk, *parent;
488 struct nfc_llcp_sock *sock, *new_sock; 505 struct nfc_llcp_sock *sock, *new_sock;
@@ -494,7 +511,7 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
494 pr_debug("%d %d\n", dsap, ssap); 511 pr_debug("%d %d\n", dsap, ssap);
495 512
496 nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE], 513 nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE],
497 skb->len - LLCP_HEADER_SIZE); 514 skb->len - LLCP_HEADER_SIZE);
498 515
499 if (dsap != LLCP_SAP_SDP) { 516 if (dsap != LLCP_SAP_SDP) {
500 bound_sap = dsap; 517 bound_sap = dsap;
@@ -513,7 +530,7 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
513 lock_sock(&sock->sk); 530 lock_sock(&sock->sk);
514 531
515 if (sock->dsap == LLCP_SAP_SDP && 532 if (sock->dsap == LLCP_SAP_SDP &&
516 sock->sk.sk_state == LLCP_LISTEN) 533 sock->sk.sk_state == LLCP_LISTEN)
517 goto enqueue; 534 goto enqueue;
518 } else { 535 } else {
519 u8 *sn; 536 u8 *sn;
@@ -529,23 +546,23 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
529 546
530 mutex_lock(&local->socket_lock); 547 mutex_lock(&local->socket_lock);
531 for (bound_sap = 0; bound_sap < LLCP_LOCAL_SAP_OFFSET; 548 for (bound_sap = 0; bound_sap < LLCP_LOCAL_SAP_OFFSET;
532 bound_sap++) { 549 bound_sap++) {
533 sock = local->sockets[bound_sap]; 550 sock = local->sockets[bound_sap];
534 if (sock == NULL) 551 if (sock == NULL)
535 continue; 552 continue;
536 553
537 if (sock->service_name == NULL || 554 if (sock->service_name == NULL ||
538 sock->service_name_len == 0) 555 sock->service_name_len == 0)
539 continue; 556 continue;
540 557
541 if (sock->service_name_len != sn_len) 558 if (sock->service_name_len != sn_len)
542 continue; 559 continue;
543 560
544 if (sock->dsap == LLCP_SAP_SDP && 561 if (sock->dsap == LLCP_SAP_SDP &&
545 sock->sk.sk_state == LLCP_LISTEN && 562 sock->sk.sk_state == LLCP_LISTEN &&
546 !memcmp(sn, sock->service_name, sn_len)) { 563 !memcmp(sn, sock->service_name, sn_len)) {
547 pr_debug("Found service name at SAP %d\n", 564 pr_debug("Found service name at SAP %d\n",
548 bound_sap); 565 bound_sap);
549 sock_hold(&sock->sk); 566 sock_hold(&sock->sk);
550 mutex_unlock(&local->socket_lock); 567 mutex_unlock(&local->socket_lock);
551 568
@@ -570,8 +587,7 @@ enqueue:
570 goto fail; 587 goto fail;
571 } 588 }
572 589
573 new_sk = nfc_llcp_sock_alloc(NULL, parent->sk_type, 590 new_sk = nfc_llcp_sock_alloc(NULL, parent->sk_type, GFP_ATOMIC);
574 GFP_ATOMIC);
575 if (new_sk == NULL) { 591 if (new_sk == NULL) {
576 reason = LLCP_DM_REJ; 592 reason = LLCP_DM_REJ;
577 release_sock(&sock->sk); 593 release_sock(&sock->sk);
@@ -616,8 +632,39 @@ fail:
616 632
617} 633}
618 634
635int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock)
636{
637 int nr_frames = 0;
638 struct nfc_llcp_local *local = sock->local;
639
640 pr_debug("Remote ready %d tx queue len %d remote rw %d",
641 sock->remote_ready, skb_queue_len(&sock->tx_pending_queue),
642 local->remote_rw);
643
644 /* Try to queue some I frames for transmission */
645 while (sock->remote_ready &&
646 skb_queue_len(&sock->tx_pending_queue) < local->remote_rw) {
647 struct sk_buff *pdu, *pending_pdu;
648
649 pdu = skb_dequeue(&sock->tx_queue);
650 if (pdu == NULL)
651 break;
652
653 /* Update N(S)/N(R) */
654 nfc_llcp_set_nrns(sock, pdu);
655
656 pending_pdu = skb_clone(pdu, GFP_KERNEL);
657
658 skb_queue_tail(&local->tx_queue, pdu);
659 skb_queue_tail(&sock->tx_pending_queue, pending_pdu);
660 nr_frames++;
661 }
662
663 return nr_frames;
664}
665
619static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local, 666static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,
620 struct sk_buff *skb) 667 struct sk_buff *skb)
621{ 668{
622 struct nfc_llcp_sock *llcp_sock; 669 struct nfc_llcp_sock *llcp_sock;
623 struct sock *sk; 670 struct sock *sk;
@@ -644,15 +691,15 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,
644 nfc_llcp_sock_put(llcp_sock); 691 nfc_llcp_sock_put(llcp_sock);
645 } 692 }
646 693
647 if (ns == llcp_sock->recv_n)
648 llcp_sock->recv_n = (llcp_sock->recv_n + 1) % 16;
649 else
650 pr_err("Received out of sequence I PDU\n");
651
652 /* Pass the payload upstream */ 694 /* Pass the payload upstream */
653 if (ptype == LLCP_PDU_I) { 695 if (ptype == LLCP_PDU_I) {
654 pr_debug("I frame, queueing on %p\n", &llcp_sock->sk); 696 pr_debug("I frame, queueing on %p\n", &llcp_sock->sk);
655 697
698 if (ns == llcp_sock->recv_n)
699 llcp_sock->recv_n = (llcp_sock->recv_n + 1) % 16;
700 else
701 pr_err("Received out of sequence I PDU\n");
702
656 skb_pull(skb, LLCP_HEADER_SIZE + LLCP_SEQUENCE_SIZE); 703 skb_pull(skb, LLCP_HEADER_SIZE + LLCP_SEQUENCE_SIZE);
657 if (sock_queue_rcv_skb(&llcp_sock->sk, skb)) { 704 if (sock_queue_rcv_skb(&llcp_sock->sk, skb)) {
658 pr_err("receive queue is full\n"); 705 pr_err("receive queue is full\n");
@@ -673,30 +720,20 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,
673 } 720 }
674 } 721 }
675 722
676 /* Queue some I frames for transmission */ 723 if (ptype == LLCP_PDU_RR)
677 while (llcp_sock->remote_ready && 724 llcp_sock->remote_ready = true;
678 skb_queue_len(&llcp_sock->tx_pending_queue) <= local->remote_rw) { 725 else if (ptype == LLCP_PDU_RNR)
679 struct sk_buff *pdu, *pending_pdu; 726 llcp_sock->remote_ready = false;
680
681 pdu = skb_dequeue(&llcp_sock->tx_queue);
682 if (pdu == NULL)
683 break;
684
685 /* Update N(S)/N(R) */
686 nfc_llcp_set_nrns(llcp_sock, pdu);
687 727
688 pending_pdu = skb_clone(pdu, GFP_KERNEL); 728 if (nfc_llcp_queue_i_frames(llcp_sock) == 0)
689 729 nfc_llcp_send_rr(llcp_sock);
690 skb_queue_tail(&local->tx_queue, pdu);
691 skb_queue_tail(&llcp_sock->tx_pending_queue, pending_pdu);
692 }
693 730
694 release_sock(sk); 731 release_sock(sk);
695 nfc_llcp_sock_put(llcp_sock); 732 nfc_llcp_sock_put(llcp_sock);
696} 733}
697 734
698static void nfc_llcp_recv_disc(struct nfc_llcp_local *local, 735static void nfc_llcp_recv_disc(struct nfc_llcp_local *local,
699 struct sk_buff *skb) 736 struct sk_buff *skb)
700{ 737{
701 struct nfc_llcp_sock *llcp_sock; 738 struct nfc_llcp_sock *llcp_sock;
702 struct sock *sk; 739 struct sock *sk;
@@ -718,7 +755,6 @@ static void nfc_llcp_recv_disc(struct nfc_llcp_local *local,
718 nfc_llcp_sock_put(llcp_sock); 755 nfc_llcp_sock_put(llcp_sock);
719 } 756 }
720 757
721
722 if (sk->sk_state == LLCP_CONNECTED) { 758 if (sk->sk_state == LLCP_CONNECTED) {
723 nfc_put_device(local->dev); 759 nfc_put_device(local->dev);
724 sk->sk_state = LLCP_CLOSED; 760 sk->sk_state = LLCP_CLOSED;
@@ -731,13 +767,11 @@ static void nfc_llcp_recv_disc(struct nfc_llcp_local *local,
731 nfc_llcp_sock_put(llcp_sock); 767 nfc_llcp_sock_put(llcp_sock);
732} 768}
733 769
734static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, 770static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
735 struct sk_buff *skb)
736{ 771{
737 struct nfc_llcp_sock *llcp_sock; 772 struct nfc_llcp_sock *llcp_sock;
738 u8 dsap, ssap; 773 u8 dsap, ssap;
739 774
740
741 dsap = nfc_llcp_dsap(skb); 775 dsap = nfc_llcp_dsap(skb);
742 ssap = nfc_llcp_ssap(skb); 776 ssap = nfc_llcp_ssap(skb);
743 777
@@ -756,7 +790,7 @@ static void nfc_llcp_recv_cc(struct nfc_llcp_local *local,
756 llcp_sock->dsap = ssap; 790 llcp_sock->dsap = ssap;
757 791
758 nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE], 792 nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE],
759 skb->len - LLCP_HEADER_SIZE); 793 skb->len - LLCP_HEADER_SIZE);
760 794
761 nfc_llcp_sock_put(llcp_sock); 795 nfc_llcp_sock_put(llcp_sock);
762} 796}
@@ -764,7 +798,7 @@ static void nfc_llcp_recv_cc(struct nfc_llcp_local *local,
764static void nfc_llcp_rx_work(struct work_struct *work) 798static void nfc_llcp_rx_work(struct work_struct *work)
765{ 799{
766 struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local, 800 struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
767 rx_work); 801 rx_work);
768 u8 dsap, ssap, ptype; 802 u8 dsap, ssap, ptype;
769 struct sk_buff *skb; 803 struct sk_buff *skb;
770 804
@@ -802,6 +836,7 @@ static void nfc_llcp_rx_work(struct work_struct *work)
802 836
803 case LLCP_PDU_I: 837 case LLCP_PDU_I:
804 case LLCP_PDU_RR: 838 case LLCP_PDU_RR:
839 case LLCP_PDU_RNR:
805 pr_debug("I frame\n"); 840 pr_debug("I frame\n");
806 nfc_llcp_recv_hdlc(local, skb); 841 nfc_llcp_recv_hdlc(local, skb);
807 break; 842 break;
@@ -821,7 +856,7 @@ void nfc_llcp_recv(void *data, struct sk_buff *skb, int err)
821 856
822 pr_debug("Received an LLCP PDU\n"); 857 pr_debug("Received an LLCP PDU\n");
823 if (err < 0) { 858 if (err < 0) {
824 pr_err("err %d", err); 859 pr_err("err %d\n", err);
825 return; 860 return;
826 } 861 }
827 862
@@ -840,6 +875,8 @@ void nfc_llcp_mac_is_down(struct nfc_dev *dev)
840 if (local == NULL) 875 if (local == NULL)
841 return; 876 return;
842 877
878 nfc_llcp_clear_sdp(local);
879
843 /* Close and purge all existing sockets */ 880 /* Close and purge all existing sockets */
844 nfc_llcp_socket_release(local); 881 nfc_llcp_socket_release(local);
845} 882}
@@ -865,7 +902,7 @@ void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
865 queue_work(local->tx_wq, &local->tx_work); 902 queue_work(local->tx_wq, &local->tx_work);
866 } else { 903 } else {
867 mod_timer(&local->link_timer, 904 mod_timer(&local->link_timer,
868 jiffies + msecs_to_jiffies(local->remote_lto)); 905 jiffies + msecs_to_jiffies(local->remote_lto));
869 } 906 }
870} 907}
871 908
@@ -891,8 +928,10 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
891 skb_queue_head_init(&local->tx_queue); 928 skb_queue_head_init(&local->tx_queue);
892 INIT_WORK(&local->tx_work, nfc_llcp_tx_work); 929 INIT_WORK(&local->tx_work, nfc_llcp_tx_work);
893 snprintf(name, sizeof(name), "%s_llcp_tx_wq", dev_name(dev)); 930 snprintf(name, sizeof(name), "%s_llcp_tx_wq", dev_name(dev));
894 local->tx_wq = alloc_workqueue(name, 931 local->tx_wq =
895 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM, 1); 932 alloc_workqueue(name,
933 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
934 1);
896 if (local->tx_wq == NULL) { 935 if (local->tx_wq == NULL) {
897 err = -ENOMEM; 936 err = -ENOMEM;
898 goto err_local; 937 goto err_local;
@@ -901,8 +940,10 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
901 local->rx_pending = NULL; 940 local->rx_pending = NULL;
902 INIT_WORK(&local->rx_work, nfc_llcp_rx_work); 941 INIT_WORK(&local->rx_work, nfc_llcp_rx_work);
903 snprintf(name, sizeof(name), "%s_llcp_rx_wq", dev_name(dev)); 942 snprintf(name, sizeof(name), "%s_llcp_rx_wq", dev_name(dev));
904 local->rx_wq = alloc_workqueue(name, 943 local->rx_wq =
905 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM, 1); 944 alloc_workqueue(name,
945 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
946 1);
906 if (local->rx_wq == NULL) { 947 if (local->rx_wq == NULL) {
907 err = -ENOMEM; 948 err = -ENOMEM;
908 goto err_tx_wq; 949 goto err_tx_wq;
@@ -910,8 +951,10 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
910 951
911 INIT_WORK(&local->timeout_work, nfc_llcp_timeout_work); 952 INIT_WORK(&local->timeout_work, nfc_llcp_timeout_work);
912 snprintf(name, sizeof(name), "%s_llcp_timeout_wq", dev_name(dev)); 953 snprintf(name, sizeof(name), "%s_llcp_timeout_wq", dev_name(dev));
913 local->timeout_wq = alloc_workqueue(name, 954 local->timeout_wq =
914 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM, 1); 955 alloc_workqueue(name,
956 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
957 1);
915 if (local->timeout_wq == NULL) { 958 if (local->timeout_wq == NULL) {
916 err = -ENOMEM; 959 err = -ENOMEM;
917 goto err_rx_wq; 960 goto err_rx_wq;
diff --git a/net/nfc/llcp/llcp.h b/net/nfc/llcp/llcp.h
index 0ad2e3361584..50680ce5ae43 100644
--- a/net/nfc/llcp/llcp.h
+++ b/net/nfc/llcp/llcp.h
@@ -28,6 +28,10 @@ enum llcp_state {
28#define LLCP_DEFAULT_RW 1 28#define LLCP_DEFAULT_RW 1
29#define LLCP_DEFAULT_MIU 128 29#define LLCP_DEFAULT_MIU 128
30 30
31#define LLCP_MAX_LTO 0xff
32#define LLCP_MAX_RW 15
33#define LLCP_MAX_MIUX 0x7ff
34
31#define LLCP_WKS_NUM_SAP 16 35#define LLCP_WKS_NUM_SAP 16
32#define LLCP_SDP_NUM_SAP 16 36#define LLCP_SDP_NUM_SAP 16
33#define LLCP_LOCAL_NUM_SAP 32 37#define LLCP_LOCAL_NUM_SAP 32
@@ -162,9 +166,10 @@ struct nfc_llcp_sock {
162 166
163struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev); 167struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev);
164u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local, 168u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
165 struct nfc_llcp_sock *sock); 169 struct nfc_llcp_sock *sock);
166u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local); 170u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local);
167void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap); 171void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap);
172int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock);
168 173
169/* Sock API */ 174/* Sock API */
170struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp); 175struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp);
@@ -175,7 +180,7 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *sk, struct socket *newsock);
175 180
176/* TLV API */ 181/* TLV API */
177int nfc_llcp_parse_tlv(struct nfc_llcp_local *local, 182int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
178 u8 *tlv_array, u16 tlv_array_len); 183 u8 *tlv_array, u16 tlv_array_len);
179 184
180/* Commands API */ 185/* Commands API */
181void nfc_llcp_recv(void *data, struct sk_buff *skb, int err); 186void nfc_llcp_recv(void *data, struct sk_buff *skb, int err);
@@ -187,6 +192,9 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock);
187int nfc_llcp_send_cc(struct nfc_llcp_sock *sock); 192int nfc_llcp_send_cc(struct nfc_llcp_sock *sock);
188int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason); 193int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason);
189int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock); 194int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock);
195int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
196 struct msghdr *msg, size_t len);
197int nfc_llcp_send_rr(struct nfc_llcp_sock *sock);
190 198
191/* Socket API */ 199/* Socket API */
192int __init nfc_llcp_sock_init(void); 200int __init nfc_llcp_sock_init(void);
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index f738ccd535f1..c13e02ebdef9 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -78,9 +78,11 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
78 llcp_sock->local = local; 78 llcp_sock->local = local;
79 llcp_sock->nfc_protocol = llcp_addr.nfc_protocol; 79 llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
80 llcp_sock->service_name_len = min_t(unsigned int, 80 llcp_sock->service_name_len = min_t(unsigned int,
81 llcp_addr.service_name_len, NFC_LLCP_MAX_SERVICE_NAME); 81 llcp_addr.service_name_len,
82 NFC_LLCP_MAX_SERVICE_NAME);
82 llcp_sock->service_name = kmemdup(llcp_addr.service_name, 83 llcp_sock->service_name = kmemdup(llcp_addr.service_name,
83 llcp_sock->service_name_len, GFP_KERNEL); 84 llcp_sock->service_name_len,
85 GFP_KERNEL);
84 86
85 llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock); 87 llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
86 if (llcp_sock->ssap == LLCP_MAX_SAP) 88 if (llcp_sock->ssap == LLCP_MAX_SAP)
@@ -110,7 +112,7 @@ static int llcp_sock_listen(struct socket *sock, int backlog)
110 lock_sock(sk); 112 lock_sock(sk);
111 113
112 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM) 114 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
113 || sk->sk_state != LLCP_BOUND) { 115 || sk->sk_state != LLCP_BOUND) {
114 ret = -EBADFD; 116 ret = -EBADFD;
115 goto error; 117 goto error;
116 } 118 }
@@ -149,13 +151,13 @@ void nfc_llcp_accept_enqueue(struct sock *parent, struct sock *sk)
149 sock_hold(sk); 151 sock_hold(sk);
150 152
151 list_add_tail(&llcp_sock->accept_queue, 153 list_add_tail(&llcp_sock->accept_queue,
152 &llcp_sock_parent->accept_queue); 154 &llcp_sock_parent->accept_queue);
153 llcp_sock->parent = parent; 155 llcp_sock->parent = parent;
154 sk_acceptq_added(parent); 156 sk_acceptq_added(parent);
155} 157}
156 158
157struct sock *nfc_llcp_accept_dequeue(struct sock *parent, 159struct sock *nfc_llcp_accept_dequeue(struct sock *parent,
158 struct socket *newsock) 160 struct socket *newsock)
159{ 161{
160 struct nfc_llcp_sock *lsk, *n, *llcp_parent; 162 struct nfc_llcp_sock *lsk, *n, *llcp_parent;
161 struct sock *sk; 163 struct sock *sk;
@@ -163,7 +165,7 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent,
163 llcp_parent = nfc_llcp_sock(parent); 165 llcp_parent = nfc_llcp_sock(parent);
164 166
165 list_for_each_entry_safe(lsk, n, &llcp_parent->accept_queue, 167 list_for_each_entry_safe(lsk, n, &llcp_parent->accept_queue,
166 accept_queue) { 168 accept_queue) {
167 sk = &lsk->sk; 169 sk = &lsk->sk;
168 lock_sock(sk); 170 lock_sock(sk);
169 171
@@ -192,7 +194,7 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent,
192} 194}
193 195
194static int llcp_sock_accept(struct socket *sock, struct socket *newsock, 196static int llcp_sock_accept(struct socket *sock, struct socket *newsock,
195 int flags) 197 int flags)
196{ 198{
197 DECLARE_WAITQUEUE(wait, current); 199 DECLARE_WAITQUEUE(wait, current);
198 struct sock *sk = sock->sk, *new_sk; 200 struct sock *sk = sock->sk, *new_sk;
@@ -248,7 +250,7 @@ error:
248static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr, 250static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
249 int *len, int peer) 251 int *len, int peer)
250{ 252{
251 struct sockaddr_nfc_llcp *llcp_addr = (struct sockaddr_nfc_llcp *) addr; 253 struct sockaddr_nfc_llcp *llcp_addr = (struct sockaddr_nfc_llcp *)addr;
252 struct sock *sk = sock->sk; 254 struct sock *sk = sock->sk;
253 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); 255 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
254 256
@@ -262,7 +264,7 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
262 llcp_addr->ssap = llcp_sock->ssap; 264 llcp_addr->ssap = llcp_sock->ssap;
263 llcp_addr->service_name_len = llcp_sock->service_name_len; 265 llcp_addr->service_name_len = llcp_sock->service_name_len;
264 memcpy(llcp_addr->service_name, llcp_sock->service_name, 266 memcpy(llcp_addr->service_name, llcp_sock->service_name,
265 llcp_addr->service_name_len); 267 llcp_addr->service_name_len);
266 268
267 return 0; 269 return 0;
268} 270}
@@ -275,7 +277,7 @@ static inline unsigned int llcp_accept_poll(struct sock *parent)
275 parent_sock = nfc_llcp_sock(parent); 277 parent_sock = nfc_llcp_sock(parent);
276 278
277 list_for_each_entry_safe(llcp_sock, n, &parent_sock->accept_queue, 279 list_for_each_entry_safe(llcp_sock, n, &parent_sock->accept_queue,
278 accept_queue) { 280 accept_queue) {
279 sk = &llcp_sock->sk; 281 sk = &llcp_sock->sk;
280 282
281 if (sk->sk_state == LLCP_CONNECTED) 283 if (sk->sk_state == LLCP_CONNECTED)
@@ -286,7 +288,7 @@ static inline unsigned int llcp_accept_poll(struct sock *parent)
286} 288}
287 289
288static unsigned int llcp_sock_poll(struct file *file, struct socket *sock, 290static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
289 poll_table *wait) 291 poll_table *wait)
290{ 292{
291 struct sock *sk = sock->sk; 293 struct sock *sk = sock->sk;
292 unsigned int mask = 0; 294 unsigned int mask = 0;
@@ -315,6 +317,7 @@ static int llcp_sock_release(struct socket *sock)
315 struct sock *sk = sock->sk; 317 struct sock *sk = sock->sk;
316 struct nfc_llcp_local *local; 318 struct nfc_llcp_local *local;
317 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); 319 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
320 int err = 0;
318 321
319 if (!sk) 322 if (!sk)
320 return 0; 323 return 0;
@@ -322,25 +325,17 @@ static int llcp_sock_release(struct socket *sock)
322 pr_debug("%p\n", sk); 325 pr_debug("%p\n", sk);
323 326
324 local = llcp_sock->local; 327 local = llcp_sock->local;
325 if (local == NULL) 328 if (local == NULL) {
326 return -ENODEV; 329 err = -ENODEV;
330 goto out;
331 }
327 332
328 mutex_lock(&local->socket_lock); 333 mutex_lock(&local->socket_lock);
329 334
330 if (llcp_sock == local->sockets[llcp_sock->ssap]) { 335 if (llcp_sock == local->sockets[llcp_sock->ssap])
331 local->sockets[llcp_sock->ssap] = NULL; 336 local->sockets[llcp_sock->ssap] = NULL;
332 } else { 337 else
333 struct nfc_llcp_sock *parent, *s, *n; 338 list_del_init(&llcp_sock->list);
334
335 parent = local->sockets[llcp_sock->ssap];
336
337 list_for_each_entry_safe(s, n, &parent->list, list)
338 if (llcp_sock == s) {
339 list_del(&s->list);
340 break;
341 }
342
343 }
344 339
345 mutex_unlock(&local->socket_lock); 340 mutex_unlock(&local->socket_lock);
346 341
@@ -355,7 +350,7 @@ static int llcp_sock_release(struct socket *sock)
355 struct sock *accept_sk; 350 struct sock *accept_sk;
356 351
357 list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue, 352 list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue,
358 accept_queue) { 353 accept_queue) {
359 accept_sk = &lsk->sk; 354 accept_sk = &lsk->sk;
360 lock_sock(accept_sk); 355 lock_sock(accept_sk);
361 356
@@ -364,31 +359,27 @@ static int llcp_sock_release(struct socket *sock)
364 359
365 release_sock(accept_sk); 360 release_sock(accept_sk);
366 361
367 sock_set_flag(sk, SOCK_DEAD);
368 sock_orphan(accept_sk); 362 sock_orphan(accept_sk);
369 sock_put(accept_sk);
370 } 363 }
371 } 364 }
372 365
373 /* Freeing the SAP */ 366 /* Freeing the SAP */
374 if ((sk->sk_state == LLCP_CONNECTED 367 if ((sk->sk_state == LLCP_CONNECTED
375 && llcp_sock->ssap > LLCP_LOCAL_SAP_OFFSET) || 368 && llcp_sock->ssap > LLCP_LOCAL_SAP_OFFSET) ||
376 sk->sk_state == LLCP_BOUND || 369 sk->sk_state == LLCP_BOUND || sk->sk_state == LLCP_LISTEN)
377 sk->sk_state == LLCP_LISTEN)
378 nfc_llcp_put_ssap(llcp_sock->local, llcp_sock->ssap); 370 nfc_llcp_put_ssap(llcp_sock->local, llcp_sock->ssap);
379 371
380 sock_set_flag(sk, SOCK_DEAD);
381
382 release_sock(sk); 372 release_sock(sk);
383 373
374out:
384 sock_orphan(sk); 375 sock_orphan(sk);
385 sock_put(sk); 376 sock_put(sk);
386 377
387 return 0; 378 return err;
388} 379}
389 380
390static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, 381static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
391 int len, int flags) 382 int len, int flags)
392{ 383{
393 struct sock *sk = sock->sk; 384 struct sock *sk = sock->sk;
394 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); 385 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
@@ -400,7 +391,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
400 pr_debug("sock %p sk %p flags 0x%x\n", sock, sk, flags); 391 pr_debug("sock %p sk %p flags 0x%x\n", sock, sk, flags);
401 392
402 if (!addr || len < sizeof(struct sockaddr_nfc) || 393 if (!addr || len < sizeof(struct sockaddr_nfc) ||
403 addr->sa_family != AF_NFC) { 394 addr->sa_family != AF_NFC) {
404 pr_err("Invalid socket\n"); 395 pr_err("Invalid socket\n");
405 return -EINVAL; 396 return -EINVAL;
406 } 397 }
@@ -411,7 +402,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
411 } 402 }
412 403
413 pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n", addr->dev_idx, 404 pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n", addr->dev_idx,
414 addr->target_idx, addr->nfc_protocol); 405 addr->target_idx, addr->nfc_protocol);
415 406
416 lock_sock(sk); 407 lock_sock(sk);
417 408
@@ -441,7 +432,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
441 device_unlock(&dev->dev); 432 device_unlock(&dev->dev);
442 433
443 if (local->rf_mode == NFC_RF_INITIATOR && 434 if (local->rf_mode == NFC_RF_INITIATOR &&
444 addr->target_idx != local->target_idx) { 435 addr->target_idx != local->target_idx) {
445 ret = -ENOLINK; 436 ret = -ENOLINK;
446 goto put_dev; 437 goto put_dev;
447 } 438 }
@@ -459,9 +450,11 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
459 llcp_sock->dsap = LLCP_SAP_SDP; 450 llcp_sock->dsap = LLCP_SAP_SDP;
460 llcp_sock->nfc_protocol = addr->nfc_protocol; 451 llcp_sock->nfc_protocol = addr->nfc_protocol;
461 llcp_sock->service_name_len = min_t(unsigned int, 452 llcp_sock->service_name_len = min_t(unsigned int,
462 addr->service_name_len, NFC_LLCP_MAX_SERVICE_NAME); 453 addr->service_name_len,
454 NFC_LLCP_MAX_SERVICE_NAME);
463 llcp_sock->service_name = kmemdup(addr->service_name, 455 llcp_sock->service_name = kmemdup(addr->service_name,
464 llcp_sock->service_name_len, GFP_KERNEL); 456 llcp_sock->service_name_len,
457 GFP_KERNEL);
465 458
466 local->sockets[llcp_sock->ssap] = llcp_sock; 459 local->sockets[llcp_sock->ssap] = llcp_sock;
467 460
@@ -482,6 +475,34 @@ error:
482 return ret; 475 return ret;
483} 476}
484 477
478static int llcp_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
479 struct msghdr *msg, size_t len)
480{
481 struct sock *sk = sock->sk;
482 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
483 int ret;
484
485 pr_debug("sock %p sk %p", sock, sk);
486
487 ret = sock_error(sk);
488 if (ret)
489 return ret;
490
491 if (msg->msg_flags & MSG_OOB)
492 return -EOPNOTSUPP;
493
494 lock_sock(sk);
495
496 if (sk->sk_state != LLCP_CONNECTED) {
497 release_sock(sk);
498 return -ENOTCONN;
499 }
500
501 release_sock(sk);
502
503 return nfc_llcp_send_i_frame(llcp_sock, msg, len);
504}
505
485static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock, 506static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
486 struct msghdr *msg, size_t len, int flags) 507 struct msghdr *msg, size_t len, int flags)
487{ 508{
@@ -496,7 +517,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
496 lock_sock(sk); 517 lock_sock(sk);
497 518
498 if (sk->sk_state == LLCP_CLOSED && 519 if (sk->sk_state == LLCP_CLOSED &&
499 skb_queue_empty(&sk->sk_receive_queue)) { 520 skb_queue_empty(&sk->sk_receive_queue)) {
500 release_sock(sk); 521 release_sock(sk);
501 return 0; 522 return 0;
502 } 523 }
@@ -509,7 +530,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
509 skb = skb_recv_datagram(sk, flags, noblock, &err); 530 skb = skb_recv_datagram(sk, flags, noblock, &err);
510 if (!skb) { 531 if (!skb) {
511 pr_err("Recv datagram failed state %d %d %d", 532 pr_err("Recv datagram failed state %d %d %d",
512 sk->sk_state, err, sock_error(sk)); 533 sk->sk_state, err, sock_error(sk));
513 534
514 if (sk->sk_shutdown & RCV_SHUTDOWN) 535 if (sk->sk_shutdown & RCV_SHUTDOWN)
515 return 0; 536 return 0;
@@ -517,7 +538,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
517 return err; 538 return err;
518 } 539 }
519 540
520 rlen = skb->len; /* real length of skb */ 541 rlen = skb->len; /* real length of skb */
521 copied = min_t(unsigned int, rlen, len); 542 copied = min_t(unsigned int, rlen, len);
522 543
523 cskb = skb; 544 cskb = skb;
@@ -567,7 +588,7 @@ static const struct proto_ops llcp_sock_ops = {
567 .shutdown = sock_no_shutdown, 588 .shutdown = sock_no_shutdown,
568 .setsockopt = sock_no_setsockopt, 589 .setsockopt = sock_no_setsockopt,
569 .getsockopt = sock_no_getsockopt, 590 .getsockopt = sock_no_getsockopt,
570 .sendmsg = sock_no_sendmsg, 591 .sendmsg = llcp_sock_sendmsg,
571 .recvmsg = llcp_sock_recvmsg, 592 .recvmsg = llcp_sock_recvmsg,
572 .mmap = sock_no_mmap, 593 .mmap = sock_no_mmap,
573}; 594};
@@ -627,6 +648,8 @@ struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp)
627 648
628void nfc_llcp_sock_free(struct nfc_llcp_sock *sock) 649void nfc_llcp_sock_free(struct nfc_llcp_sock *sock)
629{ 650{
651 struct nfc_llcp_local *local = sock->local;
652
630 kfree(sock->service_name); 653 kfree(sock->service_name);
631 654
632 skb_queue_purge(&sock->tx_queue); 655 skb_queue_purge(&sock->tx_queue);
@@ -635,11 +658,16 @@ void nfc_llcp_sock_free(struct nfc_llcp_sock *sock)
635 658
636 list_del_init(&sock->accept_queue); 659 list_del_init(&sock->accept_queue);
637 660
661 if (local != NULL && sock == local->sockets[sock->ssap])
662 local->sockets[sock->ssap] = NULL;
663 else
664 list_del_init(&sock->list);
665
638 sock->parent = NULL; 666 sock->parent = NULL;
639} 667}
640 668
641static int llcp_sock_create(struct net *net, struct socket *sock, 669static int llcp_sock_create(struct net *net, struct socket *sock,
642 const struct nfc_protocol *nfc_proto) 670 const struct nfc_protocol *nfc_proto)
643{ 671{
644 struct sock *sk; 672 struct sock *sk;
645 673
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 7650139a1a05..9ec065bb9ee1 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -66,9 +66,8 @@ static void nci_req_cancel(struct nci_dev *ndev, int err)
66 66
67/* Execute request and wait for completion. */ 67/* Execute request and wait for completion. */
68static int __nci_request(struct nci_dev *ndev, 68static int __nci_request(struct nci_dev *ndev,
69 void (*req)(struct nci_dev *ndev, unsigned long opt), 69 void (*req)(struct nci_dev *ndev, unsigned long opt),
70 unsigned long opt, 70 unsigned long opt, __u32 timeout)
71 __u32 timeout)
72{ 71{
73 int rc = 0; 72 int rc = 0;
74 long completion_rc; 73 long completion_rc;
@@ -77,9 +76,9 @@ static int __nci_request(struct nci_dev *ndev,
77 76
78 init_completion(&ndev->req_completion); 77 init_completion(&ndev->req_completion);
79 req(ndev, opt); 78 req(ndev, opt);
80 completion_rc = wait_for_completion_interruptible_timeout( 79 completion_rc =
81 &ndev->req_completion, 80 wait_for_completion_interruptible_timeout(&ndev->req_completion,
82 timeout); 81 timeout);
83 82
84 pr_debug("wait_for_completion return %ld\n", completion_rc); 83 pr_debug("wait_for_completion return %ld\n", completion_rc);
85 84
@@ -110,8 +109,9 @@ static int __nci_request(struct nci_dev *ndev,
110} 109}
111 110
112static inline int nci_request(struct nci_dev *ndev, 111static inline int nci_request(struct nci_dev *ndev,
113 void (*req)(struct nci_dev *ndev, unsigned long opt), 112 void (*req)(struct nci_dev *ndev,
114 unsigned long opt, __u32 timeout) 113 unsigned long opt),
114 unsigned long opt, __u32 timeout)
115{ 115{
116 int rc; 116 int rc;
117 117
@@ -152,14 +152,14 @@ static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
152 /* by default mapping is set to NCI_RF_INTERFACE_FRAME */ 152 /* by default mapping is set to NCI_RF_INTERFACE_FRAME */
153 for (i = 0; i < ndev->num_supported_rf_interfaces; i++) { 153 for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
154 if (ndev->supported_rf_interfaces[i] == 154 if (ndev->supported_rf_interfaces[i] ==
155 NCI_RF_INTERFACE_ISO_DEP) { 155 NCI_RF_INTERFACE_ISO_DEP) {
156 cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP; 156 cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
157 cfg[*num].mode = NCI_DISC_MAP_MODE_POLL | 157 cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
158 NCI_DISC_MAP_MODE_LISTEN; 158 NCI_DISC_MAP_MODE_LISTEN;
159 cfg[*num].rf_interface = NCI_RF_INTERFACE_ISO_DEP; 159 cfg[*num].rf_interface = NCI_RF_INTERFACE_ISO_DEP;
160 (*num)++; 160 (*num)++;
161 } else if (ndev->supported_rf_interfaces[i] == 161 } else if (ndev->supported_rf_interfaces[i] ==
162 NCI_RF_INTERFACE_NFC_DEP) { 162 NCI_RF_INTERFACE_NFC_DEP) {
163 cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP; 163 cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
164 cfg[*num].mode = NCI_DISC_MAP_MODE_POLL | 164 cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
165 NCI_DISC_MAP_MODE_LISTEN; 165 NCI_DISC_MAP_MODE_LISTEN;
@@ -172,8 +172,7 @@ static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
172 } 172 }
173 173
174 nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD, 174 nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
175 (1 + ((*num)*sizeof(struct disc_map_config))), 175 (1 + ((*num) * sizeof(struct disc_map_config))), &cmd);
176 &cmd);
177} 176}
178 177
179static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt) 178static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
@@ -184,36 +183,68 @@ static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
184 cmd.num_disc_configs = 0; 183 cmd.num_disc_configs = 0;
185 184
186 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && 185 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
187 (protocols & NFC_PROTO_JEWEL_MASK 186 (protocols & NFC_PROTO_JEWEL_MASK
188 || protocols & NFC_PROTO_MIFARE_MASK 187 || protocols & NFC_PROTO_MIFARE_MASK
189 || protocols & NFC_PROTO_ISO14443_MASK 188 || protocols & NFC_PROTO_ISO14443_MASK
190 || protocols & NFC_PROTO_NFC_DEP_MASK)) { 189 || protocols & NFC_PROTO_NFC_DEP_MASK)) {
191 cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = 190 cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
192 NCI_NFC_A_PASSIVE_POLL_MODE; 191 NCI_NFC_A_PASSIVE_POLL_MODE;
193 cmd.disc_configs[cmd.num_disc_configs].frequency = 1; 192 cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
194 cmd.num_disc_configs++; 193 cmd.num_disc_configs++;
195 } 194 }
196 195
197 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && 196 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
198 (protocols & NFC_PROTO_ISO14443_MASK)) { 197 (protocols & NFC_PROTO_ISO14443_MASK)) {
199 cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = 198 cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
200 NCI_NFC_B_PASSIVE_POLL_MODE; 199 NCI_NFC_B_PASSIVE_POLL_MODE;
201 cmd.disc_configs[cmd.num_disc_configs].frequency = 1; 200 cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
202 cmd.num_disc_configs++; 201 cmd.num_disc_configs++;
203 } 202 }
204 203
205 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && 204 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
206 (protocols & NFC_PROTO_FELICA_MASK 205 (protocols & NFC_PROTO_FELICA_MASK
207 || protocols & NFC_PROTO_NFC_DEP_MASK)) { 206 || protocols & NFC_PROTO_NFC_DEP_MASK)) {
208 cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = 207 cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
209 NCI_NFC_F_PASSIVE_POLL_MODE; 208 NCI_NFC_F_PASSIVE_POLL_MODE;
210 cmd.disc_configs[cmd.num_disc_configs].frequency = 1; 209 cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
211 cmd.num_disc_configs++; 210 cmd.num_disc_configs++;
212 } 211 }
213 212
214 nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD, 213 nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
215 (1 + (cmd.num_disc_configs*sizeof(struct disc_config))), 214 (1 + (cmd.num_disc_configs * sizeof(struct disc_config))),
216 &cmd); 215 &cmd);
216}
217
218struct nci_rf_discover_select_param {
219 __u8 rf_discovery_id;
220 __u8 rf_protocol;
221};
222
223static void nci_rf_discover_select_req(struct nci_dev *ndev, unsigned long opt)
224{
225 struct nci_rf_discover_select_param *param =
226 (struct nci_rf_discover_select_param *)opt;
227 struct nci_rf_discover_select_cmd cmd;
228
229 cmd.rf_discovery_id = param->rf_discovery_id;
230 cmd.rf_protocol = param->rf_protocol;
231
232 switch (cmd.rf_protocol) {
233 case NCI_RF_PROTOCOL_ISO_DEP:
234 cmd.rf_interface = NCI_RF_INTERFACE_ISO_DEP;
235 break;
236
237 case NCI_RF_PROTOCOL_NFC_DEP:
238 cmd.rf_interface = NCI_RF_INTERFACE_NFC_DEP;
239 break;
240
241 default:
242 cmd.rf_interface = NCI_RF_INTERFACE_FRAME;
243 break;
244 }
245
246 nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_SELECT_CMD,
247 sizeof(struct nci_rf_discover_select_cmd), &cmd);
217} 248}
218 249
219static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt) 250static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
@@ -223,8 +254,7 @@ static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
223 cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE; 254 cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;
224 255
225 nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD, 256 nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
226 sizeof(struct nci_rf_deactivate_cmd), 257 sizeof(struct nci_rf_deactivate_cmd), &cmd);
227 &cmd);
228} 258}
229 259
230static int nci_open_device(struct nci_dev *ndev) 260static int nci_open_device(struct nci_dev *ndev)
@@ -248,22 +278,24 @@ static int nci_open_device(struct nci_dev *ndev)
248 set_bit(NCI_INIT, &ndev->flags); 278 set_bit(NCI_INIT, &ndev->flags);
249 279
250 rc = __nci_request(ndev, nci_reset_req, 0, 280 rc = __nci_request(ndev, nci_reset_req, 0,
251 msecs_to_jiffies(NCI_RESET_TIMEOUT)); 281 msecs_to_jiffies(NCI_RESET_TIMEOUT));
252 282
253 if (!rc) { 283 if (!rc) {
254 rc = __nci_request(ndev, nci_init_req, 0, 284 rc = __nci_request(ndev, nci_init_req, 0,
255 msecs_to_jiffies(NCI_INIT_TIMEOUT)); 285 msecs_to_jiffies(NCI_INIT_TIMEOUT));
256 } 286 }
257 287
258 if (!rc) { 288 if (!rc) {
259 rc = __nci_request(ndev, nci_init_complete_req, 0, 289 rc = __nci_request(ndev, nci_init_complete_req, 0,
260 msecs_to_jiffies(NCI_INIT_TIMEOUT)); 290 msecs_to_jiffies(NCI_INIT_TIMEOUT));
261 } 291 }
262 292
263 clear_bit(NCI_INIT, &ndev->flags); 293 clear_bit(NCI_INIT, &ndev->flags);
264 294
265 if (!rc) { 295 if (!rc) {
266 set_bit(NCI_UP, &ndev->flags); 296 set_bit(NCI_UP, &ndev->flags);
297 nci_clear_target_list(ndev);
298 atomic_set(&ndev->state, NCI_IDLE);
267 } else { 299 } else {
268 /* Init failed, cleanup */ 300 /* Init failed, cleanup */
269 skb_queue_purge(&ndev->cmd_q); 301 skb_queue_purge(&ndev->cmd_q);
@@ -286,6 +318,7 @@ static int nci_close_device(struct nci_dev *ndev)
286 318
287 if (!test_and_clear_bit(NCI_UP, &ndev->flags)) { 319 if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
288 del_timer_sync(&ndev->cmd_timer); 320 del_timer_sync(&ndev->cmd_timer);
321 del_timer_sync(&ndev->data_timer);
289 mutex_unlock(&ndev->req_lock); 322 mutex_unlock(&ndev->req_lock);
290 return 0; 323 return 0;
291 } 324 }
@@ -304,7 +337,7 @@ static int nci_close_device(struct nci_dev *ndev)
304 337
305 set_bit(NCI_INIT, &ndev->flags); 338 set_bit(NCI_INIT, &ndev->flags);
306 __nci_request(ndev, nci_reset_req, 0, 339 __nci_request(ndev, nci_reset_req, 0,
307 msecs_to_jiffies(NCI_RESET_TIMEOUT)); 340 msecs_to_jiffies(NCI_RESET_TIMEOUT));
308 clear_bit(NCI_INIT, &ndev->flags); 341 clear_bit(NCI_INIT, &ndev->flags);
309 342
310 /* Flush cmd wq */ 343 /* Flush cmd wq */
@@ -331,6 +364,15 @@ static void nci_cmd_timer(unsigned long arg)
331 queue_work(ndev->cmd_wq, &ndev->cmd_work); 364 queue_work(ndev->cmd_wq, &ndev->cmd_work);
332} 365}
333 366
367/* NCI data exchange timer function */
368static void nci_data_timer(unsigned long arg)
369{
370 struct nci_dev *ndev = (void *) arg;
371
372 set_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
373 queue_work(ndev->rx_wq, &ndev->rx_work);
374}
375
334static int nci_dev_up(struct nfc_dev *nfc_dev) 376static int nci_dev_up(struct nfc_dev *nfc_dev)
335{ 377{
336 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 378 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
@@ -350,7 +392,8 @@ static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
350 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 392 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
351 int rc; 393 int rc;
352 394
353 if (test_bit(NCI_DISCOVERY, &ndev->flags)) { 395 if ((atomic_read(&ndev->state) == NCI_DISCOVERY) ||
396 (atomic_read(&ndev->state) == NCI_W4_ALL_DISCOVERIES)) {
354 pr_err("unable to start poll, since poll is already active\n"); 397 pr_err("unable to start poll, since poll is already active\n");
355 return -EBUSY; 398 return -EBUSY;
356 } 399 }
@@ -360,17 +403,18 @@ static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
360 return -EBUSY; 403 return -EBUSY;
361 } 404 }
362 405
363 if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) { 406 if ((atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) ||
364 pr_debug("target is active, implicitly deactivate...\n"); 407 (atomic_read(&ndev->state) == NCI_POLL_ACTIVE)) {
408 pr_debug("target active or w4 select, implicitly deactivate\n");
365 409
366 rc = nci_request(ndev, nci_rf_deactivate_req, 0, 410 rc = nci_request(ndev, nci_rf_deactivate_req, 0,
367 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT)); 411 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
368 if (rc) 412 if (rc)
369 return -EBUSY; 413 return -EBUSY;
370 } 414 }
371 415
372 rc = nci_request(ndev, nci_rf_discover_req, protocols, 416 rc = nci_request(ndev, nci_rf_discover_req, protocols,
373 msecs_to_jiffies(NCI_RF_DISC_TIMEOUT)); 417 msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));
374 418
375 if (!rc) 419 if (!rc)
376 ndev->poll_prots = protocols; 420 ndev->poll_prots = protocols;
@@ -382,23 +426,29 @@ static void nci_stop_poll(struct nfc_dev *nfc_dev)
382{ 426{
383 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 427 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
384 428
385 if (!test_bit(NCI_DISCOVERY, &ndev->flags)) { 429 if ((atomic_read(&ndev->state) != NCI_DISCOVERY) &&
430 (atomic_read(&ndev->state) != NCI_W4_ALL_DISCOVERIES)) {
386 pr_err("unable to stop poll, since poll is not active\n"); 431 pr_err("unable to stop poll, since poll is not active\n");
387 return; 432 return;
388 } 433 }
389 434
390 nci_request(ndev, nci_rf_deactivate_req, 0, 435 nci_request(ndev, nci_rf_deactivate_req, 0,
391 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT)); 436 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
392} 437}
393 438
394static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx, 439static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
395 __u32 protocol) 440 __u32 protocol)
396{ 441{
397 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 442 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
443 struct nci_rf_discover_select_param param;
444 struct nfc_target *target = NULL;
445 int i;
446 int rc = 0;
398 447
399 pr_debug("target_idx %d, protocol 0x%x\n", target_idx, protocol); 448 pr_debug("target_idx %d, protocol 0x%x\n", target_idx, protocol);
400 449
401 if (!test_bit(NCI_POLL_ACTIVE, &ndev->flags)) { 450 if ((atomic_read(&ndev->state) != NCI_W4_HOST_SELECT) &&
451 (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
402 pr_err("there is no available target to activate\n"); 452 pr_err("there is no available target to activate\n");
403 return -EINVAL; 453 return -EINVAL;
404 } 454 }
@@ -408,16 +458,47 @@ static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
408 return -EBUSY; 458 return -EBUSY;
409 } 459 }
410 460
411 if (!(ndev->target_available_prots & (1 << protocol))) { 461 for (i = 0; i < ndev->n_targets; i++) {
462 if (ndev->targets[i].idx == target_idx) {
463 target = &ndev->targets[i];
464 break;
465 }
466 }
467
468 if (!target) {
469 pr_err("unable to find the selected target\n");
470 return -EINVAL;
471 }
472
473 if (!(target->supported_protocols & (1 << protocol))) {
412 pr_err("target does not support the requested protocol 0x%x\n", 474 pr_err("target does not support the requested protocol 0x%x\n",
413 protocol); 475 protocol);
414 return -EINVAL; 476 return -EINVAL;
415 } 477 }
416 478
417 ndev->target_active_prot = protocol; 479 if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) {
418 ndev->target_available_prots = 0; 480 param.rf_discovery_id = target->idx;
419 481
420 return 0; 482 if (protocol == NFC_PROTO_JEWEL)
483 param.rf_protocol = NCI_RF_PROTOCOL_T1T;
484 else if (protocol == NFC_PROTO_MIFARE)
485 param.rf_protocol = NCI_RF_PROTOCOL_T2T;
486 else if (protocol == NFC_PROTO_FELICA)
487 param.rf_protocol = NCI_RF_PROTOCOL_T3T;
488 else if (protocol == NFC_PROTO_ISO14443)
489 param.rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
490 else
491 param.rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
492
493 rc = nci_request(ndev, nci_rf_discover_select_req,
494 (unsigned long)&param,
495 msecs_to_jiffies(NCI_RF_DISC_SELECT_TIMEOUT));
496 }
497
498 if (!rc)
499 ndev->target_active_prot = protocol;
500
501 return rc;
421} 502}
422 503
423static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx) 504static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
@@ -433,16 +514,15 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
433 514
434 ndev->target_active_prot = 0; 515 ndev->target_active_prot = 0;
435 516
436 if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) { 517 if (atomic_read(&ndev->state) == NCI_POLL_ACTIVE) {
437 nci_request(ndev, nci_rf_deactivate_req, 0, 518 nci_request(ndev, nci_rf_deactivate_req, 0,
438 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT)); 519 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
439 } 520 }
440} 521}
441 522
442static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx, 523static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
443 struct sk_buff *skb, 524 struct sk_buff *skb,
444 data_exchange_cb_t cb, 525 data_exchange_cb_t cb, void *cb_context)
445 void *cb_context)
446{ 526{
447 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 527 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
448 int rc; 528 int rc;
@@ -487,9 +567,8 @@ static struct nfc_ops nci_nfc_ops = {
487 * @supported_protocols: NFC protocols supported by the device 567 * @supported_protocols: NFC protocols supported by the device
488 */ 568 */
489struct nci_dev *nci_allocate_device(struct nci_ops *ops, 569struct nci_dev *nci_allocate_device(struct nci_ops *ops,
490 __u32 supported_protocols, 570 __u32 supported_protocols,
491 int tx_headroom, 571 int tx_headroom, int tx_tailroom)
492 int tx_tailroom)
493{ 572{
494 struct nci_dev *ndev; 573 struct nci_dev *ndev;
495 574
@@ -510,9 +589,9 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops,
510 ndev->tx_tailroom = tx_tailroom; 589 ndev->tx_tailroom = tx_tailroom;
511 590
512 ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops, 591 ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
513 supported_protocols, 592 supported_protocols,
514 tx_headroom + NCI_DATA_HDR_SIZE, 593 tx_headroom + NCI_DATA_HDR_SIZE,
515 tx_tailroom); 594 tx_tailroom);
516 if (!ndev->nfc_dev) 595 if (!ndev->nfc_dev)
517 goto free_exit; 596 goto free_exit;
518 597
@@ -584,7 +663,9 @@ int nci_register_device(struct nci_dev *ndev)
584 skb_queue_head_init(&ndev->tx_q); 663 skb_queue_head_init(&ndev->tx_q);
585 664
586 setup_timer(&ndev->cmd_timer, nci_cmd_timer, 665 setup_timer(&ndev->cmd_timer, nci_cmd_timer,
587 (unsigned long) ndev); 666 (unsigned long) ndev);
667 setup_timer(&ndev->data_timer, nci_data_timer,
668 (unsigned long) ndev);
588 669
589 mutex_init(&ndev->req_lock); 670 mutex_init(&ndev->req_lock);
590 671
@@ -633,7 +714,7 @@ int nci_recv_frame(struct sk_buff *skb)
633 pr_debug("len %d\n", skb->len); 714 pr_debug("len %d\n", skb->len);
634 715
635 if (!ndev || (!test_bit(NCI_UP, &ndev->flags) 716 if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
636 && !test_bit(NCI_INIT, &ndev->flags))) { 717 && !test_bit(NCI_INIT, &ndev->flags))) {
637 kfree_skb(skb); 718 kfree_skb(skb);
638 return -ENXIO; 719 return -ENXIO;
639 } 720 }
@@ -713,7 +794,7 @@ static void nci_tx_work(struct work_struct *work)
713 794
714 /* Check if data flow control is used */ 795 /* Check if data flow control is used */
715 if (atomic_read(&ndev->credits_cnt) != 796 if (atomic_read(&ndev->credits_cnt) !=
716 NCI_DATA_FLOW_CONTROL_NOT_USED) 797 NCI_DATA_FLOW_CONTROL_NOT_USED)
717 atomic_dec(&ndev->credits_cnt); 798 atomic_dec(&ndev->credits_cnt);
718 799
719 pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n", 800 pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
@@ -722,6 +803,9 @@ static void nci_tx_work(struct work_struct *work)
722 nci_plen(skb->data)); 803 nci_plen(skb->data));
723 804
724 nci_send_frame(skb); 805 nci_send_frame(skb);
806
807 mod_timer(&ndev->data_timer,
808 jiffies + msecs_to_jiffies(NCI_DATA_TIMEOUT));
725 } 809 }
726} 810}
727 811
@@ -753,6 +837,15 @@ static void nci_rx_work(struct work_struct *work)
753 break; 837 break;
754 } 838 }
755 } 839 }
840
841 /* check if a data exchange timout has occurred */
842 if (test_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags)) {
843 /* complete the data exchange transaction, if exists */
844 if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
845 nci_data_exchange_complete(ndev, NULL, -ETIMEDOUT);
846
847 clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
848 }
756} 849}
757 850
758/* ----- NCI TX CMD worker thread ----- */ 851/* ----- NCI TX CMD worker thread ----- */
@@ -781,6 +874,6 @@ static void nci_cmd_work(struct work_struct *work)
781 nci_send_frame(skb); 874 nci_send_frame(skb);
782 875
783 mod_timer(&ndev->cmd_timer, 876 mod_timer(&ndev->cmd_timer,
784 jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT)); 877 jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
785 } 878 }
786} 879}
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
index e5756b30e602..a0bc326308a5 100644
--- a/net/nfc/nci/data.c
+++ b/net/nfc/nci/data.c
@@ -35,8 +35,7 @@
35#include <linux/nfc.h> 35#include <linux/nfc.h>
36 36
37/* Complete data exchange transaction and forward skb to nfc core */ 37/* Complete data exchange transaction and forward skb to nfc core */
38void nci_data_exchange_complete(struct nci_dev *ndev, 38void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
39 struct sk_buff *skb,
40 int err) 39 int err)
41{ 40{
42 data_exchange_cb_t cb = ndev->data_exchange_cb; 41 data_exchange_cb_t cb = ndev->data_exchange_cb;
@@ -44,6 +43,10 @@ void nci_data_exchange_complete(struct nci_dev *ndev,
44 43
45 pr_debug("len %d, err %d\n", skb ? skb->len : 0, err); 44 pr_debug("len %d, err %d\n", skb ? skb->len : 0, err);
46 45
46 /* data exchange is complete, stop the data timer */
47 del_timer_sync(&ndev->data_timer);
48 clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
49
47 if (cb) { 50 if (cb) {
48 ndev->data_exchange_cb = NULL; 51 ndev->data_exchange_cb = NULL;
49 ndev->data_exchange_cb_context = 0; 52 ndev->data_exchange_cb_context = 0;
@@ -63,9 +66,9 @@ void nci_data_exchange_complete(struct nci_dev *ndev,
63/* ----------------- NCI TX Data ----------------- */ 66/* ----------------- NCI TX Data ----------------- */
64 67
65static inline void nci_push_data_hdr(struct nci_dev *ndev, 68static inline void nci_push_data_hdr(struct nci_dev *ndev,
66 __u8 conn_id, 69 __u8 conn_id,
67 struct sk_buff *skb, 70 struct sk_buff *skb,
68 __u8 pbf) 71 __u8 pbf)
69{ 72{
70 struct nci_data_hdr *hdr; 73 struct nci_data_hdr *hdr;
71 int plen = skb->len; 74 int plen = skb->len;
@@ -82,8 +85,8 @@ static inline void nci_push_data_hdr(struct nci_dev *ndev,
82} 85}
83 86
84static int nci_queue_tx_data_frags(struct nci_dev *ndev, 87static int nci_queue_tx_data_frags(struct nci_dev *ndev,
85 __u8 conn_id, 88 __u8 conn_id,
86 struct sk_buff *skb) { 89 struct sk_buff *skb) {
87 int total_len = skb->len; 90 int total_len = skb->len;
88 unsigned char *data = skb->data; 91 unsigned char *data = skb->data;
89 unsigned long flags; 92 unsigned long flags;
@@ -101,8 +104,8 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
101 min_t(int, total_len, ndev->max_data_pkt_payload_size); 104 min_t(int, total_len, ndev->max_data_pkt_payload_size);
102 105
103 skb_frag = nci_skb_alloc(ndev, 106 skb_frag = nci_skb_alloc(ndev,
104 (NCI_DATA_HDR_SIZE + frag_len), 107 (NCI_DATA_HDR_SIZE + frag_len),
105 GFP_KERNEL); 108 GFP_KERNEL);
106 if (skb_frag == NULL) { 109 if (skb_frag == NULL) {
107 rc = -ENOMEM; 110 rc = -ENOMEM;
108 goto free_exit; 111 goto free_exit;
@@ -114,7 +117,8 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
114 117
115 /* second, set the header */ 118 /* second, set the header */
116 nci_push_data_hdr(ndev, conn_id, skb_frag, 119 nci_push_data_hdr(ndev, conn_id, skb_frag,
117 ((total_len == frag_len) ? (NCI_PBF_LAST) : (NCI_PBF_CONT))); 120 ((total_len == frag_len) ?
121 (NCI_PBF_LAST) : (NCI_PBF_CONT)));
118 122
119 __skb_queue_tail(&frags_q, skb_frag); 123 __skb_queue_tail(&frags_q, skb_frag);
120 124
@@ -182,8 +186,8 @@ exit:
182/* ----------------- NCI RX Data ----------------- */ 186/* ----------------- NCI RX Data ----------------- */
183 187
184static void nci_add_rx_data_frag(struct nci_dev *ndev, 188static void nci_add_rx_data_frag(struct nci_dev *ndev,
185 struct sk_buff *skb, 189 struct sk_buff *skb,
186 __u8 pbf) 190 __u8 pbf)
187{ 191{
188 int reassembly_len; 192 int reassembly_len;
189 int err = 0; 193 int err = 0;
@@ -207,8 +211,8 @@ static void nci_add_rx_data_frag(struct nci_dev *ndev,
207 211
208 /* second, combine the two fragments */ 212 /* second, combine the two fragments */
209 memcpy(skb_push(skb, reassembly_len), 213 memcpy(skb_push(skb, reassembly_len),
210 ndev->rx_data_reassembly->data, 214 ndev->rx_data_reassembly->data,
211 reassembly_len); 215 reassembly_len);
212 216
213 /* third, free old reassembly */ 217 /* third, free old reassembly */
214 kfree_skb(ndev->rx_data_reassembly); 218 kfree_skb(ndev->rx_data_reassembly);
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index b16a8dc2afbe..2e3dee42196d 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -40,7 +40,7 @@
40/* Handle NCI Notification packets */ 40/* Handle NCI Notification packets */
41 41
42static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev, 42static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
43 struct sk_buff *skb) 43 struct sk_buff *skb)
44{ 44{
45 struct nci_core_conn_credit_ntf *ntf = (void *) skb->data; 45 struct nci_core_conn_credit_ntf *ntf = (void *) skb->data;
46 int i; 46 int i;
@@ -62,7 +62,7 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
62 if (ntf->conn_entries[i].conn_id == NCI_STATIC_RF_CONN_ID) { 62 if (ntf->conn_entries[i].conn_id == NCI_STATIC_RF_CONN_ID) {
63 /* found static rf connection */ 63 /* found static rf connection */
64 atomic_add(ntf->conn_entries[i].credits, 64 atomic_add(ntf->conn_entries[i].credits,
65 &ndev->credits_cnt); 65 &ndev->credits_cnt);
66 } 66 }
67 } 67 }
68 68
@@ -71,6 +71,20 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
71 queue_work(ndev->tx_wq, &ndev->tx_work); 71 queue_work(ndev->tx_wq, &ndev->tx_work);
72} 72}
73 73
74static void nci_core_generic_error_ntf_packet(struct nci_dev *ndev,
75 struct sk_buff *skb)
76{
77 __u8 status = skb->data[0];
78
79 pr_debug("status 0x%x\n", status);
80
81 if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) {
82 /* Activation failed, so complete the request
83 (the state remains the same) */
84 nci_req_complete(ndev, status);
85 }
86}
87
74static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev, 88static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
75 struct sk_buff *skb) 89 struct sk_buff *skb)
76{ 90{
@@ -86,12 +100,9 @@ static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
86} 100}
87 101
88static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev, 102static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
89 struct nci_rf_intf_activated_ntf *ntf, __u8 *data) 103 struct rf_tech_specific_params_nfca_poll *nfca_poll,
104 __u8 *data)
90{ 105{
91 struct rf_tech_specific_params_nfca_poll *nfca_poll;
92
93 nfca_poll = &ntf->rf_tech_specific_params.nfca_poll;
94
95 nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data)); 106 nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
96 data += 2; 107 data += 2;
97 108
@@ -115,79 +126,266 @@ static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
115 return data; 126 return data;
116} 127}
117 128
129static __u8 *nci_extract_rf_params_nfcb_passive_poll(struct nci_dev *ndev,
130 struct rf_tech_specific_params_nfcb_poll *nfcb_poll,
131 __u8 *data)
132{
133 nfcb_poll->sensb_res_len = *data++;
134
135 pr_debug("sensb_res_len %d\n", nfcb_poll->sensb_res_len);
136
137 memcpy(nfcb_poll->sensb_res, data, nfcb_poll->sensb_res_len);
138 data += nfcb_poll->sensb_res_len;
139
140 return data;
141}
142
143static __u8 *nci_extract_rf_params_nfcf_passive_poll(struct nci_dev *ndev,
144 struct rf_tech_specific_params_nfcf_poll *nfcf_poll,
145 __u8 *data)
146{
147 nfcf_poll->bit_rate = *data++;
148 nfcf_poll->sensf_res_len = *data++;
149
150 pr_debug("bit_rate %d, sensf_res_len %d\n",
151 nfcf_poll->bit_rate, nfcf_poll->sensf_res_len);
152
153 memcpy(nfcf_poll->sensf_res, data, nfcf_poll->sensf_res_len);
154 data += nfcf_poll->sensf_res_len;
155
156 return data;
157}
158
159static int nci_add_new_protocol(struct nci_dev *ndev,
160 struct nfc_target *target,
161 __u8 rf_protocol,
162 __u8 rf_tech_and_mode,
163 void *params)
164{
165 struct rf_tech_specific_params_nfca_poll *nfca_poll;
166 struct rf_tech_specific_params_nfcb_poll *nfcb_poll;
167 struct rf_tech_specific_params_nfcf_poll *nfcf_poll;
168 __u32 protocol;
169
170 if (rf_protocol == NCI_RF_PROTOCOL_T2T)
171 protocol = NFC_PROTO_MIFARE_MASK;
172 else if (rf_protocol == NCI_RF_PROTOCOL_ISO_DEP)
173 protocol = NFC_PROTO_ISO14443_MASK;
174 else if (rf_protocol == NCI_RF_PROTOCOL_T3T)
175 protocol = NFC_PROTO_FELICA_MASK;
176 else
177 protocol = 0;
178
179 if (!(protocol & ndev->poll_prots)) {
180 pr_err("the target found does not have the desired protocol\n");
181 return -EPROTO;
182 }
183
184 if (rf_tech_and_mode == NCI_NFC_A_PASSIVE_POLL_MODE) {
185 nfca_poll = (struct rf_tech_specific_params_nfca_poll *)params;
186
187 target->sens_res = nfca_poll->sens_res;
188 target->sel_res = nfca_poll->sel_res;
189 target->nfcid1_len = nfca_poll->nfcid1_len;
190 if (target->nfcid1_len > 0) {
191 memcpy(target->nfcid1, nfca_poll->nfcid1,
192 target->nfcid1_len);
193 }
194 } else if (rf_tech_and_mode == NCI_NFC_B_PASSIVE_POLL_MODE) {
195 nfcb_poll = (struct rf_tech_specific_params_nfcb_poll *)params;
196
197 target->sensb_res_len = nfcb_poll->sensb_res_len;
198 if (target->sensb_res_len > 0) {
199 memcpy(target->sensb_res, nfcb_poll->sensb_res,
200 target->sensb_res_len);
201 }
202 } else if (rf_tech_and_mode == NCI_NFC_F_PASSIVE_POLL_MODE) {
203 nfcf_poll = (struct rf_tech_specific_params_nfcf_poll *)params;
204
205 target->sensf_res_len = nfcf_poll->sensf_res_len;
206 if (target->sensf_res_len > 0) {
207 memcpy(target->sensf_res, nfcf_poll->sensf_res,
208 target->sensf_res_len);
209 }
210 } else {
211 pr_err("unsupported rf_tech_and_mode 0x%x\n", rf_tech_and_mode);
212 return -EPROTO;
213 }
214
215 target->supported_protocols |= protocol;
216
217 pr_debug("protocol 0x%x\n", protocol);
218
219 return 0;
220}
221
222static void nci_add_new_target(struct nci_dev *ndev,
223 struct nci_rf_discover_ntf *ntf)
224{
225 struct nfc_target *target;
226 int i, rc;
227
228 for (i = 0; i < ndev->n_targets; i++) {
229 target = &ndev->targets[i];
230 if (target->idx == ntf->rf_discovery_id) {
231 /* This target already exists, add the new protocol */
232 nci_add_new_protocol(ndev, target, ntf->rf_protocol,
233 ntf->rf_tech_and_mode,
234 &ntf->rf_tech_specific_params);
235 return;
236 }
237 }
238
239 /* This is a new target, check if we've enough room */
240 if (ndev->n_targets == NCI_MAX_DISCOVERED_TARGETS) {
241 pr_debug("not enough room, ignoring new target...\n");
242 return;
243 }
244
245 target = &ndev->targets[ndev->n_targets];
246
247 rc = nci_add_new_protocol(ndev, target, ntf->rf_protocol,
248 ntf->rf_tech_and_mode,
249 &ntf->rf_tech_specific_params);
250 if (!rc) {
251 target->idx = ntf->rf_discovery_id;
252 ndev->n_targets++;
253
254 pr_debug("target_idx %d, n_targets %d\n", target->idx,
255 ndev->n_targets);
256 }
257}
258
259void nci_clear_target_list(struct nci_dev *ndev)
260{
261 memset(ndev->targets, 0,
262 (sizeof(struct nfc_target)*NCI_MAX_DISCOVERED_TARGETS));
263
264 ndev->n_targets = 0;
265}
266
267static void nci_rf_discover_ntf_packet(struct nci_dev *ndev,
268 struct sk_buff *skb)
269{
270 struct nci_rf_discover_ntf ntf;
271 __u8 *data = skb->data;
272 bool add_target = true;
273
274 ntf.rf_discovery_id = *data++;
275 ntf.rf_protocol = *data++;
276 ntf.rf_tech_and_mode = *data++;
277 ntf.rf_tech_specific_params_len = *data++;
278
279 pr_debug("rf_discovery_id %d\n", ntf.rf_discovery_id);
280 pr_debug("rf_protocol 0x%x\n", ntf.rf_protocol);
281 pr_debug("rf_tech_and_mode 0x%x\n", ntf.rf_tech_and_mode);
282 pr_debug("rf_tech_specific_params_len %d\n",
283 ntf.rf_tech_specific_params_len);
284
285 if (ntf.rf_tech_specific_params_len > 0) {
286 switch (ntf.rf_tech_and_mode) {
287 case NCI_NFC_A_PASSIVE_POLL_MODE:
288 data = nci_extract_rf_params_nfca_passive_poll(ndev,
289 &(ntf.rf_tech_specific_params.nfca_poll), data);
290 break;
291
292 case NCI_NFC_B_PASSIVE_POLL_MODE:
293 data = nci_extract_rf_params_nfcb_passive_poll(ndev,
294 &(ntf.rf_tech_specific_params.nfcb_poll), data);
295 break;
296
297 case NCI_NFC_F_PASSIVE_POLL_MODE:
298 data = nci_extract_rf_params_nfcf_passive_poll(ndev,
299 &(ntf.rf_tech_specific_params.nfcf_poll), data);
300 break;
301
302 default:
303 pr_err("unsupported rf_tech_and_mode 0x%x\n",
304 ntf.rf_tech_and_mode);
305 data += ntf.rf_tech_specific_params_len;
306 add_target = false;
307 }
308 }
309
310 ntf.ntf_type = *data++;
311 pr_debug("ntf_type %d\n", ntf.ntf_type);
312
313 if (add_target == true)
314 nci_add_new_target(ndev, &ntf);
315
316 if (ntf.ntf_type == NCI_DISCOVER_NTF_TYPE_MORE) {
317 atomic_set(&ndev->state, NCI_W4_ALL_DISCOVERIES);
318 } else {
319 atomic_set(&ndev->state, NCI_W4_HOST_SELECT);
320 nfc_targets_found(ndev->nfc_dev, ndev->targets,
321 ndev->n_targets);
322 }
323}
324
118static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev, 325static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
119 struct nci_rf_intf_activated_ntf *ntf, __u8 *data) 326 struct nci_rf_intf_activated_ntf *ntf, __u8 *data)
120{ 327{
121 struct activation_params_nfca_poll_iso_dep *nfca_poll; 328 struct activation_params_nfca_poll_iso_dep *nfca_poll;
329 struct activation_params_nfcb_poll_iso_dep *nfcb_poll;
122 330
123 switch (ntf->activation_rf_tech_and_mode) { 331 switch (ntf->activation_rf_tech_and_mode) {
124 case NCI_NFC_A_PASSIVE_POLL_MODE: 332 case NCI_NFC_A_PASSIVE_POLL_MODE:
125 nfca_poll = &ntf->activation_params.nfca_poll_iso_dep; 333 nfca_poll = &ntf->activation_params.nfca_poll_iso_dep;
126 nfca_poll->rats_res_len = *data++; 334 nfca_poll->rats_res_len = *data++;
335 pr_debug("rats_res_len %d\n", nfca_poll->rats_res_len);
127 if (nfca_poll->rats_res_len > 0) { 336 if (nfca_poll->rats_res_len > 0) {
128 memcpy(nfca_poll->rats_res, 337 memcpy(nfca_poll->rats_res,
129 data, 338 data, nfca_poll->rats_res_len);
130 nfca_poll->rats_res_len); 339 }
340 break;
341
342 case NCI_NFC_B_PASSIVE_POLL_MODE:
343 nfcb_poll = &ntf->activation_params.nfcb_poll_iso_dep;
344 nfcb_poll->attrib_res_len = *data++;
345 pr_debug("attrib_res_len %d\n", nfcb_poll->attrib_res_len);
346 if (nfcb_poll->attrib_res_len > 0) {
347 memcpy(nfcb_poll->attrib_res,
348 data, nfcb_poll->attrib_res_len);
131 } 349 }
132 break; 350 break;
133 351
134 default: 352 default:
135 pr_err("unsupported activation_rf_tech_and_mode 0x%x\n", 353 pr_err("unsupported activation_rf_tech_and_mode 0x%x\n",
136 ntf->activation_rf_tech_and_mode); 354 ntf->activation_rf_tech_and_mode);
137 return -EPROTO; 355 return NCI_STATUS_RF_PROTOCOL_ERROR;
138 } 356 }
139 357
140 return 0; 358 return NCI_STATUS_OK;
141} 359}
142 360
143static void nci_target_found(struct nci_dev *ndev, 361static void nci_target_auto_activated(struct nci_dev *ndev,
144 struct nci_rf_intf_activated_ntf *ntf) 362 struct nci_rf_intf_activated_ntf *ntf)
145{ 363{
146 struct nfc_target nfc_tgt; 364 struct nfc_target *target;
365 int rc;
147 366
148 if (ntf->rf_protocol == NCI_RF_PROTOCOL_T2T) /* T2T MifareUL */ 367 target = &ndev->targets[ndev->n_targets];
149 nfc_tgt.supported_protocols = NFC_PROTO_MIFARE_MASK;
150 else if (ntf->rf_protocol == NCI_RF_PROTOCOL_ISO_DEP) /* 4A */
151 nfc_tgt.supported_protocols = NFC_PROTO_ISO14443_MASK;
152 else
153 nfc_tgt.supported_protocols = 0;
154
155 nfc_tgt.sens_res = ntf->rf_tech_specific_params.nfca_poll.sens_res;
156 nfc_tgt.sel_res = ntf->rf_tech_specific_params.nfca_poll.sel_res;
157 nfc_tgt.nfcid1_len = ntf->rf_tech_specific_params.nfca_poll.nfcid1_len;
158 if (nfc_tgt.nfcid1_len > 0) {
159 memcpy(nfc_tgt.nfcid1,
160 ntf->rf_tech_specific_params.nfca_poll.nfcid1,
161 nfc_tgt.nfcid1_len);
162 }
163 368
164 if (!(nfc_tgt.supported_protocols & ndev->poll_prots)) { 369 rc = nci_add_new_protocol(ndev, target, ntf->rf_protocol,
165 pr_debug("the target found does not have the desired protocol\n"); 370 ntf->activation_rf_tech_and_mode,
371 &ntf->rf_tech_specific_params);
372 if (rc)
166 return; 373 return;
167 }
168 374
169 pr_debug("new target found, supported_protocols 0x%x\n", 375 target->idx = ntf->rf_discovery_id;
170 nfc_tgt.supported_protocols); 376 ndev->n_targets++;
171 377
172 ndev->target_available_prots = nfc_tgt.supported_protocols; 378 pr_debug("target_idx %d, n_targets %d\n", target->idx, ndev->n_targets);
173 ndev->max_data_pkt_payload_size = ntf->max_data_pkt_payload_size;
174 ndev->initial_num_credits = ntf->initial_num_credits;
175 379
176 /* set the available credits to initial value */ 380 nfc_targets_found(ndev->nfc_dev, ndev->targets, ndev->n_targets);
177 atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
178
179 nfc_targets_found(ndev->nfc_dev, &nfc_tgt, 1);
180} 381}
181 382
182static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev, 383static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
183 struct sk_buff *skb) 384 struct sk_buff *skb)
184{ 385{
185 struct nci_rf_intf_activated_ntf ntf; 386 struct nci_rf_intf_activated_ntf ntf;
186 __u8 *data = skb->data; 387 __u8 *data = skb->data;
187 int err = 0; 388 int err = NCI_STATUS_OK;
188
189 clear_bit(NCI_DISCOVERY, &ndev->flags);
190 set_bit(NCI_POLL_ACTIVE, &ndev->flags);
191 389
192 ntf.rf_discovery_id = *data++; 390 ntf.rf_discovery_id = *data++;
193 ntf.rf_interface = *data++; 391 ntf.rf_interface = *data++;
@@ -204,7 +402,8 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
204 ntf.activation_rf_tech_and_mode); 402 ntf.activation_rf_tech_and_mode);
205 pr_debug("max_data_pkt_payload_size 0x%x\n", 403 pr_debug("max_data_pkt_payload_size 0x%x\n",
206 ntf.max_data_pkt_payload_size); 404 ntf.max_data_pkt_payload_size);
207 pr_debug("initial_num_credits 0x%x\n", ntf.initial_num_credits); 405 pr_debug("initial_num_credits 0x%x\n",
406 ntf.initial_num_credits);
208 pr_debug("rf_tech_specific_params_len %d\n", 407 pr_debug("rf_tech_specific_params_len %d\n",
209 ntf.rf_tech_specific_params_len); 408 ntf.rf_tech_specific_params_len);
210 409
@@ -212,13 +411,24 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
212 switch (ntf.activation_rf_tech_and_mode) { 411 switch (ntf.activation_rf_tech_and_mode) {
213 case NCI_NFC_A_PASSIVE_POLL_MODE: 412 case NCI_NFC_A_PASSIVE_POLL_MODE:
214 data = nci_extract_rf_params_nfca_passive_poll(ndev, 413 data = nci_extract_rf_params_nfca_passive_poll(ndev,
215 &ntf, data); 414 &(ntf.rf_tech_specific_params.nfca_poll), data);
415 break;
416
417 case NCI_NFC_B_PASSIVE_POLL_MODE:
418 data = nci_extract_rf_params_nfcb_passive_poll(ndev,
419 &(ntf.rf_tech_specific_params.nfcb_poll), data);
420 break;
421
422 case NCI_NFC_F_PASSIVE_POLL_MODE:
423 data = nci_extract_rf_params_nfcf_passive_poll(ndev,
424 &(ntf.rf_tech_specific_params.nfcf_poll), data);
216 break; 425 break;
217 426
218 default: 427 default:
219 pr_err("unsupported activation_rf_tech_and_mode 0x%x\n", 428 pr_err("unsupported activation_rf_tech_and_mode 0x%x\n",
220 ntf.activation_rf_tech_and_mode); 429 ntf.activation_rf_tech_and_mode);
221 return; 430 err = NCI_STATUS_RF_PROTOCOL_ERROR;
431 goto exit;
222 } 432 }
223 } 433 }
224 434
@@ -229,18 +439,15 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
229 439
230 pr_debug("data_exch_rf_tech_and_mode 0x%x\n", 440 pr_debug("data_exch_rf_tech_and_mode 0x%x\n",
231 ntf.data_exch_rf_tech_and_mode); 441 ntf.data_exch_rf_tech_and_mode);
232 pr_debug("data_exch_tx_bit_rate 0x%x\n", 442 pr_debug("data_exch_tx_bit_rate 0x%x\n", ntf.data_exch_tx_bit_rate);
233 ntf.data_exch_tx_bit_rate); 443 pr_debug("data_exch_rx_bit_rate 0x%x\n", ntf.data_exch_rx_bit_rate);
234 pr_debug("data_exch_rx_bit_rate 0x%x\n", 444 pr_debug("activation_params_len %d\n", ntf.activation_params_len);
235 ntf.data_exch_rx_bit_rate);
236 pr_debug("activation_params_len %d\n",
237 ntf.activation_params_len);
238 445
239 if (ntf.activation_params_len > 0) { 446 if (ntf.activation_params_len > 0) {
240 switch (ntf.rf_interface) { 447 switch (ntf.rf_interface) {
241 case NCI_RF_INTERFACE_ISO_DEP: 448 case NCI_RF_INTERFACE_ISO_DEP:
242 err = nci_extract_activation_params_iso_dep(ndev, 449 err = nci_extract_activation_params_iso_dep(ndev,
243 &ntf, data); 450 &ntf, data);
244 break; 451 break;
245 452
246 case NCI_RF_INTERFACE_FRAME: 453 case NCI_RF_INTERFACE_FRAME:
@@ -250,24 +457,39 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
250 default: 457 default:
251 pr_err("unsupported rf_interface 0x%x\n", 458 pr_err("unsupported rf_interface 0x%x\n",
252 ntf.rf_interface); 459 ntf.rf_interface);
253 return; 460 err = NCI_STATUS_RF_PROTOCOL_ERROR;
461 break;
254 } 462 }
255 } 463 }
256 464
257 if (!err) 465exit:
258 nci_target_found(ndev, &ntf); 466 if (err == NCI_STATUS_OK) {
467 ndev->max_data_pkt_payload_size = ntf.max_data_pkt_payload_size;
468 ndev->initial_num_credits = ntf.initial_num_credits;
469
470 /* set the available credits to initial value */
471 atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
472 }
473
474 if (atomic_read(&ndev->state) == NCI_DISCOVERY) {
475 /* A single target was found and activated automatically */
476 atomic_set(&ndev->state, NCI_POLL_ACTIVE);
477 if (err == NCI_STATUS_OK)
478 nci_target_auto_activated(ndev, &ntf);
479 } else { /* ndev->state == NCI_W4_HOST_SELECT */
480 /* A selected target was activated, so complete the request */
481 atomic_set(&ndev->state, NCI_POLL_ACTIVE);
482 nci_req_complete(ndev, err);
483 }
259} 484}
260 485
261static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev, 486static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
262 struct sk_buff *skb) 487 struct sk_buff *skb)
263{ 488{
264 struct nci_rf_deactivate_ntf *ntf = (void *) skb->data; 489 struct nci_rf_deactivate_ntf *ntf = (void *) skb->data;
265 490
266 pr_debug("entry, type 0x%x, reason 0x%x\n", ntf->type, ntf->reason); 491 pr_debug("entry, type 0x%x, reason 0x%x\n", ntf->type, ntf->reason);
267 492
268 clear_bit(NCI_POLL_ACTIVE, &ndev->flags);
269 ndev->target_active_prot = 0;
270
271 /* drop tx data queue */ 493 /* drop tx data queue */
272 skb_queue_purge(&ndev->tx_q); 494 skb_queue_purge(&ndev->tx_q);
273 495
@@ -280,6 +502,10 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
280 /* complete the data exchange transaction, if exists */ 502 /* complete the data exchange transaction, if exists */
281 if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags)) 503 if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
282 nci_data_exchange_complete(ndev, NULL, -EIO); 504 nci_data_exchange_complete(ndev, NULL, -EIO);
505
506 nci_clear_target_list(ndev);
507 atomic_set(&ndev->state, NCI_IDLE);
508 nci_req_complete(ndev, NCI_STATUS_OK);
283} 509}
284 510
285void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) 511void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
@@ -300,10 +526,18 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
300 nci_core_conn_credits_ntf_packet(ndev, skb); 526 nci_core_conn_credits_ntf_packet(ndev, skb);
301 break; 527 break;
302 528
529 case NCI_OP_CORE_GENERIC_ERROR_NTF:
530 nci_core_generic_error_ntf_packet(ndev, skb);
531 break;
532
303 case NCI_OP_CORE_INTF_ERROR_NTF: 533 case NCI_OP_CORE_INTF_ERROR_NTF:
304 nci_core_conn_intf_error_ntf_packet(ndev, skb); 534 nci_core_conn_intf_error_ntf_packet(ndev, skb);
305 break; 535 break;
306 536
537 case NCI_OP_RF_DISCOVER_NTF:
538 nci_rf_discover_ntf_packet(ndev, skb);
539 break;
540
307 case NCI_OP_RF_INTF_ACTIVATED_NTF: 541 case NCI_OP_RF_INTF_ACTIVATED_NTF:
308 nci_rf_intf_activated_ntf_packet(ndev, skb); 542 nci_rf_intf_activated_ntf_packet(ndev, skb);
309 break; 543 break;
diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c
index 2840ae2f3615..3003c3390e49 100644
--- a/net/nfc/nci/rsp.c
+++ b/net/nfc/nci/rsp.c
@@ -67,19 +67,18 @@ static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
67 ndev->num_supported_rf_interfaces = rsp_1->num_supported_rf_interfaces; 67 ndev->num_supported_rf_interfaces = rsp_1->num_supported_rf_interfaces;
68 68
69 if (ndev->num_supported_rf_interfaces > 69 if (ndev->num_supported_rf_interfaces >
70 NCI_MAX_SUPPORTED_RF_INTERFACES) { 70 NCI_MAX_SUPPORTED_RF_INTERFACES) {
71 ndev->num_supported_rf_interfaces = 71 ndev->num_supported_rf_interfaces =
72 NCI_MAX_SUPPORTED_RF_INTERFACES; 72 NCI_MAX_SUPPORTED_RF_INTERFACES;
73 } 73 }
74 74
75 memcpy(ndev->supported_rf_interfaces, 75 memcpy(ndev->supported_rf_interfaces,
76 rsp_1->supported_rf_interfaces, 76 rsp_1->supported_rf_interfaces,
77 ndev->num_supported_rf_interfaces); 77 ndev->num_supported_rf_interfaces);
78 78
79 rsp_2 = (void *) (skb->data + 6 + rsp_1->num_supported_rf_interfaces); 79 rsp_2 = (void *) (skb->data + 6 + rsp_1->num_supported_rf_interfaces);
80 80
81 ndev->max_logical_connections = 81 ndev->max_logical_connections = rsp_2->max_logical_connections;
82 rsp_2->max_logical_connections;
83 ndev->max_routing_table_size = 82 ndev->max_routing_table_size =
84 __le16_to_cpu(rsp_2->max_routing_table_size); 83 __le16_to_cpu(rsp_2->max_routing_table_size);
85 ndev->max_ctrl_pkt_payload_len = 84 ndev->max_ctrl_pkt_payload_len =
@@ -121,7 +120,7 @@ exit:
121} 120}
122 121
123static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev, 122static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev,
124 struct sk_buff *skb) 123 struct sk_buff *skb)
125{ 124{
126 __u8 status = skb->data[0]; 125 __u8 status = skb->data[0];
127 126
@@ -137,21 +136,37 @@ static void nci_rf_disc_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
137 pr_debug("status 0x%x\n", status); 136 pr_debug("status 0x%x\n", status);
138 137
139 if (status == NCI_STATUS_OK) 138 if (status == NCI_STATUS_OK)
140 set_bit(NCI_DISCOVERY, &ndev->flags); 139 atomic_set(&ndev->state, NCI_DISCOVERY);
141 140
142 nci_req_complete(ndev, status); 141 nci_req_complete(ndev, status);
143} 142}
144 143
145static void nci_rf_deactivate_rsp_packet(struct nci_dev *ndev, 144static void nci_rf_disc_select_rsp_packet(struct nci_dev *ndev,
146 struct sk_buff *skb) 145 struct sk_buff *skb)
147{ 146{
148 __u8 status = skb->data[0]; 147 __u8 status = skb->data[0];
149 148
150 pr_debug("status 0x%x\n", status); 149 pr_debug("status 0x%x\n", status);
151 150
152 clear_bit(NCI_DISCOVERY, &ndev->flags); 151 /* Complete the request on intf_activated_ntf or generic_error_ntf */
152 if (status != NCI_STATUS_OK)
153 nci_req_complete(ndev, status);
154}
153 155
154 nci_req_complete(ndev, status); 156static void nci_rf_deactivate_rsp_packet(struct nci_dev *ndev,
157 struct sk_buff *skb)
158{
159 __u8 status = skb->data[0];
160
161 pr_debug("status 0x%x\n", status);
162
163 /* If target was active, complete the request only in deactivate_ntf */
164 if ((status != NCI_STATUS_OK) ||
165 (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
166 nci_clear_target_list(ndev);
167 atomic_set(&ndev->state, NCI_IDLE);
168 nci_req_complete(ndev, status);
169 }
155} 170}
156 171
157void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) 172void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
@@ -187,6 +202,10 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
187 nci_rf_disc_rsp_packet(ndev, skb); 202 nci_rf_disc_rsp_packet(ndev, skb);
188 break; 203 break;
189 204
205 case NCI_OP_RF_DISCOVER_SELECT_RSP:
206 nci_rf_disc_select_rsp_packet(ndev, skb);
207 break;
208
190 case NCI_OP_RF_DEACTIVATE_RSP: 209 case NCI_OP_RF_DEACTIVATE_RSP:
191 nci_rf_deactivate_rsp_packet(ndev, skb); 210 nci_rf_deactivate_rsp_packet(ndev, skb);
192 break; 211 break;
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 6989dfa28ee2..6404052d6c07 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -48,28 +48,34 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
48 [NFC_ATTR_PROTOCOLS] = { .type = NLA_U32 }, 48 [NFC_ATTR_PROTOCOLS] = { .type = NLA_U32 },
49 [NFC_ATTR_COMM_MODE] = { .type = NLA_U8 }, 49 [NFC_ATTR_COMM_MODE] = { .type = NLA_U8 },
50 [NFC_ATTR_RF_MODE] = { .type = NLA_U8 }, 50 [NFC_ATTR_RF_MODE] = { .type = NLA_U8 },
51 [NFC_ATTR_DEVICE_POWERED] = { .type = NLA_U8 },
51}; 52};
52 53
53static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target, 54static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
54 struct netlink_callback *cb, int flags) 55 struct netlink_callback *cb, int flags)
55{ 56{
56 void *hdr; 57 void *hdr;
57 58
58 hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, 59 hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
59 &nfc_genl_family, flags, NFC_CMD_GET_TARGET); 60 &nfc_genl_family, flags, NFC_CMD_GET_TARGET);
60 if (!hdr) 61 if (!hdr)
61 return -EMSGSIZE; 62 return -EMSGSIZE;
62 63
63 genl_dump_check_consistent(cb, hdr, &nfc_genl_family); 64 genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
64 65
65 NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target->idx); 66 NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target->idx);
66 NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, 67 NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, target->supported_protocols);
67 target->supported_protocols);
68 NLA_PUT_U16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res); 68 NLA_PUT_U16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res);
69 NLA_PUT_U8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res); 69 NLA_PUT_U8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res);
70 if (target->nfcid1_len > 0) 70 if (target->nfcid1_len > 0)
71 NLA_PUT(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len, 71 NLA_PUT(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len,
72 target->nfcid1); 72 target->nfcid1);
73 if (target->sensb_res_len > 0)
74 NLA_PUT(msg, NFC_ATTR_TARGET_SENSB_RES, target->sensb_res_len,
75 target->sensb_res);
76 if (target->sensf_res_len > 0)
77 NLA_PUT(msg, NFC_ATTR_TARGET_SENSF_RES, target->sensf_res_len,
78 target->sensf_res);
73 79
74 return genlmsg_end(msg, hdr); 80 return genlmsg_end(msg, hdr);
75 81
@@ -85,9 +91,9 @@ static struct nfc_dev *__get_device_from_cb(struct netlink_callback *cb)
85 u32 idx; 91 u32 idx;
86 92
87 rc = nlmsg_parse(cb->nlh, GENL_HDRLEN + nfc_genl_family.hdrsize, 93 rc = nlmsg_parse(cb->nlh, GENL_HDRLEN + nfc_genl_family.hdrsize,
88 nfc_genl_family.attrbuf, 94 nfc_genl_family.attrbuf,
89 nfc_genl_family.maxattr, 95 nfc_genl_family.maxattr,
90 nfc_genl_policy); 96 nfc_genl_policy);
91 if (rc < 0) 97 if (rc < 0)
92 return ERR_PTR(rc); 98 return ERR_PTR(rc);
93 99
@@ -104,7 +110,7 @@ static struct nfc_dev *__get_device_from_cb(struct netlink_callback *cb)
104} 110}
105 111
106static int nfc_genl_dump_targets(struct sk_buff *skb, 112static int nfc_genl_dump_targets(struct sk_buff *skb,
107 struct netlink_callback *cb) 113 struct netlink_callback *cb)
108{ 114{
109 int i = cb->args[0]; 115 int i = cb->args[0];
110 struct nfc_dev *dev = (struct nfc_dev *) cb->args[1]; 116 struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
@@ -124,7 +130,7 @@ static int nfc_genl_dump_targets(struct sk_buff *skb,
124 130
125 while (i < dev->n_targets) { 131 while (i < dev->n_targets) {
126 rc = nfc_genl_send_target(skb, &dev->targets[i], cb, 132 rc = nfc_genl_send_target(skb, &dev->targets[i], cb,
127 NLM_F_MULTI); 133 NLM_F_MULTI);
128 if (rc < 0) 134 if (rc < 0)
129 break; 135 break;
130 136
@@ -160,7 +166,7 @@ int nfc_genl_targets_found(struct nfc_dev *dev)
160 return -ENOMEM; 166 return -ENOMEM;
161 167
162 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, 168 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
163 NFC_EVENT_TARGETS_FOUND); 169 NFC_EVENT_TARGETS_FOUND);
164 if (!hdr) 170 if (!hdr)
165 goto free_msg; 171 goto free_msg;
166 172
@@ -187,13 +193,14 @@ int nfc_genl_device_added(struct nfc_dev *dev)
187 return -ENOMEM; 193 return -ENOMEM;
188 194
189 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, 195 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
190 NFC_EVENT_DEVICE_ADDED); 196 NFC_EVENT_DEVICE_ADDED);
191 if (!hdr) 197 if (!hdr)
192 goto free_msg; 198 goto free_msg;
193 199
194 NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)); 200 NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev));
195 NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); 201 NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
196 NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols); 202 NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols);
203 NLA_PUT_U8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up);
197 204
198 genlmsg_end(msg, hdr); 205 genlmsg_end(msg, hdr);
199 206
@@ -218,7 +225,7 @@ int nfc_genl_device_removed(struct nfc_dev *dev)
218 return -ENOMEM; 225 return -ENOMEM;
219 226
220 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, 227 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
221 NFC_EVENT_DEVICE_REMOVED); 228 NFC_EVENT_DEVICE_REMOVED);
222 if (!hdr) 229 if (!hdr)
223 goto free_msg; 230 goto free_msg;
224 231
@@ -238,14 +245,14 @@ free_msg:
238} 245}
239 246
240static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev, 247static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
241 u32 pid, u32 seq, 248 u32 pid, u32 seq,
242 struct netlink_callback *cb, 249 struct netlink_callback *cb,
243 int flags) 250 int flags)
244{ 251{
245 void *hdr; 252 void *hdr;
246 253
247 hdr = genlmsg_put(msg, pid, seq, &nfc_genl_family, flags, 254 hdr = genlmsg_put(msg, pid, seq, &nfc_genl_family, flags,
248 NFC_CMD_GET_DEVICE); 255 NFC_CMD_GET_DEVICE);
249 if (!hdr) 256 if (!hdr)
250 return -EMSGSIZE; 257 return -EMSGSIZE;
251 258
@@ -255,6 +262,7 @@ static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
255 NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)); 262 NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev));
256 NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); 263 NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
257 NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols); 264 NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols);
265 NLA_PUT_U8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up);
258 266
259 return genlmsg_end(msg, hdr); 267 return genlmsg_end(msg, hdr);
260 268
@@ -264,7 +272,7 @@ nla_put_failure:
264} 272}
265 273
266static int nfc_genl_dump_devices(struct sk_buff *skb, 274static int nfc_genl_dump_devices(struct sk_buff *skb,
267 struct netlink_callback *cb) 275 struct netlink_callback *cb)
268{ 276{
269 struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; 277 struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
270 struct nfc_dev *dev = (struct nfc_dev *) cb->args[1]; 278 struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
@@ -291,8 +299,7 @@ static int nfc_genl_dump_devices(struct sk_buff *skb,
291 int rc; 299 int rc;
292 300
293 rc = nfc_genl_send_device(skb, dev, NETLINK_CB(cb->skb).pid, 301 rc = nfc_genl_send_device(skb, dev, NETLINK_CB(cb->skb).pid,
294 cb->nlh->nlmsg_seq, 302 cb->nlh->nlmsg_seq, cb, NLM_F_MULTI);
295 cb, NLM_F_MULTI);
296 if (rc < 0) 303 if (rc < 0)
297 break; 304 break;
298 305
@@ -317,7 +324,7 @@ static int nfc_genl_dump_devices_done(struct netlink_callback *cb)
317} 324}
318 325
319int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx, 326int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
320 u8 comm_mode, u8 rf_mode) 327 u8 comm_mode, u8 rf_mode)
321{ 328{
322 struct sk_buff *msg; 329 struct sk_buff *msg;
323 void *hdr; 330 void *hdr;
@@ -328,8 +335,7 @@ int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
328 if (!msg) 335 if (!msg)
329 return -ENOMEM; 336 return -ENOMEM;
330 337
331 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, 338 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_CMD_DEP_LINK_UP);
332 NFC_CMD_DEP_LINK_UP);
333 if (!hdr) 339 if (!hdr)
334 goto free_msg; 340 goto free_msg;
335 341
@@ -366,7 +372,7 @@ int nfc_genl_dep_link_down_event(struct nfc_dev *dev)
366 return -ENOMEM; 372 return -ENOMEM;
367 373
368 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, 374 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
369 NFC_CMD_DEP_LINK_DOWN); 375 NFC_CMD_DEP_LINK_DOWN);
370 if (!hdr) 376 if (!hdr)
371 goto free_msg; 377 goto free_msg;
372 378
@@ -408,7 +414,7 @@ static int nfc_genl_get_device(struct sk_buff *skb, struct genl_info *info)
408 } 414 }
409 415
410 rc = nfc_genl_send_device(msg, dev, info->snd_pid, info->snd_seq, 416 rc = nfc_genl_send_device(msg, dev, info->snd_pid, info->snd_seq,
411 NULL, 0); 417 NULL, 0);
412 if (rc < 0) 418 if (rc < 0)
413 goto out_free; 419 goto out_free;
414 420
@@ -475,7 +481,7 @@ static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
475 pr_debug("Poll start\n"); 481 pr_debug("Poll start\n");
476 482
477 if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || 483 if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
478 !info->attrs[NFC_ATTR_PROTOCOLS]) 484 !info->attrs[NFC_ATTR_PROTOCOLS])
479 return -EINVAL; 485 return -EINVAL;
480 486
481 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); 487 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
@@ -533,13 +539,12 @@ static int nfc_genl_dep_link_up(struct sk_buff *skb, struct genl_info *info)
533 struct nfc_dev *dev; 539 struct nfc_dev *dev;
534 int rc, tgt_idx; 540 int rc, tgt_idx;
535 u32 idx; 541 u32 idx;
536 u8 comm, rf; 542 u8 comm;
537 543
538 pr_debug("DEP link up\n"); 544 pr_debug("DEP link up\n");
539 545
540 if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || 546 if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
541 !info->attrs[NFC_ATTR_COMM_MODE] || 547 !info->attrs[NFC_ATTR_COMM_MODE])
542 !info->attrs[NFC_ATTR_RF_MODE])
543 return -EINVAL; 548 return -EINVAL;
544 549
545 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); 550 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
@@ -549,19 +554,15 @@ static int nfc_genl_dep_link_up(struct sk_buff *skb, struct genl_info *info)
549 tgt_idx = nla_get_u32(info->attrs[NFC_ATTR_TARGET_INDEX]); 554 tgt_idx = nla_get_u32(info->attrs[NFC_ATTR_TARGET_INDEX]);
550 555
551 comm = nla_get_u8(info->attrs[NFC_ATTR_COMM_MODE]); 556 comm = nla_get_u8(info->attrs[NFC_ATTR_COMM_MODE]);
552 rf = nla_get_u8(info->attrs[NFC_ATTR_RF_MODE]);
553 557
554 if (comm != NFC_COMM_ACTIVE && comm != NFC_COMM_PASSIVE) 558 if (comm != NFC_COMM_ACTIVE && comm != NFC_COMM_PASSIVE)
555 return -EINVAL; 559 return -EINVAL;
556 560
557 if (rf != NFC_RF_INITIATOR && comm != NFC_RF_TARGET)
558 return -EINVAL;
559
560 dev = nfc_get_device(idx); 561 dev = nfc_get_device(idx);
561 if (!dev) 562 if (!dev)
562 return -ENODEV; 563 return -ENODEV;
563 564
564 rc = nfc_dep_link_up(dev, tgt_idx, comm, rf); 565 rc = nfc_dep_link_up(dev, tgt_idx, comm);
565 566
566 nfc_put_device(dev); 567 nfc_put_device(dev);
567 568
@@ -636,7 +637,7 @@ static struct genl_ops nfc_genl_ops[] = {
636}; 637};
637 638
638static int nfc_genl_rcv_nl_event(struct notifier_block *this, 639static int nfc_genl_rcv_nl_event(struct notifier_block *this,
639 unsigned long event, void *ptr) 640 unsigned long event, void *ptr)
640{ 641{
641 struct netlink_notify *n = ptr; 642 struct netlink_notify *n = ptr;
642 struct class_dev_iter iter; 643 struct class_dev_iter iter;
@@ -689,7 +690,7 @@ int __init nfc_genl_init(void)
689 int rc; 690 int rc;
690 691
691 rc = genl_register_family_with_ops(&nfc_genl_family, nfc_genl_ops, 692 rc = genl_register_family_with_ops(&nfc_genl_family, nfc_genl_ops,
692 ARRAY_SIZE(nfc_genl_ops)); 693 ARRAY_SIZE(nfc_genl_ops));
693 if (rc) 694 if (rc)
694 return rc; 695 return rc;
695 696
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index 6d28d75995b0..ec8794c1099c 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -32,7 +32,7 @@ struct nfc_protocol {
32 struct proto *proto; 32 struct proto *proto;
33 struct module *owner; 33 struct module *owner;
34 int (*create)(struct net *net, struct socket *sock, 34 int (*create)(struct net *net, struct socket *sock,
35 const struct nfc_protocol *nfc_proto); 35 const struct nfc_protocol *nfc_proto);
36}; 36};
37 37
38struct nfc_rawsock { 38struct nfc_rawsock {
@@ -54,7 +54,7 @@ void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
54int nfc_llcp_register_device(struct nfc_dev *dev); 54int nfc_llcp_register_device(struct nfc_dev *dev);
55void nfc_llcp_unregister_device(struct nfc_dev *dev); 55void nfc_llcp_unregister_device(struct nfc_dev *dev);
56int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len); 56int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len);
57u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, u8 *general_bytes_len); 57u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len);
58int __init nfc_llcp_init(void); 58int __init nfc_llcp_init(void);
59void nfc_llcp_exit(void); 59void nfc_llcp_exit(void);
60 60
@@ -65,7 +65,7 @@ static inline void nfc_llcp_mac_is_down(struct nfc_dev *dev)
65} 65}
66 66
67static inline void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx, 67static inline void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
68 u8 comm_mode, u8 rf_mode) 68 u8 comm_mode, u8 rf_mode)
69{ 69{
70} 70}
71 71
@@ -78,7 +78,8 @@ static inline void nfc_llcp_unregister_device(struct nfc_dev *dev)
78{ 78{
79} 79}
80 80
81static inline int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len) 81static inline int nfc_llcp_set_remote_gb(struct nfc_dev *dev,
82 u8 *gb, u8 gb_len)
82{ 83{
83 return 0; 84 return 0;
84} 85}
@@ -160,8 +161,7 @@ int nfc_start_poll(struct nfc_dev *dev, u32 protocols);
160 161
161int nfc_stop_poll(struct nfc_dev *dev); 162int nfc_stop_poll(struct nfc_dev *dev);
162 163
163int nfc_dep_link_up(struct nfc_dev *dev, int target_idx, 164int nfc_dep_link_up(struct nfc_dev *dev, int target_idx, u8 comm_mode);
164 u8 comm_mode, u8 rf_mode);
165 165
166int nfc_dep_link_down(struct nfc_dev *dev); 166int nfc_dep_link_down(struct nfc_dev *dev);
167 167
@@ -169,9 +169,7 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol);
169 169
170int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx); 170int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx);
171 171
172int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, 172int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
173 struct sk_buff *skb, 173 data_exchange_cb_t cb, void *cb_context);
174 data_exchange_cb_t cb,
175 void *cb_context);
176 174
177#endif /* __LOCAL_NFC_H */ 175#endif /* __LOCAL_NFC_H */
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 2e2f8c6a61fe..5a839ceb2e82 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -63,7 +63,7 @@ static int rawsock_release(struct socket *sock)
63} 63}
64 64
65static int rawsock_connect(struct socket *sock, struct sockaddr *_addr, 65static int rawsock_connect(struct socket *sock, struct sockaddr *_addr,
66 int len, int flags) 66 int len, int flags)
67{ 67{
68 struct sock *sk = sock->sk; 68 struct sock *sk = sock->sk;
69 struct sockaddr_nfc *addr = (struct sockaddr_nfc *)_addr; 69 struct sockaddr_nfc *addr = (struct sockaddr_nfc *)_addr;
@@ -73,7 +73,7 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr,
73 pr_debug("sock=%p sk=%p flags=%d\n", sock, sk, flags); 73 pr_debug("sock=%p sk=%p flags=%d\n", sock, sk, flags);
74 74
75 if (!addr || len < sizeof(struct sockaddr_nfc) || 75 if (!addr || len < sizeof(struct sockaddr_nfc) ||
76 addr->sa_family != AF_NFC) 76 addr->sa_family != AF_NFC)
77 return -EINVAL; 77 return -EINVAL;
78 78
79 pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n", 79 pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n",
@@ -92,18 +92,6 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr,
92 goto error; 92 goto error;
93 } 93 }
94 94
95 if (addr->target_idx > dev->target_idx - 1 ||
96 addr->target_idx < dev->target_idx - dev->n_targets) {
97 rc = -EINVAL;
98 goto error;
99 }
100
101 if (addr->target_idx > dev->target_idx - 1 ||
102 addr->target_idx < dev->target_idx - dev->n_targets) {
103 rc = -EINVAL;
104 goto error;
105 }
106
107 rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol); 95 rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol);
108 if (rc) 96 if (rc)
109 goto put_dev; 97 goto put_dev;
@@ -132,7 +120,7 @@ static int rawsock_add_header(struct sk_buff *skb)
132} 120}
133 121
134static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb, 122static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb,
135 int err) 123 int err)
136{ 124{
137 struct sock *sk = (struct sock *) context; 125 struct sock *sk = (struct sock *) context;
138 126
@@ -185,7 +173,7 @@ static void rawsock_tx_work(struct work_struct *work)
185 173
186 sock_hold(sk); 174 sock_hold(sk);
187 rc = nfc_data_exchange(dev, target_idx, skb, 175 rc = nfc_data_exchange(dev, target_idx, skb,
188 rawsock_data_exchange_complete, sk); 176 rawsock_data_exchange_complete, sk);
189 if (rc) { 177 if (rc) {
190 rawsock_report_error(sk, rc); 178 rawsock_report_error(sk, rc);
191 sock_put(sk); 179 sock_put(sk);
@@ -193,7 +181,7 @@ static void rawsock_tx_work(struct work_struct *work)
193} 181}
194 182
195static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock, 183static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
196 struct msghdr *msg, size_t len) 184 struct msghdr *msg, size_t len)
197{ 185{
198 struct sock *sk = sock->sk; 186 struct sock *sk = sock->sk;
199 struct nfc_dev *dev = nfc_rawsock(sk)->dev; 187 struct nfc_dev *dev = nfc_rawsock(sk)->dev;
@@ -230,7 +218,7 @@ static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
230} 218}
231 219
232static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock, 220static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock,
233 struct msghdr *msg, size_t len, int flags) 221 struct msghdr *msg, size_t len, int flags)
234{ 222{
235 int noblock = flags & MSG_DONTWAIT; 223 int noblock = flags & MSG_DONTWAIT;
236 struct sock *sk = sock->sk; 224 struct sock *sk = sock->sk;
@@ -286,7 +274,7 @@ static void rawsock_destruct(struct sock *sk)
286 274
287 if (sk->sk_state == TCP_ESTABLISHED) { 275 if (sk->sk_state == TCP_ESTABLISHED) {
288 nfc_deactivate_target(nfc_rawsock(sk)->dev, 276 nfc_deactivate_target(nfc_rawsock(sk)->dev,
289 nfc_rawsock(sk)->target_idx); 277 nfc_rawsock(sk)->target_idx);
290 nfc_put_device(nfc_rawsock(sk)->dev); 278 nfc_put_device(nfc_rawsock(sk)->dev);
291 } 279 }
292 280
@@ -299,7 +287,7 @@ static void rawsock_destruct(struct sock *sk)
299} 287}
300 288
301static int rawsock_create(struct net *net, struct socket *sock, 289static int rawsock_create(struct net *net, struct socket *sock,
302 const struct nfc_protocol *nfc_proto) 290 const struct nfc_protocol *nfc_proto)
303{ 291{
304 struct sock *sk; 292 struct sock *sk;
305 293
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 322b8d206693..b6b1d7daa3cb 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -66,6 +66,7 @@ static int internal_dev_mac_addr(struct net_device *dev, void *p)
66 66
67 if (!is_valid_ether_addr(addr->sa_data)) 67 if (!is_valid_ether_addr(addr->sa_data))
68 return -EADDRNOTAVAIL; 68 return -EADDRNOTAVAIL;
69 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
69 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 70 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
70 return 0; 71 return 0;
71} 72}
@@ -145,7 +146,7 @@ static void do_setup(struct net_device *netdev)
145 netdev->vlan_features = netdev->features; 146 netdev->vlan_features = netdev->features;
146 netdev->features |= NETIF_F_HW_VLAN_TX; 147 netdev->features |= NETIF_F_HW_VLAN_TX;
147 netdev->hw_features = netdev->features & ~NETIF_F_LLTX; 148 netdev->hw_features = netdev->features & ~NETIF_F_LLTX;
148 random_ether_addr(netdev->dev_addr); 149 eth_hw_addr_random(netdev);
149} 150}
150 151
151static struct vport *internal_dev_create(const struct vport_parms *parms) 152static struct vport *internal_dev_create(const struct vport_parms *parms)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 2dbb32b988c4..ae2d484416dd 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1459,6 +1459,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
1459 struct net_device *dev; 1459 struct net_device *dev;
1460 __be16 proto = 0; 1460 __be16 proto = 0;
1461 int err; 1461 int err;
1462 int extra_len = 0;
1462 1463
1463 /* 1464 /*
1464 * Get and verify the address. 1465 * Get and verify the address.
@@ -1493,8 +1494,16 @@ retry:
1493 * raw protocol and you must do your own fragmentation at this level. 1494 * raw protocol and you must do your own fragmentation at this level.
1494 */ 1495 */
1495 1496
1497 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1498 if (!netif_supports_nofcs(dev)) {
1499 err = -EPROTONOSUPPORT;
1500 goto out_unlock;
1501 }
1502 extra_len = 4; /* We're doing our own CRC */
1503 }
1504
1496 err = -EMSGSIZE; 1505 err = -EMSGSIZE;
1497 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN) 1506 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1498 goto out_unlock; 1507 goto out_unlock;
1499 1508
1500 if (!skb) { 1509 if (!skb) {
@@ -1526,7 +1535,7 @@ retry:
1526 goto retry; 1535 goto retry;
1527 } 1536 }
1528 1537
1529 if (len > (dev->mtu + dev->hard_header_len)) { 1538 if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
1530 /* Earlier code assumed this would be a VLAN pkt, 1539 /* Earlier code assumed this would be a VLAN pkt,
1531 * double-check this now that we have the actual 1540 * double-check this now that we have the actual
1532 * packet in hand. 1541 * packet in hand.
@@ -1548,6 +1557,9 @@ retry:
1548 if (err < 0) 1557 if (err < 0)
1549 goto out_unlock; 1558 goto out_unlock;
1550 1559
1560 if (unlikely(extra_len == 4))
1561 skb->no_fcs = 1;
1562
1551 dev_queue_xmit(skb); 1563 dev_queue_xmit(skb);
1552 rcu_read_unlock(); 1564 rcu_read_unlock();
1553 return len; 1565 return len;
@@ -2209,6 +2221,7 @@ static int packet_snd(struct socket *sock,
2209 struct packet_sock *po = pkt_sk(sk); 2221 struct packet_sock *po = pkt_sk(sk);
2210 unsigned short gso_type = 0; 2222 unsigned short gso_type = 0;
2211 int hlen, tlen; 2223 int hlen, tlen;
2224 int extra_len = 0;
2212 2225
2213 /* 2226 /*
2214 * Get and verify the address. 2227 * Get and verify the address.
@@ -2288,8 +2301,16 @@ static int packet_snd(struct socket *sock,
2288 } 2301 }
2289 } 2302 }
2290 2303
2304 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2305 if (!netif_supports_nofcs(dev)) {
2306 err = -EPROTONOSUPPORT;
2307 goto out_unlock;
2308 }
2309 extra_len = 4; /* We're doing our own CRC */
2310 }
2311
2291 err = -EMSGSIZE; 2312 err = -EMSGSIZE;
2292 if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN)) 2313 if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2293 goto out_unlock; 2314 goto out_unlock;
2294 2315
2295 err = -ENOBUFS; 2316 err = -ENOBUFS;
@@ -2315,7 +2336,7 @@ static int packet_snd(struct socket *sock,
2315 if (err < 0) 2336 if (err < 0)
2316 goto out_free; 2337 goto out_free;
2317 2338
2318 if (!gso_type && (len > dev->mtu + reserve)) { 2339 if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
2319 /* Earlier code assumed this would be a VLAN pkt, 2340 /* Earlier code assumed this would be a VLAN pkt,
2320 * double-check this now that we have the actual 2341 * double-check this now that we have the actual
2321 * packet in hand. 2342 * packet in hand.
@@ -2353,6 +2374,9 @@ static int packet_snd(struct socket *sock,
2353 len += vnet_hdr_len; 2374 len += vnet_hdr_len;
2354 } 2375 }
2355 2376
2377 if (unlikely(extra_len == 4))
2378 skb->no_fcs = 1;
2379
2356 /* 2380 /*
2357 * Now send it 2381 * Now send it
2358 */ 2382 */
diff --git a/net/rds/send.c b/net/rds/send.c
index e2d63c59e7c2..96531d4033a2 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -935,7 +935,6 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
935 /* Mirror Linux UDP mirror of BSD error message compatibility */ 935 /* Mirror Linux UDP mirror of BSD error message compatibility */
936 /* XXX: Perhaps MSG_MORE someday */ 936 /* XXX: Perhaps MSG_MORE someday */
937 if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) { 937 if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
938 printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
939 ret = -EOPNOTSUPP; 938 ret = -EOPNOTSUPP;
940 goto out; 939 goto out;
941 } 940 }
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 2590e91b3289..75b58f81d53d 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -260,6 +260,32 @@ config NET_SCH_INGRESS
260 To compile this code as a module, choose M here: the 260 To compile this code as a module, choose M here: the
261 module will be called sch_ingress. 261 module will be called sch_ingress.
262 262
263config NET_SCH_PLUG
264 tristate "Plug network traffic until release (PLUG)"
265 ---help---
266
267 This queuing discipline allows userspace to plug/unplug a network
268 output queue, using the netlink interface. When it receives an
269 enqueue command it inserts a plug into the outbound queue that
270 causes following packets to enqueue until a dequeue command arrives
271 over netlink, causing the plug to be removed and resuming the normal
272 packet flow.
273
274 This module also provides a generic "network output buffering"
275 functionality (aka output commit), wherein upon arrival of a dequeue
276 command, only packets up to the first plug are released for delivery.
277 The Remus HA project uses this module to enable speculative execution
278 of virtual machines by allowing the generated network output to be rolled
279 back if needed.
280
281 For more information, please refer to http://wiki.xensource.com/xenwiki/Remus
282
283 Say Y here if you are using this kernel for Xen dom0 and
284 want to protect Xen guests with Remus.
285
286 To compile this code as a module, choose M here: the
287 module will be called sch_plug.
288
263comment "Classification" 289comment "Classification"
264 290
265config NET_CLS 291config NET_CLS
diff --git a/net/sched/Makefile b/net/sched/Makefile
index dc5889c0a15a..8cdf4e2b51d3 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_NET_SCH_MULTIQ) += sch_multiq.o
33obj-$(CONFIG_NET_SCH_ATM) += sch_atm.o 33obj-$(CONFIG_NET_SCH_ATM) += sch_atm.o
34obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o 34obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o
35obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o 35obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o
36obj-$(CONFIG_NET_SCH_PLUG) += sch_plug.o
36obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o 37obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o
37obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o 38obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
38obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o 39obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
new file mode 100644
index 000000000000..89f8fcf73f18
--- /dev/null
+++ b/net/sched/sch_plug.c
@@ -0,0 +1,233 @@
1/*
2 * sch_plug.c Queue traffic until an explicit release command
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * There are two ways to use this qdisc:
10 * 1. A simple "instantaneous" plug/unplug operation, by issuing an alternating
11 * sequence of TCQ_PLUG_BUFFER & TCQ_PLUG_RELEASE_INDEFINITE commands.
12 *
13 * 2. For network output buffering (a.k.a output commit) functionality.
14 * Output commit property is commonly used by applications using checkpoint
15 * based fault-tolerance to ensure that the checkpoint from which a system
16 * is being restored is consistent w.r.t outside world.
17 *
18 * Consider for e.g. Remus - a Virtual Machine checkpointing system,
19 * wherein a VM is checkpointed, say every 50ms. The checkpoint is replicated
20 * asynchronously to the backup host, while the VM continues executing the
21 * next epoch speculatively.
22 *
23 * The following is a typical sequence of output buffer operations:
24 * 1.At epoch i, start_buffer(i)
25 * 2. At end of epoch i (i.e. after 50ms):
26 * 2.1 Stop VM and take checkpoint(i).
27 * 2.2 start_buffer(i+1) and Resume VM
28 * 3. While speculatively executing epoch(i+1), asynchronously replicate
29 * checkpoint(i) to backup host.
30 * 4. When checkpoint_ack(i) is received from backup, release_buffer(i)
31 * Thus, this Qdisc would receive the following sequence of commands:
32 * TCQ_PLUG_BUFFER (epoch i)
33 * .. TCQ_PLUG_BUFFER (epoch i+1)
34 * ....TCQ_PLUG_RELEASE_ONE (epoch i)
35 * ......TCQ_PLUG_BUFFER (epoch i+2)
36 * ........
37 */
38
39#include <linux/module.h>
40#include <linux/types.h>
41#include <linux/kernel.h>
42#include <linux/errno.h>
43#include <linux/netdevice.h>
44#include <linux/skbuff.h>
45#include <net/pkt_sched.h>
46
47/*
48 * State of the queue, when used for network output buffering:
49 *
50 * plug(i+1) plug(i) head
51 * ------------------+--------------------+---------------->
52 * | |
53 * | |
54 * pkts_current_epoch| pkts_last_epoch |pkts_to_release
55 * ----------------->|<--------+--------->|+--------------->
56 * v v
57 *
58 */
59
60struct plug_sched_data {
61 /* If true, the dequeue function releases all packets
62 * from head to end of the queue. The queue turns into
63 * a pass-through queue for newly arriving packets.
64 */
65 bool unplug_indefinite;
66
67 /* Queue Limit in bytes */
68 u32 limit;
69
70 /* Number of packets (output) from the current speculatively
71 * executing epoch.
72 */
73 u32 pkts_current_epoch;
74
75 /* Number of packets corresponding to the recently finished
76 * epoch. These will be released when we receive a
77 * TCQ_PLUG_RELEASE_ONE command. This command is typically
78 * issued after committing a checkpoint at the target.
79 */
80 u32 pkts_last_epoch;
81
82 /*
83 * Number of packets from the head of the queue, that can
84 * be released (committed checkpoint).
85 */
86 u32 pkts_to_release;
87};
88
89static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch)
90{
91 struct plug_sched_data *q = qdisc_priv(sch);
92
93 if (likely(sch->qstats.backlog + skb->len <= q->limit)) {
94 if (!q->unplug_indefinite)
95 q->pkts_current_epoch++;
96 return qdisc_enqueue_tail(skb, sch);
97 }
98
99 return qdisc_reshape_fail(skb, sch);
100}
101
102static struct sk_buff *plug_dequeue(struct Qdisc *sch)
103{
104 struct plug_sched_data *q = qdisc_priv(sch);
105
106 if (qdisc_is_throttled(sch))
107 return NULL;
108
109 if (!q->unplug_indefinite) {
110 if (!q->pkts_to_release) {
111 /* No more packets to dequeue. Block the queue
112 * and wait for the next release command.
113 */
114 qdisc_throttled(sch);
115 return NULL;
116 }
117 q->pkts_to_release--;
118 }
119
120 return qdisc_dequeue_head(sch);
121}
122
123static int plug_init(struct Qdisc *sch, struct nlattr *opt)
124{
125 struct plug_sched_data *q = qdisc_priv(sch);
126
127 q->pkts_current_epoch = 0;
128 q->pkts_last_epoch = 0;
129 q->pkts_to_release = 0;
130 q->unplug_indefinite = false;
131
132 if (opt == NULL) {
133 /* We will set a default limit of 100 pkts (~150kB)
134 * in case tx_queue_len is not available. The
135 * default value is completely arbitrary.
136 */
137 u32 pkt_limit = qdisc_dev(sch)->tx_queue_len ? : 100;
138 q->limit = pkt_limit * psched_mtu(qdisc_dev(sch));
139 } else {
140 struct tc_plug_qopt *ctl = nla_data(opt);
141
142 if (nla_len(opt) < sizeof(*ctl))
143 return -EINVAL;
144
145 q->limit = ctl->limit;
146 }
147
148 qdisc_throttled(sch);
149 return 0;
150}
151
152/* Receives 4 types of messages:
153 * TCQ_PLUG_BUFFER: Inset a plug into the queue and
154 * buffer any incoming packets
155 * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head
156 * to beginning of the next plug.
157 * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue.
158 * Stop buffering packets until the next TCQ_PLUG_BUFFER
159 * command is received (just act as a pass-thru queue).
160 * TCQ_PLUG_LIMIT: Increase/decrease queue size
161 */
162static int plug_change(struct Qdisc *sch, struct nlattr *opt)
163{
164 struct plug_sched_data *q = qdisc_priv(sch);
165 struct tc_plug_qopt *msg;
166
167 if (opt == NULL)
168 return -EINVAL;
169
170 msg = nla_data(opt);
171 if (nla_len(opt) < sizeof(*msg))
172 return -EINVAL;
173
174 switch (msg->action) {
175 case TCQ_PLUG_BUFFER:
176 /* Save size of the current buffer */
177 q->pkts_last_epoch = q->pkts_current_epoch;
178 q->pkts_current_epoch = 0;
179 if (q->unplug_indefinite)
180 qdisc_throttled(sch);
181 q->unplug_indefinite = false;
182 break;
183 case TCQ_PLUG_RELEASE_ONE:
184 /* Add packets from the last complete buffer to the
185 * packets to be released set.
186 */
187 q->pkts_to_release += q->pkts_last_epoch;
188 q->pkts_last_epoch = 0;
189 qdisc_unthrottled(sch);
190 netif_schedule_queue(sch->dev_queue);
191 break;
192 case TCQ_PLUG_RELEASE_INDEFINITE:
193 q->unplug_indefinite = true;
194 q->pkts_to_release = 0;
195 q->pkts_last_epoch = 0;
196 q->pkts_current_epoch = 0;
197 qdisc_unthrottled(sch);
198 netif_schedule_queue(sch->dev_queue);
199 break;
200 case TCQ_PLUG_LIMIT:
201 /* Limit is supplied in bytes */
202 q->limit = msg->limit;
203 break;
204 default:
205 return -EINVAL;
206 }
207
208 return 0;
209}
210
211static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
212 .id = "plug",
213 .priv_size = sizeof(struct plug_sched_data),
214 .enqueue = plug_enqueue,
215 .dequeue = plug_dequeue,
216 .peek = qdisc_peek_head,
217 .init = plug_init,
218 .change = plug_change,
219 .owner = THIS_MODULE,
220};
221
222static int __init plug_module_init(void)
223{
224 return register_qdisc(&plug_qdisc_ops);
225}
226
227static void __exit plug_module_exit(void)
228{
229 unregister_qdisc(&plug_qdisc_ops);
230}
231module_init(plug_module_init)
232module_exit(plug_module_exit)
233MODULE_LICENSE("GPL");
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 408ebd0e7330..06b42b7f5a02 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4170,14 +4170,16 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
4170} 4170}
4171 4171
4172/* Helper routine to branch off an association to a new socket. */ 4172/* Helper routine to branch off an association to a new socket. */
4173SCTP_STATIC int sctp_do_peeloff(struct sctp_association *asoc, 4173int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
4174 struct socket **sockp)
4175{ 4174{
4176 struct sock *sk = asoc->base.sk; 4175 struct sctp_association *asoc = sctp_id2assoc(sk, id);
4177 struct socket *sock; 4176 struct socket *sock;
4178 struct sctp_af *af; 4177 struct sctp_af *af;
4179 int err = 0; 4178 int err = 0;
4180 4179
4180 if (!asoc)
4181 return -EINVAL;
4182
4181 /* An association cannot be branched off from an already peeled-off 4183 /* An association cannot be branched off from an already peeled-off
4182 * socket, nor is this supported for tcp style sockets. 4184 * socket, nor is this supported for tcp style sockets.
4183 */ 4185 */
@@ -4206,13 +4208,13 @@ SCTP_STATIC int sctp_do_peeloff(struct sctp_association *asoc,
4206 4208
4207 return err; 4209 return err;
4208} 4210}
4211EXPORT_SYMBOL(sctp_do_peeloff);
4209 4212
4210static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen) 4213static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen)
4211{ 4214{
4212 sctp_peeloff_arg_t peeloff; 4215 sctp_peeloff_arg_t peeloff;
4213 struct socket *newsock; 4216 struct socket *newsock;
4214 int retval = 0; 4217 int retval = 0;
4215 struct sctp_association *asoc;
4216 4218
4217 if (len < sizeof(sctp_peeloff_arg_t)) 4219 if (len < sizeof(sctp_peeloff_arg_t))
4218 return -EINVAL; 4220 return -EINVAL;
@@ -4220,15 +4222,7 @@ static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval
4220 if (copy_from_user(&peeloff, optval, len)) 4222 if (copy_from_user(&peeloff, optval, len))
4221 return -EFAULT; 4223 return -EFAULT;
4222 4224
4223 asoc = sctp_id2assoc(sk, peeloff.associd); 4225 retval = sctp_do_peeloff(sk, peeloff.associd, &newsock);
4224 if (!asoc) {
4225 retval = -EINVAL;
4226 goto out;
4227 }
4228
4229 SCTP_DEBUG_PRINTK("%s: sk: %p asoc: %p\n", __func__, sk, asoc);
4230
4231 retval = sctp_do_peeloff(asoc, &newsock);
4232 if (retval < 0) 4226 if (retval < 0)
4233 goto out; 4227 goto out;
4234 4228
@@ -4239,8 +4233,8 @@ static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval
4239 goto out; 4233 goto out;
4240 } 4234 }
4241 4235
4242 SCTP_DEBUG_PRINTK("%s: sk: %p asoc: %p newsk: %p sd: %d\n", 4236 SCTP_DEBUG_PRINTK("%s: sk: %p newsk: %p sd: %d\n",
4243 __func__, sk, asoc, newsock->sk, retval); 4237 __func__, sk, newsock->sk, retval);
4244 4238
4245 /* Return the fd mapped to the new socket. */ 4239 /* Return the fd mapped to the new socket. */
4246 peeloff.sd = retval; 4240 peeloff.sd = retval;
diff --git a/net/socket.c b/net/socket.c
index 28a96af484b4..12a48d846223 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -181,7 +181,7 @@ static DEFINE_PER_CPU(int, sockets_in_use);
181 * invalid addresses -EFAULT is returned. On a success 0 is returned. 181 * invalid addresses -EFAULT is returned. On a success 0 is returned.
182 */ 182 */
183 183
184int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr) 184int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr)
185{ 185{
186 if (ulen < 0 || ulen > sizeof(struct sockaddr_storage)) 186 if (ulen < 0 || ulen > sizeof(struct sockaddr_storage))
187 return -EINVAL; 187 return -EINVAL;
@@ -209,7 +209,7 @@ int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr)
209 * specified. Zero is returned for a success. 209 * specified. Zero is returned for a success.
210 */ 210 */
211 211
212static int move_addr_to_user(struct sockaddr *kaddr, int klen, 212static int move_addr_to_user(struct sockaddr_storage *kaddr, int klen,
213 void __user *uaddr, int __user *ulen) 213 void __user *uaddr, int __user *ulen)
214{ 214{
215 int err; 215 int err;
@@ -1449,7 +1449,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
1449 1449
1450 sock = sockfd_lookup_light(fd, &err, &fput_needed); 1450 sock = sockfd_lookup_light(fd, &err, &fput_needed);
1451 if (sock) { 1451 if (sock) {
1452 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address); 1452 err = move_addr_to_kernel(umyaddr, addrlen, &address);
1453 if (err >= 0) { 1453 if (err >= 0) {
1454 err = security_socket_bind(sock, 1454 err = security_socket_bind(sock,
1455 (struct sockaddr *)&address, 1455 (struct sockaddr *)&address,
@@ -1556,7 +1556,7 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
1556 err = -ECONNABORTED; 1556 err = -ECONNABORTED;
1557 goto out_fd; 1557 goto out_fd;
1558 } 1558 }
1559 err = move_addr_to_user((struct sockaddr *)&address, 1559 err = move_addr_to_user(&address,
1560 len, upeer_sockaddr, upeer_addrlen); 1560 len, upeer_sockaddr, upeer_addrlen);
1561 if (err < 0) 1561 if (err < 0)
1562 goto out_fd; 1562 goto out_fd;
@@ -1605,7 +1605,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
1605 sock = sockfd_lookup_light(fd, &err, &fput_needed); 1605 sock = sockfd_lookup_light(fd, &err, &fput_needed);
1606 if (!sock) 1606 if (!sock)
1607 goto out; 1607 goto out;
1608 err = move_addr_to_kernel(uservaddr, addrlen, (struct sockaddr *)&address); 1608 err = move_addr_to_kernel(uservaddr, addrlen, &address);
1609 if (err < 0) 1609 if (err < 0)
1610 goto out_put; 1610 goto out_put;
1611 1611
@@ -1645,7 +1645,7 @@ SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
1645 err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 0); 1645 err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 0);
1646 if (err) 1646 if (err)
1647 goto out_put; 1647 goto out_put;
1648 err = move_addr_to_user((struct sockaddr *)&address, len, usockaddr, usockaddr_len); 1648 err = move_addr_to_user(&address, len, usockaddr, usockaddr_len);
1649 1649
1650out_put: 1650out_put:
1651 fput_light(sock->file, fput_needed); 1651 fput_light(sock->file, fput_needed);
@@ -1677,7 +1677,7 @@ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
1677 sock->ops->getname(sock, (struct sockaddr *)&address, &len, 1677 sock->ops->getname(sock, (struct sockaddr *)&address, &len,
1678 1); 1678 1);
1679 if (!err) 1679 if (!err)
1680 err = move_addr_to_user((struct sockaddr *)&address, len, usockaddr, 1680 err = move_addr_to_user(&address, len, usockaddr,
1681 usockaddr_len); 1681 usockaddr_len);
1682 fput_light(sock->file, fput_needed); 1682 fput_light(sock->file, fput_needed);
1683 } 1683 }
@@ -1716,7 +1716,7 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
1716 msg.msg_controllen = 0; 1716 msg.msg_controllen = 0;
1717 msg.msg_namelen = 0; 1717 msg.msg_namelen = 0;
1718 if (addr) { 1718 if (addr) {
1719 err = move_addr_to_kernel(addr, addr_len, (struct sockaddr *)&address); 1719 err = move_addr_to_kernel(addr, addr_len, &address);
1720 if (err < 0) 1720 if (err < 0)
1721 goto out_put; 1721 goto out_put;
1722 msg.msg_name = (struct sockaddr *)&address; 1722 msg.msg_name = (struct sockaddr *)&address;
@@ -1779,7 +1779,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
1779 err = sock_recvmsg(sock, &msg, size, flags); 1779 err = sock_recvmsg(sock, &msg, size, flags);
1780 1780
1781 if (err >= 0 && addr != NULL) { 1781 if (err >= 0 && addr != NULL) {
1782 err2 = move_addr_to_user((struct sockaddr *)&address, 1782 err2 = move_addr_to_user(&address,
1783 msg.msg_namelen, addr, addr_len); 1783 msg.msg_namelen, addr, addr_len);
1784 if (err2 < 0) 1784 if (err2 < 0)
1785 err = err2; 1785 err = err2;
@@ -1933,13 +1933,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
1933 1933
1934 /* This will also move the address data into kernel space */ 1934 /* This will also move the address data into kernel space */
1935 if (MSG_CMSG_COMPAT & flags) { 1935 if (MSG_CMSG_COMPAT & flags) {
1936 err = verify_compat_iovec(msg_sys, iov, 1936 err = verify_compat_iovec(msg_sys, iov, &address, VERIFY_READ);
1937 (struct sockaddr *)&address,
1938 VERIFY_READ);
1939 } else 1937 } else
1940 err = verify_iovec(msg_sys, iov, 1938 err = verify_iovec(msg_sys, iov, &address, VERIFY_READ);
1941 (struct sockaddr *)&address,
1942 VERIFY_READ);
1943 if (err < 0) 1939 if (err < 0)
1944 goto out_freeiov; 1940 goto out_freeiov;
1945 total_len = err; 1941 total_len = err;
@@ -2143,13 +2139,9 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
2143 uaddr = (__force void __user *)msg_sys->msg_name; 2139 uaddr = (__force void __user *)msg_sys->msg_name;
2144 uaddr_len = COMPAT_NAMELEN(msg); 2140 uaddr_len = COMPAT_NAMELEN(msg);
2145 if (MSG_CMSG_COMPAT & flags) { 2141 if (MSG_CMSG_COMPAT & flags) {
2146 err = verify_compat_iovec(msg_sys, iov, 2142 err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
2147 (struct sockaddr *)&addr,
2148 VERIFY_WRITE);
2149 } else 2143 } else
2150 err = verify_iovec(msg_sys, iov, 2144 err = verify_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
2151 (struct sockaddr *)&addr,
2152 VERIFY_WRITE);
2153 if (err < 0) 2145 if (err < 0)
2154 goto out_freeiov; 2146 goto out_freeiov;
2155 total_len = err; 2147 total_len = err;
@@ -2166,7 +2158,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
2166 len = err; 2158 len = err;
2167 2159
2168 if (uaddr != NULL) { 2160 if (uaddr != NULL) {
2169 err = move_addr_to_user((struct sockaddr *)&addr, 2161 err = move_addr_to_user(&addr,
2170 msg_sys->msg_namelen, uaddr, 2162 msg_sys->msg_namelen, uaddr,
2171 uaddr_len); 2163 uaddr_len);
2172 if (err < 0) 2164 if (err < 0)
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 8eb87b11d100..e00441a2092f 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -157,39 +157,14 @@ u32 tipc_bclink_get_last_sent(void)
157 return bcl->fsm_msg_cnt; 157 return bcl->fsm_msg_cnt;
158} 158}
159 159
160/** 160static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
161 * bclink_set_gap - set gap according to contents of current deferred pkt queue
162 *
163 * Called with 'node' locked, bc_lock unlocked
164 */
165
166static void bclink_set_gap(struct tipc_node *n_ptr)
167{
168 struct sk_buff *buf = n_ptr->bclink.deferred_head;
169
170 n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
171 mod(n_ptr->bclink.last_in);
172 if (unlikely(buf != NULL))
173 n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
174}
175
176/**
177 * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
178 *
179 * This mechanism endeavours to prevent all nodes in network from trying
180 * to ACK or NACK at the same time.
181 *
182 * Note: TIPC uses a different trigger to distribute ACKs than it does to
183 * distribute NACKs, but tries to use the same spacing (divide by 16).
184 */
185
186static int bclink_ack_allowed(u32 n)
187{ 161{
188 return (n % TIPC_MIN_LINK_WIN) == tipc_own_tag; 162 node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
163 seqno : node->bclink.last_sent;
189} 164}
190 165
191 166
192/** 167/*
193 * tipc_bclink_retransmit_to - get most recent node to request retransmission 168 * tipc_bclink_retransmit_to - get most recent node to request retransmission
194 * 169 *
195 * Called with bc_lock locked 170 * Called with bc_lock locked
@@ -281,7 +256,7 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
281 if (bcbuf_acks(crs) == 0) { 256 if (bcbuf_acks(crs) == 0) {
282 bcl->first_out = next; 257 bcl->first_out = next;
283 bcl->out_queue_size--; 258 bcl->out_queue_size--;
284 buf_discard(crs); 259 kfree_skb(crs);
285 released = 1; 260 released = 1;
286 } 261 }
287 crs = next; 262 crs = next;
@@ -300,140 +275,94 @@ exit:
300 spin_unlock_bh(&bc_lock); 275 spin_unlock_bh(&bc_lock);
301} 276}
302 277
303/** 278/*
304 * bclink_send_ack - unicast an ACK msg 279 * tipc_bclink_update_link_state - update broadcast link state
305 * 280 *
306 * tipc_net_lock and node lock set 281 * tipc_net_lock and node lock set
307 */ 282 */
308 283
309static void bclink_send_ack(struct tipc_node *n_ptr) 284void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
310{ 285{
311 struct tipc_link *l_ptr = n_ptr->active_links[n_ptr->addr & 1]; 286 struct sk_buff *buf;
312 287
313 if (l_ptr != NULL) 288 /* Ignore "stale" link state info */
314 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
315}
316 289
317/** 290 if (less_eq(last_sent, n_ptr->bclink.last_in))
318 * bclink_send_nack- broadcast a NACK msg 291 return;
319 *
320 * tipc_net_lock and node lock set
321 */
322 292
323static void bclink_send_nack(struct tipc_node *n_ptr) 293 /* Update link synchronization state; quit if in sync */
324{ 294
325 struct sk_buff *buf; 295 bclink_update_last_sent(n_ptr, last_sent);
326 struct tipc_msg *msg; 296
297 if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
298 return;
299
300 /* Update out-of-sync state; quit if loss is still unconfirmed */
301
302 if ((++n_ptr->bclink.oos_state) == 1) {
303 if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
304 return;
305 n_ptr->bclink.oos_state++;
306 }
327 307
328 if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to)) 308 /* Don't NACK if one has been recently sent (or seen) */
309
310 if (n_ptr->bclink.oos_state & 0x1)
329 return; 311 return;
330 312
313 /* Send NACK */
314
331 buf = tipc_buf_acquire(INT_H_SIZE); 315 buf = tipc_buf_acquire(INT_H_SIZE);
332 if (buf) { 316 if (buf) {
333 msg = buf_msg(buf); 317 struct tipc_msg *msg = buf_msg(buf);
318
334 tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, 319 tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
335 INT_H_SIZE, n_ptr->addr); 320 INT_H_SIZE, n_ptr->addr);
336 msg_set_non_seq(msg, 1); 321 msg_set_non_seq(msg, 1);
337 msg_set_mc_netid(msg, tipc_net_id); 322 msg_set_mc_netid(msg, tipc_net_id);
338 msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in)); 323 msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
339 msg_set_bcgap_after(msg, n_ptr->bclink.gap_after); 324 msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
340 msg_set_bcgap_to(msg, n_ptr->bclink.gap_to); 325 msg_set_bcgap_to(msg, n_ptr->bclink.deferred_head
341 msg_set_bcast_tag(msg, tipc_own_tag); 326 ? buf_seqno(n_ptr->bclink.deferred_head) - 1
327 : n_ptr->bclink.last_sent);
342 328
329 spin_lock_bh(&bc_lock);
343 tipc_bearer_send(&bcbearer->bearer, buf, NULL); 330 tipc_bearer_send(&bcbearer->bearer, buf, NULL);
344 bcl->stats.sent_nacks++; 331 bcl->stats.sent_nacks++;
345 buf_discard(buf); 332 spin_unlock_bh(&bc_lock);
346 333 kfree_skb(buf);
347 /*
348 * Ensure we doesn't send another NACK msg to the node
349 * until 16 more deferred messages arrive from it
350 * (i.e. helps prevent all nodes from NACK'ing at same time)
351 */
352 334
353 n_ptr->bclink.nack_sync = tipc_own_tag; 335 n_ptr->bclink.oos_state++;
354 } 336 }
355} 337}
356 338
357/** 339/*
358 * tipc_bclink_check_gap - send a NACK if a sequence gap exists 340 * bclink_peek_nack - monitor retransmission requests sent by other nodes
359 * 341 *
360 * tipc_net_lock and node lock set 342 * Delay any upcoming NACK by this node if another node has already
361 */ 343 * requested the first message this node is going to ask for.
362
363void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 last_sent)
364{
365 if (!n_ptr->bclink.supported ||
366 less_eq(last_sent, mod(n_ptr->bclink.last_in)))
367 return;
368
369 bclink_set_gap(n_ptr);
370 if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
371 n_ptr->bclink.gap_to = last_sent;
372 bclink_send_nack(n_ptr);
373}
374
375/**
376 * tipc_bclink_peek_nack - process a NACK msg meant for another node
377 * 344 *
378 * Only tipc_net_lock set. 345 * Only tipc_net_lock set.
379 */ 346 */
380 347
381static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to) 348static void bclink_peek_nack(struct tipc_msg *msg)
382{ 349{
383 struct tipc_node *n_ptr = tipc_node_find(dest); 350 struct tipc_node *n_ptr = tipc_node_find(msg_destnode(msg));
384 u32 my_after, my_to;
385 351
386 if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr))) 352 if (unlikely(!n_ptr))
387 return; 353 return;
354
388 tipc_node_lock(n_ptr); 355 tipc_node_lock(n_ptr);
389 /*
390 * Modify gap to suppress unnecessary NACKs from this node
391 */
392 my_after = n_ptr->bclink.gap_after;
393 my_to = n_ptr->bclink.gap_to;
394
395 if (less_eq(gap_after, my_after)) {
396 if (less(my_after, gap_to) && less(gap_to, my_to))
397 n_ptr->bclink.gap_after = gap_to;
398 else if (less_eq(my_to, gap_to))
399 n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
400 } else if (less_eq(gap_after, my_to)) {
401 if (less_eq(my_to, gap_to))
402 n_ptr->bclink.gap_to = gap_after;
403 } else {
404 /*
405 * Expand gap if missing bufs not in deferred queue:
406 */
407 struct sk_buff *buf = n_ptr->bclink.deferred_head;
408 u32 prev = n_ptr->bclink.gap_to;
409 356
410 for (; buf; buf = buf->next) { 357 if (n_ptr->bclink.supported &&
411 u32 seqno = buf_seqno(buf); 358 (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
359 (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
360 n_ptr->bclink.oos_state = 2;
412 361
413 if (mod(seqno - prev) != 1) {
414 buf = NULL;
415 break;
416 }
417 if (seqno == gap_after)
418 break;
419 prev = seqno;
420 }
421 if (buf == NULL)
422 n_ptr->bclink.gap_to = gap_after;
423 }
424 /*
425 * Some nodes may send a complementary NACK now:
426 */
427 if (bclink_ack_allowed(sender_tag + 1)) {
428 if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
429 bclink_send_nack(n_ptr);
430 bclink_set_gap(n_ptr);
431 }
432 }
433 tipc_node_unlock(n_ptr); 362 tipc_node_unlock(n_ptr);
434} 363}
435 364
436/** 365/*
437 * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster 366 * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
438 */ 367 */
439 368
@@ -445,7 +374,7 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
445 374
446 if (!bclink->bcast_nodes.count) { 375 if (!bclink->bcast_nodes.count) {
447 res = msg_data_sz(buf_msg(buf)); 376 res = msg_data_sz(buf_msg(buf));
448 buf_discard(buf); 377 kfree_skb(buf);
449 goto exit; 378 goto exit;
450 } 379 }
451 380
@@ -460,7 +389,33 @@ exit:
460 return res; 389 return res;
461} 390}
462 391
463/** 392/*
393 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
394 *
395 * Called with both sending node's lock and bc_lock taken.
396 */
397
398static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
399{
400 bclink_update_last_sent(node, seqno);
401 node->bclink.last_in = seqno;
402 node->bclink.oos_state = 0;
403 bcl->stats.recv_info++;
404
405 /*
406 * Unicast an ACK periodically, ensuring that
407 * all nodes in the cluster don't ACK at the same time
408 */
409
410 if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) {
411 tipc_link_send_proto_msg(
412 node->active_links[node->addr & 1],
413 STATE_MSG, 0, 0, 0, 0, 0);
414 bcl->stats.sent_acks++;
415 }
416}
417
418/*
464 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards 419 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
465 * 420 *
466 * tipc_net_lock is read_locked, no other locks set 421 * tipc_net_lock is read_locked, no other locks set
@@ -472,7 +427,7 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
472 struct tipc_node *node; 427 struct tipc_node *node;
473 u32 next_in; 428 u32 next_in;
474 u32 seqno; 429 u32 seqno;
475 struct sk_buff *deferred; 430 int deferred;
476 431
477 /* Screen out unwanted broadcast messages */ 432 /* Screen out unwanted broadcast messages */
478 433
@@ -487,6 +442,8 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
487 if (unlikely(!node->bclink.supported)) 442 if (unlikely(!node->bclink.supported))
488 goto unlock; 443 goto unlock;
489 444
445 /* Handle broadcast protocol message */
446
490 if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) { 447 if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
491 if (msg_type(msg) != STATE_MSG) 448 if (msg_type(msg) != STATE_MSG)
492 goto unlock; 449 goto unlock;
@@ -501,89 +458,118 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
501 spin_unlock_bh(&bc_lock); 458 spin_unlock_bh(&bc_lock);
502 } else { 459 } else {
503 tipc_node_unlock(node); 460 tipc_node_unlock(node);
504 tipc_bclink_peek_nack(msg_destnode(msg), 461 bclink_peek_nack(msg);
505 msg_bcast_tag(msg),
506 msg_bcgap_after(msg),
507 msg_bcgap_to(msg));
508 } 462 }
509 goto exit; 463 goto exit;
510 } 464 }
511 465
512 /* Handle in-sequence broadcast message */ 466 /* Handle in-sequence broadcast message */
513 467
514receive:
515 next_in = mod(node->bclink.last_in + 1);
516 seqno = msg_seqno(msg); 468 seqno = msg_seqno(msg);
469 next_in = mod(node->bclink.last_in + 1);
517 470
518 if (likely(seqno == next_in)) { 471 if (likely(seqno == next_in)) {
519 bcl->stats.recv_info++; 472receive:
520 node->bclink.last_in++; 473 /* Deliver message to destination */
521 bclink_set_gap(node); 474
522 if (unlikely(bclink_ack_allowed(seqno))) {
523 bclink_send_ack(node);
524 bcl->stats.sent_acks++;
525 }
526 if (likely(msg_isdata(msg))) { 475 if (likely(msg_isdata(msg))) {
476 spin_lock_bh(&bc_lock);
477 bclink_accept_pkt(node, seqno);
478 spin_unlock_bh(&bc_lock);
527 tipc_node_unlock(node); 479 tipc_node_unlock(node);
528 if (likely(msg_mcast(msg))) 480 if (likely(msg_mcast(msg)))
529 tipc_port_recv_mcast(buf, NULL); 481 tipc_port_recv_mcast(buf, NULL);
530 else 482 else
531 buf_discard(buf); 483 kfree_skb(buf);
532 } else if (msg_user(msg) == MSG_BUNDLER) { 484 } else if (msg_user(msg) == MSG_BUNDLER) {
485 spin_lock_bh(&bc_lock);
486 bclink_accept_pkt(node, seqno);
533 bcl->stats.recv_bundles++; 487 bcl->stats.recv_bundles++;
534 bcl->stats.recv_bundled += msg_msgcnt(msg); 488 bcl->stats.recv_bundled += msg_msgcnt(msg);
489 spin_unlock_bh(&bc_lock);
535 tipc_node_unlock(node); 490 tipc_node_unlock(node);
536 tipc_link_recv_bundle(buf); 491 tipc_link_recv_bundle(buf);
537 } else if (msg_user(msg) == MSG_FRAGMENTER) { 492 } else if (msg_user(msg) == MSG_FRAGMENTER) {
493 int ret = tipc_link_recv_fragment(&node->bclink.defragm,
494 &buf, &msg);
495 if (ret < 0)
496 goto unlock;
497 spin_lock_bh(&bc_lock);
498 bclink_accept_pkt(node, seqno);
538 bcl->stats.recv_fragments++; 499 bcl->stats.recv_fragments++;
539 if (tipc_link_recv_fragment(&node->bclink.defragm, 500 if (ret > 0)
540 &buf, &msg))
541 bcl->stats.recv_fragmented++; 501 bcl->stats.recv_fragmented++;
502 spin_unlock_bh(&bc_lock);
542 tipc_node_unlock(node); 503 tipc_node_unlock(node);
543 tipc_net_route_msg(buf); 504 tipc_net_route_msg(buf);
544 } else if (msg_user(msg) == NAME_DISTRIBUTOR) { 505 } else if (msg_user(msg) == NAME_DISTRIBUTOR) {
506 spin_lock_bh(&bc_lock);
507 bclink_accept_pkt(node, seqno);
508 spin_unlock_bh(&bc_lock);
545 tipc_node_unlock(node); 509 tipc_node_unlock(node);
546 tipc_named_recv(buf); 510 tipc_named_recv(buf);
547 } else { 511 } else {
512 spin_lock_bh(&bc_lock);
513 bclink_accept_pkt(node, seqno);
514 spin_unlock_bh(&bc_lock);
548 tipc_node_unlock(node); 515 tipc_node_unlock(node);
549 buf_discard(buf); 516 kfree_skb(buf);
550 } 517 }
551 buf = NULL; 518 buf = NULL;
519
520 /* Determine new synchronization state */
521
552 tipc_node_lock(node); 522 tipc_node_lock(node);
553 deferred = node->bclink.deferred_head; 523 if (unlikely(!tipc_node_is_up(node)))
554 if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) { 524 goto unlock;
555 buf = deferred; 525
556 msg = buf_msg(buf); 526 if (node->bclink.last_in == node->bclink.last_sent)
557 node->bclink.deferred_head = deferred->next; 527 goto unlock;
558 goto receive; 528
559 } 529 if (!node->bclink.deferred_head) {
560 } else if (less(next_in, seqno)) { 530 node->bclink.oos_state = 1;
561 u32 gap_after = node->bclink.gap_after; 531 goto unlock;
562 u32 gap_to = node->bclink.gap_to;
563
564 if (tipc_link_defer_pkt(&node->bclink.deferred_head,
565 &node->bclink.deferred_tail,
566 buf)) {
567 node->bclink.nack_sync++;
568 bcl->stats.deferred_recv++;
569 if (seqno == mod(gap_after + 1))
570 node->bclink.gap_after = seqno;
571 else if (less(gap_after, seqno) && less(seqno, gap_to))
572 node->bclink.gap_to = seqno;
573 } 532 }
533
534 msg = buf_msg(node->bclink.deferred_head);
535 seqno = msg_seqno(msg);
536 next_in = mod(next_in + 1);
537 if (seqno != next_in)
538 goto unlock;
539
540 /* Take in-sequence message from deferred queue & deliver it */
541
542 buf = node->bclink.deferred_head;
543 node->bclink.deferred_head = buf->next;
544 node->bclink.deferred_size--;
545 goto receive;
546 }
547
548 /* Handle out-of-sequence broadcast message */
549
550 if (less(next_in, seqno)) {
551 deferred = tipc_link_defer_pkt(&node->bclink.deferred_head,
552 &node->bclink.deferred_tail,
553 buf);
554 node->bclink.deferred_size += deferred;
555 bclink_update_last_sent(node, seqno);
574 buf = NULL; 556 buf = NULL;
575 if (bclink_ack_allowed(node->bclink.nack_sync)) { 557 } else
576 if (gap_to != gap_after) 558 deferred = 0;
577 bclink_send_nack(node); 559
578 bclink_set_gap(node); 560 spin_lock_bh(&bc_lock);
579 } 561
580 } else { 562 if (deferred)
563 bcl->stats.deferred_recv++;
564 else
581 bcl->stats.duplicates++; 565 bcl->stats.duplicates++;
582 } 566
567 spin_unlock_bh(&bc_lock);
568
583unlock: 569unlock:
584 tipc_node_unlock(node); 570 tipc_node_unlock(node);
585exit: 571exit:
586 buf_discard(buf); 572 kfree_skb(buf);
587} 573}
588 574
589u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr) 575u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index b009666c60b0..5571394098f9 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -96,7 +96,7 @@ int tipc_bclink_send_msg(struct sk_buff *buf);
96void tipc_bclink_recv_pkt(struct sk_buff *buf); 96void tipc_bclink_recv_pkt(struct sk_buff *buf);
97u32 tipc_bclink_get_last_sent(void); 97u32 tipc_bclink_get_last_sent(void);
98u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr); 98u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr);
99void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 seqno); 99void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent);
100int tipc_bclink_stats(char *stats_buf, const u32 buf_size); 100int tipc_bclink_stats(char *stats_buf, const u32 buf_size);
101int tipc_bclink_reset_stats(void); 101int tipc_bclink_reset_stats(void);
102int tipc_bclink_set_queue_limits(u32 limit); 102int tipc_bclink_set_queue_limits(u32 limit);
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 329fb659fae4..5dfd89c40429 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -435,7 +435,7 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
435 u32 i; 435 u32 i;
436 int res = -EINVAL; 436 int res = -EINVAL;
437 437
438 if (tipc_mode != TIPC_NET_MODE) { 438 if (!tipc_own_addr) {
439 warn("Bearer <%s> rejected, not supported in standalone mode\n", 439 warn("Bearer <%s> rejected, not supported in standalone mode\n",
440 name); 440 name);
441 return -ENOPROTOOPT; 441 return -ENOPROTOOPT;
@@ -456,8 +456,7 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
456 warn("Bearer <%s> rejected, illegal discovery domain\n", name); 456 warn("Bearer <%s> rejected, illegal discovery domain\n", name);
457 return -EINVAL; 457 return -EINVAL;
458 } 458 }
459 if ((priority < TIPC_MIN_LINK_PRI || 459 if ((priority > TIPC_MAX_LINK_PRI) &&
460 priority > TIPC_MAX_LINK_PRI) &&
461 (priority != TIPC_MEDIA_LINK_PRI)) { 460 (priority != TIPC_MEDIA_LINK_PRI)) {
462 warn("Bearer <%s> rejected, illegal priority\n", name); 461 warn("Bearer <%s> rejected, illegal priority\n", name);
463 return -EINVAL; 462 return -EINVAL;
diff --git a/net/tipc/config.c b/net/tipc/config.c
index 4785bf26cdf4..f76d3b15e4e2 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -179,7 +179,7 @@ static struct sk_buff *cfg_set_own_addr(void)
179 if (!tipc_addr_node_valid(addr)) 179 if (!tipc_addr_node_valid(addr))
180 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 180 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
181 " (node address)"); 181 " (node address)");
182 if (tipc_mode == TIPC_NET_MODE) 182 if (tipc_own_addr)
183 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 183 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
184 " (cannot change node address once assigned)"); 184 " (cannot change node address once assigned)");
185 185
@@ -218,7 +218,7 @@ static struct sk_buff *cfg_set_max_publications(void)
218 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 218 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
219 219
220 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); 220 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
221 if (value != delimit(value, 1, 65535)) 221 if (value < 1 || value > 65535)
222 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 222 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
223 " (max publications must be 1-65535)"); 223 " (max publications must be 1-65535)");
224 tipc_max_publications = value; 224 tipc_max_publications = value;
@@ -233,7 +233,7 @@ static struct sk_buff *cfg_set_max_subscriptions(void)
233 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 233 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
234 234
235 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); 235 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
236 if (value != delimit(value, 1, 65535)) 236 if (value < 1 || value > 65535)
237 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 237 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
238 " (max subscriptions must be 1-65535"); 238 " (max subscriptions must be 1-65535");
239 tipc_max_subscriptions = value; 239 tipc_max_subscriptions = value;
@@ -249,14 +249,11 @@ static struct sk_buff *cfg_set_max_ports(void)
249 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); 249 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
250 if (value == tipc_max_ports) 250 if (value == tipc_max_ports)
251 return tipc_cfg_reply_none(); 251 return tipc_cfg_reply_none();
252 if (value != delimit(value, 127, 65535)) 252 if (value < 127 || value > 65535)
253 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 253 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
254 " (max ports must be 127-65535)"); 254 " (max ports must be 127-65535)");
255 if (tipc_mode != TIPC_NOT_RUNNING) 255 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
256 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 256 " (cannot change max ports while TIPC is active)");
257 " (cannot change max ports while TIPC is active)");
258 tipc_max_ports = value;
259 return tipc_cfg_reply_none();
260} 257}
261 258
262static struct sk_buff *cfg_set_netid(void) 259static struct sk_buff *cfg_set_netid(void)
@@ -268,10 +265,10 @@ static struct sk_buff *cfg_set_netid(void)
268 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); 265 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
269 if (value == tipc_net_id) 266 if (value == tipc_net_id)
270 return tipc_cfg_reply_none(); 267 return tipc_cfg_reply_none();
271 if (value != delimit(value, 1, 9999)) 268 if (value < 1 || value > 9999)
272 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 269 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
273 " (network id must be 1-9999)"); 270 " (network id must be 1-9999)");
274 if (tipc_mode == TIPC_NET_MODE) 271 if (tipc_own_addr)
275 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 272 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
276 " (cannot change network id once TIPC has joined a network)"); 273 " (cannot change network id once TIPC has joined a network)");
277 tipc_net_id = value; 274 tipc_net_id = value;
@@ -481,7 +478,7 @@ int tipc_cfg_init(void)
481 478
482 seq.type = TIPC_CFG_SRV; 479 seq.type = TIPC_CFG_SRV;
483 seq.lower = seq.upper = tipc_own_addr; 480 seq.lower = seq.upper = tipc_own_addr;
484 res = tipc_nametbl_publish_rsv(config_port_ref, TIPC_ZONE_SCOPE, &seq); 481 res = tipc_publish(config_port_ref, TIPC_ZONE_SCOPE, &seq);
485 if (res) 482 if (res)
486 goto failed; 483 goto failed;
487 484
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 2691cd57b8a8..68eba03e7955 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -53,7 +53,6 @@
53 53
54/* global variables used by multiple sub-systems within TIPC */ 54/* global variables used by multiple sub-systems within TIPC */
55 55
56int tipc_mode = TIPC_NOT_RUNNING;
57int tipc_random; 56int tipc_random;
58 57
59const char tipc_alphabet[] = 58const char tipc_alphabet[] =
@@ -125,11 +124,6 @@ int tipc_core_start_net(unsigned long addr)
125 124
126static void tipc_core_stop(void) 125static void tipc_core_stop(void)
127{ 126{
128 if (tipc_mode != TIPC_NODE_MODE)
129 return;
130
131 tipc_mode = TIPC_NOT_RUNNING;
132
133 tipc_netlink_stop(); 127 tipc_netlink_stop();
134 tipc_handler_stop(); 128 tipc_handler_stop();
135 tipc_cfg_stop(); 129 tipc_cfg_stop();
@@ -148,11 +142,7 @@ static int tipc_core_start(void)
148{ 142{
149 int res; 143 int res;
150 144
151 if (tipc_mode != TIPC_NOT_RUNNING)
152 return -ENOPROTOOPT;
153
154 get_random_bytes(&tipc_random, sizeof(tipc_random)); 145 get_random_bytes(&tipc_random, sizeof(tipc_random));
155 tipc_mode = TIPC_NODE_MODE;
156 146
157 res = tipc_handler_start(); 147 res = tipc_handler_start();
158 if (!res) 148 if (!res)
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 2761af36d141..13837e0e56b1 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -130,13 +130,6 @@ void tipc_msg_dbg(struct print_buf *, struct tipc_msg *, const char *);
130#define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */ 130#define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */
131 131
132/* 132/*
133 * TIPC operating mode routines
134 */
135#define TIPC_NOT_RUNNING 0
136#define TIPC_NODE_MODE 1
137#define TIPC_NET_MODE 2
138
139/*
140 * Global configuration variables 133 * Global configuration variables
141 */ 134 */
142 135
@@ -151,7 +144,6 @@ extern int tipc_remote_management;
151 * Other global variables 144 * Other global variables
152 */ 145 */
153 146
154extern int tipc_mode;
155extern int tipc_random; 147extern int tipc_random;
156extern const char tipc_alphabet[]; 148extern const char tipc_alphabet[];
157 149
@@ -168,16 +160,6 @@ extern void tipc_netlink_stop(void);
168extern int tipc_socket_init(void); 160extern int tipc_socket_init(void);
169extern void tipc_socket_stop(void); 161extern void tipc_socket_stop(void);
170 162
171static inline int delimit(int val, int min, int max)
172{
173 if (val > max)
174 return max;
175 if (val < min)
176 return min;
177 return val;
178}
179
180
181/* 163/*
182 * TIPC timer and signal code 164 * TIPC timer and signal code
183 */ 165 */
@@ -279,28 +261,4 @@ static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
279 261
280extern struct sk_buff *tipc_buf_acquire(u32 size); 262extern struct sk_buff *tipc_buf_acquire(u32 size);
281 263
282/**
283 * buf_discard - frees a TIPC message buffer
284 * @skb: message buffer
285 *
286 * Frees a message buffer. If passed NULL, just returns.
287 */
288
289static inline void buf_discard(struct sk_buff *skb)
290{
291 kfree_skb(skb);
292}
293
294/**
295 * buf_linearize - convert a TIPC message buffer into a single contiguous piece
296 * @skb: message buffer
297 *
298 * Returns 0 on success.
299 */
300
301static inline int buf_linearize(struct sk_buff *skb)
302{
303 return skb_linearize(skb);
304}
305
306#endif 264#endif
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index a00e5f811569..c630a21b2bed 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -82,6 +82,7 @@ static struct sk_buff *tipc_disc_init_msg(u32 type,
82 msg = buf_msg(buf); 82 msg = buf_msg(buf);
83 tipc_msg_init(msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain); 83 tipc_msg_init(msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain);
84 msg_set_non_seq(msg, 1); 84 msg_set_non_seq(msg, 1);
85 msg_set_node_sig(msg, tipc_random);
85 msg_set_dest_domain(msg, dest_domain); 86 msg_set_dest_domain(msg, dest_domain);
86 msg_set_bc_netid(msg, tipc_net_id); 87 msg_set_bc_netid(msg, tipc_net_id);
87 b_ptr->media->addr2msg(&b_ptr->addr, msg_media_addr(msg)); 88 b_ptr->media->addr2msg(&b_ptr->addr, msg_media_addr(msg));
@@ -121,20 +122,22 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
121{ 122{
122 struct tipc_node *n_ptr; 123 struct tipc_node *n_ptr;
123 struct tipc_link *link; 124 struct tipc_link *link;
124 struct tipc_media_addr media_addr, *addr; 125 struct tipc_media_addr media_addr;
125 struct sk_buff *rbuf; 126 struct sk_buff *rbuf;
126 struct tipc_msg *msg = buf_msg(buf); 127 struct tipc_msg *msg = buf_msg(buf);
127 u32 dest = msg_dest_domain(msg); 128 u32 dest = msg_dest_domain(msg);
128 u32 orig = msg_prevnode(msg); 129 u32 orig = msg_prevnode(msg);
129 u32 net_id = msg_bc_netid(msg); 130 u32 net_id = msg_bc_netid(msg);
130 u32 type = msg_type(msg); 131 u32 type = msg_type(msg);
132 u32 signature = msg_node_sig(msg);
133 int addr_mismatch;
131 int link_fully_up; 134 int link_fully_up;
132 135
133 media_addr.broadcast = 1; 136 media_addr.broadcast = 1;
134 b_ptr->media->msg2addr(&media_addr, msg_media_addr(msg)); 137 b_ptr->media->msg2addr(&media_addr, msg_media_addr(msg));
135 buf_discard(buf); 138 kfree_skb(buf);
136 139
137 /* Validate discovery message from requesting node */ 140 /* Ensure message from node is valid and communication is permitted */
138 if (net_id != tipc_net_id) 141 if (net_id != tipc_net_id)
139 return; 142 return;
140 if (media_addr.broadcast) 143 if (media_addr.broadcast)
@@ -162,15 +165,50 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
162 } 165 }
163 tipc_node_lock(n_ptr); 166 tipc_node_lock(n_ptr);
164 167
168 /* Prepare to validate requesting node's signature and media address */
165 link = n_ptr->links[b_ptr->identity]; 169 link = n_ptr->links[b_ptr->identity];
170 addr_mismatch = (link != NULL) &&
171 memcmp(&link->media_addr, &media_addr, sizeof(media_addr));
166 172
167 /* Create a link endpoint for this bearer, if necessary */ 173 /*
168 if (!link) { 174 * Ensure discovery message's signature is correct
169 link = tipc_link_create(n_ptr, b_ptr, &media_addr); 175 *
170 if (!link) { 176 * If signature is incorrect and there is no working link to the node,
177 * accept the new signature but invalidate all existing links to the
178 * node so they won't re-activate without a new discovery message.
179 *
180 * If signature is incorrect and the requested link to the node is
181 * working, accept the new signature. (This is an instance of delayed
182 * rediscovery, where a link endpoint was able to re-establish contact
183 * with its peer endpoint on a node that rebooted before receiving a
184 * discovery message from that node.)
185 *
186 * If signature is incorrect and there is a working link to the node
187 * that is not the requested link, reject the request (must be from
188 * a duplicate node).
189 */
190 if (signature != n_ptr->signature) {
191 if (n_ptr->working_links == 0) {
192 struct tipc_link *curr_link;
193 int i;
194
195 for (i = 0; i < MAX_BEARERS; i++) {
196 curr_link = n_ptr->links[i];
197 if (curr_link) {
198 memset(&curr_link->media_addr, 0,
199 sizeof(media_addr));
200 tipc_link_reset(curr_link);
201 }
202 }
203 addr_mismatch = (link != NULL);
204 } else if (tipc_link_is_up(link) && !addr_mismatch) {
205 /* delayed rediscovery */
206 } else {
207 disc_dupl_alert(b_ptr, orig, &media_addr);
171 tipc_node_unlock(n_ptr); 208 tipc_node_unlock(n_ptr);
172 return; 209 return;
173 } 210 }
211 n_ptr->signature = signature;
174 } 212 }
175 213
176 /* 214 /*
@@ -183,17 +221,26 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
183 * the new media address and reset the link to ensure it starts up 221 * the new media address and reset the link to ensure it starts up
184 * cleanly. 222 * cleanly.
185 */ 223 */
186 addr = &link->media_addr; 224
187 if (memcmp(addr, &media_addr, sizeof(*addr))) { 225 if (addr_mismatch) {
188 if (tipc_link_is_up(link) || (!link->started)) { 226 if (tipc_link_is_up(link)) {
189 disc_dupl_alert(b_ptr, orig, &media_addr); 227 disc_dupl_alert(b_ptr, orig, &media_addr);
190 tipc_node_unlock(n_ptr); 228 tipc_node_unlock(n_ptr);
191 return; 229 return;
230 } else {
231 memcpy(&link->media_addr, &media_addr,
232 sizeof(media_addr));
233 tipc_link_reset(link);
234 }
235 }
236
237 /* Create a link endpoint for this bearer, if necessary */
238 if (!link) {
239 link = tipc_link_create(n_ptr, b_ptr, &media_addr);
240 if (!link) {
241 tipc_node_unlock(n_ptr);
242 return;
192 } 243 }
193 warn("Resetting link <%s>, peer interface address changed\n",
194 link->name);
195 memcpy(addr, &media_addr, sizeof(*addr));
196 tipc_link_reset(link);
197 } 244 }
198 245
199 /* Accept discovery message & send response, if necessary */ 246 /* Accept discovery message & send response, if necessary */
@@ -203,7 +250,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
203 rbuf = tipc_disc_init_msg(DSC_RESP_MSG, orig, b_ptr); 250 rbuf = tipc_disc_init_msg(DSC_RESP_MSG, orig, b_ptr);
204 if (rbuf) { 251 if (rbuf) {
205 b_ptr->media->send_msg(rbuf, b_ptr, &media_addr); 252 b_ptr->media->send_msg(rbuf, b_ptr, &media_addr);
206 buf_discard(rbuf); 253 kfree_skb(rbuf);
207 } 254 }
208 } 255 }
209 256
@@ -349,7 +396,7 @@ void tipc_disc_delete(struct tipc_link_req *req)
349{ 396{
350 k_cancel_timer(&req->timer); 397 k_cancel_timer(&req->timer);
351 k_term_timer(&req->timer); 398 k_term_timer(&req->timer);
352 buf_discard(req->buf); 399 kfree_skb(req->buf);
353 kfree(req); 400 kfree(req);
354} 401}
355 402
diff --git a/net/tipc/link.c b/net/tipc/link.c
index ac1832a66f8a..b4b9b30167a3 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -484,7 +484,7 @@ static void link_release_outqueue(struct tipc_link *l_ptr)
484 484
485 while (buf) { 485 while (buf) {
486 next = buf->next; 486 next = buf->next;
487 buf_discard(buf); 487 kfree_skb(buf);
488 buf = next; 488 buf = next;
489 } 489 }
490 l_ptr->first_out = NULL; 490 l_ptr->first_out = NULL;
@@ -503,7 +503,7 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr)
503 503
504 while (buf) { 504 while (buf) {
505 next = buf->next; 505 next = buf->next;
506 buf_discard(buf); 506 kfree_skb(buf);
507 buf = next; 507 buf = next;
508 } 508 }
509 l_ptr->defragm_buf = NULL; 509 l_ptr->defragm_buf = NULL;
@@ -522,20 +522,20 @@ void tipc_link_stop(struct tipc_link *l_ptr)
522 buf = l_ptr->oldest_deferred_in; 522 buf = l_ptr->oldest_deferred_in;
523 while (buf) { 523 while (buf) {
524 next = buf->next; 524 next = buf->next;
525 buf_discard(buf); 525 kfree_skb(buf);
526 buf = next; 526 buf = next;
527 } 527 }
528 528
529 buf = l_ptr->first_out; 529 buf = l_ptr->first_out;
530 while (buf) { 530 while (buf) {
531 next = buf->next; 531 next = buf->next;
532 buf_discard(buf); 532 kfree_skb(buf);
533 buf = next; 533 buf = next;
534 } 534 }
535 535
536 tipc_link_reset_fragments(l_ptr); 536 tipc_link_reset_fragments(l_ptr);
537 537
538 buf_discard(l_ptr->proto_msg_queue); 538 kfree_skb(l_ptr->proto_msg_queue);
539 l_ptr->proto_msg_queue = NULL; 539 l_ptr->proto_msg_queue = NULL;
540} 540}
541 541
@@ -571,12 +571,12 @@ void tipc_link_reset(struct tipc_link *l_ptr)
571 /* Clean up all queues: */ 571 /* Clean up all queues: */
572 572
573 link_release_outqueue(l_ptr); 573 link_release_outqueue(l_ptr);
574 buf_discard(l_ptr->proto_msg_queue); 574 kfree_skb(l_ptr->proto_msg_queue);
575 l_ptr->proto_msg_queue = NULL; 575 l_ptr->proto_msg_queue = NULL;
576 buf = l_ptr->oldest_deferred_in; 576 buf = l_ptr->oldest_deferred_in;
577 while (buf) { 577 while (buf) {
578 struct sk_buff *next = buf->next; 578 struct sk_buff *next = buf->next;
579 buf_discard(buf); 579 kfree_skb(buf);
580 buf = next; 580 buf = next;
581 } 581 }
582 if (!list_empty(&l_ptr->waiting_ports)) 582 if (!list_empty(&l_ptr->waiting_ports))
@@ -810,7 +810,7 @@ static int link_bundle_buf(struct tipc_link *l_ptr,
810 skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size); 810 skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
811 msg_set_size(bundler_msg, to_pos + size); 811 msg_set_size(bundler_msg, to_pos + size);
812 msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1); 812 msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
813 buf_discard(buf); 813 kfree_skb(buf);
814 l_ptr->stats.sent_bundled++; 814 l_ptr->stats.sent_bundled++;
815 return 1; 815 return 1;
816} 816}
@@ -871,17 +871,15 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
871 u32 queue_limit = l_ptr->queue_limit[imp]; 871 u32 queue_limit = l_ptr->queue_limit[imp];
872 u32 max_packet = l_ptr->max_pkt; 872 u32 max_packet = l_ptr->max_pkt;
873 873
874 msg_set_prevnode(msg, tipc_own_addr); /* If routed message */
875
876 /* Match msg importance against queue limits: */ 874 /* Match msg importance against queue limits: */
877 875
878 if (unlikely(queue_size >= queue_limit)) { 876 if (unlikely(queue_size >= queue_limit)) {
879 if (imp <= TIPC_CRITICAL_IMPORTANCE) { 877 if (imp <= TIPC_CRITICAL_IMPORTANCE) {
880 link_schedule_port(l_ptr, msg_origport(msg), size); 878 link_schedule_port(l_ptr, msg_origport(msg), size);
881 buf_discard(buf); 879 kfree_skb(buf);
882 return -ELINKCONG; 880 return -ELINKCONG;
883 } 881 }
884 buf_discard(buf); 882 kfree_skb(buf);
885 if (imp > CONN_MANAGER) { 883 if (imp > CONN_MANAGER) {
886 warn("Resetting link <%s>, send queue full", l_ptr->name); 884 warn("Resetting link <%s>, send queue full", l_ptr->name);
887 tipc_link_reset(l_ptr); 885 tipc_link_reset(l_ptr);
@@ -968,10 +966,10 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
968 if (l_ptr) 966 if (l_ptr)
969 res = tipc_link_send_buf(l_ptr, buf); 967 res = tipc_link_send_buf(l_ptr, buf);
970 else 968 else
971 buf_discard(buf); 969 kfree_skb(buf);
972 tipc_node_unlock(n_ptr); 970 tipc_node_unlock(n_ptr);
973 } else { 971 } else {
974 buf_discard(buf); 972 kfree_skb(buf);
975 } 973 }
976 read_unlock_bh(&tipc_net_lock); 974 read_unlock_bh(&tipc_net_lock);
977 return res; 975 return res;
@@ -1018,7 +1016,7 @@ void tipc_link_send_names(struct list_head *message_list, u32 dest)
1018 1016
1019 list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) { 1017 list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
1020 list_del((struct list_head *)buf); 1018 list_del((struct list_head *)buf);
1021 buf_discard(buf); 1019 kfree_skb(buf);
1022 } 1020 }
1023} 1021}
1024 1022
@@ -1262,7 +1260,7 @@ again:
1262error: 1260error:
1263 for (; buf_chain; buf_chain = buf) { 1261 for (; buf_chain; buf_chain = buf) {
1264 buf = buf_chain->next; 1262 buf = buf_chain->next;
1265 buf_discard(buf_chain); 1263 kfree_skb(buf_chain);
1266 } 1264 }
1267 return -EFAULT; 1265 return -EFAULT;
1268 } 1266 }
@@ -1316,7 +1314,7 @@ error:
1316 tipc_node_unlock(node); 1314 tipc_node_unlock(node);
1317 for (; buf_chain; buf_chain = buf) { 1315 for (; buf_chain; buf_chain = buf) {
1318 buf = buf_chain->next; 1316 buf = buf_chain->next;
1319 buf_discard(buf_chain); 1317 kfree_skb(buf_chain);
1320 } 1318 }
1321 goto again; 1319 goto again;
1322 } 1320 }
@@ -1324,7 +1322,7 @@ error:
1324reject: 1322reject:
1325 for (; buf_chain; buf_chain = buf) { 1323 for (; buf_chain; buf_chain = buf) {
1326 buf = buf_chain->next; 1324 buf = buf_chain->next;
1327 buf_discard(buf_chain); 1325 kfree_skb(buf_chain);
1328 } 1326 }
1329 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect, 1327 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
1330 total_len, TIPC_ERR_NO_NODE); 1328 total_len, TIPC_ERR_NO_NODE);
@@ -1390,7 +1388,7 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1390 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 1388 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1391 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 1389 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1392 l_ptr->unacked_window = 0; 1390 l_ptr->unacked_window = 0;
1393 buf_discard(buf); 1391 kfree_skb(buf);
1394 l_ptr->proto_msg_queue = NULL; 1392 l_ptr->proto_msg_queue = NULL;
1395 return 0; 1393 return 0;
1396 } else { 1394 } else {
@@ -1501,13 +1499,13 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
1501 tipc_node_lock(n_ptr); 1499 tipc_node_lock(n_ptr);
1502 1500
1503 tipc_addr_string_fill(addr_string, n_ptr->addr); 1501 tipc_addr_string_fill(addr_string, n_ptr->addr);
1504 info("Multicast link info for %s\n", addr_string); 1502 info("Broadcast link info for %s\n", addr_string);
1503 info("Supportable: %d, ", n_ptr->bclink.supportable);
1505 info("Supported: %d, ", n_ptr->bclink.supported); 1504 info("Supported: %d, ", n_ptr->bclink.supported);
1506 info("Acked: %u\n", n_ptr->bclink.acked); 1505 info("Acked: %u\n", n_ptr->bclink.acked);
1507 info("Last in: %u, ", n_ptr->bclink.last_in); 1506 info("Last in: %u, ", n_ptr->bclink.last_in);
1508 info("Gap after: %u, ", n_ptr->bclink.gap_after); 1507 info("Oos state: %u, ", n_ptr->bclink.oos_state);
1509 info("Gap to: %u\n", n_ptr->bclink.gap_to); 1508 info("Last sent: %u\n", n_ptr->bclink.last_sent);
1510 info("Nack sync: %u\n\n", n_ptr->bclink.nack_sync);
1511 1509
1512 tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr); 1510 tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
1513 1511
@@ -1679,7 +1677,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1679 1677
1680 /* Ensure message data is a single contiguous unit */ 1678 /* Ensure message data is a single contiguous unit */
1681 1679
1682 if (unlikely(buf_linearize(buf))) 1680 if (unlikely(skb_linearize(buf)))
1683 goto cont; 1681 goto cont;
1684 1682
1685 /* Handle arrival of a non-unicast link message */ 1683 /* Handle arrival of a non-unicast link message */
@@ -1736,7 +1734,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1736 1734
1737 /* Release acked messages */ 1735 /* Release acked messages */
1738 1736
1739 if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported) 1737 if (n_ptr->bclink.supported)
1740 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg)); 1738 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1741 1739
1742 crs = l_ptr->first_out; 1740 crs = l_ptr->first_out;
@@ -1744,7 +1742,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1744 less_eq(buf_seqno(crs), ackd)) { 1742 less_eq(buf_seqno(crs), ackd)) {
1745 struct sk_buff *next = crs->next; 1743 struct sk_buff *next = crs->next;
1746 1744
1747 buf_discard(crs); 1745 kfree_skb(crs);
1748 crs = next; 1746 crs = next;
1749 released++; 1747 released++;
1750 } 1748 }
@@ -1773,52 +1771,56 @@ protocol_check:
1773 if (unlikely(l_ptr->oldest_deferred_in)) 1771 if (unlikely(l_ptr->oldest_deferred_in))
1774 head = link_insert_deferred_queue(l_ptr, 1772 head = link_insert_deferred_queue(l_ptr,
1775 head); 1773 head);
1776 if (likely(msg_is_dest(msg, tipc_own_addr))) {
1777deliver: 1774deliver:
1778 if (likely(msg_isdata(msg))) { 1775 if (likely(msg_isdata(msg))) {
1779 tipc_node_unlock(n_ptr); 1776 tipc_node_unlock(n_ptr);
1780 tipc_port_recv_msg(buf); 1777 tipc_port_recv_msg(buf);
1781 continue; 1778 continue;
1779 }
1780 switch (msg_user(msg)) {
1781 int ret;
1782 case MSG_BUNDLER:
1783 l_ptr->stats.recv_bundles++;
1784 l_ptr->stats.recv_bundled +=
1785 msg_msgcnt(msg);
1786 tipc_node_unlock(n_ptr);
1787 tipc_link_recv_bundle(buf);
1788 continue;
1789 case NAME_DISTRIBUTOR:
1790 tipc_node_unlock(n_ptr);
1791 tipc_named_recv(buf);
1792 continue;
1793 case CONN_MANAGER:
1794 tipc_node_unlock(n_ptr);
1795 tipc_port_recv_proto_msg(buf);
1796 continue;
1797 case MSG_FRAGMENTER:
1798 l_ptr->stats.recv_fragments++;
1799 ret = tipc_link_recv_fragment(
1800 &l_ptr->defragm_buf,
1801 &buf, &msg);
1802 if (ret == 1) {
1803 l_ptr->stats.recv_fragmented++;
1804 goto deliver;
1782 } 1805 }
1783 switch (msg_user(msg)) { 1806 if (ret == -1)
1784 case MSG_BUNDLER: 1807 l_ptr->next_in_no--;
1785 l_ptr->stats.recv_bundles++; 1808 break;
1786 l_ptr->stats.recv_bundled += 1809 case CHANGEOVER_PROTOCOL:
1787 msg_msgcnt(msg); 1810 type = msg_type(msg);
1788 tipc_node_unlock(n_ptr); 1811 if (link_recv_changeover_msg(&l_ptr,
1789 tipc_link_recv_bundle(buf); 1812 &buf)) {
1790 continue; 1813 msg = buf_msg(buf);
1791 case NAME_DISTRIBUTOR: 1814 seq_no = msg_seqno(msg);
1792 tipc_node_unlock(n_ptr); 1815 if (type == ORIGINAL_MSG)
1793 tipc_named_recv(buf);
1794 continue;
1795 case CONN_MANAGER:
1796 tipc_node_unlock(n_ptr);
1797 tipc_port_recv_proto_msg(buf);
1798 continue;
1799 case MSG_FRAGMENTER:
1800 l_ptr->stats.recv_fragments++;
1801 if (tipc_link_recv_fragment(&l_ptr->defragm_buf,
1802 &buf, &msg)) {
1803 l_ptr->stats.recv_fragmented++;
1804 goto deliver; 1816 goto deliver;
1805 } 1817 goto protocol_check;
1806 break;
1807 case CHANGEOVER_PROTOCOL:
1808 type = msg_type(msg);
1809 if (link_recv_changeover_msg(&l_ptr, &buf)) {
1810 msg = buf_msg(buf);
1811 seq_no = msg_seqno(msg);
1812 if (type == ORIGINAL_MSG)
1813 goto deliver;
1814 goto protocol_check;
1815 }
1816 break;
1817 default:
1818 buf_discard(buf);
1819 buf = NULL;
1820 break;
1821 } 1818 }
1819 break;
1820 default:
1821 kfree_skb(buf);
1822 buf = NULL;
1823 break;
1822 } 1824 }
1823 tipc_node_unlock(n_ptr); 1825 tipc_node_unlock(n_ptr);
1824 tipc_net_route_msg(buf); 1826 tipc_net_route_msg(buf);
@@ -1847,23 +1849,22 @@ deliver:
1847 } 1849 }
1848 tipc_node_unlock(n_ptr); 1850 tipc_node_unlock(n_ptr);
1849cont: 1851cont:
1850 buf_discard(buf); 1852 kfree_skb(buf);
1851 } 1853 }
1852 read_unlock_bh(&tipc_net_lock); 1854 read_unlock_bh(&tipc_net_lock);
1853} 1855}
1854 1856
1855/* 1857/*
1856 * link_defer_buf(): Sort a received out-of-sequence packet 1858 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
1857 * into the deferred reception queue. 1859 *
1858 * Returns the increase of the queue length,i.e. 0 or 1 1860 * Returns increase in queue length (i.e. 0 or 1)
1859 */ 1861 */
1860 1862
1861u32 tipc_link_defer_pkt(struct sk_buff **head, 1863u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
1862 struct sk_buff **tail,
1863 struct sk_buff *buf) 1864 struct sk_buff *buf)
1864{ 1865{
1865 struct sk_buff *prev = NULL; 1866 struct sk_buff *queue_buf;
1866 struct sk_buff *crs = *head; 1867 struct sk_buff **prev;
1867 u32 seq_no = buf_seqno(buf); 1868 u32 seq_no = buf_seqno(buf);
1868 1869
1869 buf->next = NULL; 1870 buf->next = NULL;
@@ -1881,31 +1882,30 @@ u32 tipc_link_defer_pkt(struct sk_buff **head,
1881 return 1; 1882 return 1;
1882 } 1883 }
1883 1884
1884 /* Scan through queue and sort it in */ 1885 /* Locate insertion point in queue, then insert; discard if duplicate */
1885 do { 1886 prev = head;
1886 struct tipc_msg *msg = buf_msg(crs); 1887 queue_buf = *head;
1888 for (;;) {
1889 u32 curr_seqno = buf_seqno(queue_buf);
1887 1890
1888 if (less(seq_no, msg_seqno(msg))) { 1891 if (seq_no == curr_seqno) {
1889 buf->next = crs; 1892 kfree_skb(buf);
1890 if (prev) 1893 return 0;
1891 prev->next = buf;
1892 else
1893 *head = buf;
1894 return 1;
1895 } 1894 }
1896 if (seq_no == msg_seqno(msg)) 1895
1896 if (less(seq_no, curr_seqno))
1897 break; 1897 break;
1898 prev = crs;
1899 crs = crs->next;
1900 } while (crs);
1901 1898
1902 /* Message is a duplicate of an existing message */ 1899 prev = &queue_buf->next;
1900 queue_buf = queue_buf->next;
1901 }
1903 1902
1904 buf_discard(buf); 1903 buf->next = queue_buf;
1905 return 0; 1904 *prev = buf;
1905 return 1;
1906} 1906}
1907 1907
1908/** 1908/*
1909 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet 1909 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1910 */ 1910 */
1911 1911
@@ -1930,7 +1930,7 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
1930 1930
1931 if (less(seq_no, mod(l_ptr->next_in_no))) { 1931 if (less(seq_no, mod(l_ptr->next_in_no))) {
1932 l_ptr->stats.duplicates++; 1932 l_ptr->stats.duplicates++;
1933 buf_discard(buf); 1933 kfree_skb(buf);
1934 return; 1934 return;
1935 } 1935 }
1936 1936
@@ -1956,6 +1956,13 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
1956 u32 msg_size = sizeof(l_ptr->proto_msg); 1956 u32 msg_size = sizeof(l_ptr->proto_msg);
1957 int r_flag; 1957 int r_flag;
1958 1958
1959 /* Discard any previous message that was deferred due to congestion */
1960
1961 if (l_ptr->proto_msg_queue) {
1962 kfree_skb(l_ptr->proto_msg_queue);
1963 l_ptr->proto_msg_queue = NULL;
1964 }
1965
1959 if (link_blocked(l_ptr)) 1966 if (link_blocked(l_ptr))
1960 return; 1967 return;
1961 1968
@@ -1964,9 +1971,11 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
1964 if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG)) 1971 if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
1965 return; 1972 return;
1966 1973
1974 /* Create protocol message with "out-of-sequence" sequence number */
1975
1967 msg_set_type(msg, msg_typ); 1976 msg_set_type(msg, msg_typ);
1968 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane); 1977 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
1969 msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in)); 1978 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1970 msg_set_last_bcast(msg, tipc_bclink_get_last_sent()); 1979 msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
1971 1980
1972 if (msg_typ == STATE_MSG) { 1981 if (msg_typ == STATE_MSG) {
@@ -2020,44 +2029,36 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
2020 r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr)); 2029 r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
2021 msg_set_redundant_link(msg, r_flag); 2030 msg_set_redundant_link(msg, r_flag);
2022 msg_set_linkprio(msg, l_ptr->priority); 2031 msg_set_linkprio(msg, l_ptr->priority);
2023 2032 msg_set_size(msg, msg_size);
2024 /* Ensure sequence number will not fit : */
2025 2033
2026 msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2))); 2034 msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
2027 2035
2028 /* Congestion? */
2029
2030 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
2031 if (!l_ptr->proto_msg_queue) {
2032 l_ptr->proto_msg_queue =
2033 tipc_buf_acquire(sizeof(l_ptr->proto_msg));
2034 }
2035 buf = l_ptr->proto_msg_queue;
2036 if (!buf)
2037 return;
2038 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
2039 return;
2040 }
2041
2042 /* Message can be sent */
2043
2044 buf = tipc_buf_acquire(msg_size); 2036 buf = tipc_buf_acquire(msg_size);
2045 if (!buf) 2037 if (!buf)
2046 return; 2038 return;
2047 2039
2048 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg)); 2040 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
2049 msg_set_size(buf_msg(buf), msg_size);
2050 2041
2051 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 2042 /* Defer message if bearer is already congested */
2052 l_ptr->unacked_window = 0; 2043
2053 buf_discard(buf); 2044 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
2045 l_ptr->proto_msg_queue = buf;
2054 return; 2046 return;
2055 } 2047 }
2056 2048
2057 /* New congestion */ 2049 /* Defer message if attempting to send results in bearer congestion */
2058 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr); 2050
2059 l_ptr->proto_msg_queue = buf; 2051 if (!tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
2060 l_ptr->stats.bearer_congs++; 2052 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
2053 l_ptr->proto_msg_queue = buf;
2054 l_ptr->stats.bearer_congs++;
2055 return;
2056 }
2057
2058 /* Discard message if it was sent successfully */
2059
2060 l_ptr->unacked_window = 0;
2061 kfree_skb(buf);
2061} 2062}
2062 2063
2063/* 2064/*
@@ -2105,6 +2106,8 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
2105 l_ptr->owner->block_setup = WAIT_NODE_DOWN; 2106 l_ptr->owner->block_setup = WAIT_NODE_DOWN;
2106 } 2107 }
2107 2108
2109 link_state_event(l_ptr, RESET_MSG);
2110
2108 /* fall thru' */ 2111 /* fall thru' */
2109 case ACTIVATE_MSG: 2112 case ACTIVATE_MSG:
2110 /* Update link settings according other endpoint's values */ 2113 /* Update link settings according other endpoint's values */
@@ -2127,16 +2130,22 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
2127 } else { 2130 } else {
2128 l_ptr->max_pkt = l_ptr->max_pkt_target; 2131 l_ptr->max_pkt = l_ptr->max_pkt_target;
2129 } 2132 }
2130 l_ptr->owner->bclink.supported = (max_pkt_info != 0); 2133 l_ptr->owner->bclink.supportable = (max_pkt_info != 0);
2131 2134
2132 link_state_event(l_ptr, msg_type(msg)); 2135 /* Synchronize broadcast link info, if not done previously */
2136
2137 if (!tipc_node_is_up(l_ptr->owner)) {
2138 l_ptr->owner->bclink.last_sent =
2139 l_ptr->owner->bclink.last_in =
2140 msg_last_bcast(msg);
2141 l_ptr->owner->bclink.oos_state = 0;
2142 }
2133 2143
2134 l_ptr->peer_session = msg_session(msg); 2144 l_ptr->peer_session = msg_session(msg);
2135 l_ptr->peer_bearer_id = msg_bearer_id(msg); 2145 l_ptr->peer_bearer_id = msg_bearer_id(msg);
2136 2146
2137 /* Synchronize broadcast sequence numbers */ 2147 if (msg_type(msg) == ACTIVATE_MSG)
2138 if (!tipc_node_redundant_links(l_ptr->owner)) 2148 link_state_event(l_ptr, ACTIVATE_MSG);
2139 l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
2140 break; 2149 break;
2141 case STATE_MSG: 2150 case STATE_MSG:
2142 2151
@@ -2177,7 +2186,9 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
2177 2186
2178 /* Protocol message before retransmits, reduce loss risk */ 2187 /* Protocol message before retransmits, reduce loss risk */
2179 2188
2180 tipc_bclink_check_gap(l_ptr->owner, msg_last_bcast(msg)); 2189 if (l_ptr->owner->bclink.supported)
2190 tipc_bclink_update_link_state(l_ptr->owner,
2191 msg_last_bcast(msg));
2181 2192
2182 if (rec_gap || (msg_probe(msg))) { 2193 if (rec_gap || (msg_probe(msg))) {
2183 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 2194 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
@@ -2191,7 +2202,7 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
2191 break; 2202 break;
2192 } 2203 }
2193exit: 2204exit:
2194 buf_discard(buf); 2205 kfree_skb(buf);
2195} 2206}
2196 2207
2197 2208
@@ -2389,7 +2400,7 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
2389 warn("Link changeover error, duplicate msg dropped\n"); 2400 warn("Link changeover error, duplicate msg dropped\n");
2390 goto exit; 2401 goto exit;
2391 } 2402 }
2392 buf_discard(tunnel_buf); 2403 kfree_skb(tunnel_buf);
2393 return 1; 2404 return 1;
2394 } 2405 }
2395 2406
@@ -2421,7 +2432,7 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
2421 } else { 2432 } else {
2422 *buf = buf_extract(tunnel_buf, INT_H_SIZE); 2433 *buf = buf_extract(tunnel_buf, INT_H_SIZE);
2423 if (*buf != NULL) { 2434 if (*buf != NULL) {
2424 buf_discard(tunnel_buf); 2435 kfree_skb(tunnel_buf);
2425 return 1; 2436 return 1;
2426 } else { 2437 } else {
2427 warn("Link changeover error, original msg dropped\n"); 2438 warn("Link changeover error, original msg dropped\n");
@@ -2429,7 +2440,7 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
2429 } 2440 }
2430exit: 2441exit:
2431 *buf = NULL; 2442 *buf = NULL;
2432 buf_discard(tunnel_buf); 2443 kfree_skb(tunnel_buf);
2433 return 0; 2444 return 0;
2434} 2445}
2435 2446
@@ -2451,7 +2462,7 @@ void tipc_link_recv_bundle(struct sk_buff *buf)
2451 pos += align(msg_size(buf_msg(obuf))); 2462 pos += align(msg_size(buf_msg(obuf)));
2452 tipc_net_route_msg(obuf); 2463 tipc_net_route_msg(obuf);
2453 } 2464 }
2454 buf_discard(buf); 2465 kfree_skb(buf);
2455} 2466}
2456 2467
2457/* 2468/*
@@ -2500,11 +2511,11 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
2500 } 2511 }
2501 fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE); 2512 fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
2502 if (fragm == NULL) { 2513 if (fragm == NULL) {
2503 buf_discard(buf); 2514 kfree_skb(buf);
2504 while (buf_chain) { 2515 while (buf_chain) {
2505 buf = buf_chain; 2516 buf = buf_chain;
2506 buf_chain = buf_chain->next; 2517 buf_chain = buf_chain->next;
2507 buf_discard(buf); 2518 kfree_skb(buf);
2508 } 2519 }
2509 return -ENOMEM; 2520 return -ENOMEM;
2510 } 2521 }
@@ -2521,7 +2532,7 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
2521 crs += fragm_sz; 2532 crs += fragm_sz;
2522 msg_set_type(&fragm_hdr, FRAGMENT); 2533 msg_set_type(&fragm_hdr, FRAGMENT);
2523 } 2534 }
2524 buf_discard(buf); 2535 kfree_skb(buf);
2525 2536
2526 /* Append chain of fragments to send queue & send them */ 2537 /* Append chain of fragments to send queue & send them */
2527 2538
@@ -2608,7 +2619,7 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2608 if (msg_type(imsg) == TIPC_MCAST_MSG) 2619 if (msg_type(imsg) == TIPC_MCAST_MSG)
2609 max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE; 2620 max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
2610 if (msg_size(imsg) > max) { 2621 if (msg_size(imsg) > max) {
2611 buf_discard(fbuf); 2622 kfree_skb(fbuf);
2612 return 0; 2623 return 0;
2613 } 2624 }
2614 pbuf = tipc_buf_acquire(msg_size(imsg)); 2625 pbuf = tipc_buf_acquire(msg_size(imsg));
@@ -2623,9 +2634,11 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2623 set_fragm_size(pbuf, fragm_sz); 2634 set_fragm_size(pbuf, fragm_sz);
2624 set_expected_frags(pbuf, exp_fragm_cnt - 1); 2635 set_expected_frags(pbuf, exp_fragm_cnt - 1);
2625 } else { 2636 } else {
2626 warn("Link unable to reassemble fragmented message\n"); 2637 dbg("Link unable to reassemble fragmented message\n");
2638 kfree_skb(fbuf);
2639 return -1;
2627 } 2640 }
2628 buf_discard(fbuf); 2641 kfree_skb(fbuf);
2629 return 0; 2642 return 0;
2630 } else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) { 2643 } else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) {
2631 u32 dsz = msg_data_sz(fragm); 2644 u32 dsz = msg_data_sz(fragm);
@@ -2634,7 +2647,7 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2634 u32 exp_frags = get_expected_frags(pbuf) - 1; 2647 u32 exp_frags = get_expected_frags(pbuf) - 1;
2635 skb_copy_to_linear_data_offset(pbuf, crs, 2648 skb_copy_to_linear_data_offset(pbuf, crs,
2636 msg_data(fragm), dsz); 2649 msg_data(fragm), dsz);
2637 buf_discard(fbuf); 2650 kfree_skb(fbuf);
2638 2651
2639 /* Is message complete? */ 2652 /* Is message complete? */
2640 2653
@@ -2651,7 +2664,7 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2651 set_expected_frags(pbuf, exp_frags); 2664 set_expected_frags(pbuf, exp_frags);
2652 return 0; 2665 return 0;
2653 } 2666 }
2654 buf_discard(fbuf); 2667 kfree_skb(fbuf);
2655 return 0; 2668 return 0;
2656} 2669}
2657 2670
@@ -2682,7 +2695,7 @@ static void link_check_defragm_bufs(struct tipc_link *l_ptr)
2682 prev->next = buf->next; 2695 prev->next = buf->next;
2683 else 2696 else
2684 l_ptr->defragm_buf = buf->next; 2697 l_ptr->defragm_buf = buf->next;
2685 buf_discard(buf); 2698 kfree_skb(buf);
2686 } 2699 }
2687 buf = next; 2700 buf = next;
2688 } 2701 }
@@ -3057,7 +3070,7 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_s
3057 str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area), 3070 str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
3058 (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO); 3071 (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
3059 if (!str_len) { 3072 if (!str_len) {
3060 buf_discard(buf); 3073 kfree_skb(buf);
3061 return tipc_cfg_reply_error_string("link not found"); 3074 return tipc_cfg_reply_error_string("link not found");
3062 } 3075 }
3063 3076
diff --git a/net/tipc/log.c b/net/tipc/log.c
index 952c39f643e6..895c6e530b0b 100644
--- a/net/tipc/log.c
+++ b/net/tipc/log.c
@@ -304,7 +304,7 @@ struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area, int req_tlv_space)
304 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 304 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
305 305
306 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); 306 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
307 if (value != delimit(value, 0, 32768)) 307 if (value > 32768)
308 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 308 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
309 " (log size must be 0-32768)"); 309 " (log size must be 0-32768)");
310 if (tipc_log_resize(value)) 310 if (tipc_log_resize(value))
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 3e4d3e29be61..e3afe162c0ac 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -106,7 +106,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
106 if (likely(res)) 106 if (likely(res))
107 return dsz; 107 return dsz;
108 108
109 buf_discard(*buf); 109 kfree_skb(*buf);
110 *buf = NULL; 110 *buf = NULL;
111 return -EFAULT; 111 return -EFAULT;
112} 112}
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 7b0cda167107..eba524e34a6b 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -384,11 +384,6 @@ static inline void msg_set_destnode(struct tipc_msg *m, u32 a)
384 msg_set_word(m, 7, a); 384 msg_set_word(m, 7, a);
385} 385}
386 386
387static inline int msg_is_dest(struct tipc_msg *m, u32 d)
388{
389 return msg_short(m) || (msg_destnode(m) == d);
390}
391
392static inline u32 msg_nametype(struct tipc_msg *m) 387static inline u32 msg_nametype(struct tipc_msg *m)
393{ 388{
394 return msg_word(m, 8); 389 return msg_word(m, 8);
@@ -517,6 +512,16 @@ static inline void msg_set_seq_gap(struct tipc_msg *m, u32 n)
517 msg_set_bits(m, 1, 16, 0x1fff, n); 512 msg_set_bits(m, 1, 16, 0x1fff, n);
518} 513}
519 514
515static inline u32 msg_node_sig(struct tipc_msg *m)
516{
517 return msg_bits(m, 1, 0, 0xffff);
518}
519
520static inline void msg_set_node_sig(struct tipc_msg *m, u32 n)
521{
522 msg_set_bits(m, 1, 0, 0xffff, n);
523}
524
520 525
521/* 526/*
522 * Word 2 527 * Word 2
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 98ebb37f1808..d57da6159616 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -120,7 +120,7 @@ static void named_cluster_distribute(struct sk_buff *buf)
120 } 120 }
121 } 121 }
122 122
123 buf_discard(buf); 123 kfree_skb(buf);
124} 124}
125 125
126/** 126/**
@@ -239,9 +239,6 @@ exit:
239 * 239 *
240 * Invoked for each publication issued by a newly failed node. 240 * Invoked for each publication issued by a newly failed node.
241 * Removes publication structure from name table & deletes it. 241 * Removes publication structure from name table & deletes it.
242 * In rare cases the link may have come back up again when this
243 * function is called, and we have two items representing the same
244 * publication. Nudge this item's key to distinguish it from the other.
245 */ 242 */
246 243
247static void named_purge_publ(struct publication *publ) 244static void named_purge_publ(struct publication *publ)
@@ -249,7 +246,6 @@ static void named_purge_publ(struct publication *publ)
249 struct publication *p; 246 struct publication *p;
250 247
251 write_lock_bh(&tipc_nametbl_lock); 248 write_lock_bh(&tipc_nametbl_lock);
252 publ->key += 1222345;
253 p = tipc_nametbl_remove_publ(publ->type, publ->lower, 249 p = tipc_nametbl_remove_publ(publ->type, publ->lower,
254 publ->node, publ->ref, publ->key); 250 publ->node, publ->ref, publ->key);
255 if (p) 251 if (p)
@@ -316,7 +312,7 @@ void tipc_named_recv(struct sk_buff *buf)
316 item++; 312 item++;
317 } 313 }
318 write_unlock_bh(&tipc_nametbl_lock); 314 write_unlock_bh(&tipc_nametbl_lock);
319 buf_discard(buf); 315 kfree_skb(buf);
320} 316}
321 317
322/** 318/**
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 89eb5621ebba..c6a1ae36952e 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -114,10 +114,8 @@ struct name_table {
114}; 114};
115 115
116static struct name_table table; 116static struct name_table table;
117static atomic_t rsv_publ_ok = ATOMIC_INIT(0);
118DEFINE_RWLOCK(tipc_nametbl_lock); 117DEFINE_RWLOCK(tipc_nametbl_lock);
119 118
120
121static int hash(int x) 119static int hash(int x)
122{ 120{
123 return x & (tipc_nametbl_size - 1); 121 return x & (tipc_nametbl_size - 1);
@@ -270,6 +268,13 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
270 } 268 }
271 269
272 info = sseq->info; 270 info = sseq->info;
271
272 /* Check if an identical publication already exists */
273 list_for_each_entry(publ, &info->zone_list, zone_list) {
274 if ((publ->ref == port) && (publ->key == key) &&
275 (!publ->node || (publ->node == node)))
276 return NULL;
277 }
273 } else { 278 } else {
274 u32 inspos; 279 u32 inspos;
275 struct sub_seq *freesseq; 280 struct sub_seq *freesseq;
@@ -534,10 +539,17 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
534} 539}
535 540
536/* 541/*
537 * tipc_nametbl_translate - translate name to port id 542 * tipc_nametbl_translate - perform name translation
543 *
544 * On entry, 'destnode' is the search domain used during translation.
538 * 545 *
539 * Note: on entry 'destnode' is the search domain used during translation; 546 * On exit:
540 * on exit it passes back the node address of the matching port (if any) 547 * - if name translation is deferred to another node/cluster/zone,
548 * leaves 'destnode' unchanged (will be non-zero) and returns 0
549 * - if name translation is attempted and succeeds, sets 'destnode'
550 * to publishing node and returns port reference (will be non-zero)
551 * - if name translation is attempted and fails, sets 'destnode' to 0
552 * and returns 0
541 */ 553 */
542 554
543u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode) 555u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
@@ -547,6 +559,7 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
547 struct publication *publ; 559 struct publication *publ;
548 struct name_seq *seq; 560 struct name_seq *seq;
549 u32 ref = 0; 561 u32 ref = 0;
562 u32 node = 0;
550 563
551 if (!tipc_in_scope(*destnode, tipc_own_addr)) 564 if (!tipc_in_scope(*destnode, tipc_own_addr))
552 return 0; 565 return 0;
@@ -604,11 +617,12 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
604 } 617 }
605 618
606 ref = publ->ref; 619 ref = publ->ref;
607 *destnode = publ->node; 620 node = publ->node;
608no_match: 621no_match:
609 spin_unlock_bh(&seq->lock); 622 spin_unlock_bh(&seq->lock);
610not_found: 623not_found:
611 read_unlock_bh(&tipc_nametbl_lock); 624 read_unlock_bh(&tipc_nametbl_lock);
625 *destnode = node;
612 return ref; 626 return ref;
613} 627}
614 628
@@ -665,22 +679,7 @@ exit:
665 return res; 679 return res;
666} 680}
667 681
668/** 682/*
669 * tipc_nametbl_publish_rsv - publish port name using a reserved name type
670 */
671
672int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope,
673 struct tipc_name_seq const *seq)
674{
675 int res;
676
677 atomic_inc(&rsv_publ_ok);
678 res = tipc_publish(ref, scope, seq);
679 atomic_dec(&rsv_publ_ok);
680 return res;
681}
682
683/**
684 * tipc_nametbl_publish - add name publication to network name tables 683 * tipc_nametbl_publish - add name publication to network name tables
685 */ 684 */
686 685
@@ -694,11 +693,6 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
694 tipc_max_publications); 693 tipc_max_publications);
695 return NULL; 694 return NULL;
696 } 695 }
697 if ((type < TIPC_RESERVED_TYPES) && !atomic_read(&rsv_publ_ok)) {
698 warn("Publication failed, reserved name {%u,%u,%u}\n",
699 type, lower, upper);
700 return NULL;
701 }
702 696
703 write_lock_bh(&tipc_nametbl_lock); 697 write_lock_bh(&tipc_nametbl_lock);
704 table.local_publ_count++; 698 table.local_publ_count++;
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index 8086b42f92ad..207d59ebf849 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -91,8 +91,6 @@ struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space);
91u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node); 91u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node);
92int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, 92int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
93 struct tipc_port_list *dports); 93 struct tipc_port_list *dports);
94int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope,
95 struct tipc_name_seq const *seq);
96struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, 94struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
97 u32 scope, u32 port_ref, u32 key); 95 u32 scope, u32 port_ref, u32 key);
98int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key); 96int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key);
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 61afee7e8291..d4531b07076c 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -117,7 +117,7 @@ static void net_route_named_msg(struct sk_buff *buf)
117 u32 dport; 117 u32 dport;
118 118
119 if (!msg_named(msg)) { 119 if (!msg_named(msg)) {
120 buf_discard(buf); 120 kfree_skb(buf);
121 return; 121 return;
122 } 122 }
123 123
@@ -161,7 +161,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
161 tipc_port_recv_proto_msg(buf); 161 tipc_port_recv_proto_msg(buf);
162 break; 162 break;
163 default: 163 default:
164 buf_discard(buf); 164 kfree_skb(buf);
165 } 165 }
166 return; 166 return;
167 } 167 }
@@ -175,14 +175,10 @@ int tipc_net_start(u32 addr)
175{ 175{
176 char addr_string[16]; 176 char addr_string[16];
177 177
178 if (tipc_mode != TIPC_NODE_MODE)
179 return -ENOPROTOOPT;
180
181 tipc_subscr_stop(); 178 tipc_subscr_stop();
182 tipc_cfg_stop(); 179 tipc_cfg_stop();
183 180
184 tipc_own_addr = addr; 181 tipc_own_addr = addr;
185 tipc_mode = TIPC_NET_MODE;
186 tipc_named_reinit(); 182 tipc_named_reinit();
187 tipc_port_reinit(); 183 tipc_port_reinit();
188 184
@@ -201,10 +197,9 @@ void tipc_net_stop(void)
201{ 197{
202 struct tipc_node *node, *t_node; 198 struct tipc_node *node, *t_node;
203 199
204 if (tipc_mode != TIPC_NET_MODE) 200 if (!tipc_own_addr)
205 return; 201 return;
206 write_lock_bh(&tipc_net_lock); 202 write_lock_bh(&tipc_net_lock);
207 tipc_mode = TIPC_NODE_MODE;
208 tipc_bearer_stop(); 203 tipc_bearer_stop();
209 tipc_bclink_stop(); 204 tipc_bclink_stop();
210 list_for_each_entry_safe(node, t_node, &tipc_node_list, list) 205 list_for_each_entry_safe(node, t_node, &tipc_node_list, list)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 6b226faad89f..a34cabc2c43a 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -39,6 +39,8 @@
39#include "node.h" 39#include "node.h"
40#include "name_distr.h" 40#include "name_distr.h"
41 41
42#define NODE_HTABLE_SIZE 512
43
42static void node_lost_contact(struct tipc_node *n_ptr); 44static void node_lost_contact(struct tipc_node *n_ptr);
43static void node_established_contact(struct tipc_node *n_ptr); 45static void node_established_contact(struct tipc_node *n_ptr);
44 46
@@ -49,9 +51,19 @@ LIST_HEAD(tipc_node_list);
49static u32 tipc_num_nodes; 51static u32 tipc_num_nodes;
50 52
51static atomic_t tipc_num_links = ATOMIC_INIT(0); 53static atomic_t tipc_num_links = ATOMIC_INIT(0);
52u32 tipc_own_tag;
53 54
54/** 55/*
56 * A trivial power-of-two bitmask technique is used for speed, since this
57 * operation is done for every incoming TIPC packet. The number of hash table
58 * entries has been chosen so that no hash chain exceeds 8 nodes and will
59 * usually be much smaller (typically only a single node).
60 */
61static inline unsigned int tipc_hashfn(u32 addr)
62{
63 return addr & (NODE_HTABLE_SIZE - 1);
64}
65
66/*
55 * tipc_node_find - locate specified node object, if it exists 67 * tipc_node_find - locate specified node object, if it exists
56 */ 68 */
57 69
@@ -113,6 +125,7 @@ struct tipc_node *tipc_node_create(u32 addr)
113 } 125 }
114 list_add_tail(&n_ptr->list, &temp_node->list); 126 list_add_tail(&n_ptr->list, &temp_node->list);
115 n_ptr->block_setup = WAIT_PEER_DOWN; 127 n_ptr->block_setup = WAIT_PEER_DOWN;
128 n_ptr->signature = INVALID_NODE_SIG;
116 129
117 tipc_num_nodes++; 130 tipc_num_nodes++;
118 131
@@ -253,63 +266,14 @@ void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
253 n_ptr->link_cnt--; 266 n_ptr->link_cnt--;
254} 267}
255 268
256/*
257 * Routing table management - five cases to handle:
258 *
259 * 1: A link towards a zone/cluster external node comes up.
260 * => Send a multicast message updating routing tables of all
261 * system nodes within own cluster that the new destination
262 * can be reached via this node.
263 * (node.establishedContact()=>cluster.multicastNewRoute())
264 *
265 * 2: A link towards a slave node comes up.
266 * => Send a multicast message updating routing tables of all
267 * system nodes within own cluster that the new destination
268 * can be reached via this node.
269 * (node.establishedContact()=>cluster.multicastNewRoute())
270 * => Send a message to the slave node about existence
271 * of all system nodes within cluster:
272 * (node.establishedContact()=>cluster.sendLocalRoutes())
273 *
274 * 3: A new cluster local system node becomes available.
275 * => Send message(s) to this particular node containing
276 * information about all cluster external and slave
277 * nodes which can be reached via this node.
278 * (node.establishedContact()==>network.sendExternalRoutes())
279 * (node.establishedContact()==>network.sendSlaveRoutes())
280 * => Send messages to all directly connected slave nodes
281 * containing information about the existence of the new node
282 * (node.establishedContact()=>cluster.multicastNewRoute())
283 *
284 * 4: The link towards a zone/cluster external node or slave
285 * node goes down.
286 * => Send a multcast message updating routing tables of all
287 * nodes within cluster that the new destination can not any
288 * longer be reached via this node.
289 * (node.lostAllLinks()=>cluster.bcastLostRoute())
290 *
291 * 5: A cluster local system node becomes unavailable.
292 * => Remove all references to this node from the local
293 * routing tables. Note: This is a completely node
294 * local operation.
295 * (node.lostAllLinks()=>network.removeAsRouter())
296 * => Send messages to all directly connected slave nodes
297 * containing information about loss of the node
298 * (node.establishedContact()=>cluster.multicastLostRoute())
299 *
300 */
301
302static void node_established_contact(struct tipc_node *n_ptr) 269static void node_established_contact(struct tipc_node *n_ptr)
303{ 270{
304 tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr); 271 tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
305 272
306 /* Syncronize broadcast acks */ 273 if (n_ptr->bclink.supportable) {
307 n_ptr->bclink.acked = tipc_bclink_get_last_sent(); 274 n_ptr->bclink.acked = tipc_bclink_get_last_sent();
308
309 if (n_ptr->bclink.supported) {
310 tipc_bclink_add_node(n_ptr->addr); 275 tipc_bclink_add_node(n_ptr->addr);
311 if (n_ptr->addr < tipc_own_addr) 276 n_ptr->bclink.supported = 1;
312 tipc_own_tag++;
313 } 277 }
314} 278}
315 279
@@ -338,22 +302,20 @@ static void node_lost_contact(struct tipc_node *n_ptr)
338 /* Flush broadcast link info associated with lost node */ 302 /* Flush broadcast link info associated with lost node */
339 303
340 if (n_ptr->bclink.supported) { 304 if (n_ptr->bclink.supported) {
341 n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
342 while (n_ptr->bclink.deferred_head) { 305 while (n_ptr->bclink.deferred_head) {
343 struct sk_buff *buf = n_ptr->bclink.deferred_head; 306 struct sk_buff *buf = n_ptr->bclink.deferred_head;
344 n_ptr->bclink.deferred_head = buf->next; 307 n_ptr->bclink.deferred_head = buf->next;
345 buf_discard(buf); 308 kfree_skb(buf);
346 } 309 }
310 n_ptr->bclink.deferred_size = 0;
347 311
348 if (n_ptr->bclink.defragm) { 312 if (n_ptr->bclink.defragm) {
349 buf_discard(n_ptr->bclink.defragm); 313 kfree_skb(n_ptr->bclink.defragm);
350 n_ptr->bclink.defragm = NULL; 314 n_ptr->bclink.defragm = NULL;
351 } 315 }
352 316
353 tipc_bclink_remove_node(n_ptr->addr); 317 tipc_bclink_remove_node(n_ptr->addr);
354 tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ); 318 tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);
355 if (n_ptr->addr < tipc_own_addr)
356 tipc_own_tag--;
357 319
358 n_ptr->bclink.supported = 0; 320 n_ptr->bclink.supported = 0;
359 } 321 }
@@ -444,12 +406,12 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
444 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 406 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
445 " (network address)"); 407 " (network address)");
446 408
447 if (tipc_mode != TIPC_NET_MODE) 409 if (!tipc_own_addr)
448 return tipc_cfg_reply_none(); 410 return tipc_cfg_reply_none();
449 411
450 read_lock_bh(&tipc_net_lock); 412 read_lock_bh(&tipc_net_lock);
451 413
452 /* Get space for all unicast links + multicast link */ 414 /* Get space for all unicast links + broadcast link */
453 415
454 payload_size = TLV_SPACE(sizeof(link_info)) * 416 payload_size = TLV_SPACE(sizeof(link_info)) *
455 (atomic_read(&tipc_num_links) + 1); 417 (atomic_read(&tipc_num_links) + 1);
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 0b1c5f8b6996..72561c971d67 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -42,6 +42,11 @@
42#include "net.h" 42#include "net.h"
43#include "bearer.h" 43#include "bearer.h"
44 44
45/*
46 * Out-of-range value for node signature
47 */
48#define INVALID_NODE_SIG 0x10000
49
45/* Flags used to block (re)establishment of contact with a neighboring node */ 50/* Flags used to block (re)establishment of contact with a neighboring node */
46 51
47#define WAIT_PEER_DOWN 0x0001 /* wait to see that peer's links are down */ 52#define WAIT_PEER_DOWN 0x0001 /* wait to see that peer's links are down */
@@ -61,13 +66,15 @@
61 * @block_setup: bit mask of conditions preventing link establishment to node 66 * @block_setup: bit mask of conditions preventing link establishment to node
62 * @link_cnt: number of links to node 67 * @link_cnt: number of links to node
63 * @permit_changeover: non-zero if node has redundant links to this system 68 * @permit_changeover: non-zero if node has redundant links to this system
69 * @signature: node instance identifier
64 * @bclink: broadcast-related info 70 * @bclink: broadcast-related info
71 * @supportable: non-zero if node supports TIPC b'cast link capability
65 * @supported: non-zero if node supports TIPC b'cast capability 72 * @supported: non-zero if node supports TIPC b'cast capability
66 * @acked: sequence # of last outbound b'cast message acknowledged by node 73 * @acked: sequence # of last outbound b'cast message acknowledged by node
67 * @last_in: sequence # of last in-sequence b'cast message received from node 74 * @last_in: sequence # of last in-sequence b'cast message received from node
68 * @gap_after: sequence # of last message not requiring a NAK request 75 * @last_sent: sequence # of last b'cast message sent by node
69 * @gap_to: sequence # of last message requiring a NAK request 76 * @oos_state: state tracker for handling OOS b'cast messages
70 * @nack_sync: counter that determines when NAK requests should be sent 77 * @deferred_size: number of OOS b'cast messages in deferred queue
71 * @deferred_head: oldest OOS b'cast message received from node 78 * @deferred_head: oldest OOS b'cast message received from node
72 * @deferred_tail: newest OOS b'cast message received from node 79 * @deferred_tail: newest OOS b'cast message received from node
73 * @defragm: list of partially reassembled b'cast message fragments from node 80 * @defragm: list of partially reassembled b'cast message fragments from node
@@ -85,35 +92,23 @@ struct tipc_node {
85 int working_links; 92 int working_links;
86 int block_setup; 93 int block_setup;
87 int permit_changeover; 94 int permit_changeover;
95 u32 signature;
88 struct { 96 struct {
89 int supported; 97 u8 supportable;
98 u8 supported;
90 u32 acked; 99 u32 acked;
91 u32 last_in; 100 u32 last_in;
92 u32 gap_after; 101 u32 last_sent;
93 u32 gap_to; 102 u32 oos_state;
94 u32 nack_sync; 103 u32 deferred_size;
95 struct sk_buff *deferred_head; 104 struct sk_buff *deferred_head;
96 struct sk_buff *deferred_tail; 105 struct sk_buff *deferred_tail;
97 struct sk_buff *defragm; 106 struct sk_buff *defragm;
98 } bclink; 107 } bclink;
99}; 108};
100 109
101#define NODE_HTABLE_SIZE 512
102extern struct list_head tipc_node_list; 110extern struct list_head tipc_node_list;
103 111
104/*
105 * A trivial power-of-two bitmask technique is used for speed, since this
106 * operation is done for every incoming TIPC packet. The number of hash table
107 * entries has been chosen so that no hash chain exceeds 8 nodes and will
108 * usually be much smaller (typically only a single node).
109 */
110static inline unsigned int tipc_hashfn(u32 addr)
111{
112 return addr & (NODE_HTABLE_SIZE - 1);
113}
114
115extern u32 tipc_own_tag;
116
117struct tipc_node *tipc_node_find(u32 addr); 112struct tipc_node *tipc_node_find(u32 addr);
118struct tipc_node *tipc_node_create(u32 addr); 113struct tipc_node *tipc_node_create(u32 addr);
119void tipc_node_delete(struct tipc_node *n_ptr); 114void tipc_node_delete(struct tipc_node *n_ptr);
diff --git a/net/tipc/port.c b/net/tipc/port.c
index d91efc69e6f9..94d2904cce66 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -116,13 +116,13 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
116 ibuf = skb_copy(buf, GFP_ATOMIC); 116 ibuf = skb_copy(buf, GFP_ATOMIC);
117 if (ibuf == NULL) { 117 if (ibuf == NULL) {
118 tipc_port_list_free(&dports); 118 tipc_port_list_free(&dports);
119 buf_discard(buf); 119 kfree_skb(buf);
120 return -ENOMEM; 120 return -ENOMEM;
121 } 121 }
122 } 122 }
123 res = tipc_bclink_send_msg(buf); 123 res = tipc_bclink_send_msg(buf);
124 if ((res < 0) && (dports.count != 0)) 124 if ((res < 0) && (dports.count != 0))
125 buf_discard(ibuf); 125 kfree_skb(ibuf);
126 } else { 126 } else {
127 ibuf = buf; 127 ibuf = buf;
128 } 128 }
@@ -187,7 +187,7 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
187 } 187 }
188 } 188 }
189exit: 189exit:
190 buf_discard(buf); 190 kfree_skb(buf);
191 tipc_port_list_free(dp); 191 tipc_port_list_free(dp);
192} 192}
193 193
@@ -400,15 +400,16 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
400 400
401 /* send self-abort message when rejecting on a connected port */ 401 /* send self-abort message when rejecting on a connected port */
402 if (msg_connected(msg)) { 402 if (msg_connected(msg)) {
403 struct sk_buff *abuf = NULL;
404 struct tipc_port *p_ptr = tipc_port_lock(msg_destport(msg)); 403 struct tipc_port *p_ptr = tipc_port_lock(msg_destport(msg));
405 404
406 if (p_ptr) { 405 if (p_ptr) {
406 struct sk_buff *abuf = NULL;
407
407 if (p_ptr->connected) 408 if (p_ptr->connected)
408 abuf = port_build_self_abort_msg(p_ptr, err); 409 abuf = port_build_self_abort_msg(p_ptr, err);
409 tipc_port_unlock(p_ptr); 410 tipc_port_unlock(p_ptr);
411 tipc_net_route_msg(abuf);
410 } 412 }
411 tipc_net_route_msg(abuf);
412 } 413 }
413 414
414 /* send returned message & dispose of rejected message */ 415 /* send returned message & dispose of rejected message */
@@ -419,7 +420,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
419 else 420 else
420 tipc_link_send(rbuf, src_node, msg_link_selector(rmsg)); 421 tipc_link_send(rbuf, src_node, msg_link_selector(rmsg));
421exit: 422exit:
422 buf_discard(buf); 423 kfree_skb(buf);
423 return data_sz; 424 return data_sz;
424} 425}
425 426
@@ -567,7 +568,7 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
567 tipc_port_unlock(p_ptr); 568 tipc_port_unlock(p_ptr);
568exit: 569exit:
569 tipc_net_route_msg(r_buf); 570 tipc_net_route_msg(r_buf);
570 buf_discard(buf); 571 kfree_skb(buf);
571} 572}
572 573
573static void port_print(struct tipc_port *p_ptr, struct print_buf *buf, int full_id) 574static void port_print(struct tipc_port *p_ptr, struct print_buf *buf, int full_id)
@@ -758,7 +759,7 @@ static void port_dispatcher_sigh(void *dummy)
758 } 759 }
759 } 760 }
760 if (buf) 761 if (buf)
761 buf_discard(buf); 762 kfree_skb(buf);
762 buf = next; 763 buf = next;
763 continue; 764 continue;
764err: 765err:
@@ -812,7 +813,7 @@ err:
812 } 813 }
813 } 814 }
814 if (buf) 815 if (buf)
815 buf_discard(buf); 816 kfree_skb(buf);
816 buf = next; 817 buf = next;
817 continue; 818 continue;
818reject: 819reject:
@@ -1053,8 +1054,6 @@ int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
1053 msg = &p_ptr->phdr; 1054 msg = &p_ptr->phdr;
1054 msg_set_destnode(msg, peer->node); 1055 msg_set_destnode(msg, peer->node);
1055 msg_set_destport(msg, peer->ref); 1056 msg_set_destport(msg, peer->ref);
1056 msg_set_orignode(msg, tipc_own_addr);
1057 msg_set_origport(msg, p_ptr->ref);
1058 msg_set_type(msg, TIPC_CONN_MSG); 1057 msg_set_type(msg, TIPC_CONN_MSG);
1059 msg_set_lookup_scope(msg, 0); 1058 msg_set_lookup_scope(msg, 0);
1060 msg_set_hdr_sz(msg, SHORT_H_SIZE); 1059 msg_set_hdr_sz(msg, SHORT_H_SIZE);
@@ -1132,6 +1131,49 @@ int tipc_shutdown(u32 ref)
1132 return tipc_disconnect(ref); 1131 return tipc_disconnect(ref);
1133} 1132}
1134 1133
1134/**
1135 * tipc_port_recv_msg - receive message from lower layer and deliver to port user
1136 */
1137
1138int tipc_port_recv_msg(struct sk_buff *buf)
1139{
1140 struct tipc_port *p_ptr;
1141 struct tipc_msg *msg = buf_msg(buf);
1142 u32 destport = msg_destport(msg);
1143 u32 dsz = msg_data_sz(msg);
1144 u32 err;
1145
1146 /* forward unresolved named message */
1147 if (unlikely(!destport)) {
1148 tipc_net_route_msg(buf);
1149 return dsz;
1150 }
1151
1152 /* validate destination & pass to port, otherwise reject message */
1153 p_ptr = tipc_port_lock(destport);
1154 if (likely(p_ptr)) {
1155 if (likely(p_ptr->connected)) {
1156 if ((unlikely(msg_origport(msg) !=
1157 tipc_peer_port(p_ptr))) ||
1158 (unlikely(msg_orignode(msg) !=
1159 tipc_peer_node(p_ptr))) ||
1160 (unlikely(!msg_connected(msg)))) {
1161 err = TIPC_ERR_NO_PORT;
1162 tipc_port_unlock(p_ptr);
1163 goto reject;
1164 }
1165 }
1166 err = p_ptr->dispatcher(p_ptr, buf);
1167 tipc_port_unlock(p_ptr);
1168 if (likely(!err))
1169 return dsz;
1170 } else {
1171 err = TIPC_ERR_NO_PORT;
1172 }
1173reject:
1174 return tipc_reject_msg(buf, err);
1175}
1176
1135/* 1177/*
1136 * tipc_port_recv_sections(): Concatenate and deliver sectioned 1178 * tipc_port_recv_sections(): Concatenate and deliver sectioned
1137 * message for this node. 1179 * message for this node.
@@ -1210,8 +1252,6 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
1210 1252
1211 msg = &p_ptr->phdr; 1253 msg = &p_ptr->phdr;
1212 msg_set_type(msg, TIPC_NAMED_MSG); 1254 msg_set_type(msg, TIPC_NAMED_MSG);
1213 msg_set_orignode(msg, tipc_own_addr);
1214 msg_set_origport(msg, ref);
1215 msg_set_hdr_sz(msg, NAMED_H_SIZE); 1255 msg_set_hdr_sz(msg, NAMED_H_SIZE);
1216 msg_set_nametype(msg, name->type); 1256 msg_set_nametype(msg, name->type);
1217 msg_set_nameinst(msg, name->instance); 1257 msg_set_nameinst(msg, name->instance);
@@ -1220,7 +1260,7 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
1220 msg_set_destnode(msg, destnode); 1260 msg_set_destnode(msg, destnode);
1221 msg_set_destport(msg, destport); 1261 msg_set_destport(msg, destport);
1222 1262
1223 if (likely(destport)) { 1263 if (likely(destport || destnode)) {
1224 if (likely(destnode == tipc_own_addr)) 1264 if (likely(destnode == tipc_own_addr))
1225 res = tipc_port_recv_sections(p_ptr, num_sect, 1265 res = tipc_port_recv_sections(p_ptr, num_sect,
1226 msg_sect, total_len); 1266 msg_sect, total_len);
@@ -1261,8 +1301,6 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest,
1261 msg = &p_ptr->phdr; 1301 msg = &p_ptr->phdr;
1262 msg_set_type(msg, TIPC_DIRECT_MSG); 1302 msg_set_type(msg, TIPC_DIRECT_MSG);
1263 msg_set_lookup_scope(msg, 0); 1303 msg_set_lookup_scope(msg, 0);
1264 msg_set_orignode(msg, tipc_own_addr);
1265 msg_set_origport(msg, ref);
1266 msg_set_destnode(msg, dest->node); 1304 msg_set_destnode(msg, dest->node);
1267 msg_set_destport(msg, dest->ref); 1305 msg_set_destport(msg, dest->ref);
1268 msg_set_hdr_sz(msg, BASIC_H_SIZE); 1306 msg_set_hdr_sz(msg, BASIC_H_SIZE);
@@ -1301,8 +1339,6 @@ int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
1301 1339
1302 msg = &p_ptr->phdr; 1340 msg = &p_ptr->phdr;
1303 msg_set_type(msg, TIPC_DIRECT_MSG); 1341 msg_set_type(msg, TIPC_DIRECT_MSG);
1304 msg_set_orignode(msg, tipc_own_addr);
1305 msg_set_origport(msg, ref);
1306 msg_set_destnode(msg, dest->node); 1342 msg_set_destnode(msg, dest->node);
1307 msg_set_destport(msg, dest->ref); 1343 msg_set_destport(msg, dest->ref);
1308 msg_set_hdr_sz(msg, BASIC_H_SIZE); 1344 msg_set_hdr_sz(msg, BASIC_H_SIZE);
diff --git a/net/tipc/port.h b/net/tipc/port.h
index f751807e2a91..9b88531e5a61 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -205,6 +205,7 @@ int tipc_disconnect_port(struct tipc_port *tp_ptr);
205/* 205/*
206 * TIPC messaging routines 206 * TIPC messaging routines
207 */ 207 */
208int tipc_port_recv_msg(struct sk_buff *buf);
208int tipc_send(u32 portref, unsigned int num_sect, struct iovec const *msg_sect, 209int tipc_send(u32 portref, unsigned int num_sect, struct iovec const *msg_sect,
209 unsigned int total_len); 210 unsigned int total_len);
210 211
@@ -271,45 +272,4 @@ static inline int tipc_port_congested(struct tipc_port *p_ptr)
271 return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2); 272 return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2);
272} 273}
273 274
274/**
275 * tipc_port_recv_msg - receive message from lower layer and deliver to port user
276 */
277
278static inline int tipc_port_recv_msg(struct sk_buff *buf)
279{
280 struct tipc_port *p_ptr;
281 struct tipc_msg *msg = buf_msg(buf);
282 u32 destport = msg_destport(msg);
283 u32 dsz = msg_data_sz(msg);
284 u32 err;
285
286 /* forward unresolved named message */
287 if (unlikely(!destport)) {
288 tipc_net_route_msg(buf);
289 return dsz;
290 }
291
292 /* validate destination & pass to port, otherwise reject message */
293 p_ptr = tipc_port_lock(destport);
294 if (likely(p_ptr)) {
295 if (likely(p_ptr->connected)) {
296 if ((unlikely(msg_origport(msg) != tipc_peer_port(p_ptr))) ||
297 (unlikely(msg_orignode(msg) != tipc_peer_node(p_ptr))) ||
298 (unlikely(!msg_connected(msg)))) {
299 err = TIPC_ERR_NO_PORT;
300 tipc_port_unlock(p_ptr);
301 goto reject;
302 }
303 }
304 err = p_ptr->dispatcher(p_ptr, buf);
305 tipc_port_unlock(p_ptr);
306 if (likely(!err))
307 return dsz;
308 } else {
309 err = TIPC_ERR_NO_PORT;
310 }
311reject:
312 return tipc_reject_msg(buf, err);
313}
314
315#endif 275#endif
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index e2f7c5d370ba..29e957f64458 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -126,7 +126,7 @@ static atomic_t tipc_queue_size = ATOMIC_INIT(0);
126 126
127static void advance_rx_queue(struct sock *sk) 127static void advance_rx_queue(struct sock *sk)
128{ 128{
129 buf_discard(__skb_dequeue(&sk->sk_receive_queue)); 129 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
130 atomic_dec(&tipc_queue_size); 130 atomic_dec(&tipc_queue_size);
131} 131}
132 132
@@ -142,7 +142,7 @@ static void discard_rx_queue(struct sock *sk)
142 142
143 while ((buf = __skb_dequeue(&sk->sk_receive_queue))) { 143 while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
144 atomic_dec(&tipc_queue_size); 144 atomic_dec(&tipc_queue_size);
145 buf_discard(buf); 145 kfree_skb(buf);
146 } 146 }
147} 147}
148 148
@@ -288,7 +288,7 @@ static int release(struct socket *sock)
288 break; 288 break;
289 atomic_dec(&tipc_queue_size); 289 atomic_dec(&tipc_queue_size);
290 if (TIPC_SKB_CB(buf)->handle != 0) 290 if (TIPC_SKB_CB(buf)->handle != 0)
291 buf_discard(buf); 291 kfree_skb(buf);
292 else { 292 else {
293 if ((sock->state == SS_CONNECTING) || 293 if ((sock->state == SS_CONNECTING) ||
294 (sock->state == SS_CONNECTED)) { 294 (sock->state == SS_CONNECTED)) {
@@ -355,6 +355,9 @@ static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
355 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) 355 else if (addr->addrtype != TIPC_ADDR_NAMESEQ)
356 return -EAFNOSUPPORT; 356 return -EAFNOSUPPORT;
357 357
358 if (addr->addr.nameseq.type < TIPC_RESERVED_TYPES)
359 return -EACCES;
360
358 return (addr->scope > 0) ? 361 return (addr->scope > 0) ?
359 tipc_publish(portref, addr->scope, &addr->addr.nameseq) : 362 tipc_publish(portref, addr->scope, &addr->addr.nameseq) :
360 tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq); 363 tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq);
@@ -1612,7 +1615,7 @@ restart:
1612 if (buf) { 1615 if (buf) {
1613 atomic_dec(&tipc_queue_size); 1616 atomic_dec(&tipc_queue_size);
1614 if (TIPC_SKB_CB(buf)->handle != 0) { 1617 if (TIPC_SKB_CB(buf)->handle != 0) {
1615 buf_discard(buf); 1618 kfree_skb(buf);
1616 goto restart; 1619 goto restart;
1617 } 1620 }
1618 tipc_disconnect(tport->ref); 1621 tipc_disconnect(tport->ref);
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 8c49566da8f3..b2964e9895d3 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -552,7 +552,7 @@ int tipc_subscr_start(void)
552 if (res) 552 if (res)
553 goto failed; 553 goto failed;
554 554
555 res = tipc_nametbl_publish_rsv(topsrv.setup_port, TIPC_NODE_SCOPE, &seq); 555 res = tipc_publish(topsrv.setup_port, TIPC_NODE_SCOPE, &seq);
556 if (res) { 556 if (res) {
557 tipc_deleteport(topsrv.setup_port); 557 tipc_deleteport(topsrv.setup_port);
558 topsrv.setup_port = 0; 558 topsrv.setup_port = 0;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 85d3bb7490aa..8ee85aa79fa7 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -530,6 +530,16 @@ static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
530static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *, 530static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
531 struct msghdr *, size_t, int); 531 struct msghdr *, size_t, int);
532 532
533static void unix_set_peek_off(struct sock *sk, int val)
534{
535 struct unix_sock *u = unix_sk(sk);
536
537 mutex_lock(&u->readlock);
538 sk->sk_peek_off = val;
539 mutex_unlock(&u->readlock);
540}
541
542
533static const struct proto_ops unix_stream_ops = { 543static const struct proto_ops unix_stream_ops = {
534 .family = PF_UNIX, 544 .family = PF_UNIX,
535 .owner = THIS_MODULE, 545 .owner = THIS_MODULE,
@@ -549,6 +559,7 @@ static const struct proto_ops unix_stream_ops = {
549 .recvmsg = unix_stream_recvmsg, 559 .recvmsg = unix_stream_recvmsg,
550 .mmap = sock_no_mmap, 560 .mmap = sock_no_mmap,
551 .sendpage = sock_no_sendpage, 561 .sendpage = sock_no_sendpage,
562 .set_peek_off = unix_set_peek_off,
552}; 563};
553 564
554static const struct proto_ops unix_dgram_ops = { 565static const struct proto_ops unix_dgram_ops = {
@@ -570,6 +581,7 @@ static const struct proto_ops unix_dgram_ops = {
570 .recvmsg = unix_dgram_recvmsg, 581 .recvmsg = unix_dgram_recvmsg,
571 .mmap = sock_no_mmap, 582 .mmap = sock_no_mmap,
572 .sendpage = sock_no_sendpage, 583 .sendpage = sock_no_sendpage,
584 .set_peek_off = unix_set_peek_off,
573}; 585};
574 586
575static const struct proto_ops unix_seqpacket_ops = { 587static const struct proto_ops unix_seqpacket_ops = {
@@ -591,6 +603,7 @@ static const struct proto_ops unix_seqpacket_ops = {
591 .recvmsg = unix_seqpacket_recvmsg, 603 .recvmsg = unix_seqpacket_recvmsg,
592 .mmap = sock_no_mmap, 604 .mmap = sock_no_mmap,
593 .sendpage = sock_no_sendpage, 605 .sendpage = sock_no_sendpage,
606 .set_peek_off = unix_set_peek_off,
594}; 607};
595 608
596static struct proto unix_proto = { 609static struct proto unix_proto = {
@@ -1756,6 +1769,7 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1756 int noblock = flags & MSG_DONTWAIT; 1769 int noblock = flags & MSG_DONTWAIT;
1757 struct sk_buff *skb; 1770 struct sk_buff *skb;
1758 int err; 1771 int err;
1772 int peeked, skip;
1759 1773
1760 err = -EOPNOTSUPP; 1774 err = -EOPNOTSUPP;
1761 if (flags&MSG_OOB) 1775 if (flags&MSG_OOB)
@@ -1769,7 +1783,9 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1769 goto out; 1783 goto out;
1770 } 1784 }
1771 1785
1772 skb = skb_recv_datagram(sk, flags, noblock, &err); 1786 skip = sk_peek_offset(sk, flags);
1787
1788 skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
1773 if (!skb) { 1789 if (!skb) {
1774 unix_state_lock(sk); 1790 unix_state_lock(sk);
1775 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */ 1791 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
@@ -1786,12 +1802,12 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1786 if (msg->msg_name) 1802 if (msg->msg_name)
1787 unix_copy_addr(msg, skb->sk); 1803 unix_copy_addr(msg, skb->sk);
1788 1804
1789 if (size > skb->len) 1805 if (size > skb->len - skip)
1790 size = skb->len; 1806 size = skb->len - skip;
1791 else if (size < skb->len) 1807 else if (size < skb->len - skip)
1792 msg->msg_flags |= MSG_TRUNC; 1808 msg->msg_flags |= MSG_TRUNC;
1793 1809
1794 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size); 1810 err = skb_copy_datagram_iovec(skb, skip, msg->msg_iov, size);
1795 if (err) 1811 if (err)
1796 goto out_free; 1812 goto out_free;
1797 1813
@@ -1808,6 +1824,8 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1808 if (!(flags & MSG_PEEK)) { 1824 if (!(flags & MSG_PEEK)) {
1809 if (UNIXCB(skb).fp) 1825 if (UNIXCB(skb).fp)
1810 unix_detach_fds(siocb->scm, skb); 1826 unix_detach_fds(siocb->scm, skb);
1827
1828 sk_peek_offset_bwd(sk, skb->len);
1811 } else { 1829 } else {
1812 /* It is questionable: on PEEK we could: 1830 /* It is questionable: on PEEK we could:
1813 - do not return fds - good, but too simple 8) 1831 - do not return fds - good, but too simple 8)
@@ -1821,10 +1839,13 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1821 clearly however! 1839 clearly however!
1822 1840
1823 */ 1841 */
1842
1843 sk_peek_offset_fwd(sk, size);
1844
1824 if (UNIXCB(skb).fp) 1845 if (UNIXCB(skb).fp)
1825 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp); 1846 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1826 } 1847 }
1827 err = size; 1848 err = (flags & MSG_TRUNC) ? skb->len - skip : size;
1828 1849
1829 scm_recv(sock, msg, siocb->scm, flags); 1850 scm_recv(sock, msg, siocb->scm, flags);
1830 1851
@@ -1884,6 +1905,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1884 int target; 1905 int target;
1885 int err = 0; 1906 int err = 0;
1886 long timeo; 1907 long timeo;
1908 int skip;
1887 1909
1888 err = -EINVAL; 1910 err = -EINVAL;
1889 if (sk->sk_state != TCP_ESTABLISHED) 1911 if (sk->sk_state != TCP_ESTABLISHED)
@@ -1913,12 +1935,15 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1913 goto out; 1935 goto out;
1914 } 1936 }
1915 1937
1938 skip = sk_peek_offset(sk, flags);
1939
1916 do { 1940 do {
1917 int chunk; 1941 int chunk;
1918 struct sk_buff *skb; 1942 struct sk_buff *skb;
1919 1943
1920 unix_state_lock(sk); 1944 unix_state_lock(sk);
1921 skb = skb_peek(&sk->sk_receive_queue); 1945 skb = skb_peek(&sk->sk_receive_queue);
1946again:
1922 if (skb == NULL) { 1947 if (skb == NULL) {
1923 unix_sk(sk)->recursion_level = 0; 1948 unix_sk(sk)->recursion_level = 0;
1924 if (copied >= target) 1949 if (copied >= target)
@@ -1953,6 +1978,13 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1953 unix_state_unlock(sk); 1978 unix_state_unlock(sk);
1954 break; 1979 break;
1955 } 1980 }
1981
1982 if (skip >= skb->len) {
1983 skip -= skb->len;
1984 skb = skb_peek_next(skb, &sk->sk_receive_queue);
1985 goto again;
1986 }
1987
1956 unix_state_unlock(sk); 1988 unix_state_unlock(sk);
1957 1989
1958 if (check_creds) { 1990 if (check_creds) {
@@ -1972,8 +2004,8 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1972 sunaddr = NULL; 2004 sunaddr = NULL;
1973 } 2005 }
1974 2006
1975 chunk = min_t(unsigned int, skb->len, size); 2007 chunk = min_t(unsigned int, skb->len - skip, size);
1976 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) { 2008 if (memcpy_toiovec(msg->msg_iov, skb->data + skip, chunk)) {
1977 if (copied == 0) 2009 if (copied == 0)
1978 copied = -EFAULT; 2010 copied = -EFAULT;
1979 break; 2011 break;
@@ -1985,6 +2017,8 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1985 if (!(flags & MSG_PEEK)) { 2017 if (!(flags & MSG_PEEK)) {
1986 skb_pull(skb, chunk); 2018 skb_pull(skb, chunk);
1987 2019
2020 sk_peek_offset_bwd(sk, chunk);
2021
1988 if (UNIXCB(skb).fp) 2022 if (UNIXCB(skb).fp)
1989 unix_detach_fds(siocb->scm, skb); 2023 unix_detach_fds(siocb->scm, skb);
1990 2024
@@ -2002,6 +2036,8 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
2002 if (UNIXCB(skb).fp) 2036 if (UNIXCB(skb).fp)
2003 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp); 2037 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
2004 2038
2039 sk_peek_offset_fwd(sk, chunk);
2040
2005 break; 2041 break;
2006 } 2042 }
2007 } while (size); 2043 } while (size);
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 6b7697fd911b..4195555aea65 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -301,10 +301,12 @@ static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
301 if (nlmsg_len(h) < hdrlen) 301 if (nlmsg_len(h) < hdrlen)
302 return -EINVAL; 302 return -EINVAL;
303 303
304 if (h->nlmsg_flags & NLM_F_DUMP) 304 if (h->nlmsg_flags & NLM_F_DUMP) {
305 return netlink_dump_start(sock_diag_nlsk, skb, h, 305 struct netlink_dump_control c = {
306 unix_diag_dump, NULL, 0); 306 .dump = unix_diag_dump,
307 else 307 };
308 return netlink_dump_start(sock_diag_nlsk, skb, h, &c);
309 } else
308 return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h)); 310 return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h));
309} 311}
310 312
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 43ad9c81efcf..3ac2dd00d714 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -144,11 +144,6 @@ static inline struct cfg80211_internal_bss *bss_from_pub(struct cfg80211_bss *pu
144 return container_of(pub, struct cfg80211_internal_bss, pub); 144 return container_of(pub, struct cfg80211_internal_bss, pub);
145} 145}
146 146
147static inline void cfg80211_ref_bss(struct cfg80211_internal_bss *bss)
148{
149 kref_get(&bss->ref);
150}
151
152static inline void cfg80211_hold_bss(struct cfg80211_internal_bss *bss) 147static inline void cfg80211_hold_bss(struct cfg80211_internal_bss *bss)
153{ 148{
154 atomic_inc(&bss->hold); 149 atomic_inc(&bss->hold);
@@ -325,15 +320,13 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
325 const u8 *bssid, 320 const u8 *bssid,
326 const u8 *ssid, int ssid_len, 321 const u8 *ssid, int ssid_len,
327 const u8 *ie, int ie_len, 322 const u8 *ie, int ie_len,
328 const u8 *key, int key_len, int key_idx, 323 const u8 *key, int key_len, int key_idx);
329 bool local_state_change);
330int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, 324int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
331 struct net_device *dev, struct ieee80211_channel *chan, 325 struct net_device *dev, struct ieee80211_channel *chan,
332 enum nl80211_auth_type auth_type, const u8 *bssid, 326 enum nl80211_auth_type auth_type, const u8 *bssid,
333 const u8 *ssid, int ssid_len, 327 const u8 *ssid, int ssid_len,
334 const u8 *ie, int ie_len, 328 const u8 *ie, int ie_len,
335 const u8 *key, int key_len, int key_idx, 329 const u8 *key, int key_len, int key_idx);
336 bool local_state_change);
337int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, 330int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
338 struct net_device *dev, 331 struct net_device *dev,
339 struct ieee80211_channel *chan, 332 struct ieee80211_channel *chan,
@@ -421,7 +414,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
421 size_t ie_len, u16 reason, bool from_ap); 414 size_t ie_len, u16 reason, bool from_ap);
422void cfg80211_sme_scan_done(struct net_device *dev); 415void cfg80211_sme_scan_done(struct net_device *dev);
423void cfg80211_sme_rx_auth(struct net_device *dev, const u8 *buf, size_t len); 416void cfg80211_sme_rx_auth(struct net_device *dev, const u8 *buf, size_t len);
424void cfg80211_sme_disassoc(struct net_device *dev, int idx); 417void cfg80211_sme_disassoc(struct net_device *dev,
418 struct cfg80211_internal_bss *bss);
425void __cfg80211_scan_done(struct work_struct *wk); 419void __cfg80211_scan_done(struct work_struct *wk);
426void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak); 420void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak);
427void __cfg80211_sched_scan_results(struct work_struct *wk); 421void __cfg80211_sched_scan_results(struct work_struct *wk);
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 8c550df13037..ba21ab22187b 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -23,6 +23,8 @@
23#define MESH_PERR_MIN_INT 100 23#define MESH_PERR_MIN_INT 100
24#define MESH_DIAM_TRAVERSAL_TIME 50 24#define MESH_DIAM_TRAVERSAL_TIME 50
25 25
26#define MESH_RSSI_THRESHOLD 0
27
26/* 28/*
27 * A path will be refreshed if it is used PATH_REFRESH_TIME milliseconds 29 * A path will be refreshed if it is used PATH_REFRESH_TIME milliseconds
28 * before timing out. This way it will remain ACTIVE and no data frames 30 * before timing out. This way it will remain ACTIVE and no data frames
@@ -55,6 +57,8 @@ const struct mesh_config default_mesh_config = {
55 .min_discovery_timeout = MESH_MIN_DISCOVERY_TIMEOUT, 57 .min_discovery_timeout = MESH_MIN_DISCOVERY_TIMEOUT,
56 .dot11MeshHWMPRannInterval = MESH_RANN_INTERVAL, 58 .dot11MeshHWMPRannInterval = MESH_RANN_INTERVAL,
57 .dot11MeshGateAnnouncementProtocol = false, 59 .dot11MeshGateAnnouncementProtocol = false,
60 .dot11MeshForwarding = true,
61 .rssi_threshold = MESH_RSSI_THRESHOLD,
58}; 62};
59 63
60const struct mesh_setup default_mesh_setup = { 64const struct mesh_setup default_mesh_setup = {
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 438dfc105b4a..f5a7ac3a0939 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -20,40 +20,18 @@ void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len)
20 struct wireless_dev *wdev = dev->ieee80211_ptr; 20 struct wireless_dev *wdev = dev->ieee80211_ptr;
21 struct wiphy *wiphy = wdev->wiphy; 21 struct wiphy *wiphy = wdev->wiphy;
22 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 22 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
23 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
24 u8 *bssid = mgmt->bssid;
25 int i;
26 u16 status = le16_to_cpu(mgmt->u.auth.status_code);
27 bool done = false;
28 23
29 wdev_lock(wdev); 24 wdev_lock(wdev);
30 25
31 for (i = 0; i < MAX_AUTH_BSSES; i++) { 26 nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL);
32 if (wdev->authtry_bsses[i] && 27 cfg80211_sme_rx_auth(dev, buf, len);
33 memcmp(wdev->authtry_bsses[i]->pub.bssid, bssid,
34 ETH_ALEN) == 0) {
35 if (status == WLAN_STATUS_SUCCESS) {
36 wdev->auth_bsses[i] = wdev->authtry_bsses[i];
37 } else {
38 cfg80211_unhold_bss(wdev->authtry_bsses[i]);
39 cfg80211_put_bss(&wdev->authtry_bsses[i]->pub);
40 }
41 wdev->authtry_bsses[i] = NULL;
42 done = true;
43 break;
44 }
45 }
46
47 if (done) {
48 nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL);
49 cfg80211_sme_rx_auth(dev, buf, len);
50 }
51 28
52 wdev_unlock(wdev); 29 wdev_unlock(wdev);
53} 30}
54EXPORT_SYMBOL(cfg80211_send_rx_auth); 31EXPORT_SYMBOL(cfg80211_send_rx_auth);
55 32
56void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len) 33void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss,
34 const u8 *buf, size_t len)
57{ 35{
58 u16 status_code; 36 u16 status_code;
59 struct wireless_dev *wdev = dev->ieee80211_ptr; 37 struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -61,8 +39,7 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
61 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 39 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
62 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; 40 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
63 u8 *ie = mgmt->u.assoc_resp.variable; 41 u8 *ie = mgmt->u.assoc_resp.variable;
64 int i, ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable); 42 int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
65 struct cfg80211_internal_bss *bss = NULL;
66 43
67 wdev_lock(wdev); 44 wdev_lock(wdev);
68 45
@@ -75,43 +52,20 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
75 * frame instead of reassoc. 52 * frame instead of reassoc.
76 */ 53 */
77 if (status_code != WLAN_STATUS_SUCCESS && wdev->conn && 54 if (status_code != WLAN_STATUS_SUCCESS && wdev->conn &&
78 cfg80211_sme_failed_reassoc(wdev)) 55 cfg80211_sme_failed_reassoc(wdev)) {
56 cfg80211_put_bss(bss);
79 goto out; 57 goto out;
58 }
80 59
81 nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL); 60 nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL);
82 61
83 if (status_code == WLAN_STATUS_SUCCESS) { 62 if (status_code != WLAN_STATUS_SUCCESS && wdev->conn) {
84 for (i = 0; i < MAX_AUTH_BSSES; i++) {
85 if (!wdev->auth_bsses[i])
86 continue;
87 if (memcmp(wdev->auth_bsses[i]->pub.bssid, mgmt->bssid,
88 ETH_ALEN) == 0) {
89 bss = wdev->auth_bsses[i];
90 wdev->auth_bsses[i] = NULL;
91 /* additional reference to drop hold */
92 cfg80211_ref_bss(bss);
93 break;
94 }
95 }
96
97 /*
98 * We might be coming here because the driver reported
99 * a successful association at the same time as the
100 * user requested a deauth. In that case, we will have
101 * removed the BSS from the auth_bsses list due to the
102 * deauth request when the assoc response makes it. If
103 * the two code paths acquire the lock the other way
104 * around, that's just the standard situation of a
105 * deauth being requested while connected.
106 */
107 if (!bss)
108 goto out;
109 } else if (wdev->conn) {
110 cfg80211_sme_failed_assoc(wdev); 63 cfg80211_sme_failed_assoc(wdev);
111 /* 64 /*
112 * do not call connect_result() now because the 65 * do not call connect_result() now because the
113 * sme will schedule work that does it later. 66 * sme will schedule work that does it later.
114 */ 67 */
68 cfg80211_put_bss(bss);
115 goto out; 69 goto out;
116 } 70 }
117 71
@@ -124,17 +78,10 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
124 wdev->sme_state = CFG80211_SME_CONNECTING; 78 wdev->sme_state = CFG80211_SME_CONNECTING;
125 } 79 }
126 80
127 /* this consumes one bss reference (unless bss is NULL) */ 81 /* this consumes the bss reference */
128 __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs, 82 __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs,
129 status_code, 83 status_code,
130 status_code == WLAN_STATUS_SUCCESS, 84 status_code == WLAN_STATUS_SUCCESS, bss);
131 bss ? &bss->pub : NULL);
132 /* drop hold now, and also reference acquired above */
133 if (bss) {
134 cfg80211_unhold_bss(bss);
135 cfg80211_put_bss(&bss->pub);
136 }
137
138 out: 85 out:
139 wdev_unlock(wdev); 86 wdev_unlock(wdev);
140} 87}
@@ -148,8 +95,7 @@ void __cfg80211_send_deauth(struct net_device *dev,
148 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 95 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
149 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; 96 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
150 const u8 *bssid = mgmt->bssid; 97 const u8 *bssid = mgmt->bssid;
151 int i; 98 bool was_current = false;
152 bool found = false, was_current = false;
153 99
154 ASSERT_WDEV_LOCK(wdev); 100 ASSERT_WDEV_LOCK(wdev);
155 101
@@ -158,32 +104,9 @@ void __cfg80211_send_deauth(struct net_device *dev,
158 cfg80211_unhold_bss(wdev->current_bss); 104 cfg80211_unhold_bss(wdev->current_bss);
159 cfg80211_put_bss(&wdev->current_bss->pub); 105 cfg80211_put_bss(&wdev->current_bss->pub);
160 wdev->current_bss = NULL; 106 wdev->current_bss = NULL;
161 found = true;
162 was_current = true; 107 was_current = true;
163 } else for (i = 0; i < MAX_AUTH_BSSES; i++) {
164 if (wdev->auth_bsses[i] &&
165 memcmp(wdev->auth_bsses[i]->pub.bssid, bssid, ETH_ALEN) == 0) {
166 cfg80211_unhold_bss(wdev->auth_bsses[i]);
167 cfg80211_put_bss(&wdev->auth_bsses[i]->pub);
168 wdev->auth_bsses[i] = NULL;
169 found = true;
170 break;
171 }
172 if (wdev->authtry_bsses[i] &&
173 memcmp(wdev->authtry_bsses[i]->pub.bssid, bssid,
174 ETH_ALEN) == 0 &&
175 memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) == 0) {
176 cfg80211_unhold_bss(wdev->authtry_bsses[i]);
177 cfg80211_put_bss(&wdev->authtry_bsses[i]->pub);
178 wdev->authtry_bsses[i] = NULL;
179 found = true;
180 break;
181 }
182 } 108 }
183 109
184 if (!found)
185 return;
186
187 nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL); 110 nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL);
188 111
189 if (wdev->sme_state == CFG80211_SME_CONNECTED && was_current) { 112 if (wdev->sme_state == CFG80211_SME_CONNECTED && was_current) {
@@ -220,10 +143,8 @@ void __cfg80211_send_disassoc(struct net_device *dev,
220 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 143 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
221 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; 144 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
222 const u8 *bssid = mgmt->bssid; 145 const u8 *bssid = mgmt->bssid;
223 int i;
224 u16 reason_code; 146 u16 reason_code;
225 bool from_ap; 147 bool from_ap;
226 bool done = false;
227 148
228 ASSERT_WDEV_LOCK(wdev); 149 ASSERT_WDEV_LOCK(wdev);
229 150
@@ -234,16 +155,10 @@ void __cfg80211_send_disassoc(struct net_device *dev,
234 155
235 if (wdev->current_bss && 156 if (wdev->current_bss &&
236 memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { 157 memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) {
237 for (i = 0; i < MAX_AUTH_BSSES; i++) { 158 cfg80211_sme_disassoc(dev, wdev->current_bss);
238 if (wdev->authtry_bsses[i] || wdev->auth_bsses[i]) 159 cfg80211_unhold_bss(wdev->current_bss);
239 continue; 160 cfg80211_put_bss(&wdev->current_bss->pub);
240 wdev->auth_bsses[i] = wdev->current_bss; 161 wdev->current_bss = NULL;
241 wdev->current_bss = NULL;
242 done = true;
243 cfg80211_sme_disassoc(dev, i);
244 break;
245 }
246 WARN_ON(!done);
247 } else 162 } else
248 WARN_ON(1); 163 WARN_ON(1);
249 164
@@ -287,34 +202,6 @@ void cfg80211_send_unprot_disassoc(struct net_device *dev, const u8 *buf,
287} 202}
288EXPORT_SYMBOL(cfg80211_send_unprot_disassoc); 203EXPORT_SYMBOL(cfg80211_send_unprot_disassoc);
289 204
290static void __cfg80211_auth_remove(struct wireless_dev *wdev, const u8 *addr)
291{
292 int i;
293 bool done = false;
294
295 ASSERT_WDEV_LOCK(wdev);
296
297 for (i = 0; addr && i < MAX_AUTH_BSSES; i++) {
298 if (wdev->authtry_bsses[i] &&
299 memcmp(wdev->authtry_bsses[i]->pub.bssid,
300 addr, ETH_ALEN) == 0) {
301 cfg80211_unhold_bss(wdev->authtry_bsses[i]);
302 cfg80211_put_bss(&wdev->authtry_bsses[i]->pub);
303 wdev->authtry_bsses[i] = NULL;
304 done = true;
305 break;
306 }
307 }
308
309 WARN_ON(!done);
310}
311
312void __cfg80211_auth_canceled(struct net_device *dev, const u8 *addr)
313{
314 __cfg80211_auth_remove(dev->ieee80211_ptr, addr);
315}
316EXPORT_SYMBOL(__cfg80211_auth_canceled);
317
318void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr) 205void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr)
319{ 206{
320 struct wireless_dev *wdev = dev->ieee80211_ptr; 207 struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -329,8 +216,6 @@ void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr)
329 WLAN_STATUS_UNSPECIFIED_FAILURE, 216 WLAN_STATUS_UNSPECIFIED_FAILURE,
330 false, NULL); 217 false, NULL);
331 218
332 __cfg80211_auth_remove(wdev, addr);
333
334 wdev_unlock(wdev); 219 wdev_unlock(wdev);
335} 220}
336EXPORT_SYMBOL(cfg80211_send_auth_timeout); 221EXPORT_SYMBOL(cfg80211_send_auth_timeout);
@@ -340,8 +225,6 @@ void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr)
340 struct wireless_dev *wdev = dev->ieee80211_ptr; 225 struct wireless_dev *wdev = dev->ieee80211_ptr;
341 struct wiphy *wiphy = wdev->wiphy; 226 struct wiphy *wiphy = wdev->wiphy;
342 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 227 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
343 int i;
344 bool done = false;
345 228
346 wdev_lock(wdev); 229 wdev_lock(wdev);
347 230
@@ -351,20 +234,6 @@ void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr)
351 WLAN_STATUS_UNSPECIFIED_FAILURE, 234 WLAN_STATUS_UNSPECIFIED_FAILURE,
352 false, NULL); 235 false, NULL);
353 236
354 for (i = 0; addr && i < MAX_AUTH_BSSES; i++) {
355 if (wdev->auth_bsses[i] &&
356 memcmp(wdev->auth_bsses[i]->pub.bssid,
357 addr, ETH_ALEN) == 0) {
358 cfg80211_unhold_bss(wdev->auth_bsses[i]);
359 cfg80211_put_bss(&wdev->auth_bsses[i]->pub);
360 wdev->auth_bsses[i] = NULL;
361 done = true;
362 break;
363 }
364 }
365
366 WARN_ON(!done);
367
368 wdev_unlock(wdev); 237 wdev_unlock(wdev);
369} 238}
370EXPORT_SYMBOL(cfg80211_send_assoc_timeout); 239EXPORT_SYMBOL(cfg80211_send_assoc_timeout);
@@ -403,13 +272,11 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
403 const u8 *bssid, 272 const u8 *bssid,
404 const u8 *ssid, int ssid_len, 273 const u8 *ssid, int ssid_len,
405 const u8 *ie, int ie_len, 274 const u8 *ie, int ie_len,
406 const u8 *key, int key_len, int key_idx, 275 const u8 *key, int key_len, int key_idx)
407 bool local_state_change)
408{ 276{
409 struct wireless_dev *wdev = dev->ieee80211_ptr; 277 struct wireless_dev *wdev = dev->ieee80211_ptr;
410 struct cfg80211_auth_request req; 278 struct cfg80211_auth_request req;
411 struct cfg80211_internal_bss *bss; 279 int err;
412 int i, err, slot = -1, nfree = 0;
413 280
414 ASSERT_WDEV_LOCK(wdev); 281 ASSERT_WDEV_LOCK(wdev);
415 282
@@ -421,20 +288,8 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
421 memcmp(bssid, wdev->current_bss->pub.bssid, ETH_ALEN) == 0) 288 memcmp(bssid, wdev->current_bss->pub.bssid, ETH_ALEN) == 0)
422 return -EALREADY; 289 return -EALREADY;
423 290
424 for (i = 0; i < MAX_AUTH_BSSES; i++) {
425 if (wdev->authtry_bsses[i] &&
426 memcmp(bssid, wdev->authtry_bsses[i]->pub.bssid,
427 ETH_ALEN) == 0)
428 return -EALREADY;
429 if (wdev->auth_bsses[i] &&
430 memcmp(bssid, wdev->auth_bsses[i]->pub.bssid,
431 ETH_ALEN) == 0)
432 return -EALREADY;
433 }
434
435 memset(&req, 0, sizeof(req)); 291 memset(&req, 0, sizeof(req));
436 292
437 req.local_state_change = local_state_change;
438 req.ie = ie; 293 req.ie = ie;
439 req.ie_len = ie_len; 294 req.ie_len = ie_len;
440 req.auth_type = auth_type; 295 req.auth_type = auth_type;
@@ -446,39 +301,9 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
446 if (!req.bss) 301 if (!req.bss)
447 return -ENOENT; 302 return -ENOENT;
448 303
449 bss = bss_from_pub(req.bss);
450
451 for (i = 0; i < MAX_AUTH_BSSES; i++) {
452 if (!wdev->auth_bsses[i] && !wdev->authtry_bsses[i]) {
453 slot = i;
454 nfree++;
455 }
456 }
457
458 /* we need one free slot for disassoc and one for this auth */
459 if (nfree < 2) {
460 err = -ENOSPC;
461 goto out;
462 }
463
464 if (local_state_change)
465 wdev->auth_bsses[slot] = bss;
466 else
467 wdev->authtry_bsses[slot] = bss;
468 cfg80211_hold_bss(bss);
469
470 err = rdev->ops->auth(&rdev->wiphy, dev, &req); 304 err = rdev->ops->auth(&rdev->wiphy, dev, &req);
471 if (err) {
472 if (local_state_change)
473 wdev->auth_bsses[slot] = NULL;
474 else
475 wdev->authtry_bsses[slot] = NULL;
476 cfg80211_unhold_bss(bss);
477 }
478 305
479 out: 306 cfg80211_put_bss(req.bss);
480 if (err)
481 cfg80211_put_bss(req.bss);
482 return err; 307 return err;
483} 308}
484 309
@@ -487,15 +312,14 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
487 enum nl80211_auth_type auth_type, const u8 *bssid, 312 enum nl80211_auth_type auth_type, const u8 *bssid,
488 const u8 *ssid, int ssid_len, 313 const u8 *ssid, int ssid_len,
489 const u8 *ie, int ie_len, 314 const u8 *ie, int ie_len,
490 const u8 *key, int key_len, int key_idx, 315 const u8 *key, int key_len, int key_idx)
491 bool local_state_change)
492{ 316{
493 int err; 317 int err;
494 318
495 wdev_lock(dev->ieee80211_ptr); 319 wdev_lock(dev->ieee80211_ptr);
496 err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, 320 err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
497 ssid, ssid_len, ie, ie_len, 321 ssid, ssid_len, ie, ie_len,
498 key, key_len, key_idx, local_state_change); 322 key, key_len, key_idx);
499 wdev_unlock(dev->ieee80211_ptr); 323 wdev_unlock(dev->ieee80211_ptr);
500 324
501 return err; 325 return err;
@@ -530,8 +354,7 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
530{ 354{
531 struct wireless_dev *wdev = dev->ieee80211_ptr; 355 struct wireless_dev *wdev = dev->ieee80211_ptr;
532 struct cfg80211_assoc_request req; 356 struct cfg80211_assoc_request req;
533 struct cfg80211_internal_bss *bss; 357 int err;
534 int i, err, slot = -1;
535 bool was_connected = false; 358 bool was_connected = false;
536 359
537 ASSERT_WDEV_LOCK(wdev); 360 ASSERT_WDEV_LOCK(wdev);
@@ -573,26 +396,14 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
573 return -ENOENT; 396 return -ENOENT;
574 } 397 }
575 398
576 bss = bss_from_pub(req.bss); 399 err = rdev->ops->assoc(&rdev->wiphy, dev, &req);
577
578 for (i = 0; i < MAX_AUTH_BSSES; i++) {
579 if (bss == wdev->auth_bsses[i]) {
580 slot = i;
581 break;
582 }
583 }
584 400
585 if (slot < 0) { 401 if (err) {
586 err = -ENOTCONN; 402 if (was_connected)
587 goto out; 403 wdev->sme_state = CFG80211_SME_CONNECTED;
404 cfg80211_put_bss(req.bss);
588 } 405 }
589 406
590 err = rdev->ops->assoc(&rdev->wiphy, dev, &req);
591 out:
592 if (err && was_connected)
593 wdev->sme_state = CFG80211_SME_CONNECTED;
594 /* still a reference in wdev->auth_bsses[slot] */
595 cfg80211_put_bss(req.bss);
596 return err; 407 return err;
597} 408}
598 409
@@ -624,36 +435,27 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
624 bool local_state_change) 435 bool local_state_change)
625{ 436{
626 struct wireless_dev *wdev = dev->ieee80211_ptr; 437 struct wireless_dev *wdev = dev->ieee80211_ptr;
627 struct cfg80211_deauth_request req; 438 struct cfg80211_deauth_request req = {
628 int i; 439 .bssid = bssid,
440 .reason_code = reason,
441 .ie = ie,
442 .ie_len = ie_len,
443 };
629 444
630 ASSERT_WDEV_LOCK(wdev); 445 ASSERT_WDEV_LOCK(wdev);
631 446
632 memset(&req, 0, sizeof(req)); 447 if (local_state_change) {
633 req.reason_code = reason; 448 if (wdev->current_bss &&
634 req.local_state_change = local_state_change; 449 memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) {
635 req.ie = ie; 450 cfg80211_unhold_bss(wdev->current_bss);
636 req.ie_len = ie_len; 451 cfg80211_put_bss(&wdev->current_bss->pub);
637 if (wdev->current_bss && 452 wdev->current_bss = NULL;
638 memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) {
639 req.bss = &wdev->current_bss->pub;
640 } else for (i = 0; i < MAX_AUTH_BSSES; i++) {
641 if (wdev->auth_bsses[i] &&
642 memcmp(bssid, wdev->auth_bsses[i]->pub.bssid, ETH_ALEN) == 0) {
643 req.bss = &wdev->auth_bsses[i]->pub;
644 break;
645 }
646 if (wdev->authtry_bsses[i] &&
647 memcmp(bssid, wdev->authtry_bsses[i]->pub.bssid, ETH_ALEN) == 0) {
648 req.bss = &wdev->authtry_bsses[i]->pub;
649 break;
650 } 453 }
651 }
652 454
653 if (!req.bss) 455 return 0;
654 return -ENOTCONN; 456 }
655 457
656 return rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev); 458 return rdev->ops->deauth(&rdev->wiphy, dev, &req);
657} 459}
658 460
659int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, 461int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
@@ -698,7 +500,7 @@ static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
698 else 500 else
699 return -ENOTCONN; 501 return -ENOTCONN;
700 502
701 return rdev->ops->disassoc(&rdev->wiphy, dev, &req, wdev); 503 return rdev->ops->disassoc(&rdev->wiphy, dev, &req);
702} 504}
703 505
704int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, 506int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
@@ -722,7 +524,7 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
722{ 524{
723 struct wireless_dev *wdev = dev->ieee80211_ptr; 525 struct wireless_dev *wdev = dev->ieee80211_ptr;
724 struct cfg80211_deauth_request req; 526 struct cfg80211_deauth_request req;
725 int i; 527 u8 bssid[ETH_ALEN];
726 528
727 ASSERT_WDEV_LOCK(wdev); 529 ASSERT_WDEV_LOCK(wdev);
728 530
@@ -734,35 +536,17 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
734 req.ie = NULL; 536 req.ie = NULL;
735 req.ie_len = 0; 537 req.ie_len = 0;
736 538
737 if (wdev->current_bss) { 539 if (!wdev->current_bss)
738 req.bss = &wdev->current_bss->pub; 540 return;
739 rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev);
740 if (wdev->current_bss) {
741 cfg80211_unhold_bss(wdev->current_bss);
742 cfg80211_put_bss(&wdev->current_bss->pub);
743 wdev->current_bss = NULL;
744 }
745 }
746 541
747 for (i = 0; i < MAX_AUTH_BSSES; i++) { 542 memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN);
748 if (wdev->auth_bsses[i]) { 543 req.bssid = bssid;
749 req.bss = &wdev->auth_bsses[i]->pub; 544 rdev->ops->deauth(&rdev->wiphy, dev, &req);
750 rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev); 545
751 if (wdev->auth_bsses[i]) { 546 if (wdev->current_bss) {
752 cfg80211_unhold_bss(wdev->auth_bsses[i]); 547 cfg80211_unhold_bss(wdev->current_bss);
753 cfg80211_put_bss(&wdev->auth_bsses[i]->pub); 548 cfg80211_put_bss(&wdev->current_bss->pub);
754 wdev->auth_bsses[i] = NULL; 549 wdev->current_bss = NULL;
755 }
756 }
757 if (wdev->authtry_bsses[i]) {
758 req.bss = &wdev->authtry_bsses[i]->pub;
759 rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev);
760 if (wdev->authtry_bsses[i]) {
761 cfg80211_unhold_bss(wdev->authtry_bsses[i]);
762 cfg80211_put_bss(&wdev->authtry_bsses[i]->pub);
763 wdev->authtry_bsses[i] = NULL;
764 }
765 }
766 } 550 }
767} 551}
768 552
@@ -1030,8 +814,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
1030 cookie); 814 cookie);
1031} 815}
1032 816
1033bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf, 817bool cfg80211_rx_mgmt(struct net_device *dev, int freq, int sig_mbm,
1034 size_t len, gfp_t gfp) 818 const u8 *buf, size_t len, gfp_t gfp)
1035{ 819{
1036 struct wireless_dev *wdev = dev->ieee80211_ptr; 820 struct wireless_dev *wdev = dev->ieee80211_ptr;
1037 struct wiphy *wiphy = wdev->wiphy; 821 struct wiphy *wiphy = wdev->wiphy;
@@ -1070,7 +854,8 @@ bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf,
1070 /* found match! */ 854 /* found match! */
1071 855
1072 /* Indicate the received Action frame to user space */ 856 /* Indicate the received Action frame to user space */
1073 if (nl80211_send_mgmt(rdev, dev, reg->nlpid, freq, 857 if (nl80211_send_mgmt(rdev, dev, reg->nlpid,
858 freq, sig_mbm,
1074 buf, len, gfp)) 859 buf, len, gfp))
1075 continue; 860 continue;
1076 861
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index afeea32e04ad..4c1eb9472ddb 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -204,6 +204,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
204 .len = NL80211_HT_CAPABILITY_LEN 204 .len = NL80211_HT_CAPABILITY_LEN
205 }, 205 },
206 [NL80211_ATTR_NOACK_MAP] = { .type = NLA_U16 }, 206 [NL80211_ATTR_NOACK_MAP] = { .type = NLA_U16 },
207 [NL80211_ATTR_INACTIVITY_TIMEOUT] = { .type = NLA_U16 },
208 [NL80211_ATTR_BG_SCAN_PERIOD] = { .type = NLA_U16 },
207}; 209};
208 210
209/* policy for the key attributes */ 211/* policy for the key attributes */
@@ -427,10 +429,9 @@ static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k)
427 429
428 if (tb[NL80211_KEY_DEFAULT_TYPES]) { 430 if (tb[NL80211_KEY_DEFAULT_TYPES]) {
429 struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES]; 431 struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES];
430 int err = nla_parse_nested(kdt, 432 err = nla_parse_nested(kdt, NUM_NL80211_KEY_DEFAULT_TYPES - 1,
431 NUM_NL80211_KEY_DEFAULT_TYPES - 1, 433 tb[NL80211_KEY_DEFAULT_TYPES],
432 tb[NL80211_KEY_DEFAULT_TYPES], 434 nl80211_key_default_policy);
433 nl80211_key_default_policy);
434 if (err) 435 if (err)
435 return err; 436 return err;
436 437
@@ -872,7 +873,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
872 CMD(add_virtual_intf, NEW_INTERFACE); 873 CMD(add_virtual_intf, NEW_INTERFACE);
873 CMD(change_virtual_intf, SET_INTERFACE); 874 CMD(change_virtual_intf, SET_INTERFACE);
874 CMD(add_key, NEW_KEY); 875 CMD(add_key, NEW_KEY);
875 CMD(add_beacon, NEW_BEACON); 876 CMD(start_ap, START_AP);
876 CMD(add_station, NEW_STATION); 877 CMD(add_station, NEW_STATION);
877 CMD(add_mpath, NEW_MPATH); 878 CMD(add_mpath, NEW_MPATH);
878 CMD(update_mesh_config, SET_MESH_CONFIG); 879 CMD(update_mesh_config, SET_MESH_CONFIG);
@@ -2076,15 +2077,10 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
2076 return err; 2077 return err;
2077} 2078}
2078 2079
2079static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info) 2080static int nl80211_parse_beacon(struct genl_info *info,
2081 struct cfg80211_beacon_data *bcn)
2080{ 2082{
2081 int (*call)(struct wiphy *wiphy, struct net_device *dev, 2083 bool haveinfo = false;
2082 struct beacon_parameters *info);
2083 struct cfg80211_registered_device *rdev = info->user_ptr[0];
2084 struct net_device *dev = info->user_ptr[1];
2085 struct wireless_dev *wdev = dev->ieee80211_ptr;
2086 struct beacon_parameters params;
2087 int haveinfo = 0, err;
2088 2084
2089 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_BEACON_TAIL]) || 2085 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_BEACON_TAIL]) ||
2090 !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]) || 2086 !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]) ||
@@ -2092,149 +2088,190 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
2092 !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE_ASSOC_RESP])) 2088 !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]))
2093 return -EINVAL; 2089 return -EINVAL;
2094 2090
2095 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && 2091 memset(bcn, 0, sizeof(*bcn));
2096 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
2097 return -EOPNOTSUPP;
2098
2099 memset(&params, 0, sizeof(params));
2100
2101 switch (info->genlhdr->cmd) {
2102 case NL80211_CMD_NEW_BEACON:
2103 /* these are required for NEW_BEACON */
2104 if (!info->attrs[NL80211_ATTR_BEACON_INTERVAL] ||
2105 !info->attrs[NL80211_ATTR_DTIM_PERIOD] ||
2106 !info->attrs[NL80211_ATTR_BEACON_HEAD])
2107 return -EINVAL;
2108
2109 params.interval =
2110 nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
2111 params.dtim_period =
2112 nla_get_u32(info->attrs[NL80211_ATTR_DTIM_PERIOD]);
2113
2114 err = cfg80211_validate_beacon_int(rdev, params.interval);
2115 if (err)
2116 return err;
2117
2118 /*
2119 * In theory, some of these attributes could be required for
2120 * NEW_BEACON, but since they were not used when the command was
2121 * originally added, keep them optional for old user space
2122 * programs to work with drivers that do not need the additional
2123 * information.
2124 */
2125 if (info->attrs[NL80211_ATTR_SSID]) {
2126 params.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
2127 params.ssid_len =
2128 nla_len(info->attrs[NL80211_ATTR_SSID]);
2129 if (params.ssid_len == 0 ||
2130 params.ssid_len > IEEE80211_MAX_SSID_LEN)
2131 return -EINVAL;
2132 }
2133
2134 if (info->attrs[NL80211_ATTR_HIDDEN_SSID]) {
2135 params.hidden_ssid = nla_get_u32(
2136 info->attrs[NL80211_ATTR_HIDDEN_SSID]);
2137 if (params.hidden_ssid !=
2138 NL80211_HIDDEN_SSID_NOT_IN_USE &&
2139 params.hidden_ssid !=
2140 NL80211_HIDDEN_SSID_ZERO_LEN &&
2141 params.hidden_ssid !=
2142 NL80211_HIDDEN_SSID_ZERO_CONTENTS)
2143 return -EINVAL;
2144 }
2145
2146 params.privacy = !!info->attrs[NL80211_ATTR_PRIVACY];
2147
2148 if (info->attrs[NL80211_ATTR_AUTH_TYPE]) {
2149 params.auth_type = nla_get_u32(
2150 info->attrs[NL80211_ATTR_AUTH_TYPE]);
2151 if (!nl80211_valid_auth_type(params.auth_type))
2152 return -EINVAL;
2153 } else
2154 params.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
2155
2156 err = nl80211_crypto_settings(rdev, info, &params.crypto,
2157 NL80211_MAX_NR_CIPHER_SUITES);
2158 if (err)
2159 return err;
2160
2161 call = rdev->ops->add_beacon;
2162 break;
2163 case NL80211_CMD_SET_BEACON:
2164 call = rdev->ops->set_beacon;
2165 break;
2166 default:
2167 WARN_ON(1);
2168 return -EOPNOTSUPP;
2169 }
2170
2171 if (!call)
2172 return -EOPNOTSUPP;
2173 2092
2174 if (info->attrs[NL80211_ATTR_BEACON_HEAD]) { 2093 if (info->attrs[NL80211_ATTR_BEACON_HEAD]) {
2175 params.head = nla_data(info->attrs[NL80211_ATTR_BEACON_HEAD]); 2094 bcn->head = nla_data(info->attrs[NL80211_ATTR_BEACON_HEAD]);
2176 params.head_len = 2095 bcn->head_len = nla_len(info->attrs[NL80211_ATTR_BEACON_HEAD]);
2177 nla_len(info->attrs[NL80211_ATTR_BEACON_HEAD]); 2096 if (!bcn->head_len)
2178 haveinfo = 1; 2097 return -EINVAL;
2098 haveinfo = true;
2179 } 2099 }
2180 2100
2181 if (info->attrs[NL80211_ATTR_BEACON_TAIL]) { 2101 if (info->attrs[NL80211_ATTR_BEACON_TAIL]) {
2182 params.tail = nla_data(info->attrs[NL80211_ATTR_BEACON_TAIL]); 2102 bcn->tail = nla_data(info->attrs[NL80211_ATTR_BEACON_TAIL]);
2183 params.tail_len = 2103 bcn->tail_len =
2184 nla_len(info->attrs[NL80211_ATTR_BEACON_TAIL]); 2104 nla_len(info->attrs[NL80211_ATTR_BEACON_TAIL]);
2185 haveinfo = 1; 2105 haveinfo = true;
2186 } 2106 }
2187 2107
2188 if (!haveinfo) 2108 if (!haveinfo)
2189 return -EINVAL; 2109 return -EINVAL;
2190 2110
2191 if (info->attrs[NL80211_ATTR_IE]) { 2111 if (info->attrs[NL80211_ATTR_IE]) {
2192 params.beacon_ies = nla_data(info->attrs[NL80211_ATTR_IE]); 2112 bcn->beacon_ies = nla_data(info->attrs[NL80211_ATTR_IE]);
2193 params.beacon_ies_len = nla_len(info->attrs[NL80211_ATTR_IE]); 2113 bcn->beacon_ies_len = nla_len(info->attrs[NL80211_ATTR_IE]);
2194 } 2114 }
2195 2115
2196 if (info->attrs[NL80211_ATTR_IE_PROBE_RESP]) { 2116 if (info->attrs[NL80211_ATTR_IE_PROBE_RESP]) {
2197 params.proberesp_ies = 2117 bcn->proberesp_ies =
2198 nla_data(info->attrs[NL80211_ATTR_IE_PROBE_RESP]); 2118 nla_data(info->attrs[NL80211_ATTR_IE_PROBE_RESP]);
2199 params.proberesp_ies_len = 2119 bcn->proberesp_ies_len =
2200 nla_len(info->attrs[NL80211_ATTR_IE_PROBE_RESP]); 2120 nla_len(info->attrs[NL80211_ATTR_IE_PROBE_RESP]);
2201 } 2121 }
2202 2122
2203 if (info->attrs[NL80211_ATTR_IE_ASSOC_RESP]) { 2123 if (info->attrs[NL80211_ATTR_IE_ASSOC_RESP]) {
2204 params.assocresp_ies = 2124 bcn->assocresp_ies =
2205 nla_data(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]); 2125 nla_data(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]);
2206 params.assocresp_ies_len = 2126 bcn->assocresp_ies_len =
2207 nla_len(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]); 2127 nla_len(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]);
2208 } 2128 }
2209 2129
2210 if (info->attrs[NL80211_ATTR_PROBE_RESP]) { 2130 if (info->attrs[NL80211_ATTR_PROBE_RESP]) {
2211 params.probe_resp = 2131 bcn->probe_resp =
2212 nla_data(info->attrs[NL80211_ATTR_PROBE_RESP]); 2132 nla_data(info->attrs[NL80211_ATTR_PROBE_RESP]);
2213 params.probe_resp_len = 2133 bcn->probe_resp_len =
2214 nla_len(info->attrs[NL80211_ATTR_PROBE_RESP]); 2134 nla_len(info->attrs[NL80211_ATTR_PROBE_RESP]);
2215 } 2135 }
2216 2136
2217 err = call(&rdev->wiphy, dev, &params); 2137 return 0;
2218 if (!err && params.interval) 2138}
2219 wdev->beacon_interval = params.interval; 2139
2140static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
2141{
2142 struct cfg80211_registered_device *rdev = info->user_ptr[0];
2143 struct net_device *dev = info->user_ptr[1];
2144 struct wireless_dev *wdev = dev->ieee80211_ptr;
2145 struct cfg80211_ap_settings params;
2146 int err;
2147
2148 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
2149 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
2150 return -EOPNOTSUPP;
2151
2152 if (!rdev->ops->start_ap)
2153 return -EOPNOTSUPP;
2154
2155 if (wdev->beacon_interval)
2156 return -EALREADY;
2157
2158 memset(&params, 0, sizeof(params));
2159
2160 /* these are required for START_AP */
2161 if (!info->attrs[NL80211_ATTR_BEACON_INTERVAL] ||
2162 !info->attrs[NL80211_ATTR_DTIM_PERIOD] ||
2163 !info->attrs[NL80211_ATTR_BEACON_HEAD])
2164 return -EINVAL;
2165
2166 err = nl80211_parse_beacon(info, &params.beacon);
2167 if (err)
2168 return err;
2169
2170 params.beacon_interval =
2171 nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
2172 params.dtim_period =
2173 nla_get_u32(info->attrs[NL80211_ATTR_DTIM_PERIOD]);
2174
2175 err = cfg80211_validate_beacon_int(rdev, params.beacon_interval);
2176 if (err)
2177 return err;
2178
2179 /*
2180 * In theory, some of these attributes should be required here
2181 * but since they were not used when the command was originally
2182 * added, keep them optional for old user space programs to let
2183 * them continue to work with drivers that do not need the
2184 * additional information -- drivers must check!
2185 */
2186 if (info->attrs[NL80211_ATTR_SSID]) {
2187 params.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
2188 params.ssid_len =
2189 nla_len(info->attrs[NL80211_ATTR_SSID]);
2190 if (params.ssid_len == 0 ||
2191 params.ssid_len > IEEE80211_MAX_SSID_LEN)
2192 return -EINVAL;
2193 }
2194
2195 if (info->attrs[NL80211_ATTR_HIDDEN_SSID]) {
2196 params.hidden_ssid = nla_get_u32(
2197 info->attrs[NL80211_ATTR_HIDDEN_SSID]);
2198 if (params.hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE &&
2199 params.hidden_ssid != NL80211_HIDDEN_SSID_ZERO_LEN &&
2200 params.hidden_ssid != NL80211_HIDDEN_SSID_ZERO_CONTENTS)
2201 return -EINVAL;
2202 }
2203
2204 params.privacy = !!info->attrs[NL80211_ATTR_PRIVACY];
2205
2206 if (info->attrs[NL80211_ATTR_AUTH_TYPE]) {
2207 params.auth_type = nla_get_u32(
2208 info->attrs[NL80211_ATTR_AUTH_TYPE]);
2209 if (!nl80211_valid_auth_type(params.auth_type))
2210 return -EINVAL;
2211 } else
2212 params.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
2213
2214 err = nl80211_crypto_settings(rdev, info, &params.crypto,
2215 NL80211_MAX_NR_CIPHER_SUITES);
2216 if (err)
2217 return err;
2218
2219 if (info->attrs[NL80211_ATTR_INACTIVITY_TIMEOUT]) {
2220 if (!(rdev->wiphy.features & NL80211_FEATURE_INACTIVITY_TIMER))
2221 return -EOPNOTSUPP;
2222 params.inactivity_timeout = nla_get_u16(
2223 info->attrs[NL80211_ATTR_INACTIVITY_TIMEOUT]);
2224 }
2225
2226 err = rdev->ops->start_ap(&rdev->wiphy, dev, &params);
2227 if (!err)
2228 wdev->beacon_interval = params.beacon_interval;
2220 return err; 2229 return err;
2221} 2230}
2222 2231
2223static int nl80211_del_beacon(struct sk_buff *skb, struct genl_info *info) 2232static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info)
2224{ 2233{
2225 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 2234 struct cfg80211_registered_device *rdev = info->user_ptr[0];
2226 struct net_device *dev = info->user_ptr[1]; 2235 struct net_device *dev = info->user_ptr[1];
2227 struct wireless_dev *wdev = dev->ieee80211_ptr; 2236 struct wireless_dev *wdev = dev->ieee80211_ptr;
2237 struct cfg80211_beacon_data params;
2228 int err; 2238 int err;
2229 2239
2230 if (!rdev->ops->del_beacon) 2240 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
2241 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
2242 return -EOPNOTSUPP;
2243
2244 if (!rdev->ops->change_beacon)
2245 return -EOPNOTSUPP;
2246
2247 if (!wdev->beacon_interval)
2248 return -EINVAL;
2249
2250 err = nl80211_parse_beacon(info, &params);
2251 if (err)
2252 return err;
2253
2254 return rdev->ops->change_beacon(&rdev->wiphy, dev, &params);
2255}
2256
2257static int nl80211_stop_ap(struct sk_buff *skb, struct genl_info *info)
2258{
2259 struct cfg80211_registered_device *rdev = info->user_ptr[0];
2260 struct net_device *dev = info->user_ptr[1];
2261 struct wireless_dev *wdev = dev->ieee80211_ptr;
2262 int err;
2263
2264 if (!rdev->ops->stop_ap)
2231 return -EOPNOTSUPP; 2265 return -EOPNOTSUPP;
2232 2266
2233 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && 2267 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
2234 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) 2268 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
2235 return -EOPNOTSUPP; 2269 return -EOPNOTSUPP;
2236 2270
2237 err = rdev->ops->del_beacon(&rdev->wiphy, dev); 2271 if (!wdev->beacon_interval)
2272 return -ENOENT;
2273
2274 err = rdev->ops->stop_ap(&rdev->wiphy, dev);
2238 if (!err) 2275 if (!err)
2239 wdev->beacon_interval = 0; 2276 wdev->beacon_interval = 0;
2240 return err; 2277 return err;
@@ -2655,13 +2692,6 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
2655 break; 2692 break;
2656 case NL80211_IFTYPE_P2P_CLIENT: 2693 case NL80211_IFTYPE_P2P_CLIENT:
2657 case NL80211_IFTYPE_STATION: 2694 case NL80211_IFTYPE_STATION:
2658 /* disallow things sta doesn't support */
2659 if (params.plink_action)
2660 return -EINVAL;
2661 if (params.ht_capa)
2662 return -EINVAL;
2663 if (params.listen_interval >= 0)
2664 return -EINVAL;
2665 /* 2695 /*
2666 * Don't allow userspace to change the TDLS_PEER flag, 2696 * Don't allow userspace to change the TDLS_PEER flag,
2667 * but silently ignore attempts to change it since we 2697 * but silently ignore attempts to change it since we
@@ -2669,7 +2699,15 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
2669 * to change the flag. 2699 * to change the flag.
2670 */ 2700 */
2671 params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER); 2701 params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
2672 2702 /* fall through */
2703 case NL80211_IFTYPE_ADHOC:
2704 /* disallow things sta doesn't support */
2705 if (params.plink_action)
2706 return -EINVAL;
2707 if (params.ht_capa)
2708 return -EINVAL;
2709 if (params.listen_interval >= 0)
2710 return -EINVAL;
2673 /* reject any changes other than AUTHORIZED */ 2711 /* reject any changes other than AUTHORIZED */
2674 if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED)) 2712 if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED))
2675 return -EINVAL; 2713 return -EINVAL;
@@ -3259,6 +3297,10 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
3259 cur_params.dot11MeshHWMPRannInterval); 3297 cur_params.dot11MeshHWMPRannInterval);
3260 NLA_PUT_U8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS, 3298 NLA_PUT_U8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
3261 cur_params.dot11MeshGateAnnouncementProtocol); 3299 cur_params.dot11MeshGateAnnouncementProtocol);
3300 NLA_PUT_U8(msg, NL80211_MESHCONF_FORWARDING,
3301 cur_params.dot11MeshForwarding);
3302 NLA_PUT_U32(msg, NL80211_MESHCONF_RSSI_THRESHOLD,
3303 cur_params.rssi_threshold);
3262 nla_nest_end(msg, pinfoattr); 3304 nla_nest_end(msg, pinfoattr);
3263 genlmsg_end(msg, hdr); 3305 genlmsg_end(msg, hdr);
3264 return genlmsg_reply(msg, info); 3306 return genlmsg_reply(msg, info);
@@ -3290,6 +3332,8 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
3290 [NL80211_MESHCONF_HWMP_ROOTMODE] = { .type = NLA_U8 }, 3332 [NL80211_MESHCONF_HWMP_ROOTMODE] = { .type = NLA_U8 },
3291 [NL80211_MESHCONF_HWMP_RANN_INTERVAL] = { .type = NLA_U16 }, 3333 [NL80211_MESHCONF_HWMP_RANN_INTERVAL] = { .type = NLA_U16 },
3292 [NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = { .type = NLA_U8 }, 3334 [NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = { .type = NLA_U8 },
3335 [NL80211_MESHCONF_FORWARDING] = { .type = NLA_U8 },
3336 [NL80211_MESHCONF_RSSI_THRESHOLD] = { .type = NLA_U32},
3293}; 3337};
3294 3338
3295static const struct nla_policy 3339static const struct nla_policy
@@ -3379,6 +3423,10 @@ do {\
3379 dot11MeshGateAnnouncementProtocol, mask, 3423 dot11MeshGateAnnouncementProtocol, mask,
3380 NL80211_MESHCONF_GATE_ANNOUNCEMENTS, 3424 NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
3381 nla_get_u8); 3425 nla_get_u8);
3426 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding,
3427 mask, NL80211_MESHCONF_FORWARDING, nla_get_u8);
3428 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold,
3429 mask, NL80211_MESHCONF_RSSI_THRESHOLD, nla_get_u32);
3382 if (mask_out) 3430 if (mask_out)
3383 *mask_out = mask; 3431 *mask_out = mask;
3384 3432
@@ -4079,7 +4127,6 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
4079 struct cfg80211_bss *res = &intbss->pub; 4127 struct cfg80211_bss *res = &intbss->pub;
4080 void *hdr; 4128 void *hdr;
4081 struct nlattr *bss; 4129 struct nlattr *bss;
4082 int i;
4083 4130
4084 ASSERT_WDEV_LOCK(wdev); 4131 ASSERT_WDEV_LOCK(wdev);
4085 4132
@@ -4132,13 +4179,6 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
4132 if (intbss == wdev->current_bss) 4179 if (intbss == wdev->current_bss)
4133 NLA_PUT_U32(msg, NL80211_BSS_STATUS, 4180 NLA_PUT_U32(msg, NL80211_BSS_STATUS,
4134 NL80211_BSS_STATUS_ASSOCIATED); 4181 NL80211_BSS_STATUS_ASSOCIATED);
4135 else for (i = 0; i < MAX_AUTH_BSSES; i++) {
4136 if (intbss != wdev->auth_bsses[i])
4137 continue;
4138 NLA_PUT_U32(msg, NL80211_BSS_STATUS,
4139 NL80211_BSS_STATUS_AUTHENTICATED);
4140 break;
4141 }
4142 break; 4182 break;
4143 case NL80211_IFTYPE_ADHOC: 4183 case NL80211_IFTYPE_ADHOC:
4144 if (intbss == wdev->current_bss) 4184 if (intbss == wdev->current_bss)
@@ -4406,10 +4446,16 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
4406 4446
4407 local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE]; 4447 local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
4408 4448
4449 /*
4450 * Since we no longer track auth state, ignore
4451 * requests to only change local state.
4452 */
4453 if (local_state_change)
4454 return 0;
4455
4409 return cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, 4456 return cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
4410 ssid, ssid_len, ie, ie_len, 4457 ssid, ssid_len, ie, ie_len,
4411 key.p.key, key.p.key_len, key.idx, 4458 key.p.key, key.p.key_len, key.idx);
4412 local_state_change);
4413} 4459}
4414 4460
4415static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev, 4461static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
@@ -4781,7 +4827,6 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
4781 nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); 4827 nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
4782 struct ieee80211_supported_band *sband = 4828 struct ieee80211_supported_band *sband =
4783 wiphy->bands[ibss.channel->band]; 4829 wiphy->bands[ibss.channel->band];
4784 int err;
4785 4830
4786 err = ieee80211_get_ratemask(sband, rates, n_rates, 4831 err = ieee80211_get_ratemask(sband, rates, n_rates,
4787 &ibss.basic_rates); 4832 &ibss.basic_rates);
@@ -4801,6 +4846,9 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
4801 return PTR_ERR(connkeys); 4846 return PTR_ERR(connkeys);
4802 } 4847 }
4803 4848
4849 ibss.control_port =
4850 nla_get_flag(info->attrs[NL80211_ATTR_CONTROL_PORT]);
4851
4804 err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys); 4852 err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys);
4805 if (err) 4853 if (err)
4806 kfree(connkeys); 4854 kfree(connkeys);
@@ -5069,6 +5117,13 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
5069 5117
5070 wiphy = &rdev->wiphy; 5118 wiphy = &rdev->wiphy;
5071 5119
5120 connect.bg_scan_period = -1;
5121 if (info->attrs[NL80211_ATTR_BG_SCAN_PERIOD] &&
5122 (wiphy->flags & WIPHY_FLAG_SUPPORTS_FW_ROAM)) {
5123 connect.bg_scan_period =
5124 nla_get_u16(info->attrs[NL80211_ATTR_BG_SCAN_PERIOD]);
5125 }
5126
5072 if (info->attrs[NL80211_ATTR_MAC]) 5127 if (info->attrs[NL80211_ATTR_MAC])
5073 connect.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); 5128 connect.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
5074 connect.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); 5129 connect.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
@@ -5390,9 +5445,39 @@ static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
5390 return mask; 5445 return mask;
5391} 5446}
5392 5447
5448static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
5449 u8 *rates, u8 rates_len,
5450 u8 mcs[IEEE80211_HT_MCS_MASK_LEN])
5451{
5452 u8 i;
5453
5454 memset(mcs, 0, IEEE80211_HT_MCS_MASK_LEN);
5455
5456 for (i = 0; i < rates_len; i++) {
5457 int ridx, rbit;
5458
5459 ridx = rates[i] / 8;
5460 rbit = BIT(rates[i] % 8);
5461
5462 /* check validity */
5463 if ((ridx < 0) || (ridx >= IEEE80211_HT_MCS_MASK_LEN))
5464 return false;
5465
5466 /* check availability */
5467 if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
5468 mcs[ridx] |= rbit;
5469 else
5470 return false;
5471 }
5472
5473 return true;
5474}
5475
5393static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = { 5476static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
5394 [NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY, 5477 [NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY,
5395 .len = NL80211_MAX_SUPP_RATES }, 5478 .len = NL80211_MAX_SUPP_RATES },
5479 [NL80211_TXRATE_MCS] = { .type = NLA_BINARY,
5480 .len = NL80211_MAX_SUPP_HT_RATES },
5396}; 5481};
5397 5482
5398static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb, 5483static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
@@ -5418,12 +5503,20 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
5418 sband = rdev->wiphy.bands[i]; 5503 sband = rdev->wiphy.bands[i];
5419 mask.control[i].legacy = 5504 mask.control[i].legacy =
5420 sband ? (1 << sband->n_bitrates) - 1 : 0; 5505 sband ? (1 << sband->n_bitrates) - 1 : 0;
5506 if (sband)
5507 memcpy(mask.control[i].mcs,
5508 sband->ht_cap.mcs.rx_mask,
5509 sizeof(mask.control[i].mcs));
5510 else
5511 memset(mask.control[i].mcs, 0,
5512 sizeof(mask.control[i].mcs));
5421 } 5513 }
5422 5514
5423 /* 5515 /*
5424 * The nested attribute uses enum nl80211_band as the index. This maps 5516 * The nested attribute uses enum nl80211_band as the index. This maps
5425 * directly to the enum ieee80211_band values used in cfg80211. 5517 * directly to the enum ieee80211_band values used in cfg80211.
5426 */ 5518 */
5519 BUILD_BUG_ON(NL80211_MAX_SUPP_HT_RATES > IEEE80211_HT_MCS_MASK_LEN * 8);
5427 nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem) 5520 nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem)
5428 { 5521 {
5429 enum ieee80211_band band = nla_type(tx_rates); 5522 enum ieee80211_band band = nla_type(tx_rates);
@@ -5439,7 +5532,28 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
5439 sband, 5532 sband,
5440 nla_data(tb[NL80211_TXRATE_LEGACY]), 5533 nla_data(tb[NL80211_TXRATE_LEGACY]),
5441 nla_len(tb[NL80211_TXRATE_LEGACY])); 5534 nla_len(tb[NL80211_TXRATE_LEGACY]));
5442 if (mask.control[band].legacy == 0) 5535 }
5536 if (tb[NL80211_TXRATE_MCS]) {
5537 if (!ht_rateset_to_mask(
5538 sband,
5539 nla_data(tb[NL80211_TXRATE_MCS]),
5540 nla_len(tb[NL80211_TXRATE_MCS]),
5541 mask.control[band].mcs))
5542 return -EINVAL;
5543 }
5544
5545 if (mask.control[band].legacy == 0) {
5546 /* don't allow empty legacy rates if HT
5547 * is not even supported. */
5548 if (!rdev->wiphy.bands[band]->ht_cap.ht_supported)
5549 return -EINVAL;
5550
5551 for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
5552 if (mask.control[band].mcs[i])
5553 break;
5554
5555 /* legacy and mcs rates may not be both empty */
5556 if (i == IEEE80211_HT_MCS_MASK_LEN)
5443 return -EINVAL; 5557 return -EINVAL;
5444 } 5558 }
5445 } 5559 }
@@ -6293,23 +6407,23 @@ static struct genl_ops nl80211_ops[] = {
6293 .cmd = NL80211_CMD_SET_BEACON, 6407 .cmd = NL80211_CMD_SET_BEACON,
6294 .policy = nl80211_policy, 6408 .policy = nl80211_policy,
6295 .flags = GENL_ADMIN_PERM, 6409 .flags = GENL_ADMIN_PERM,
6296 .doit = nl80211_addset_beacon, 6410 .doit = nl80211_set_beacon,
6297 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6411 .internal_flags = NL80211_FLAG_NEED_NETDEV |
6298 NL80211_FLAG_NEED_RTNL, 6412 NL80211_FLAG_NEED_RTNL,
6299 }, 6413 },
6300 { 6414 {
6301 .cmd = NL80211_CMD_NEW_BEACON, 6415 .cmd = NL80211_CMD_START_AP,
6302 .policy = nl80211_policy, 6416 .policy = nl80211_policy,
6303 .flags = GENL_ADMIN_PERM, 6417 .flags = GENL_ADMIN_PERM,
6304 .doit = nl80211_addset_beacon, 6418 .doit = nl80211_start_ap,
6305 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6419 .internal_flags = NL80211_FLAG_NEED_NETDEV |
6306 NL80211_FLAG_NEED_RTNL, 6420 NL80211_FLAG_NEED_RTNL,
6307 }, 6421 },
6308 { 6422 {
6309 .cmd = NL80211_CMD_DEL_BEACON, 6423 .cmd = NL80211_CMD_STOP_AP,
6310 .policy = nl80211_policy, 6424 .policy = nl80211_policy,
6311 .flags = GENL_ADMIN_PERM, 6425 .flags = GENL_ADMIN_PERM,
6312 .doit = nl80211_del_beacon, 6426 .doit = nl80211_stop_ap,
6313 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6427 .internal_flags = NL80211_FLAG_NEED_NETDEV |
6314 NL80211_FLAG_NEED_RTNL, 6428 NL80211_FLAG_NEED_RTNL,
6315 }, 6429 },
@@ -7580,7 +7694,8 @@ bool nl80211_unexpected_4addr_frame(struct net_device *dev,
7580 7694
7581int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, 7695int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
7582 struct net_device *netdev, u32 nlpid, 7696 struct net_device *netdev, u32 nlpid,
7583 int freq, const u8 *buf, size_t len, gfp_t gfp) 7697 int freq, int sig_dbm,
7698 const u8 *buf, size_t len, gfp_t gfp)
7584{ 7699{
7585 struct sk_buff *msg; 7700 struct sk_buff *msg;
7586 void *hdr; 7701 void *hdr;
@@ -7598,6 +7713,8 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
7598 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7713 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
7599 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7714 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
7600 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq); 7715 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
7716 if (sig_dbm)
7717 NLA_PUT_U32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm);
7601 NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf); 7718 NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
7602 7719
7603 genlmsg_end(msg, hdr); 7720 genlmsg_end(msg, hdr);
@@ -7859,7 +7976,7 @@ EXPORT_SYMBOL(cfg80211_probe_status);
7859 7976
7860void cfg80211_report_obss_beacon(struct wiphy *wiphy, 7977void cfg80211_report_obss_beacon(struct wiphy *wiphy,
7861 const u8 *frame, size_t len, 7978 const u8 *frame, size_t len,
7862 int freq, gfp_t gfp) 7979 int freq, int sig_dbm, gfp_t gfp)
7863{ 7980{
7864 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 7981 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
7865 struct sk_buff *msg; 7982 struct sk_buff *msg;
@@ -7882,6 +7999,8 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
7882 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7999 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
7883 if (freq) 8000 if (freq)
7884 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq); 8001 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
8002 if (sig_dbm)
8003 NLA_PUT_U32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm);
7885 NLA_PUT(msg, NL80211_ATTR_FRAME, len, frame); 8004 NLA_PUT(msg, NL80211_ATTR_FRAME, len, frame);
7886 8005
7887 genlmsg_end(msg, hdr); 8006 genlmsg_end(msg, hdr);
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 12bf4d185abe..4ffe50df9f31 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -92,7 +92,8 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
92 gfp_t gfp); 92 gfp_t gfp);
93 93
94int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, 94int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
95 struct net_device *netdev, u32 nlpid, int freq, 95 struct net_device *netdev, u32 nlpid,
96 int freq, int sig_dbm,
96 const u8 *buf, size_t len, gfp_t gfp); 97 const u8 *buf, size_t len, gfp_t gfp);
97void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev, 98void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
98 struct net_device *netdev, u64 cookie, 99 struct net_device *netdev, u64 cookie,
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index f65feaad155f..e9a0ac83b84c 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -882,23 +882,8 @@ static void handle_channel(struct wiphy *wiphy,
882 chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags); 882 chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
883 chan->max_antenna_gain = min(chan->orig_mag, 883 chan->max_antenna_gain = min(chan->orig_mag,
884 (int) MBI_TO_DBI(power_rule->max_antenna_gain)); 884 (int) MBI_TO_DBI(power_rule->max_antenna_gain));
885 if (chan->orig_mpwr) { 885 chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp);
886 /* 886 chan->max_power = min(chan->max_power, chan->max_reg_power);
887 * Devices that have their own custom regulatory domain
888 * but also use WIPHY_FLAG_STRICT_REGULATORY will follow the
889 * passed country IE power settings.
890 */
891 if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
892 wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY &&
893 wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
894 chan->max_power =
895 MBM_TO_DBM(power_rule->max_eirp);
896 } else {
897 chan->max_power = min(chan->orig_mpwr,
898 (int) MBM_TO_DBM(power_rule->max_eirp));
899 }
900 } else
901 chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
902} 887}
903 888
904static void handle_band(struct wiphy *wiphy, 889static void handle_band(struct wiphy *wiphy,
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 31119e32e092..70faadf16a32 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -734,9 +734,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
734struct cfg80211_bss* 734struct cfg80211_bss*
735cfg80211_inform_bss(struct wiphy *wiphy, 735cfg80211_inform_bss(struct wiphy *wiphy,
736 struct ieee80211_channel *channel, 736 struct ieee80211_channel *channel,
737 const u8 *bssid, 737 const u8 *bssid, u64 tsf, u16 capability,
738 u64 timestamp, u16 capability, u16 beacon_interval, 738 u16 beacon_interval, const u8 *ie, size_t ielen,
739 const u8 *ie, size_t ielen,
740 s32 signal, gfp_t gfp) 739 s32 signal, gfp_t gfp)
741{ 740{
742 struct cfg80211_internal_bss *res; 741 struct cfg80211_internal_bss *res;
@@ -758,7 +757,7 @@ cfg80211_inform_bss(struct wiphy *wiphy,
758 memcpy(res->pub.bssid, bssid, ETH_ALEN); 757 memcpy(res->pub.bssid, bssid, ETH_ALEN);
759 res->pub.channel = channel; 758 res->pub.channel = channel;
760 res->pub.signal = signal; 759 res->pub.signal = signal;
761 res->pub.tsf = timestamp; 760 res->pub.tsf = tsf;
762 res->pub.beacon_interval = beacon_interval; 761 res->pub.beacon_interval = beacon_interval;
763 res->pub.capability = capability; 762 res->pub.capability = capability;
764 /* 763 /*
@@ -861,6 +860,18 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
861} 860}
862EXPORT_SYMBOL(cfg80211_inform_bss_frame); 861EXPORT_SYMBOL(cfg80211_inform_bss_frame);
863 862
863void cfg80211_ref_bss(struct cfg80211_bss *pub)
864{
865 struct cfg80211_internal_bss *bss;
866
867 if (!pub)
868 return;
869
870 bss = container_of(pub, struct cfg80211_internal_bss, pub);
871 kref_get(&bss->ref);
872}
873EXPORT_SYMBOL(cfg80211_ref_bss);
874
864void cfg80211_put_bss(struct cfg80211_bss *pub) 875void cfg80211_put_bss(struct cfg80211_bss *pub)
865{ 876{
866 struct cfg80211_internal_bss *bss; 877 struct cfg80211_internal_bss *bss;
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 7b9ecaed96be..f7e937ff8978 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -179,7 +179,7 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
179 params->ssid, params->ssid_len, 179 params->ssid, params->ssid_len,
180 NULL, 0, 180 NULL, 0,
181 params->key, params->key_len, 181 params->key, params->key_len,
182 params->key_idx, false); 182 params->key_idx);
183 case CFG80211_CONN_ASSOCIATE_NEXT: 183 case CFG80211_CONN_ASSOCIATE_NEXT:
184 BUG_ON(!rdev->ops->assoc); 184 BUG_ON(!rdev->ops->assoc);
185 wdev->conn->state = CFG80211_CONN_ASSOCIATING; 185 wdev->conn->state = CFG80211_CONN_ASSOCIATING;
@@ -477,6 +477,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
477 kfree(wdev->connect_keys); 477 kfree(wdev->connect_keys);
478 wdev->connect_keys = NULL; 478 wdev->connect_keys = NULL;
479 wdev->ssid_len = 0; 479 wdev->ssid_len = 0;
480 cfg80211_put_bss(bss);
480 return; 481 return;
481 } 482 }
482 483
@@ -701,31 +702,10 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
701 wdev->ssid_len = 0; 702 wdev->ssid_len = 0;
702 703
703 if (wdev->conn) { 704 if (wdev->conn) {
704 const u8 *bssid;
705 int ret;
706
707 kfree(wdev->conn->ie); 705 kfree(wdev->conn->ie);
708 wdev->conn->ie = NULL; 706 wdev->conn->ie = NULL;
709 kfree(wdev->conn); 707 kfree(wdev->conn);
710 wdev->conn = NULL; 708 wdev->conn = NULL;
711
712 /*
713 * If this disconnect was due to a disassoc, we
714 * we might still have an auth BSS around. For
715 * the userspace SME that's currently expected,
716 * but for the kernel SME (nl80211 CONNECT or
717 * wireless extensions) we want to clear up all
718 * state.
719 */
720 for (i = 0; i < MAX_AUTH_BSSES; i++) {
721 if (!wdev->auth_bsses[i])
722 continue;
723 bssid = wdev->auth_bsses[i]->pub.bssid;
724 ret = __cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0,
725 WLAN_REASON_DEAUTH_LEAVING,
726 false);
727 WARN(ret, "deauth failed: %d\n", ret);
728 }
729 } 709 }
730 710
731 nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap); 711 nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap);
@@ -1012,7 +992,8 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
1012 return err; 992 return err;
1013} 993}
1014 994
1015void cfg80211_sme_disassoc(struct net_device *dev, int idx) 995void cfg80211_sme_disassoc(struct net_device *dev,
996 struct cfg80211_internal_bss *bss)
1016{ 997{
1017 struct wireless_dev *wdev = dev->ieee80211_ptr; 998 struct wireless_dev *wdev = dev->ieee80211_ptr;
1018 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 999 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -1031,16 +1012,8 @@ void cfg80211_sme_disassoc(struct net_device *dev, int idx)
1031 * want it any more so deauthenticate too. 1012 * want it any more so deauthenticate too.
1032 */ 1013 */
1033 1014
1034 if (!wdev->auth_bsses[idx]) 1015 memcpy(bssid, bss->pub.bssid, ETH_ALEN);
1035 return;
1036 1016
1037 memcpy(bssid, wdev->auth_bsses[idx]->pub.bssid, ETH_ALEN); 1017 __cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0,
1038 if (__cfg80211_mlme_deauth(rdev, dev, bssid, 1018 WLAN_REASON_DEAUTH_LEAVING, false);
1039 NULL, 0, WLAN_REASON_DEAUTH_LEAVING,
1040 false)) {
1041 /* whatever -- assume gone anyway */
1042 cfg80211_unhold_bss(wdev->auth_bsses[idx]);
1043 cfg80211_put_bss(&wdev->auth_bsses[idx]->pub);
1044 wdev->auth_bsses[idx] = NULL;
1045 }
1046} 1019}
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 9aa9db6c8141..1b7a08df933c 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -904,6 +904,7 @@ u16 cfg80211_calculate_bitrate(struct rate_info *rate)
904 /* do NOT round down here */ 904 /* do NOT round down here */
905 return (bitrate + 50000) / 100000; 905 return (bitrate + 50000) / 100000;
906} 906}
907EXPORT_SYMBOL(cfg80211_calculate_bitrate);
907 908
908int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev, 909int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
909 u32 beacon_int) 910 u32 beacon_int)
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 326750b99151..7c01c2f3b6cf 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -30,6 +30,9 @@ int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
30 wdev->wext.connect.ie = wdev->wext.ie; 30 wdev->wext.connect.ie = wdev->wext.ie;
31 wdev->wext.connect.ie_len = wdev->wext.ie_len; 31 wdev->wext.connect.ie_len = wdev->wext.ie_len;
32 32
33 /* Use default background scan period */
34 wdev->wext.connect.bg_scan_period = -1;
35
33 if (wdev->wext.keys) { 36 if (wdev->wext.keys) {
34 wdev->wext.keys->def = wdev->wext.default_key; 37 wdev->wext.keys->def = wdev->wext.default_key;
35 wdev->wext.keys->defmgmt = wdev->wext.default_mgmt_key; 38 wdev->wext.keys->defmgmt = wdev->wext.default_mgmt_key;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 66b84fbf2746..7128dde0fe1a 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2299,8 +2299,13 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2299 if (link->dump == NULL) 2299 if (link->dump == NULL)
2300 return -EINVAL; 2300 return -EINVAL;
2301 2301
2302 return netlink_dump_start(net->xfrm.nlsk, skb, nlh, 2302 {
2303 link->dump, link->done, 0); 2303 struct netlink_dump_control c = {
2304 .dump = link->dump,
2305 .done = link->done,
2306 };
2307 return netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
2308 }
2304 } 2309 }
2305 2310
2306 err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX, 2311 err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,