path: root/net
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c  2
-rw-r--r--  net/8021q/vlan_core.c  2
-rw-r--r--  net/8021q/vlan_dev.c  4
-rw-r--r--  net/9p/client.c  13
-rw-r--r--  net/9p/trans_virtio.c  17
-rw-r--r--  net/Kconfig  16
-rw-r--r--  net/Makefile  1
-rw-r--r--  net/activity_stats.c  115
-rw-r--r--  net/atm/br2684.c  9
-rw-r--r--  net/atm/clip.c  16
-rw-r--r--  net/ax25/af_ax25.c  9
-rw-r--r--  net/bluetooth/Kconfig  9
-rw-r--r--  net/bluetooth/Makefile  2
-rw-r--r--  net/bluetooth/af_bluetooth.c  44
-rw-r--r--  net/bluetooth/bnep/bnep.h  1
-rw-r--r--  net/bluetooth/bnep/core.c  13
-rw-r--r--  net/bluetooth/bnep/netdev.c  1
-rw-r--r--  net/bluetooth/cmtp/capi.c  3
-rw-r--r--  net/bluetooth/hci_conn.c  136
-rw-r--r--  net/bluetooth/hci_core.c  273
-rw-r--r--  net/bluetooth/hci_event.c  328
-rw-r--r--  net/bluetooth/hci_sock.c  70
-rw-r--r--  net/bluetooth/hidp/core.c  19
-rw-r--r--  net/bluetooth/l2cap_core.c  1043
-rw-r--r--  net/bluetooth/l2cap_sock.c  472
-rw-r--r--  net/bluetooth/lib.c  23
-rw-r--r--  net/bluetooth/mgmt.c  283
-rw-r--r--  net/bluetooth/rfcomm/core.c  18
-rw-r--r--  net/bluetooth/rfcomm/sock.c  33
-rw-r--r--  net/bluetooth/sco.c  86
-rw-r--r--  net/bluetooth/smp.c  702
-rw-r--r--  net/bridge/br_device.c  14
-rw-r--r--  net/bridge/br_if.c  16
-rw-r--r--  net/bridge/br_multicast.c  109
-rw-r--r--  net/bridge/br_netfilter.c  6
-rw-r--r--  net/bridge/br_netlink.c  3
-rw-r--r--  net/bridge/br_private.h  7
-rw-r--r--  net/bridge/br_stp.c  4
-rw-r--r--  net/caif/caif_dev.c  16
-rw-r--r--  net/caif/cfcnfg.c  1
-rw-r--r--  net/can/bcm.c  53
-rw-r--r--  net/core/Makefile  2
-rw-r--r--  net/core/dev.c  73
-rw-r--r--  net/core/dst.c  15
-rw-r--r--  net/core/ethtool.c  5
-rw-r--r--  net/core/fib_rules.c  4
-rw-r--r--  net/core/flow.c  31
-rw-r--r--  net/core/link_watch.c  2
-rw-r--r--  net/core/neighbour.c  29
-rw-r--r--  net/core/net-sysfs.c  37
-rw-r--r--  net/core/net_namespace.c  64
-rw-r--r--  net/core/netpoll.c  13
-rw-r--r--  net/core/pktgen.c  18
-rw-r--r--  net/core/rtnetlink.c  9
-rw-r--r--  net/core/scm.c  2
-rw-r--r--  net/core/secure_seq.c  184
-rw-r--r--  net/core/skbuff.c  4
-rw-r--r--  net/core/sock.c  8
-rw-r--r--  net/core/timestamping.c  12
-rw-r--r--  net/dccp/ipv4.c  1
-rw-r--r--  net/dccp/ipv6.c  9
-rw-r--r--  net/decnet/dn_neigh.c  8
-rw-r--r--  net/decnet/dn_route.c  18
-rw-r--r--  net/ethernet/eth.c  1
-rw-r--r--  net/ipv4/Makefile  1
-rw-r--r--  net/ipv4/af_inet.c  18
-rw-r--r--  net/ipv4/ah4.c  8
-rw-r--r--  net/ipv4/arp.c  31
-rw-r--r--  net/ipv4/cipso_ipv4.c  6
-rw-r--r--  net/ipv4/devinet.c  29
-rw-r--r--  net/ipv4/esp4.c  24
-rw-r--r--  net/ipv4/fib_semantics.c  20
-rw-r--r--  net/ipv4/fib_trie.c  2
-rw-r--r--  net/ipv4/gre.c  21
-rw-r--r--  net/ipv4/icmp.c  14
-rw-r--r--  net/ipv4/igmp.c  6
-rw-r--r--  net/ipv4/inet_hashtables.c  1
-rw-r--r--  net/ipv4/inetpeer.c  1
-rw-r--r--  net/ipv4/ip_forward.c  2
-rw-r--r--  net/ipv4/ip_gre.c  2
-rw-r--r--  net/ipv4/ip_options.c  4
-rw-r--r--  net/ipv4/ip_output.c  28
-rw-r--r--  net/ipv4/ipconfig.c  4
-rw-r--r--  net/ipv4/ipip.c  7
-rw-r--r--  net/ipv4/ipmr.c  2
-rw-r--r--  net/ipv4/netfilter.c  18
-rw-r--r--  net/ipv4/netfilter/Kconfig  12
-rw-r--r--  net/ipv4/netfilter/ipt_REJECT.c  8
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_common.c  1
-rw-r--r--  net/ipv4/route.c  225
-rw-r--r--  net/ipv4/syncookies.c  32
-rw-r--r--  net/ipv4/sysfs_net_ipv4.c  88
-rw-r--r--  net/ipv4/tcp.c  140
-rw-r--r--  net/ipv4/tcp_input.c  74
-rw-r--r--  net/ipv4/tcp_ipv4.c  29
-rw-r--r--  net/ipv4/tcp_output.c  6
-rw-r--r--  net/ipv4/xfrm4_mode_beet.c  5
-rw-r--r--  net/ipv4/xfrm4_mode_tunnel.c  6
-rw-r--r--  net/ipv4/xfrm4_policy.c  14
-rw-r--r--  net/ipv6/addrconf.c  75
-rw-r--r--  net/ipv6/af_inet6.c  36
-rw-r--r--  net/ipv6/ah6.c  8
-rw-r--r--  net/ipv6/esp6.c  18
-rw-r--r--  net/ipv6/inet6_hashtables.c  1
-rw-r--r--  net/ipv6/ip6_fib.c  2
-rw-r--r--  net/ipv6/ip6_output.c  148
-rw-r--r--  net/ipv6/ip6_tunnel.c  8
-rw-r--r--  net/ipv6/ip6mr.c  8
-rw-r--r--  net/ipv6/ipv6_sockglue.c  9
-rw-r--r--  net/ipv6/mcast.c  5
-rw-r--r--  net/ipv6/ndisc.c  4
-rw-r--r--  net/ipv6/netfilter/Kconfig  12
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c  14
-rw-r--r--  net/ipv6/netfilter/ip6t_REJECT.c  9
-rw-r--r--  net/ipv6/route.c  104
-rw-r--r--  net/ipv6/sit.c  11
-rw-r--r--  net/ipv6/syncookies.c  2
-rw-r--r--  net/ipv6/tcp_ipv6.c  18
-rw-r--r--  net/ipv6/udp.c  4
-rw-r--r--  net/ipv6/xfrm6_mode_beet.c  6
-rw-r--r--  net/ipv6/xfrm6_mode_tunnel.c  6
-rw-r--r--  net/l2tp/l2tp_core.c  4
-rw-r--r--  net/l2tp/l2tp_eth.c  4
-rw-r--r--  net/l2tp/l2tp_ip.c  38
-rw-r--r--  net/l2tp/l2tp_ppp.c  2
-rw-r--r--  net/llc/af_llc.c  14
-rw-r--r--  net/mac80211/agg-rx.c  3
-rw-r--r--  net/mac80211/agg-tx.c  127
-rw-r--r--  net/mac80211/cfg.c  2
-rw-r--r--  net/mac80211/ieee80211_i.h  2
-rw-r--r--  net/mac80211/iface.c  13
-rw-r--r--  net/mac80211/main.c  4
-rw-r--r--  net/mac80211/mesh.c  1
-rw-r--r--  net/mac80211/mlme.c  21
-rw-r--r--  net/mac80211/offchannel.c  16
-rw-r--r--  net/mac80211/rate.c  2
-rw-r--r--  net/mac80211/rx.c  16
-rw-r--r--  net/mac80211/sta_info.c  3
-rw-r--r--  net/mac80211/status.c  2
-rw-r--r--  net/mac80211/tx.c  3
-rw-r--r--  net/mac80211/util.c  16
-rw-r--r--  net/mac80211/work.c  9
-rw-r--r--  net/mac80211/wpa.c  2
-rw-r--r--  net/netfilter/Kconfig  42
-rw-r--r--  net/netfilter/Makefile  2
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c  2
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c  1
-rw-r--r--  net/netfilter/xt_qtaguid.c  2785
-rw-r--r--  net/netfilter/xt_qtaguid_internal.h  330
-rw-r--r--  net/netfilter/xt_qtaguid_print.c  556
-rw-r--r--  net/netfilter/xt_qtaguid_print.h  120
-rw-r--r--  net/netfilter/xt_quota2.c  381
-rw-r--r--  net/netfilter/xt_socket.c  70
-rw-r--r--  net/netlink/af_netlink.c  24
-rw-r--r--  net/packet/af_packet.c  5
-rw-r--r--  net/phonet/pep.c  3
-rw-r--r--  net/rds/af_rds.c  20
-rw-r--r--  net/rds/send.c  1
-rw-r--r--  net/rfkill/Kconfig  5
-rw-r--r--  net/rfkill/core.c  4
-rw-r--r--  net/rose/rose_dev.c  4
-rw-r--r--  net/sched/sch_choke.c  3
-rw-r--r--  net/sched/sch_generic.c  1
-rw-r--r--  net/sched/sch_gred.c  9
-rw-r--r--  net/sched/sch_mqprio.c  2
-rw-r--r--  net/sched/sch_netem.c  13
-rw-r--r--  net/sched/sch_prio.c  2
-rw-r--r--  net/sched/sch_sfb.c  5
-rw-r--r--  net/sched/sch_sfq.c  7
-rw-r--r--  net/sched/sch_teql.c  31
-rw-r--r--  net/sctp/associola.c  2
-rw-r--r--  net/sctp/input.c  7
-rw-r--r--  net/sctp/output.c  12
-rw-r--r--  net/sctp/outqueue.c  6
-rw-r--r--  net/sctp/protocol.c  3
-rw-r--r--  net/sctp/socket.c  19
-rw-r--r--  net/sctp/sysctl.c  13
-rw-r--r--  net/sctp/transport.c  17
-rw-r--r--  net/socket.c  81
-rw-r--r--  net/sunrpc/auth_unix.c  3
-rw-r--r--  net/sunrpc/cache.c  2
-rw-r--r--  net/sunrpc/rpcb_clnt.c  4
-rw-r--r--  net/sunrpc/sched.c  17
-rw-r--r--  net/sunrpc/svc.c  23
-rw-r--r--  net/sunrpc/svc_xprt.c  52
-rw-r--r--  net/sunrpc/xprtrdma/transport.c  3
-rw-r--r--  net/sunrpc/xprtsock.c  13
-rw-r--r--  net/wanrouter/wanmain.c  51
-rw-r--r--  net/wireless/Kconfig  11
-rw-r--r--  net/wireless/core.c  5
-rw-r--r--  net/wireless/core.h  1
-rw-r--r--  net/wireless/nl80211.c  46
-rw-r--r--  net/wireless/reg.c  71
-rw-r--r--  net/wireless/scan.c  2
-rw-r--r--  net/wireless/sme.c  6
-rw-r--r--  net/wireless/util.c  28
-rw-r--r--  net/x25/af_x25.c  3
-rw-r--r--  net/xfrm/xfrm_algo.c  4
-rw-r--r--  net/xfrm/xfrm_input.c  5
-rw-r--r--  net/xfrm/xfrm_policy.c  5
-rw-r--r--  net/xfrm/xfrm_replay.c  6
201 files changed, 9144 insertions, 2047 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 917ecb93ea2..d265c526839 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -438,7 +438,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
438 } 438 }
439 439
440 break; 440 break;
441 441 case NETDEV_GOING_DOWN:
442 case NETDEV_DOWN: 442 case NETDEV_DOWN:
443 /* Put all VLANs for this dev in the down state too. */ 443 /* Put all VLANs for this dev in the down state too. */
444 for (i = 0; i < VLAN_N_VID; i++) { 444 for (i = 0; i < VLAN_N_VID; i++) {
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index fcc684678af..27263fb1564 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -171,6 +171,8 @@ struct sk_buff *vlan_untag(struct sk_buff *skb)
171 if (unlikely(!skb)) 171 if (unlikely(!skb))
172 goto err_free; 172 goto err_free;
173 173
174 skb_reset_network_header(skb);
175 skb_reset_transport_header(skb);
174 return skb; 176 return skb;
175 177
176err_free: 178err_free:
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 6e82148edfc..d54845618c2 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -154,7 +154,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
154 skb = __vlan_hwaccel_put_tag(skb, vlan_tci); 154 skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
155 } 155 }
156 156
157 skb_set_dev(skb, vlan_dev_info(dev)->real_dev); 157 skb->dev = vlan_dev_info(dev)->real_dev;
158 len = skb->len; 158 len = skb->len;
159 ret = dev_queue_xmit(skb); 159 ret = dev_queue_xmit(skb);
160 160
@@ -694,7 +694,7 @@ void vlan_setup(struct net_device *dev)
694 ether_setup(dev); 694 ether_setup(dev);
695 695
696 dev->priv_flags |= IFF_802_1Q_VLAN; 696 dev->priv_flags |= IFF_802_1Q_VLAN;
697 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 697 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
698 dev->tx_queue_len = 0; 698 dev->tx_queue_len = 0;
699 699
700 dev->netdev_ops = &vlan_netdev_ops; 700 dev->netdev_ops = &vlan_netdev_ops;
diff --git a/net/9p/client.c b/net/9p/client.c
index 9e3b0e640da..5532710fbfe 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -280,7 +280,8 @@ struct p9_req_t *p9_tag_lookup(struct p9_client *c, u16 tag)
280 * buffer to read the data into */ 280 * buffer to read the data into */
281 tag++; 281 tag++;
282 282
283 BUG_ON(tag >= c->max_tag); 283 if (tag >= c->max_tag)
284 return NULL;
284 285
285 row = tag / P9_ROW_MAXTAG; 286 row = tag / P9_ROW_MAXTAG;
286 col = tag % P9_ROW_MAXTAG; 287 col = tag % P9_ROW_MAXTAG;
@@ -821,8 +822,8 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
821 if (err) 822 if (err)
822 goto destroy_fidpool; 823 goto destroy_fidpool;
823 824
824 if ((clnt->msize+P9_IOHDRSZ) > clnt->trans_mod->maxsize) 825 if (clnt->msize > clnt->trans_mod->maxsize)
825 clnt->msize = clnt->trans_mod->maxsize-P9_IOHDRSZ; 826 clnt->msize = clnt->trans_mod->maxsize;
826 827
827 err = p9_client_version(clnt); 828 err = p9_client_version(clnt);
828 if (err) 829 if (err)
@@ -1249,9 +1250,11 @@ int p9_client_clunk(struct p9_fid *fid)
1249 P9_DPRINTK(P9_DEBUG_9P, "<<< RCLUNK fid %d\n", fid->fid); 1250 P9_DPRINTK(P9_DEBUG_9P, "<<< RCLUNK fid %d\n", fid->fid);
1250 1251
1251 p9_free_req(clnt, req); 1252 p9_free_req(clnt, req);
1252 p9_fid_destroy(fid);
1253
1254error: 1253error:
1254 /*
1255 * Fid is not valid even after a failed clunk
1256 */
1257 p9_fid_destroy(fid);
1255 return err; 1258 return err;
1256} 1259}
1257EXPORT_SYMBOL(p9_client_clunk); 1260EXPORT_SYMBOL(p9_client_clunk);
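
The clunk hunk above moves the fid teardown below the error label so it runs whether or not the TCLUNK round-trip succeeds. A minimal userspace sketch of that control flow (illustrative names, not the 9p API):

#include <stdio.h>

struct resource { int id; };

static int send_request(struct resource *res)
{
	return res->id < 0 ? -1 : 0;	/* pretend the server may reject us */
}

static void destroy_resource(struct resource *res)
{
	printf("destroying resource %d\n", res->id);
}

/* Cleanup that must run on success *and* failure sits below the error
 * label, mirroring how p9_client_clunk() now destroys the fid even when
 * the TCLUNK request fails. */
static int clunk_like(struct resource *res)
{
	int err;

	err = send_request(res);
	if (err)
		goto error;
	/* success-only bookkeeping would go here */
error:
	destroy_resource(res);
	return err;
}

int main(void)
{
	struct resource ok = { 1 }, bad = { -1 };

	printf("ok:  %d\n", clunk_like(&ok));
	printf("bad: %d\n", clunk_like(&bad));
	return 0;
}
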
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 244e7074218..e317583fcc7 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -263,7 +263,6 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
263{ 263{
264 int in, out, inp, outp; 264 int in, out, inp, outp;
265 struct virtio_chan *chan = client->trans; 265 struct virtio_chan *chan = client->trans;
266 char *rdata = (char *)req->rc+sizeof(struct p9_fcall);
267 unsigned long flags; 266 unsigned long flags;
268 size_t pdata_off = 0; 267 size_t pdata_off = 0;
269 struct trans_rpage_info *rpinfo = NULL; 268 struct trans_rpage_info *rpinfo = NULL;
@@ -346,7 +345,8 @@ req_retry_pinned:
346 * Arrange in such a way that server places header in the 345 * Arrange in such a way that server places header in the
347 * alloced memory and payload onto the user buffer. 346 * alloced memory and payload onto the user buffer.
348 */ 347 */
349 inp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, 11); 348 inp = pack_sg_list(chan->sg, out,
349 VIRTQUEUE_NUM, req->rc->sdata, 11);
350 /* 350 /*
351 * Running executables in the filesystem may result in 351 * Running executables in the filesystem may result in
352 * a read request with kernel buffer as opposed to user buffer. 352 * a read request with kernel buffer as opposed to user buffer.
@@ -366,8 +366,8 @@ req_retry_pinned:
366 } 366 }
367 in += inp; 367 in += inp;
368 } else { 368 } else {
369 in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, 369 in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM,
370 client->msize); 370 req->rc->sdata, req->rc->capacity);
371 } 371 }
372 372
373 err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc); 373 err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
@@ -592,7 +592,14 @@ static struct p9_trans_module p9_virtio_trans = {
592 .close = p9_virtio_close, 592 .close = p9_virtio_close,
593 .request = p9_virtio_request, 593 .request = p9_virtio_request,
594 .cancel = p9_virtio_cancel, 594 .cancel = p9_virtio_cancel,
595 .maxsize = PAGE_SIZE*16, 595
596 /*
597 * We leave one entry for input and one entry for response
598 * headers. We also skip one more entry to accommodate addresses
599 * that are not at a page boundary, which can result in an extra
600 * page in zero copy.
601 */
602 .maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
596 .pref = P9_TRANS_PREF_PAYLOAD_SEP, 603 .pref = P9_TRANS_PREF_PAYLOAD_SEP,
597 .def = 0, 604 .def = 0,
598 .owner = THIS_MODULE, 605 .owner = THIS_MODULE,
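
The new .maxsize is easy to sanity-check; assuming the driver's VIRTQUEUE_NUM of 128 and 4 KiB pages, a standalone sketch:

#include <stdio.h>

#define PAGE_SIZE	4096	/* assumed: 4 KiB pages */
#define VIRTQUEUE_NUM	128	/* sg ring size used by trans_virtio.c */

int main(void)
{
	/* One entry is reserved for the request header, one for the
	 * response header, and one more for a payload that does not
	 * start on a page boundary and so spills into an extra page. */
	long maxsize = (long)PAGE_SIZE * (VIRTQUEUE_NUM - 3);

	printf("old maxsize: %d bytes\n", PAGE_SIZE * 16);	/* 65536 */
	printf("new maxsize: %ld bytes\n", maxsize);		/* 512000 */
	return 0;
}
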
diff --git a/net/Kconfig b/net/Kconfig
index 878151c772c..919cf9a8212 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -79,6 +79,20 @@ source "net/netlabel/Kconfig"
79 79
80endif # if INET 80endif # if INET
81 81
82config ANDROID_PARANOID_NETWORK
83 bool "Only allow certain groups to create sockets"
84 default y
85 help
86 none
87
88config NET_ACTIVITY_STATS
89 bool "Network activity statistics tracking"
90 default y
91 help
92 Network activity statistics are useful for tracking wireless
93 modem activity on 2G, 3G, 4G wireless networks. Counts number of
94 transmissions and groups them in specified time buckets.
95
82config NETWORK_SECMARK 96config NETWORK_SECMARK
83 bool "Security Marking" 97 bool "Security Marking"
84 help 98 help
@@ -217,7 +231,7 @@ source "net/dns_resolver/Kconfig"
217source "net/batman-adv/Kconfig" 231source "net/batman-adv/Kconfig"
218 232
219config RPS 233config RPS
220 boolean 234 boolean "RPS"
221 depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS 235 depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
222 default y 236 default y
223 237
diff --git a/net/Makefile b/net/Makefile
index a51d9465e62..54808aba6c1 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -68,3 +68,4 @@ obj-$(CONFIG_WIMAX) += wimax/
68obj-$(CONFIG_DNS_RESOLVER) += dns_resolver/ 68obj-$(CONFIG_DNS_RESOLVER) += dns_resolver/
69obj-$(CONFIG_CEPH_LIB) += ceph/ 69obj-$(CONFIG_CEPH_LIB) += ceph/
70obj-$(CONFIG_BATMAN_ADV) += batman-adv/ 70obj-$(CONFIG_BATMAN_ADV) += batman-adv/
71obj-$(CONFIG_NET_ACTIVITY_STATS) += activity_stats.o
diff --git a/net/activity_stats.c b/net/activity_stats.c
new file mode 100644
index 00000000000..8a3e9347006
--- /dev/null
+++ b/net/activity_stats.c
@@ -0,0 +1,115 @@
1/* net/activity_stats.c
2 *
3 * Copyright (C) 2010 Google, Inc.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * Author: Mike Chan (mike@android.com)
15 */
16
17#include <linux/proc_fs.h>
18#include <linux/suspend.h>
19#include <net/net_namespace.h>
20
21/*
22 * Track transmission rates in buckets (power of 2).
23 * 1,2,4,8...512 seconds.
24 *
25 * Buckets represent the count of network transmissions at least
26 * N seconds apart, where N is 1 << bucket index.
27 */
28#define BUCKET_MAX 10
29
30/* Track network activity frequency */
31static unsigned long activity_stats[BUCKET_MAX];
32static ktime_t last_transmit;
33static ktime_t suspend_time;
34static DEFINE_SPINLOCK(activity_lock);
35
36void activity_stats_update(void)
37{
38 int i;
39 unsigned long flags;
40 ktime_t now;
41 s64 delta;
42
43 spin_lock_irqsave(&activity_lock, flags);
44 now = ktime_get();
45 delta = ktime_to_ns(ktime_sub(now, last_transmit));
46
47 for (i = BUCKET_MAX - 1; i >= 0; i--) {
48 /*
49 * Check if the time delta between network activity is within the
50 * minimum bucket range.
51 */
52 if (delta < (1000000000ULL << i))
53 continue;
54
55 activity_stats[i]++;
56 last_transmit = now;
57 break;
58 }
59 spin_unlock_irqrestore(&activity_lock, flags);
60}
61
62static int activity_stats_read_proc(char *page, char **start, off_t off,
63 int count, int *eof, void *data)
64{
65 int i;
66 int len;
67 char *p = page;
68
69 /* Only print if offset is 0, or we have enough buffer space */
70 if (off || count < (30 * BUCKET_MAX + 22))
71 return -ENOMEM;
72
73 len = snprintf(p, count, "Min Bucket(sec) Count\n");
74 count -= len;
75 p += len;
76
77 for (i = 0; i < BUCKET_MAX; i++) {
78 len = snprintf(p, count, "%15d %lu\n", 1 << i, activity_stats[i]);
79 count -= len;
80 p += len;
81 }
82 *eof = 1;
83
84 return p - page;
85}
86
87static int activity_stats_notifier(struct notifier_block *nb,
88 unsigned long event, void *dummy)
89{
90 switch (event) {
91 case PM_SUSPEND_PREPARE:
92 suspend_time = ktime_get_real();
93 break;
94
95 case PM_POST_SUSPEND:
96 suspend_time = ktime_sub(ktime_get_real(), suspend_time);
97 last_transmit = ktime_sub(last_transmit, suspend_time);
98 }
99
100 return 0;
101}
102
103static struct notifier_block activity_stats_notifier_block = {
104 .notifier_call = activity_stats_notifier,
105};
106
107static int __init activity_stats_init(void)
108{
109 create_proc_read_entry("activity", S_IRUGO,
110 init_net.proc_net_stat, activity_stats_read_proc, NULL);
111 return register_pm_notifier(&activity_stats_notifier_block);
112}
113
114subsys_initcall(activity_stats_init);
115
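
A userspace sketch of the bucket arithmetic used by activity_stats_update() above (same constants, simplified types):

#include <stdio.h>

#define BUCKET_MAX 10

/* Same selection rule as the loop in activity_stats_update(): count the
 * largest bucket i such that delta >= 2^i seconds. Deltas under one
 * second fall through and nothing is counted. */
static int bucket_for(long long delta_ns)
{
	int i;

	for (i = BUCKET_MAX - 1; i >= 0; i--)
		if (delta_ns >= (1000000000LL << i))
			return i;
	return -1;
}

int main(void)
{
	/* 300 s between transmissions: 256 <= 300 < 512, so bucket 8. */
	printf("bucket: %d\n", bucket_for(300LL * 1000000000LL));
	return 0;
}
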
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 2252c2085da..d07223c834a 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -242,8 +242,6 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
242 if (brdev->payload == p_bridged) { 242 if (brdev->payload == p_bridged) {
243 skb_push(skb, 2); 243 skb_push(skb, 2);
244 memset(skb->data, 0, 2); 244 memset(skb->data, 0, 2);
245 } else { /* p_routed */
246 skb_pull(skb, ETH_HLEN);
247 } 245 }
248 } 246 }
249 skb_debug(skb); 247 skb_debug(skb);
@@ -560,12 +558,13 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
560 spin_unlock_irqrestore(&rq->lock, flags); 558 spin_unlock_irqrestore(&rq->lock, flags);
561 559
562 skb_queue_walk_safe(&queue, skb, tmp) { 560 skb_queue_walk_safe(&queue, skb, tmp) {
563 struct net_device *dev = skb->dev; 561 struct net_device *dev;
562
563 br2684_push(atmvcc, skb);
564 dev = skb->dev;
564 565
565 dev->stats.rx_bytes -= skb->len; 566 dev->stats.rx_bytes -= skb->len;
566 dev->stats.rx_packets--; 567 dev->stats.rx_packets--;
567
568 br2684_push(atmvcc, skb);
569 } 568 }
570 569
571 /* initialize netdev carrier state */ 570 /* initialize netdev carrier state */
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 1d4be60e139..5889074e971 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -364,33 +364,37 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
364 struct net_device *dev) 364 struct net_device *dev)
365{ 365{
366 struct clip_priv *clip_priv = PRIV(dev); 366 struct clip_priv *clip_priv = PRIV(dev);
367 struct dst_entry *dst = skb_dst(skb);
367 struct atmarp_entry *entry; 368 struct atmarp_entry *entry;
369 struct neighbour *n;
368 struct atm_vcc *vcc; 370 struct atm_vcc *vcc;
369 int old; 371 int old;
370 unsigned long flags; 372 unsigned long flags;
371 373
372 pr_debug("(skb %p)\n", skb); 374 pr_debug("(skb %p)\n", skb);
373 if (!skb_dst(skb)) { 375 if (!dst) {
374 pr_err("skb_dst(skb) == NULL\n"); 376 pr_err("skb_dst(skb) == NULL\n");
375 dev_kfree_skb(skb); 377 dev_kfree_skb(skb);
376 dev->stats.tx_dropped++; 378 dev->stats.tx_dropped++;
377 return NETDEV_TX_OK; 379 return NETDEV_TX_OK;
378 } 380 }
379 if (!skb_dst(skb)->neighbour) { 381 n = dst_get_neighbour(dst);
382 if (!n) {
380#if 0 383#if 0
381 skb_dst(skb)->neighbour = clip_find_neighbour(skb_dst(skb), 1); 384 n = clip_find_neighbour(skb_dst(skb), 1);
382 if (!skb_dst(skb)->neighbour) { 385 if (!n) {
383 dev_kfree_skb(skb); /* lost that one */ 386 dev_kfree_skb(skb); /* lost that one */
384 dev->stats.tx_dropped++; 387 dev->stats.tx_dropped++;
385 return 0; 388 return 0;
386 } 389 }
390 dst_set_neighbour(dst, n);
387#endif 391#endif
388 pr_err("NO NEIGHBOUR !\n"); 392 pr_err("NO NEIGHBOUR !\n");
389 dev_kfree_skb(skb); 393 dev_kfree_skb(skb);
390 dev->stats.tx_dropped++; 394 dev->stats.tx_dropped++;
391 return NETDEV_TX_OK; 395 return NETDEV_TX_OK;
392 } 396 }
393 entry = NEIGH2ENTRY(skb_dst(skb)->neighbour); 397 entry = NEIGH2ENTRY(n);
394 if (!entry->vccs) { 398 if (!entry->vccs) {
395 if (time_after(jiffies, entry->expires)) { 399 if (time_after(jiffies, entry->expires)) {
396 /* should be resolved */ 400 /* should be resolved */
@@ -407,7 +411,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
407 } 411 }
408 pr_debug("neigh %p, vccs %p\n", entry, entry->vccs); 412 pr_debug("neigh %p, vccs %p\n", entry, entry->vccs);
409 ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc; 413 ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc;
410 pr_debug("using neighbour %p, vcc %p\n", skb_dst(skb)->neighbour, vcc); 414 pr_debug("using neighbour %p, vcc %p\n", n, vcc);
411 if (entry->vccs->encap) { 415 if (entry->vccs->encap) {
412 void *here; 416 void *here;
413 417
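
The clip.c hunks replace direct skb_dst(skb)->neighbour dereferences with the dst_get_neighbour()/dst_set_neighbour() accessors of this kernel generation. A compressed kernel-context sketch of the resulting pattern (not buildable standalone; error handling trimmed):

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>

/* Fetch the neighbour once through the accessor and reuse the local
 * pointer, as the hunks above do, instead of re-reading
 * skb_dst(skb)->neighbour at every use. */
static int xmit_shape(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct neighbour *n;

	if (!dst)
		return -EINVAL;

	n = dst_get_neighbour(dst);
	if (!n)
		return -EHOSTUNREACH;

	/* ... resolve the ATM entry from n and transmit ... */
	return 0;
}
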
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index e7c69f4619e..b04a6ef4da9 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -2006,16 +2006,17 @@ static void __exit ax25_exit(void)
2006 proc_net_remove(&init_net, "ax25_route"); 2006 proc_net_remove(&init_net, "ax25_route");
2007 proc_net_remove(&init_net, "ax25"); 2007 proc_net_remove(&init_net, "ax25");
2008 proc_net_remove(&init_net, "ax25_calls"); 2008 proc_net_remove(&init_net, "ax25_calls");
2009 ax25_rt_free();
2010 ax25_uid_free();
2011 ax25_dev_free();
2012 2009
2013 ax25_unregister_sysctl();
2014 unregister_netdevice_notifier(&ax25_dev_notifier); 2010 unregister_netdevice_notifier(&ax25_dev_notifier);
2011 ax25_unregister_sysctl();
2015 2012
2016 dev_remove_pack(&ax25_packet_type); 2013 dev_remove_pack(&ax25_packet_type);
2017 2014
2018 sock_unregister(PF_AX25); 2015 sock_unregister(PF_AX25);
2019 proto_unregister(&ax25_proto); 2016 proto_unregister(&ax25_proto);
2017
2018 ax25_rt_free();
2019 ax25_uid_free();
2020 ax25_dev_free();
2020} 2021}
2021module_exit(ax25_exit); 2022module_exit(ax25_exit);
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 6ae5ec50858..bfb3dc03c9d 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -6,6 +6,7 @@ menuconfig BT
6 tristate "Bluetooth subsystem support" 6 tristate "Bluetooth subsystem support"
7 depends on NET && !S390 7 depends on NET && !S390
8 depends on RFKILL || !RFKILL 8 depends on RFKILL || !RFKILL
9 select CRYPTO
9 help 10 help
10 Bluetooth is low-cost, low-power, short-range wireless technology. 11 Bluetooth is low-cost, low-power, short-range wireless technology.
11 It was designed as a replacement for cables and other short-range 12 It was designed as a replacement for cables and other short-range
@@ -22,6 +23,7 @@ menuconfig BT
22 BNEP Module (Bluetooth Network Encapsulation Protocol) 23 BNEP Module (Bluetooth Network Encapsulation Protocol)
23 CMTP Module (CAPI Message Transport Protocol) 24 CMTP Module (CAPI Message Transport Protocol)
24 HIDP Module (Human Interface Device Protocol) 25 HIDP Module (Human Interface Device Protocol)
26 SMP Module (Security Manager Protocol)
25 27
26 Say Y here to compile Bluetooth support into the kernel or say M to 28 Say Y here to compile Bluetooth support into the kernel or say M to
27 compile it as module (bluetooth). 29 compile it as module (bluetooth).
@@ -36,11 +38,18 @@ if BT != n
36config BT_L2CAP 38config BT_L2CAP
37 bool "L2CAP protocol support" 39 bool "L2CAP protocol support"
38 select CRC16 40 select CRC16
41 select CRYPTO
42 select CRYPTO_BLKCIPHER
43 select CRYPTO_AES
44 select CRYPTO_ECB
39 help 45 help
40 L2CAP (Logical Link Control and Adaptation Protocol) provides 46 L2CAP (Logical Link Control and Adaptation Protocol) provides
41 connection oriented and connection-less data transport. L2CAP 47 connection oriented and connection-less data transport. L2CAP
42 support is required for most Bluetooth applications. 48 support is required for most Bluetooth applications.
43 49
50 Also included is support for SMP (Security Manager Protocol) which
51 is the security layer on top of LE (Low Energy) links.
52
44config BT_SCO 53config BT_SCO
45 bool "SCO links support" 54 bool "SCO links support"
46 help 55 help
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index f04fe9a9d63..9b67f3d08fa 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -9,5 +9,5 @@ obj-$(CONFIG_BT_CMTP) += cmtp/
9obj-$(CONFIG_BT_HIDP) += hidp/ 9obj-$(CONFIG_BT_HIDP) += hidp/
10 10
11bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o 11bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o
12bluetooth-$(CONFIG_BT_L2CAP) += l2cap_core.o l2cap_sock.o 12bluetooth-$(CONFIG_BT_L2CAP) += l2cap_core.o l2cap_sock.o smp.o
13bluetooth-$(CONFIG_BT_SCO) += sco.o 13bluetooth-$(CONFIG_BT_SCO) += sco.o
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 8add9b49991..7c73a10d7ed 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -40,6 +40,15 @@
40 40
41#include <net/bluetooth/bluetooth.h> 41#include <net/bluetooth/bluetooth.h>
42 42
43#ifdef CONFIG_ANDROID_PARANOID_NETWORK
44#include <linux/android_aid.h>
45#endif
46
47#ifndef CONFIG_BT_SOCK_DEBUG
48#undef BT_DBG
49#define BT_DBG(D...)
50#endif
51
43#define VERSION "2.16" 52#define VERSION "2.16"
44 53
45/* Bluetooth sockets */ 54/* Bluetooth sockets */
@@ -125,11 +134,40 @@ int bt_sock_unregister(int proto)
125} 134}
126EXPORT_SYMBOL(bt_sock_unregister); 135EXPORT_SYMBOL(bt_sock_unregister);
127 136
137#ifdef CONFIG_ANDROID_PARANOID_NETWORK
138static inline int current_has_bt_admin(void)
139{
140 return (!current_euid() || in_egroup_p(AID_NET_BT_ADMIN));
141}
142
143static inline int current_has_bt(void)
144{
145 return (current_has_bt_admin() || in_egroup_p(AID_NET_BT));
146}
147# else
148static inline int current_has_bt_admin(void)
149{
150 return 1;
151}
152
153static inline int current_has_bt(void)
154{
155 return 1;
156}
157#endif
158
128static int bt_sock_create(struct net *net, struct socket *sock, int proto, 159static int bt_sock_create(struct net *net, struct socket *sock, int proto,
129 int kern) 160 int kern)
130{ 161{
131 int err; 162 int err;
132 163
164 if (proto == BTPROTO_RFCOMM || proto == BTPROTO_SCO ||
165 proto == BTPROTO_L2CAP) {
166 if (!current_has_bt())
167 return -EPERM;
168 } else if (!current_has_bt_admin())
169 return -EPERM;
170
133 if (net != &init_net) 171 if (net != &init_net)
134 return -EAFNOSUPPORT; 172 return -EAFNOSUPPORT;
135 173
@@ -494,9 +532,8 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
494 BT_DBG("sk %p", sk); 532 BT_DBG("sk %p", sk);
495 533
496 add_wait_queue(sk_sleep(sk), &wait); 534 add_wait_queue(sk_sleep(sk), &wait);
535 set_current_state(TASK_INTERRUPTIBLE);
497 while (sk->sk_state != state) { 536 while (sk->sk_state != state) {
498 set_current_state(TASK_INTERRUPTIBLE);
499
500 if (!timeo) { 537 if (!timeo) {
501 err = -EINPROGRESS; 538 err = -EINPROGRESS;
502 break; 539 break;
@@ -510,12 +547,13 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
510 release_sock(sk); 547 release_sock(sk);
511 timeo = schedule_timeout(timeo); 548 timeo = schedule_timeout(timeo);
512 lock_sock(sk); 549 lock_sock(sk);
550 set_current_state(TASK_INTERRUPTIBLE);
513 551
514 err = sock_error(sk); 552 err = sock_error(sk);
515 if (err) 553 if (err)
516 break; 554 break;
517 } 555 }
518 set_current_state(TASK_RUNNING); 556 __set_current_state(TASK_RUNNING);
519 remove_wait_queue(sk_sleep(sk), &wait); 557 remove_wait_queue(sk_sleep(sk), &wait);
520 return err; 558 return err;
521} 559}
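
The bt_sock_wait_state() hunk reorders the wait loop so the task is marked TASK_INTERRUPTIBLE before the state is tested, closing the window where a wakeup between the test and schedule_timeout() could be lost. A stripped-down kernel-context sketch of that shape (sock locking and signal checks omitted):

#include <linux/sched.h>
#include <linux/wait.h>

/* The task state is set *before* each test of the condition, so a
 * wake_up() arriving between the test and the sleep just turns
 * schedule_timeout() into a no-op instead of being missed. */
static long wait_for_value(wait_queue_head_t *wq, int *cond, int want,
			   long timeo)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(wq, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (*cond != want && timeo) {
		timeo = schedule_timeout(timeo);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(wq, &wait);
	return timeo;
}
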
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index 8e6c06158f8..e7ee5314f39 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -155,6 +155,7 @@ struct bnep_session {
155 unsigned int role; 155 unsigned int role;
156 unsigned long state; 156 unsigned long state;
157 unsigned long flags; 157 unsigned long flags;
158 atomic_t terminate;
158 struct task_struct *task; 159 struct task_struct *task;
159 160
160 struct ethhdr eh; 161 struct ethhdr eh;
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index ca39fcf010c..d9edfe8bf9d 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -484,9 +484,11 @@ static int bnep_session(void *arg)
484 484
485 init_waitqueue_entry(&wait, current); 485 init_waitqueue_entry(&wait, current);
486 add_wait_queue(sk_sleep(sk), &wait); 486 add_wait_queue(sk_sleep(sk), &wait);
487 while (!kthread_should_stop()) { 487 while (1) {
488 set_current_state(TASK_INTERRUPTIBLE); 488 set_current_state(TASK_INTERRUPTIBLE);
489 489
490 if (atomic_read(&s->terminate))
491 break;
490 /* RX */ 492 /* RX */
491 while ((skb = skb_dequeue(&sk->sk_receive_queue))) { 493 while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
492 skb_orphan(skb); 494 skb_orphan(skb);
@@ -504,7 +506,7 @@ static int bnep_session(void *arg)
504 506
505 schedule(); 507 schedule();
506 } 508 }
507 set_current_state(TASK_RUNNING); 509 __set_current_state(TASK_RUNNING);
508 remove_wait_queue(sk_sleep(sk), &wait); 510 remove_wait_queue(sk_sleep(sk), &wait);
509 511
510 /* Cleanup session */ 512 /* Cleanup session */
@@ -640,9 +642,10 @@ int bnep_del_connection(struct bnep_conndel_req *req)
640 down_read(&bnep_session_sem); 642 down_read(&bnep_session_sem);
641 643
642 s = __bnep_get_session(req->dst); 644 s = __bnep_get_session(req->dst);
643 if (s) 645 if (s) {
644 kthread_stop(s->task); 646 atomic_inc(&s->terminate);
645 else 647 wake_up_process(s->task);
648 } else
646 err = -ENOENT; 649 err = -ENOENT;
647 650
648 up_read(&bnep_session_sem); 651 up_read(&bnep_session_sem);
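
The bnep change swaps kthread_stop() for an atomic terminate flag plus wake_up_process(), since kthread_stop() blocks and requires the thread to still exist while the session thread may need to exit on its own. A minimal kernel-context sketch of the handshake (fields reduced to the essentials):

#include <linux/atomic.h>
#include <linux/sched.h>

struct worker {
	atomic_t terminate;
	struct task_struct *task;
};

/* Controller side: set the flag, then nudge the worker out of schedule(). */
static void worker_request_stop(struct worker *w)
{
	atomic_inc(&w->terminate);
	wake_up_process(w->task);
}

/* Worker side: the flag is checked only after the task state is set,
 * so the stop request cannot slip between the check and the sleep. */
static void worker_loop(struct worker *w)
{
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (atomic_read(&w->terminate))
			break;
		/* ... drain queues, then sleep until woken ... */
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}
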
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 8c100c9dae2..d4f5dff7c95 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -231,6 +231,7 @@ void bnep_net_setup(struct net_device *dev)
231 dev->addr_len = ETH_ALEN; 231 dev->addr_len = ETH_ALEN;
232 232
233 ether_setup(dev); 233 ether_setup(dev);
234 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
234 dev->netdev_ops = &bnep_netdev_ops; 235 dev->netdev_ops = &bnep_netdev_ops;
235 236
236 dev->watchdog_timeo = HZ * 2; 237 dev->watchdog_timeo = HZ * 2;
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 744233cba24..040f67b1297 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -326,7 +326,7 @@ void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb)
326{ 326{
327 struct capi_ctr *ctrl = &session->ctrl; 327 struct capi_ctr *ctrl = &session->ctrl;
328 struct cmtp_application *application; 328 struct cmtp_application *application;
329 __u16 cmd, appl; 329 __u16 appl;
330 __u32 contr; 330 __u32 contr;
331 331
332 BT_DBG("session %p skb %p len %d", session, skb, skb->len); 332 BT_DBG("session %p skb %p len %d", session, skb, skb->len);
@@ -344,7 +344,6 @@ void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb)
344 return; 344 return;
345 } 345 }
346 346
347 cmd = CAPICMD(CAPIMSG_COMMAND(skb->data), CAPIMSG_SUBCOMMAND(skb->data));
348 appl = CAPIMSG_APPID(skb->data); 347 appl = CAPIMSG_APPID(skb->data);
349 contr = CAPIMSG_CONTROL(skb->data); 348 contr = CAPIMSG_CONTROL(skb->data);
350 349
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index bcd158f40bb..33c4e0cd83b 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -53,11 +53,13 @@ static void hci_le_connect(struct hci_conn *conn)
53 conn->state = BT_CONNECT; 53 conn->state = BT_CONNECT;
54 conn->out = 1; 54 conn->out = 1;
55 conn->link_mode |= HCI_LM_MASTER; 55 conn->link_mode |= HCI_LM_MASTER;
56 conn->sec_level = BT_SECURITY_LOW;
56 57
57 memset(&cp, 0, sizeof(cp)); 58 memset(&cp, 0, sizeof(cp));
58 cp.scan_interval = cpu_to_le16(0x0004); 59 cp.scan_interval = cpu_to_le16(0x0004);
59 cp.scan_window = cpu_to_le16(0x0004); 60 cp.scan_window = cpu_to_le16(0x0004);
60 bacpy(&cp.peer_addr, &conn->dst); 61 bacpy(&cp.peer_addr, &conn->dst);
62 cp.peer_addr_type = conn->dst_type;
61 cp.conn_interval_min = cpu_to_le16(0x0008); 63 cp.conn_interval_min = cpu_to_le16(0x0008);
62 cp.conn_interval_max = cpu_to_le16(0x0100); 64 cp.conn_interval_max = cpu_to_le16(0x0100);
63 cp.supervision_timeout = cpu_to_le16(0x0064); 65 cp.supervision_timeout = cpu_to_le16(0x0064);
@@ -203,6 +205,55 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
203} 205}
204EXPORT_SYMBOL(hci_le_conn_update); 206EXPORT_SYMBOL(hci_le_conn_update);
205 207
208void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
209 __u8 ltk[16])
210{
211 struct hci_dev *hdev = conn->hdev;
212 struct hci_cp_le_start_enc cp;
213
214 BT_DBG("%p", conn);
215
216 memset(&cp, 0, sizeof(cp));
217
218 cp.handle = cpu_to_le16(conn->handle);
219 memcpy(cp.ltk, ltk, sizeof(cp.ltk));
220 cp.ediv = ediv;
221 memcpy(cp.rand, rand, sizeof(cp.rand));
222
223 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
224}
225EXPORT_SYMBOL(hci_le_start_enc);
226
227void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
228{
229 struct hci_dev *hdev = conn->hdev;
230 struct hci_cp_le_ltk_reply cp;
231
232 BT_DBG("%p", conn);
233
234 memset(&cp, 0, sizeof(cp));
235
236 cp.handle = cpu_to_le16(conn->handle);
237 memcpy(cp.ltk, ltk, sizeof(cp.ltk));
238
239 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
240}
241EXPORT_SYMBOL(hci_le_ltk_reply);
242
243void hci_le_ltk_neg_reply(struct hci_conn *conn)
244{
245 struct hci_dev *hdev = conn->hdev;
246 struct hci_cp_le_ltk_neg_reply cp;
247
248 BT_DBG("%p", conn);
249
250 memset(&cp, 0, sizeof(cp));
251
252 cp.handle = cpu_to_le16(conn->handle);
253
254 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(cp), &cp);
255}
256
206/* Device _must_ be locked */ 257/* Device _must_ be locked */
207void hci_sco_setup(struct hci_conn *conn, __u8 status) 258void hci_sco_setup(struct hci_conn *conn, __u8 status)
208{ 259{
@@ -282,7 +333,8 @@ static void hci_conn_auto_accept(unsigned long arg)
282 hci_dev_unlock(hdev); 333 hci_dev_unlock(hdev);
283} 334}
284 335
285struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) 336struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type,
337 __u16 pkt_type, bdaddr_t *dst)
286{ 338{
287 struct hci_conn *conn; 339 struct hci_conn *conn;
288 340
@@ -310,14 +362,22 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
310 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK; 362 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
311 break; 363 break;
312 case SCO_LINK: 364 case SCO_LINK:
313 if (lmp_esco_capable(hdev)) 365 if (!pkt_type)
314 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | 366 pkt_type = SCO_ESCO_MASK;
315 (hdev->esco_type & EDR_ESCO_MASK);
316 else
317 conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
318 break;
319 case ESCO_LINK: 367 case ESCO_LINK:
320 conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK; 368 if (!pkt_type)
369 pkt_type = ALL_ESCO_MASK;
370 if (lmp_esco_capable(hdev)) {
371 /* HCI Setup Synchronous Connection Command uses
372 reverse logic on the EDR_ESCO_MASK bits */
373 conn->pkt_type = (pkt_type ^ EDR_ESCO_MASK) &
374 hdev->esco_type;
375 } else {
376 /* Legacy HCI Add Sco Connection Command uses a
377 shifted bitmask */
378 conn->pkt_type = (pkt_type << 5) & hdev->pkt_type &
379 SCO_PTYPE_MASK;
380 }
321 break; 381 break;
322 } 382 }
323 383
@@ -441,7 +501,9 @@ EXPORT_SYMBOL(hci_get_route);
441 501
442/* Create SCO, ACL or LE connection. 502/* Create SCO, ACL or LE connection.
443 * Device _must_ be locked */ 503 * Device _must_ be locked */
444struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type) 504struct hci_conn *hci_connect(struct hci_dev *hdev, int type,
505 __u16 pkt_type, bdaddr_t *dst,
506 __u8 sec_level, __u8 auth_type)
445{ 507{
446 struct hci_conn *acl; 508 struct hci_conn *acl;
447 struct hci_conn *sco; 509 struct hci_conn *sco;
@@ -450,14 +512,23 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
450 BT_DBG("%s dst %s", hdev->name, batostr(dst)); 512 BT_DBG("%s dst %s", hdev->name, batostr(dst));
451 513
452 if (type == LE_LINK) { 514 if (type == LE_LINK) {
515 struct adv_entry *entry;
516
453 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); 517 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
454 if (le) 518 if (le)
455 return ERR_PTR(-EBUSY); 519 return ERR_PTR(-EBUSY);
456 le = hci_conn_add(hdev, LE_LINK, dst); 520
521 entry = hci_find_adv_entry(hdev, dst);
522 if (!entry)
523 return ERR_PTR(-EHOSTUNREACH);
524
525 le = hci_conn_add(hdev, LE_LINK, 0, dst);
457 if (!le) 526 if (!le)
458 return ERR_PTR(-ENOMEM); 527 return ERR_PTR(-ENOMEM);
459 if (le->state == BT_OPEN) 528
460 hci_le_connect(le); 529 le->dst_type = entry->bdaddr_type;
530
531 hci_le_connect(le);
461 532
462 hci_conn_hold(le); 533 hci_conn_hold(le);
463 534
@@ -466,7 +537,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
466 537
467 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); 538 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
468 if (!acl) { 539 if (!acl) {
469 acl = hci_conn_add(hdev, ACL_LINK, dst); 540 acl = hci_conn_add(hdev, ACL_LINK, 0, dst);
470 if (!acl) 541 if (!acl)
471 return NULL; 542 return NULL;
472 } 543 }
@@ -485,7 +556,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
485 556
486 sco = hci_conn_hash_lookup_ba(hdev, type, dst); 557 sco = hci_conn_hash_lookup_ba(hdev, type, dst);
487 if (!sco) { 558 if (!sco) {
488 sco = hci_conn_add(hdev, type, dst); 559 sco = hci_conn_add(hdev, type, pkt_type, dst);
489 if (!sco) { 560 if (!sco) {
490 hci_conn_put(acl); 561 hci_conn_put(acl);
491 return NULL; 562 return NULL;
@@ -500,7 +571,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
500 if (acl->state == BT_CONNECTED && 571 if (acl->state == BT_CONNECTED &&
501 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { 572 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
502 acl->power_save = 1; 573 acl->power_save = 1;
503 hci_conn_enter_active_mode(acl); 574 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
504 575
505 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) { 576 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
506 /* defer SCO setup until mode change completed */ 577 /* defer SCO setup until mode change completed */
@@ -548,9 +619,15 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
548 619
549 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { 620 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
550 struct hci_cp_auth_requested cp; 621 struct hci_cp_auth_requested cp;
622
623 /* encrypt must be pending if auth is also pending */
624 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
625
551 cp.handle = cpu_to_le16(conn->handle); 626 cp.handle = cpu_to_le16(conn->handle);
552 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, 627 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
553 sizeof(cp), &cp); 628 sizeof(cp), &cp);
629 if (conn->key_type != 0xff)
630 set_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
554 } 631 }
555 632
556 return 0; 633 return 0;
@@ -634,9 +711,7 @@ int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
634 if (sec_level != BT_SECURITY_HIGH) 711 if (sec_level != BT_SECURITY_HIGH)
635 return 1; /* Accept if non-secure is required */ 712 return 1; /* Accept if non-secure is required */
636 713
637 if (conn->key_type == HCI_LK_AUTH_COMBINATION || 714 if (conn->sec_level == BT_SECURITY_HIGH)
638 (conn->key_type == HCI_LK_COMBINATION &&
639 conn->pin_length == 16))
640 return 1; 715 return 1;
641 716
642 return 0; /* Reject not secure link */ 717 return 0; /* Reject not secure link */
@@ -679,7 +754,7 @@ int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
679EXPORT_SYMBOL(hci_conn_switch_role); 754EXPORT_SYMBOL(hci_conn_switch_role);
680 755
681/* Enter active mode */ 756/* Enter active mode */
682void hci_conn_enter_active_mode(struct hci_conn *conn) 757void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
683{ 758{
684 struct hci_dev *hdev = conn->hdev; 759 struct hci_dev *hdev = conn->hdev;
685 760
@@ -688,7 +763,10 @@ void hci_conn_enter_active_mode(struct hci_conn *conn)
688 if (test_bit(HCI_RAW, &hdev->flags)) 763 if (test_bit(HCI_RAW, &hdev->flags))
689 return; 764 return;
690 765
691 if (conn->mode != HCI_CM_SNIFF || !conn->power_save) 766 if (conn->mode != HCI_CM_SNIFF)
767 goto timer;
768
769 if (!conn->power_save && !force_active)
692 goto timer; 770 goto timer;
693 771
694 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { 772 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
@@ -829,6 +907,15 @@ int hci_get_conn_list(void __user *arg)
829 (ci + n)->out = c->out; 907 (ci + n)->out = c->out;
830 (ci + n)->state = c->state; 908 (ci + n)->state = c->state;
831 (ci + n)->link_mode = c->link_mode; 909 (ci + n)->link_mode = c->link_mode;
910 if (c->type == SCO_LINK) {
911 (ci + n)->mtu = hdev->sco_mtu;
912 (ci + n)->cnt = hdev->sco_cnt;
913 (ci + n)->pkts = hdev->sco_pkts;
914 } else {
915 (ci + n)->mtu = hdev->acl_mtu;
916 (ci + n)->cnt = hdev->acl_cnt;
917 (ci + n)->pkts = hdev->acl_pkts;
918 }
832 if (++n >= req.conn_num) 919 if (++n >= req.conn_num)
833 break; 920 break;
834 } 921 }
@@ -865,6 +952,15 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
865 ci.out = conn->out; 952 ci.out = conn->out;
866 ci.state = conn->state; 953 ci.state = conn->state;
867 ci.link_mode = conn->link_mode; 954 ci.link_mode = conn->link_mode;
955 if (req.type == SCO_LINK) {
956 ci.mtu = hdev->sco_mtu;
957 ci.cnt = hdev->sco_cnt;
958 ci.pkts = hdev->sco_pkts;
959 } else {
960 ci.mtu = hdev->acl_mtu;
961 ci.cnt = hdev->acl_cnt;
962 ci.pkts = hdev->acl_pkts;
963 }
868 } 964 }
869 hci_dev_unlock_bh(hdev); 965 hci_dev_unlock_bh(hdev);
870 966
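
The reverse-logic comment in hci_conn_add() is easier to see with numbers; a standalone sketch using packet-type constants assumed from the include/net/bluetooth/hci.h of this era:

#include <stdio.h>

#define ESCO_2EV3	0x0040
#define ESCO_3EV3	0x0080
#define ESCO_2EV5	0x0100
#define ESCO_3EV5	0x0200
#define EDR_ESCO_MASK	(ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5)

#define HCI_HV1		0x0020
#define HCI_HV2		0x0040
#define HCI_HV3		0x0080
#define SCO_PTYPE_MASK	(HCI_HV1 | HCI_HV2 | HCI_HV3)

int main(void)
{
	unsigned pkt_type     = 0x03FF & ~ESCO_3EV5; /* caller: all but 3-EV5 */
	unsigned esco_type    = 0x03FF;              /* adapter capabilities */
	unsigned dev_pkt_type = 0xFFFF;

	/* Setup Synchronous Connection treats the EDR bits as "do NOT
	 * use this packet type", so they are flipped before masking,
	 * exactly as in hci_conn_add() above. */
	printf("eSCO:   0x%04x\n", (pkt_type ^ EDR_ESCO_MASK) & esco_type);

	/* The legacy Add SCO Connection command keeps its HV bits five
	 * positions higher, hence the shift. */
	printf("legacy: 0x%04x\n",
	       (pkt_type << 5) & dev_pkt_type & SCO_PTYPE_MASK);
	return 0;
}
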
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 815269b07f2..f38e633c754 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -42,6 +42,7 @@
42#include <linux/notifier.h> 42#include <linux/notifier.h>
43#include <linux/rfkill.h> 43#include <linux/rfkill.h>
44#include <linux/timer.h> 44#include <linux/timer.h>
45#include <linux/crypto.h>
45#include <net/sock.h> 46#include <net/sock.h>
46 47
47#include <asm/system.h> 48#include <asm/system.h>
@@ -145,7 +146,7 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
145 146
146 switch (hdev->req_status) { 147 switch (hdev->req_status) {
147 case HCI_REQ_DONE: 148 case HCI_REQ_DONE:
148 err = -bt_err(hdev->req_result); 149 err = -bt_to_errno(hdev->req_result);
149 break; 150 break;
150 151
151 case HCI_REQ_CANCELED: 152 case HCI_REQ_CANCELED:
@@ -509,6 +510,11 @@ int hci_dev_open(__u16 dev)
509 510
510 hci_req_lock(hdev); 511 hci_req_lock(hdev);
511 512
513 if (test_bit(HCI_UNREGISTER, &hdev->flags)) {
514 ret = -ENODEV;
515 goto done;
516 }
517
512 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) { 518 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
513 ret = -ERFKILL; 519 ret = -ERFKILL;
514 goto done; 520 goto done;
@@ -539,7 +545,7 @@ int hci_dev_open(__u16 dev)
539 ret = __hci_request(hdev, hci_init_req, 0, 545 ret = __hci_request(hdev, hci_init_req, 0,
540 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 546 msecs_to_jiffies(HCI_INIT_TIMEOUT));
541 547
542 if (lmp_le_capable(hdev)) 548 if (lmp_host_le_capable(hdev))
543 ret = __hci_request(hdev, hci_le_init_req, 0, 549 ret = __hci_request(hdev, hci_le_init_req, 0,
544 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 550 msecs_to_jiffies(HCI_INIT_TIMEOUT));
545 551
@@ -1056,6 +1062,42 @@ static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1056 return 0; 1062 return 0;
1057} 1063}
1058 1064
1065struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1066{
1067 struct link_key *k;
1068
1069 list_for_each_entry(k, &hdev->link_keys, list) {
1070 struct key_master_id *id;
1071
1072 if (k->type != HCI_LK_SMP_LTK)
1073 continue;
1074
1075 if (k->dlen != sizeof(*id))
1076 continue;
1077
1078 id = (void *) &k->data;
1079 if (id->ediv == ediv &&
1080 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1081 return k;
1082 }
1083
1084 return NULL;
1085}
1086EXPORT_SYMBOL(hci_find_ltk);
1087
1088struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1089 bdaddr_t *bdaddr, u8 type)
1090{
1091 struct link_key *k;
1092
1093 list_for_each_entry(k, &hdev->link_keys, list)
1094 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1095 return k;
1096
1097 return NULL;
1098}
1099EXPORT_SYMBOL(hci_find_link_key_type);
1100
1059int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, 1101int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1060 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len) 1102 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1061{ 1103{
@@ -1111,6 +1153,44 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1111 return 0; 1153 return 0;
1112} 1154}
1113 1155
1156int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1157 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
1158{
1159 struct link_key *key, *old_key;
1160 struct key_master_id *id;
1161 u8 old_key_type;
1162
1163 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1164
1165 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1166 if (old_key) {
1167 key = old_key;
1168 old_key_type = old_key->type;
1169 } else {
1170 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1171 if (!key)
1172 return -ENOMEM;
1173 list_add(&key->list, &hdev->link_keys);
1174 old_key_type = 0xff;
1175 }
1176
1177 key->dlen = sizeof(*id);
1178
1179 bacpy(&key->bdaddr, bdaddr);
1180 memcpy(key->val, ltk, sizeof(key->val));
1181 key->type = HCI_LK_SMP_LTK;
1182 key->pin_len = key_size;
1183
1184 id = (void *) &key->data;
1185 id->ediv = ediv;
1186 memcpy(id->rand, rand, sizeof(id->rand));
1187
1188 if (new_key)
1189 mgmt_new_key(hdev->id, key, old_key_type);
1190
1191 return 0;
1192}
1193
1114int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) 1194int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1115{ 1195{
1116 struct link_key *key; 1196 struct link_key *key;
@@ -1134,7 +1214,6 @@ static void hci_cmd_timer(unsigned long arg)
1134 1214
1135 BT_ERR("%s command tx timeout", hdev->name); 1215 BT_ERR("%s command tx timeout", hdev->name);
1136 atomic_set(&hdev->cmd_cnt, 1); 1216 atomic_set(&hdev->cmd_cnt, 1);
1137 clear_bit(HCI_RESET, &hdev->flags);
1138 tasklet_schedule(&hdev->cmd_task); 1217 tasklet_schedule(&hdev->cmd_task);
1139} 1218}
1140 1219
@@ -1202,6 +1281,169 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1202 return 0; 1281 return 0;
1203} 1282}
1204 1283
1284struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1285 bdaddr_t *bdaddr)
1286{
1287 struct list_head *p;
1288
1289 list_for_each(p, &hdev->blacklist) {
1290 struct bdaddr_list *b;
1291
1292 b = list_entry(p, struct bdaddr_list, list);
1293
1294 if (bacmp(bdaddr, &b->bdaddr) == 0)
1295 return b;
1296 }
1297
1298 return NULL;
1299}
1300
1301int hci_blacklist_clear(struct hci_dev *hdev)
1302{
1303 struct list_head *p, *n;
1304
1305 list_for_each_safe(p, n, &hdev->blacklist) {
1306 struct bdaddr_list *b;
1307
1308 b = list_entry(p, struct bdaddr_list, list);
1309
1310 list_del(p);
1311 kfree(b);
1312 }
1313
1314 return 0;
1315}
1316
1317int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1318{
1319 struct bdaddr_list *entry;
1320 int err;
1321
1322 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1323 return -EBADF;
1324
1325 hci_dev_lock_bh(hdev);
1326
1327 if (hci_blacklist_lookup(hdev, bdaddr)) {
1328 err = -EEXIST;
1329 goto err;
1330 }
1331
1332 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1333 if (!entry) {
1334 return -ENOMEM;
1335 goto err;
1336 }
1337
1338 bacpy(&entry->bdaddr, bdaddr);
1339
1340 list_add(&entry->list, &hdev->blacklist);
1341
1342 err = 0;
1343
1344err:
1345 hci_dev_unlock_bh(hdev);
1346 return err;
1347}
1348
1349int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1350{
1351 struct bdaddr_list *entry;
1352 int err = 0;
1353
1354 hci_dev_lock_bh(hdev);
1355
1356 if (bacmp(bdaddr, BDADDR_ANY) == 0) {
1357 hci_blacklist_clear(hdev);
1358 goto done;
1359 }
1360
1361 entry = hci_blacklist_lookup(hdev, bdaddr);
1362 if (!entry) {
1363 err = -ENOENT;
1364 goto done;
1365 }
1366
1367 list_del(&entry->list);
1368 kfree(entry);
1369
1370done:
1371 hci_dev_unlock_bh(hdev);
1372 return err;
1373}
1374
1375static void hci_clear_adv_cache(unsigned long arg)
1376{
1377 struct hci_dev *hdev = (void *) arg;
1378
1379 hci_dev_lock(hdev);
1380
1381 hci_adv_entries_clear(hdev);
1382
1383 hci_dev_unlock(hdev);
1384}
1385
1386int hci_adv_entries_clear(struct hci_dev *hdev)
1387{
1388 struct adv_entry *entry, *tmp;
1389
1390 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1391 list_del(&entry->list);
1392 kfree(entry);
1393 }
1394
1395 BT_DBG("%s adv cache cleared", hdev->name);
1396
1397 return 0;
1398}
1399
1400struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1401{
1402 struct adv_entry *entry;
1403
1404 list_for_each_entry(entry, &hdev->adv_entries, list)
1405 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1406 return entry;
1407
1408 return NULL;
1409}
1410
1411static inline int is_connectable_adv(u8 evt_type)
1412{
1413 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1414 return 1;
1415
1416 return 0;
1417}
1418
1419int hci_add_adv_entry(struct hci_dev *hdev,
1420 struct hci_ev_le_advertising_info *ev)
1421{
1422 struct adv_entry *entry;
1423
1424 if (!is_connectable_adv(ev->evt_type))
1425 return -EINVAL;
1426
1427 /* Only new entries should be added to adv_entries. So, if
1428 * bdaddr was found, don't add it. */
1429 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1430 return 0;
1431
1432 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1433 if (!entry)
1434 return -ENOMEM;
1435
1436 bacpy(&entry->bdaddr, &ev->bdaddr);
1437 entry->bdaddr_type = ev->bdaddr_type;
1438
1439 list_add(&entry->list, &hdev->adv_entries);
1440
1441 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1442 batostr(&entry->bdaddr), entry->bdaddr_type);
1443
1444 return 0;
1445}
1446
1205/* Register HCI device */ 1447/* Register HCI device */
1206int hci_register_dev(struct hci_dev *hdev) 1448int hci_register_dev(struct hci_dev *hdev)
1207{ 1449{
@@ -1268,6 +1510,10 @@ int hci_register_dev(struct hci_dev *hdev)
1268 1510
1269 INIT_LIST_HEAD(&hdev->remote_oob_data); 1511 INIT_LIST_HEAD(&hdev->remote_oob_data);
1270 1512
1513 INIT_LIST_HEAD(&hdev->adv_entries);
1514 setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
1515 (unsigned long) hdev);
1516
1271 INIT_WORK(&hdev->power_on, hci_power_on); 1517 INIT_WORK(&hdev->power_on, hci_power_on);
1272 INIT_WORK(&hdev->power_off, hci_power_off); 1518 INIT_WORK(&hdev->power_off, hci_power_off);
1273 setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev); 1519 setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
@@ -1282,6 +1528,11 @@ int hci_register_dev(struct hci_dev *hdev)
1282 if (!hdev->workqueue) 1528 if (!hdev->workqueue)
1283 goto nomem; 1529 goto nomem;
1284 1530
1531 hdev->tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
1532 if (IS_ERR(hdev->tfm))
1533 BT_INFO("Failed to load transform for ecb(aes): %ld",
1534 PTR_ERR(hdev->tfm));
1535
1285 hci_register_sysfs(hdev); 1536 hci_register_sysfs(hdev);
1286 1537
1287 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, 1538 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
@@ -1317,6 +1568,8 @@ int hci_unregister_dev(struct hci_dev *hdev)
1317 1568
1318 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 1569 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1319 1570
1571 set_bit(HCI_UNREGISTER, &hdev->flags);
1572
1320 write_lock_bh(&hci_dev_list_lock); 1573 write_lock_bh(&hci_dev_list_lock);
1321 list_del(&hdev->list); 1574 list_del(&hdev->list);
1322 write_unlock_bh(&hci_dev_list_lock); 1575 write_unlock_bh(&hci_dev_list_lock);
@@ -1330,6 +1583,9 @@ int hci_unregister_dev(struct hci_dev *hdev)
1330 !test_bit(HCI_SETUP, &hdev->flags)) 1583 !test_bit(HCI_SETUP, &hdev->flags))
1331 mgmt_index_removed(hdev->id); 1584 mgmt_index_removed(hdev->id);
1332 1585
1586 if (!IS_ERR(hdev->tfm))
1587 crypto_free_blkcipher(hdev->tfm);
1588
1333 hci_notify(hdev, HCI_DEV_UNREG); 1589 hci_notify(hdev, HCI_DEV_UNREG);
1334 1590
1335 if (hdev->rfkill) { 1591 if (hdev->rfkill) {
@@ -1340,6 +1596,7 @@ int hci_unregister_dev(struct hci_dev *hdev)
1340 hci_unregister_sysfs(hdev); 1596 hci_unregister_sysfs(hdev);
1341 1597
1342 hci_del_off_timer(hdev); 1598 hci_del_off_timer(hdev);
1599 del_timer(&hdev->adv_timer);
1343 1600
1344 destroy_workqueue(hdev->workqueue); 1601 destroy_workqueue(hdev->workqueue);
1345 1602
@@ -1348,6 +1605,7 @@ int hci_unregister_dev(struct hci_dev *hdev)
1348 hci_uuids_clear(hdev); 1605 hci_uuids_clear(hdev);
1349 hci_link_keys_clear(hdev); 1606 hci_link_keys_clear(hdev);
1350 hci_remote_oob_data_clear(hdev); 1607 hci_remote_oob_data_clear(hdev);
1608 hci_adv_entries_clear(hdev);
1351 hci_dev_unlock_bh(hdev); 1609 hci_dev_unlock_bh(hdev);
1352 1610
1353 __hci_dev_put(hdev); 1611 __hci_dev_put(hdev);
@@ -1891,7 +2149,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
1891 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 2149 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1892 BT_DBG("skb %p len %d", skb, skb->len); 2150 BT_DBG("skb %p len %d", skb, skb->len);
1893 2151
1894 hci_conn_enter_active_mode(conn); 2152 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
1895 2153
1896 hci_send_frame(skb); 2154 hci_send_frame(skb);
1897 hdev->acl_last_tx = jiffies; 2155 hdev->acl_last_tx = jiffies;
@@ -2030,7 +2288,7 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2030 if (conn) { 2288 if (conn) {
2031 register struct hci_proto *hp; 2289 register struct hci_proto *hp;
2032 2290
2033 hci_conn_enter_active_mode(conn); 2291 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
2034 2292
2035 /* Send to upper protocol */ 2293 /* Send to upper protocol */
2036 hp = hci_proto[HCI_PROTO_L2CAP]; 2294 hp = hci_proto[HCI_PROTO_L2CAP];
@@ -2156,7 +2414,10 @@ static void hci_cmd_task(unsigned long arg)
2156 if (hdev->sent_cmd) { 2414 if (hdev->sent_cmd) {
2157 atomic_dec(&hdev->cmd_cnt); 2415 atomic_dec(&hdev->cmd_cnt);
2158 hci_send_frame(skb); 2416 hci_send_frame(skb);
2159 mod_timer(&hdev->cmd_timer, 2417 if (test_bit(HCI_RESET, &hdev->flags))
2418 del_timer(&hdev->cmd_timer);
2419 else
2420 mod_timer(&hdev->cmd_timer,
2160 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT)); 2421 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2161 } else { 2422 } else {
2162 skb_queue_head(&hdev->cmd_q, skb); 2423 skb_queue_head(&hdev->cmd_q, skb);
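
A hypothetical sketch of how the new LTK helpers fit together when the controller raises an LE Long Term Key Request event (kernel-context; the handler name here is illustrative, the real consumer lives in hci_event.c):

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void le_ltk_request_sketch(struct hci_dev *hdev,
				  struct hci_conn *conn,
				  __le16 ediv, u8 rand[8])
{
	struct link_key *ltk;

	/* Match the controller-supplied ediv/rand pair against the
	 * HCI_LK_SMP_LTK entries stored via hci_add_ltk(). */
	ltk = hci_find_ltk(hdev, ediv, rand);
	if (!ltk) {
		hci_le_ltk_neg_reply(conn);	/* no stored key: reject */
		return;
	}

	hci_le_ltk_reply(conn, ltk->val);	/* return the 128-bit LTK */
}
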
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 77930aa522e..5a7074a7b5b 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -45,6 +45,8 @@
45#include <net/bluetooth/bluetooth.h> 45#include <net/bluetooth/bluetooth.h>
46#include <net/bluetooth/hci_core.h> 46#include <net/bluetooth/hci_core.h>
47 47
48static int enable_le;
49
48/* Handle HCI Event packets */ 50/* Handle HCI Event packets */
49 51
50static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) 52static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
@@ -56,8 +58,8 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
56 if (status) 58 if (status)
57 return; 59 return;
58 60
59 if (test_bit(HCI_MGMT, &hdev->flags) && 61 if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
60 test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 62 test_bit(HCI_MGMT, &hdev->flags))
61 mgmt_discovering(hdev->id, 0); 63 mgmt_discovering(hdev->id, 0);
62 64
63 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status); 65 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
@@ -74,8 +76,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
74 if (status) 76 if (status)
75 return; 77 return;
76 78
77 if (test_bit(HCI_MGMT, &hdev->flags) && 79 if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
78 test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 80 test_bit(HCI_MGMT, &hdev->flags))
79 mgmt_discovering(hdev->id, 0); 81 mgmt_discovering(hdev->id, 0);
80 82
81 hci_conn_check_pending(hdev); 83 hci_conn_check_pending(hdev);
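The inquiry-state hunks in this file all apply the same reordering: test_and_clear_bit(HCI_INQUIRY) now runs before the HCI_MGMT check, so the flag is cleared even when the management interface is disabled. With &&, the right operand is skipped when the left one is false, which is how the old order could leak a stale HCI_INQUIRY bit. Standalone model of the two orderings (flag values assumed):

#include <stdio.h>

#define INQUIRY 0x1UL
#define MGMT    0x2UL

static int test_and_clear(unsigned long *flags, unsigned long bit)
{
	int was_set = !!(*flags & bit);

	*flags &= ~bit;
	return was_set;
}

int main(void)
{
	unsigned long flags = INQUIRY;	/* MGMT not set */

	/* old order: the MGMT test fails first, so the clear never runs */
	if ((flags & MGMT) && test_and_clear(&flags, INQUIRY))
		;
	printf("old order leaves INQUIRY: %lu\n", flags & INQUIRY);

	flags = INQUIRY;
	/* new order: the flag is always cleared; MGMT only gates the event */
	if (test_and_clear(&flags, INQUIRY) && (flags & MGMT))
		;
	printf("new order leaves INQUIRY: %lu\n", flags & INQUIRY);
	return 0;
}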
@@ -525,6 +527,20 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
525 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events); 527 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
526} 528}
527 529
530static void hci_set_le_support(struct hci_dev *hdev)
531{
532 struct hci_cp_write_le_host_supported cp;
533
534 memset(&cp, 0, sizeof(cp));
535
536 if (enable_le) {
537 cp.le = 1;
538 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
539 }
540
541 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
542}
543
528static void hci_setup(struct hci_dev *hdev) 544static void hci_setup(struct hci_dev *hdev)
529{ 545{
530 hci_setup_event_mask(hdev); 546 hci_setup_event_mask(hdev);
@@ -542,6 +558,17 @@ static void hci_setup(struct hci_dev *hdev)
542 558
543 if (hdev->features[7] & LMP_INQ_TX_PWR) 559 if (hdev->features[7] & LMP_INQ_TX_PWR)
544 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL); 560 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
561
562 if (hdev->features[7] & LMP_EXTFEATURES) {
563 struct hci_cp_read_local_ext_features cp;
564
565 cp.page = 0x01;
566 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
567 sizeof(cp), &cp);
568 }
569
570 if (hdev->features[4] & LMP_LE)
571 hci_set_le_support(hdev);
545} 572}
546 573
547static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) 574static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
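Controller init now chains two extra steps: when page 0 advertises extended features (LMP_EXTFEATURES), page 1 is requested with Read Local Extended Features, and when the controller is LE-capable (LMP_LE), Write LE Host Supported opts the host into LE, with cp.simul set only if the controller can run LE and BR/EDR simultaneously. Sketch of the parameter block as built above; the bit value here is an assumption of the sketch, not quoted from the spec:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LMP_SIMUL_LE_BR 0x02	/* assumed bit in features page 0, byte 6 */

struct cp_write_le_host_supported {
	uint8_t le;	/* LE Supported (Host) */
	uint8_t simul;	/* Simultaneous LE and BR/EDR (Host) */
};

int main(void)
{
	uint8_t features[8] = { 0 };
	struct cp_write_le_host_supported cp;

	features[6] = LMP_SIMUL_LE_BR;	/* pretend the controller supports it */

	memset(&cp, 0, sizeof(cp));
	cp.le = 1;
	cp.simul = !!(features[6] & LMP_SIMUL_LE_BR);

	printf("le=%u simul=%u\n", cp.le, cp.simul);
	return 0;
}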
@@ -658,6 +685,21 @@ static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb
658 hdev->features[6], hdev->features[7]); 685 hdev->features[6], hdev->features[7]);
659} 686}
660 687
688static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
689 struct sk_buff *skb)
690{
691 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
692
693 BT_DBG("%s status 0x%x", hdev->name, rp->status);
694
695 if (rp->status)
696 return;
697
698 memcpy(hdev->extfeatures, rp->features, 8);
699
700 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
701}
702
661static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) 703static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
662{ 704{
663 struct hci_rp_read_buffer_size *rp = (void *) skb->data; 705 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
@@ -841,6 +883,72 @@ static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
841 rp->randomizer, rp->status); 883 rp->randomizer, rp->status);
842} 884}
843 885
886static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
887 struct sk_buff *skb)
888{
889 struct hci_cp_le_set_scan_enable *cp;
890 __u8 status = *((__u8 *) skb->data);
891
892 BT_DBG("%s status 0x%x", hdev->name, status);
893
894 if (status)
895 return;
896
897 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
898 if (!cp)
899 return;
900
901 hci_dev_lock(hdev);
902
903 if (cp->enable == 0x01) {
904 del_timer(&hdev->adv_timer);
905 hci_adv_entries_clear(hdev);
906 } else if (cp->enable == 0x00) {
907 mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
908 }
909
910 hci_dev_unlock(hdev);
911}
912
913static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
914{
915 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
916
917 BT_DBG("%s status 0x%x", hdev->name, rp->status);
918
919 if (rp->status)
920 return;
921
922 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
923}
924
925static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
926{
927 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
928
929 BT_DBG("%s status 0x%x", hdev->name, rp->status);
930
931 if (rp->status)
932 return;
933
934 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
935}
936
937static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
938 struct sk_buff *skb)
939{
940 struct hci_cp_read_local_ext_features cp;
941 __u8 status = *((__u8 *) skb->data);
942
943 BT_DBG("%s status 0x%x", hdev->name, status);
944
945 if (status)
946 return;
947
948 cp.page = 0x01;
949 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
950}
951
844static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 952static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
845{ 953{
846 BT_DBG("%s status 0x%x", hdev->name, status); 954 BT_DBG("%s status 0x%x", hdev->name, status);
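hci_cc_le_set_scan_enable() above gives the advertising cache a defined lifetime: starting a scan (enable == 0x01) drops stale entries and cancels the expiry timer, while stopping it (enable == 0x00) arms the timer so cached reports age out after ADV_CLEAR_TIMEOUT instead of accumulating forever. Compilable stand-in for that policy; the timer and cache here are placeholders:

#include <stdbool.h>
#include <stdio.h>

struct adv_cache {
	int entries;
	bool expiry_armed;
};

static void scan_enable_complete(struct adv_cache *c, unsigned char enable)
{
	if (enable == 0x01) {		/* scan started: begin from a clean cache */
		c->expiry_armed = false;	/* del_timer(&hdev->adv_timer) */
		c->entries = 0;			/* hci_adv_entries_clear(hdev) */
	} else if (enable == 0x00) {	/* scan stopped: let entries age out */
		c->expiry_armed = true;		/* mod_timer(..., ADV_CLEAR_TIMEOUT) */
	}
}

int main(void)
{
	struct adv_cache cache = { .entries = 5, .expiry_armed = false };

	scan_enable_complete(&cache, 0x01);
	printf("after enable:  entries=%d timer=%d\n", cache.entries, cache.expiry_armed);
	scan_enable_complete(&cache, 0x00);
	printf("after disable: entries=%d timer=%d\n", cache.entries, cache.expiry_armed);
	return 0;
}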
@@ -851,9 +959,8 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
851 return; 959 return;
852 } 960 }
853 961
854 if (test_bit(HCI_MGMT, &hdev->flags) && 962 if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags) &&
855 !test_and_set_bit(HCI_INQUIRY, 963 test_bit(HCI_MGMT, &hdev->flags))
856 &hdev->flags))
857 mgmt_discovering(hdev->id, 1); 964 mgmt_discovering(hdev->id, 1);
858} 965}
859 966
@@ -885,7 +992,7 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
885 } 992 }
886 } else { 993 } else {
887 if (!conn) { 994 if (!conn) {
888 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr); 995 conn = hci_conn_add(hdev, ACL_LINK, 0, &cp->bdaddr);
889 if (conn) { 996 if (conn) {
890 conn->out = 1; 997 conn->out = 1;
891 conn->link_mode |= HCI_LM_MASTER; 998 conn->link_mode |= HCI_LM_MASTER;
@@ -1208,25 +1315,32 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1208 } 1315 }
1209 } else { 1316 } else {
1210 if (!conn) { 1317 if (!conn) {
1211 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr); 1318 conn = hci_conn_add(hdev, LE_LINK, 0, &cp->peer_addr);
1212 if (conn) 1319 if (conn) {
1320 conn->dst_type = cp->peer_addr_type;
1213 conn->out = 1; 1321 conn->out = 1;
1214 else 1322 } else {
1215 BT_ERR("No memory for new connection"); 1323 BT_ERR("No memory for new connection");
1324 }
1216 } 1325 }
1217 } 1326 }
1218 1327
1219 hci_dev_unlock(hdev); 1328 hci_dev_unlock(hdev);
1220} 1329}
1221 1330
1331static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1332{
1333 BT_DBG("%s status 0x%x", hdev->name, status);
1334}
1335
1222static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1336static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1223{ 1337{
1224 __u8 status = *((__u8 *) skb->data); 1338 __u8 status = *((__u8 *) skb->data);
1225 1339
1226 BT_DBG("%s status %d", hdev->name, status); 1340 BT_DBG("%s status %d", hdev->name, status);
1227 1341
1228 if (test_bit(HCI_MGMT, &hdev->flags) && 1342 if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
1229 test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 1343 test_bit(HCI_MGMT, &hdev->flags))
1230 mgmt_discovering(hdev->id, 0); 1344 mgmt_discovering(hdev->id, 0);
1231 1345
1232 hci_req_complete(hdev, HCI_OP_INQUIRY, status); 1346 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
@@ -1348,6 +1462,15 @@ unlock:
1348 hci_conn_check_pending(hdev); 1462 hci_conn_check_pending(hdev);
1349} 1463}
1350 1464
1465static inline bool is_sco_active(struct hci_dev *hdev)
1466{
1467 if (hci_conn_hash_lookup_state(hdev, SCO_LINK, BT_CONNECTED) ||
1468 (hci_conn_hash_lookup_state(hdev, ESCO_LINK,
1469 BT_CONNECTED)))
1470 return true;
1471 return false;
1472}
1473
1351static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 1474static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1352{ 1475{
1353 struct hci_ev_conn_request *ev = (void *) skb->data; 1476 struct hci_ev_conn_request *ev = (void *) skb->data;
@@ -1372,7 +1495,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
1372 1495
1373 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 1496 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1374 if (!conn) { 1497 if (!conn) {
1375 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr); 1498 /* pkt_type not yet used for incoming connections */
1499 conn = hci_conn_add(hdev, ev->link_type, 0, &ev->bdaddr);
1376 if (!conn) { 1500 if (!conn) {
1377 BT_ERR("No memory for new connection"); 1501 BT_ERR("No memory for new connection");
1378 hci_dev_unlock(hdev); 1502 hci_dev_unlock(hdev);
@@ -1390,7 +1514,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
1390 1514
1391 bacpy(&cp.bdaddr, &ev->bdaddr); 1515 bacpy(&cp.bdaddr, &ev->bdaddr);
1392 1516
1393 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) 1517 if (lmp_rswitch_capable(hdev) && ((mask & HCI_LM_MASTER)
1518 || is_sco_active(hdev)))
1394 cp.role = 0x00; /* Become master */ 1519 cp.role = 0x00; /* Become master */
1395 else 1520 else
1396 cp.role = 0x01; /* Remain slave */ 1521 cp.role = 0x01; /* Remain slave */
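The accept-role decision above now also requests the master role whenever an (e)SCO link is up: a device that stays slave on a second ACL link ends up in a scatternet, which makes it much harder to keep the reserved SCO slots serviced. Reduced to a pure function as a standalone sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t accept_role(bool rswitch_capable, bool policy_wants_master,
			   bool sco_active)
{
	if (rswitch_capable && (policy_wants_master || sco_active))
		return 0x00;	/* become master */
	return 0x01;		/* remain slave */
}

int main(void)
{
	/* link policy alone would leave us slave, but an active SCO flips it */
	printf("role: 0x%02x\n", accept_role(true, false, true));
	return 0;
}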
@@ -1462,51 +1587,58 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1462 hci_dev_lock(hdev); 1587 hci_dev_lock(hdev);
1463 1588
1464 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 1589 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1465 if (conn) { 1590 if (!conn)
1466 if (!ev->status) { 1591 goto unlock;
1592
1593 if (!ev->status) {
1594 if (!(conn->ssp_mode > 0 && hdev->ssp_mode > 0) &&
1595 test_bit(HCI_CONN_REAUTH_PEND, &conn->pend)) {
1596 BT_INFO("re-auth of legacy device is not possible.");
1597 } else {
1467 conn->link_mode |= HCI_LM_AUTH; 1598 conn->link_mode |= HCI_LM_AUTH;
1468 conn->sec_level = conn->pending_sec_level; 1599 conn->sec_level = conn->pending_sec_level;
1469 } else {
1470 mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
1471 } 1600 }
1601 } else {
1602 mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
1603 }
1472 1604
1473 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend); 1605 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1606 clear_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
1474 1607
1475 if (conn->state == BT_CONFIG) { 1608 if (conn->state == BT_CONFIG) {
1476 if (!ev->status && hdev->ssp_mode > 0 && 1609 if (!ev->status && hdev->ssp_mode > 0 && conn->ssp_mode > 0) {
1477 conn->ssp_mode > 0) { 1610 struct hci_cp_set_conn_encrypt cp;
1478 struct hci_cp_set_conn_encrypt cp; 1611 cp.handle = ev->handle;
1479 cp.handle = ev->handle; 1612 cp.encrypt = 0x01;
1480 cp.encrypt = 0x01; 1613 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1481 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, 1614 &cp);
1482 sizeof(cp), &cp);
1483 } else {
1484 conn->state = BT_CONNECTED;
1485 hci_proto_connect_cfm(conn, ev->status);
1486 hci_conn_put(conn);
1487 }
1488 } else { 1615 } else {
1489 hci_auth_cfm(conn, ev->status); 1616 conn->state = BT_CONNECTED;
1490 1617 hci_proto_connect_cfm(conn, ev->status);
1491 hci_conn_hold(conn);
1492 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1493 hci_conn_put(conn); 1618 hci_conn_put(conn);
1494 } 1619 }
1620 } else {
1621 hci_auth_cfm(conn, ev->status);
1495 1622
1496 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) { 1623 hci_conn_hold(conn);
1497 if (!ev->status) { 1624 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1498 struct hci_cp_set_conn_encrypt cp; 1625 hci_conn_put(conn);
1499 cp.handle = ev->handle; 1626 }
1500 cp.encrypt = 0x01; 1627
1501 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, 1628 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
1502 sizeof(cp), &cp); 1629 if (!ev->status) {
1503 } else { 1630 struct hci_cp_set_conn_encrypt cp;
1504 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend); 1631 cp.handle = ev->handle;
1505 hci_encrypt_cfm(conn, ev->status, 0x00); 1632 cp.encrypt = 0x01;
1506 } 1633 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1634 &cp);
1635 } else {
1636 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
1637 hci_encrypt_cfm(conn, ev->status, 0x00);
1507 } 1638 }
1508 } 1639 }
1509 1640
1641unlock:
1510 hci_dev_unlock(hdev); 1642 hci_dev_unlock(hdev);
1511} 1643}
1512 1644
@@ -1557,6 +1689,7 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *
1557 /* Encryption implies authentication */ 1689 /* Encryption implies authentication */
1558 conn->link_mode |= HCI_LM_AUTH; 1690 conn->link_mode |= HCI_LM_AUTH;
1559 conn->link_mode |= HCI_LM_ENCRYPT; 1691 conn->link_mode |= HCI_LM_ENCRYPT;
1692 conn->sec_level = conn->pending_sec_level;
1560 } else 1693 } else
1561 conn->link_mode &= ~HCI_LM_ENCRYPT; 1694 conn->link_mode &= ~HCI_LM_ENCRYPT;
1562 } 1695 }
@@ -1760,6 +1893,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
1760 hci_cc_read_local_features(hdev, skb); 1893 hci_cc_read_local_features(hdev, skb);
1761 break; 1894 break;
1762 1895
1896 case HCI_OP_READ_LOCAL_EXT_FEATURES:
1897 hci_cc_read_local_ext_features(hdev, skb);
1898 break;
1899
1763 case HCI_OP_READ_BUFFER_SIZE: 1900 case HCI_OP_READ_BUFFER_SIZE:
1764 hci_cc_read_buffer_size(hdev, skb); 1901 hci_cc_read_buffer_size(hdev, skb);
1765 break; 1902 break;
@@ -1816,6 +1953,22 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
1816 hci_cc_user_confirm_neg_reply(hdev, skb); 1953 hci_cc_user_confirm_neg_reply(hdev, skb);
1817 break; 1954 break;
1818 1955
1956 case HCI_OP_LE_SET_SCAN_ENABLE:
1957 hci_cc_le_set_scan_enable(hdev, skb);
1958 break;
1959
1960 case HCI_OP_LE_LTK_REPLY:
1961 hci_cc_le_ltk_reply(hdev, skb);
1962 break;
1963
1964 case HCI_OP_LE_LTK_NEG_REPLY:
1965 hci_cc_le_ltk_neg_reply(hdev, skb);
1966 break;
1967
1968 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
1969 hci_cc_write_le_host_supported(hdev, skb);
1970 break;
1971
1819 default: 1972 default:
1820 BT_DBG("%s opcode 0x%x", hdev->name, opcode); 1973 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1821 break; 1974 break;
@@ -1894,6 +2047,10 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
1894 hci_cs_le_create_conn(hdev, ev->status); 2047 hci_cs_le_create_conn(hdev, ev->status);
1895 break; 2048 break;
1896 2049
2050 case HCI_OP_LE_START_ENC:
2051 hci_cs_le_start_enc(hdev, ev->status);
2052 break;
2053
1897 default: 2054 default:
1898 BT_DBG("%s opcode 0x%x", hdev->name, opcode); 2055 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1899 break; 2056 break;
@@ -2333,6 +2490,7 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
2333 hci_conn_add_sysfs(conn); 2490 hci_conn_add_sysfs(conn);
2334 break; 2491 break;
2335 2492
2493 case 0x10: /* Connection Accept Timeout */
2336 case 0x11: /* Unsupported Feature or Parameter Value */ 2494 case 0x11: /* Unsupported Feature or Parameter Value */
2337 case 0x1c: /* SCO interval rejected */ 2495 case 0x1c: /* SCO interval rejected */
2338 case 0x1a: /* Unsupported Remote Feature */ 2496 case 0x1a: /* Unsupported Remote Feature */
@@ -2652,12 +2810,14 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
2652 2810
2653 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr); 2811 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
2654 if (!conn) { 2812 if (!conn) {
2655 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr); 2813 conn = hci_conn_add(hdev, LE_LINK, 0, &ev->bdaddr);
2656 if (!conn) { 2814 if (!conn) {
2657 BT_ERR("No memory for new connection"); 2815 BT_ERR("No memory for new connection");
2658 hci_dev_unlock(hdev); 2816 hci_dev_unlock(hdev);
2659 return; 2817 return;
2660 } 2818 }
2819
2820 conn->dst_type = ev->bdaddr_type;
2661 } 2821 }
2662 2822
2663 if (ev->status) { 2823 if (ev->status) {
@@ -2670,6 +2830,7 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
2670 2830
2671 mgmt_connected(hdev->id, &ev->bdaddr); 2831 mgmt_connected(hdev->id, &ev->bdaddr);
2672 2832
2833 conn->sec_level = BT_SECURITY_LOW;
2673 conn->handle = __le16_to_cpu(ev->handle); 2834 conn->handle = __le16_to_cpu(ev->handle);
2674 conn->state = BT_CONNECTED; 2835 conn->state = BT_CONNECTED;
2675 2836
@@ -2682,6 +2843,64 @@ unlock:
2682 hci_dev_unlock(hdev); 2843 hci_dev_unlock(hdev);
2683} 2844}
2684 2845
2846static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
2847 struct sk_buff *skb)
2848{
2849 struct hci_ev_le_advertising_info *ev;
2850 u8 num_reports;
2851
2852 num_reports = skb->data[0];
2853 ev = (void *) &skb->data[1];
2854
2855 hci_dev_lock(hdev);
2856
2857 hci_add_adv_entry(hdev, ev);
2858
2859 while (--num_reports) {
2860 ev = (void *) (ev->data + ev->length + 1);
2861 hci_add_adv_entry(hdev, ev);
2862 }
2863
2864 hci_dev_unlock(hdev);
2865}
2866
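One LE Advertising Report event can carry several variable-length reports; the walk above advances by ev->length + 1 because each report's AD data is followed by one RSSI byte. Userspace sketch of the same walk; the record layout here is simplified and assumed:

#include <stdint.h>
#include <stdio.h>

struct adv_info {
	uint8_t evt_type;
	uint8_t bdaddr[6];
	uint8_t length;
	uint8_t data[];		/* 'length' bytes of AD data, then 1 RSSI byte */
};

static void walk_reports(const uint8_t *buf)
{
	uint8_t num_reports = buf[0];
	const struct adv_info *ev = (const void *) &buf[1];

	while (num_reports--) {
		printf("report with %u data bytes\n", ev->length);
		ev = (const void *) (ev->data + ev->length + 1);
	}
}

int main(void)
{
	uint8_t buf[] = {
		2,					/* two reports */
		0, 1, 2, 3, 4, 5, 6, 1, 0xaa, 0x7f,	/* 1 data byte + RSSI */
		0, 6, 5, 4, 3, 2, 1, 0, 0x7f,		/* no data + RSSI */
	};

	walk_reports(buf);
	return 0;
}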
2867static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
2868 struct sk_buff *skb)
2869{
2870 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
2871 struct hci_cp_le_ltk_reply cp;
2872 struct hci_cp_le_ltk_neg_reply neg;
2873 struct hci_conn *conn;
2874 struct link_key *ltk;
2875
2876 BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
2877
2878 hci_dev_lock(hdev);
2879
2880 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2881 if (conn == NULL)
2882 goto not_found;
2883
2884 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
2885 if (ltk == NULL)
2886 goto not_found;
2887
2888 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
2889 cp.handle = cpu_to_le16(conn->handle);
2890 conn->pin_length = ltk->pin_len;
2891
2892 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
2893
2894 hci_dev_unlock(hdev);
2895
2896 return;
2897
2898not_found:
2899 neg.handle = ev->handle;
2900 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
2901 hci_dev_unlock(hdev);
2902}
2903
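hci_le_ltk_request_evt() resolves the controller's key request with an (ediv, rand) lookup: a hit answers LE_LTK_REPLY carrying the stored key, a miss answers LE_LTK_NEG_REPLY so the peer falls back to pairing. Shape of that decision as a standalone sketch; the key store and types are stand-ins:

#include <stdint.h>
#include <stdio.h>

struct ltk {
	uint16_t ediv;
	uint64_t rand;
	uint8_t val[16];
};

static const struct ltk key_store[] = {
	{ .ediv = 0x1234, .rand = 0x0123456789abcdefULL, .val = { 0x11 } },
};

static const struct ltk *find_ltk(uint16_t ediv, uint64_t rand)
{
	unsigned int i;

	for (i = 0; i < sizeof(key_store) / sizeof(key_store[0]); i++)
		if (key_store[i].ediv == ediv && key_store[i].rand == rand)
			return &key_store[i];
	return NULL;	/* caller answers with the negative reply */
}

int main(void)
{
	printf(find_ltk(0x1234, 0x0123456789abcdefULL) ?
			"send LE_LTK_REPLY with the key\n" :
			"send LE_LTK_NEG_REPLY\n");
	return 0;
}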
2685static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) 2904static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
2686{ 2905{
2687 struct hci_ev_le_meta *le_ev = (void *) skb->data; 2906 struct hci_ev_le_meta *le_ev = (void *) skb->data;
@@ -2693,6 +2912,14 @@ static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
2693 hci_le_conn_complete_evt(hdev, skb); 2912 hci_le_conn_complete_evt(hdev, skb);
2694 break; 2913 break;
2695 2914
2915 case HCI_EV_LE_ADVERTISING_REPORT:
2916 hci_le_adv_report_evt(hdev, skb);
2917 break;
2918
2919 case HCI_EV_LE_LTK_REQ:
2920 hci_le_ltk_request_evt(hdev, skb);
2921 break;
2922
2696 default: 2923 default:
2697 break; 2924 break;
2698 } 2925 }
@@ -2886,3 +3113,6 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
2886 hci_send_to_sock(hdev, skb, NULL); 3113 hci_send_to_sock(hdev, skb, NULL);
2887 kfree_skb(skb); 3114 kfree_skb(skb);
2888} 3115}
3116
3117module_param(enable_le, bool, 0444);
3118MODULE_PARM_DESC(enable_le, "Enable LE support");
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 295e4a88fff..ff02cf5e77c 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -180,82 +180,24 @@ static int hci_sock_release(struct socket *sock)
180 return 0; 180 return 0;
181} 181}
182 182
183struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) 183static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
184{
185 struct list_head *p;
186
187 list_for_each(p, &hdev->blacklist) {
188 struct bdaddr_list *b;
189
190 b = list_entry(p, struct bdaddr_list, list);
191
192 if (bacmp(bdaddr, &b->bdaddr) == 0)
193 return b;
194 }
195
196 return NULL;
197}
198
199static int hci_blacklist_add(struct hci_dev *hdev, void __user *arg)
200{ 184{
201 bdaddr_t bdaddr; 185 bdaddr_t bdaddr;
202 struct bdaddr_list *entry;
203 186
204 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr))) 187 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
205 return -EFAULT; 188 return -EFAULT;
206 189
207 if (bacmp(&bdaddr, BDADDR_ANY) == 0) 190 return hci_blacklist_add(hdev, &bdaddr);
208 return -EBADF;
209
210 if (hci_blacklist_lookup(hdev, &bdaddr))
211 return -EEXIST;
212
213 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
214 if (!entry)
215 return -ENOMEM;
216
217 bacpy(&entry->bdaddr, &bdaddr);
218
219 list_add(&entry->list, &hdev->blacklist);
220
221 return 0;
222}
223
224int hci_blacklist_clear(struct hci_dev *hdev)
225{
226 struct list_head *p, *n;
227
228 list_for_each_safe(p, n, &hdev->blacklist) {
229 struct bdaddr_list *b;
230
231 b = list_entry(p, struct bdaddr_list, list);
232
233 list_del(p);
234 kfree(b);
235 }
236
237 return 0;
238} 191}
239 192
240static int hci_blacklist_del(struct hci_dev *hdev, void __user *arg) 193static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
241{ 194{
242 bdaddr_t bdaddr; 195 bdaddr_t bdaddr;
243 struct bdaddr_list *entry;
244 196
245 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr))) 197 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
246 return -EFAULT; 198 return -EFAULT;
247 199
248 if (bacmp(&bdaddr, BDADDR_ANY) == 0) 200 return hci_blacklist_del(hdev, &bdaddr);
249 return hci_blacklist_clear(hdev);
250
251 entry = hci_blacklist_lookup(hdev, &bdaddr);
252 if (!entry)
253 return -ENOENT;
254
255 list_del(&entry->list);
256 kfree(entry);
257
258 return 0;
259} 201}
260 202
261/* Ioctls that require bound socket */ 203/* Ioctls that require bound socket */
@@ -290,12 +232,12 @@ static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsign
290 case HCIBLOCKADDR: 232 case HCIBLOCKADDR:
291 if (!capable(CAP_NET_ADMIN)) 233 if (!capable(CAP_NET_ADMIN))
292 return -EACCES; 234 return -EACCES;
293 return hci_blacklist_add(hdev, (void __user *) arg); 235 return hci_sock_blacklist_add(hdev, (void __user *) arg);
294 236
295 case HCIUNBLOCKADDR: 237 case HCIUNBLOCKADDR:
296 if (!capable(CAP_NET_ADMIN)) 238 if (!capable(CAP_NET_ADMIN))
297 return -EACCES; 239 return -EACCES;
298 return hci_blacklist_del(hdev, (void __user *) arg); 240 return hci_sock_blacklist_del(hdev, (void __user *) arg);
299 241
300 default: 242 default:
301 if (hdev->ioctl) 243 if (hdev->ioctl)
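The hci_sock refactor above leaves the socket layer doing only argument parsing; the list manipulation moves into shared hci_blacklist_add()/hci_blacklist_del() helpers that the mgmt interface can call as well, so both entry points mutate the blacklist through one code path. The resulting wrapper shape as a generic sketch; the names here are stand-ins:

#include <stdio.h>
#include <string.h>

typedef unsigned char bdaddr_t[6];

/* shared core helper: the only place that actually edits the list */
static int blacklist_add_core(const bdaddr_t addr)
{
	printf("blacklisting %02x:...\n", addr[0]);
	return 0;
}

/* thin transport wrapper: copy the argument in, then delegate */
static int ioctl_blacklist_add(const void *user_arg)
{
	bdaddr_t addr;

	memcpy(addr, user_arg, sizeof(addr));	/* copy_from_user() stand-in */
	return blacklist_add_core(addr);
}

int main(void)
{
	bdaddr_t a = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };

	return ioctl_blacklist_add(a);
}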
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 43b4c2deb7c..fb68f344c34 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -764,6 +764,7 @@ static int hidp_session(void *arg)
764 764
765 up_write(&hidp_session_sem); 765 up_write(&hidp_session_sem);
766 766
767 kfree(session->rd_data);
767 kfree(session); 768 kfree(session);
768 return 0; 769 return 0;
769} 770}
@@ -841,7 +842,8 @@ static int hidp_setup_input(struct hidp_session *session,
841 842
842 err = input_register_device(input); 843 err = input_register_device(input);
843 if (err < 0) { 844 if (err < 0) {
844 hci_conn_put_device(session->conn); 845 input_free_device(input);
846 session->input = NULL;
845 return err; 847 return err;
846 } 848 }
847 849
@@ -1044,8 +1046,12 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
1044 } 1046 }
1045 1047
1046 err = hid_add_device(session->hid); 1048 err = hid_add_device(session->hid);
1047 if (err < 0) 1049 if (err < 0) {
1048 goto err_add_device; 1050 atomic_inc(&session->terminate);
1051 wake_up_process(session->task);
1052 up_write(&hidp_session_sem);
1053 return err;
1054 }
1049 1055
1050 if (session->input) { 1056 if (session->input) {
1051 hidp_send_ctrl_message(session, 1057 hidp_send_ctrl_message(session,
@@ -1059,12 +1065,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
1059 up_write(&hidp_session_sem); 1065 up_write(&hidp_session_sem);
1060 return 0; 1066 return 0;
1061 1067
1062err_add_device:
1063 hid_destroy_device(session->hid);
1064 session->hid = NULL;
1065 atomic_inc(&session->terminate);
1066 wake_up_process(session->task);
1067
1068unlink: 1068unlink:
1069 hidp_del_timer(session); 1069 hidp_del_timer(session);
1070 1070
@@ -1090,7 +1090,6 @@ purge:
1090failed: 1090failed:
1091 up_write(&hidp_session_sem); 1091 up_write(&hidp_session_sem);
1092 1092
1093 input_free_device(session->input);
1094 kfree(session); 1093 kfree(session);
1095 return err; 1094 return err;
1096} 1095}
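Both hidp fixes above are about ownership on error paths: when input_register_device() fails the caller still owns the device and must free it right there with input_free_device() (clearing the now-dangling session->input pointer), while per-session resources such as rd_data are released by the session thread that allocated them. Stubbed, compilable model of the register-failure rule; the stubs stand in for the real input API:

#include <stdlib.h>

struct input_dev { int registered; };

static struct input_dev *input_allocate_device(void)
{
	return calloc(1, sizeof(struct input_dev));
}

static int input_register_device(struct input_dev *dev)
{
	(void) dev;
	return -12;		/* simulate -ENOMEM from registration */
}

static void input_free_device(struct input_dev *dev)
{
	free(dev);
}

int main(void)
{
	struct input_dev *input = input_allocate_device();
	int err = input_register_device(input);

	if (err < 0) {
		input_free_device(input);	/* caller still owns it on failure */
		input = NULL;			/* don't leave a dangling pointer */
		return 1;
	}
	return 0;
}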
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 7705e26e699..5a0ce738751 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -54,26 +54,39 @@
54#include <net/bluetooth/bluetooth.h> 54#include <net/bluetooth/bluetooth.h>
55#include <net/bluetooth/hci_core.h> 55#include <net/bluetooth/hci_core.h>
56#include <net/bluetooth/l2cap.h> 56#include <net/bluetooth/l2cap.h>
57#include <net/bluetooth/smp.h>
57 58
58int disable_ertm; 59int disable_ertm;
59 60
60static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN; 61static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61static u8 l2cap_fixed_chan[8] = { 0x02, }; 62static u8 l2cap_fixed_chan[8] = { 0x02, };
62 63
63static struct workqueue_struct *_busy_wq; 64static LIST_HEAD(chan_list);
64 65static DEFINE_RWLOCK(chan_list_lock);
65LIST_HEAD(chan_list);
66DEFINE_RWLOCK(chan_list_lock);
67
68static void l2cap_busy_work(struct work_struct *work);
69 66
70static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, 67static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
71 u8 code, u8 ident, u16 dlen, void *data); 68 u8 code, u8 ident, u16 dlen, void *data);
69static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
70 void *data);
72static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data); 71static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
73 74
74static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb); 75static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
75 76
76/* ---- L2CAP channels ---- */ 77/* ---- L2CAP channels ---- */
78
79static inline void chan_hold(struct l2cap_chan *c)
80{
81 atomic_inc(&c->refcnt);
82}
83
84static inline void chan_put(struct l2cap_chan *c)
85{
86 if (atomic_dec_and_test(&c->refcnt))
87 kfree(c);
88}
89
77static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) 90static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
78{ 91{
79 struct l2cap_chan *c; 92 struct l2cap_chan *c;
@@ -204,6 +217,62 @@ static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
204 return 0; 217 return 0;
205} 218}
206 219
220static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
221{
222 BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
223
224 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
225 chan_hold(chan);
226}
227
228static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
229{
230 BT_DBG("chan %p state %d", chan, chan->state);
231
232 if (timer_pending(timer) && del_timer(timer))
233 chan_put(chan);
234}
235
236static void l2cap_state_change(struct l2cap_chan *chan, int state)
237{
238 chan->state = state;
239 chan->ops->state_change(chan->data, state);
240}
241
242static void l2cap_chan_timeout(unsigned long arg)
243{
244 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
245 struct sock *sk = chan->sk;
246 int reason;
247
248 BT_DBG("chan %p state %d", chan, chan->state);
249
250 bh_lock_sock(sk);
251
252 if (sock_owned_by_user(sk)) {
253 /* sk is owned by user. Try again later */
254 __set_chan_timer(chan, HZ / 5);
255 bh_unlock_sock(sk);
256 chan_put(chan);
257 return;
258 }
259
260 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
261 reason = ECONNREFUSED;
262 else if (chan->state == BT_CONNECT &&
263 chan->sec_level != BT_SECURITY_SDP)
264 reason = ECONNREFUSED;
265 else
266 reason = ETIMEDOUT;
267
268 l2cap_chan_close(chan, reason);
269
270 bh_unlock_sock(sk);
271
272 chan->ops->close(chan->data);
273 chan_put(chan);
274}
275
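The timer helpers above pair the timer with the new channel refcount: arming takes a reference only when mod_timer() reports the timer was not already pending, a successful cancel drops it, and l2cap_chan_timeout() drops its own reference on exit, so the channel cannot be freed while the timer might still run. Userspace model of the same invariant:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct chan {
	atomic_int refcnt;
	bool timer_pending;
};

static void chan_hold(struct chan *c)
{
	atomic_fetch_add(&c->refcnt, 1);
}

static void chan_put(struct chan *c)
{
	if (atomic_fetch_sub(&c->refcnt, 1) == 1) {
		printf("chan freed\n");
		free(c);
	}
}

static void set_chan_timer(struct chan *c)
{
	if (!c->timer_pending) {	/* mod_timer() returned 0: newly armed */
		c->timer_pending = true;
		chan_hold(c);		/* the timer now owns a reference */
	}
}

static void clear_chan_timer(struct chan *c)
{
	if (c->timer_pending) {		/* del_timer() cancelled a pending timer */
		c->timer_pending = false;
		chan_put(c);
	}
}

int main(void)
{
	struct chan *c = malloc(sizeof(*c));

	atomic_init(&c->refcnt, 1);	/* creation reference */
	c->timer_pending = false;

	set_chan_timer(c);
	clear_chan_timer(c);
	chan_put(c);			/* drop the creation reference */
	return 0;
}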
207struct l2cap_chan *l2cap_chan_create(struct sock *sk) 276struct l2cap_chan *l2cap_chan_create(struct sock *sk)
208{ 277{
209 struct l2cap_chan *chan; 278 struct l2cap_chan *chan;
@@ -218,6 +287,12 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
218 list_add(&chan->global_l, &chan_list); 287 list_add(&chan->global_l, &chan_list);
219 write_unlock_bh(&chan_list_lock); 288 write_unlock_bh(&chan_list_lock);
220 289
290 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
291
292 chan->state = BT_OPEN;
293
294 atomic_set(&chan->refcnt, 1);
295
221 return chan; 296 return chan;
222} 297}
223 298
@@ -227,13 +302,11 @@ void l2cap_chan_destroy(struct l2cap_chan *chan)
227 list_del(&chan->global_l); 302 list_del(&chan->global_l);
228 write_unlock_bh(&chan_list_lock); 303 write_unlock_bh(&chan_list_lock);
229 304
230 kfree(chan); 305 chan_put(chan);
231} 306}
232 307
233static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) 308static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
234{ 309{
235 struct sock *sk = chan->sk;
236
237 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, 310 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
238 chan->psm, chan->dcid); 311 chan->psm, chan->dcid);
239 312
@@ -241,7 +314,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
241 314
242 chan->conn = conn; 315 chan->conn = conn;
243 316
244 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) { 317 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
245 if (conn->hcon->type == LE_LINK) { 318 if (conn->hcon->type == LE_LINK) {
246 /* LE connection */ 319 /* LE connection */
247 chan->omtu = L2CAP_LE_DEFAULT_MTU; 320 chan->omtu = L2CAP_LE_DEFAULT_MTU;
@@ -252,7 +325,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
252 chan->scid = l2cap_alloc_cid(conn); 325 chan->scid = l2cap_alloc_cid(conn);
253 chan->omtu = L2CAP_DEFAULT_MTU; 326 chan->omtu = L2CAP_DEFAULT_MTU;
254 } 327 }
255 } else if (sk->sk_type == SOCK_DGRAM) { 328 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
256 /* Connectionless socket */ 329 /* Connectionless socket */
257 chan->scid = L2CAP_CID_CONN_LESS; 330 chan->scid = L2CAP_CID_CONN_LESS;
258 chan->dcid = L2CAP_CID_CONN_LESS; 331 chan->dcid = L2CAP_CID_CONN_LESS;
@@ -264,20 +337,20 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
264 chan->omtu = L2CAP_DEFAULT_MTU; 337 chan->omtu = L2CAP_DEFAULT_MTU;
265 } 338 }
266 339
267 sock_hold(sk); 340 chan_hold(chan);
268 341
269 list_add(&chan->list, &conn->chan_l); 342 list_add(&chan->list, &conn->chan_l);
270} 343}
271 344
272/* Delete channel. 345/* Delete channel.
273 * Must be called on the locked socket. */ 346 * Must be called on the locked socket. */
274void l2cap_chan_del(struct l2cap_chan *chan, int err) 347static void l2cap_chan_del(struct l2cap_chan *chan, int err)
275{ 348{
276 struct sock *sk = chan->sk; 349 struct sock *sk = chan->sk;
277 struct l2cap_conn *conn = chan->conn; 350 struct l2cap_conn *conn = chan->conn;
278 struct sock *parent = bt_sk(sk)->parent; 351 struct sock *parent = bt_sk(sk)->parent;
279 352
280 l2cap_sock_clear_timer(sk); 353 __clear_chan_timer(chan);
281 354
282 BT_DBG("chan %p, conn %p, err %d", chan, conn, err); 355 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
283 356
@@ -286,13 +359,13 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
286 write_lock_bh(&conn->chan_lock); 359 write_lock_bh(&conn->chan_lock);
287 list_del(&chan->list); 360 list_del(&chan->list);
288 write_unlock_bh(&conn->chan_lock); 361 write_unlock_bh(&conn->chan_lock);
289 __sock_put(sk); 362 chan_put(chan);
290 363
291 chan->conn = NULL; 364 chan->conn = NULL;
292 hci_conn_put(conn->hcon); 365 hci_conn_put(conn->hcon);
293 } 366 }
294 367
295 sk->sk_state = BT_CLOSED; 368 l2cap_state_change(chan, BT_CLOSED);
296 sock_set_flag(sk, SOCK_ZAPPED); 369 sock_set_flag(sk, SOCK_ZAPPED);
297 370
298 if (err) 371 if (err)
@@ -304,8 +377,8 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
304 } else 377 } else
305 sk->sk_state_change(sk); 378 sk->sk_state_change(sk);
306 379
307 if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE && 380 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
308 chan->conf_state & L2CAP_CONF_INPUT_DONE)) 381 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
309 return; 382 return;
310 383
311 skb_queue_purge(&chan->tx_q); 384 skb_queue_purge(&chan->tx_q);
@@ -313,12 +386,11 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
313 if (chan->mode == L2CAP_MODE_ERTM) { 386 if (chan->mode == L2CAP_MODE_ERTM) {
314 struct srej_list *l, *tmp; 387 struct srej_list *l, *tmp;
315 388
316 del_timer(&chan->retrans_timer); 389 __clear_retrans_timer(chan);
317 del_timer(&chan->monitor_timer); 390 __clear_monitor_timer(chan);
318 del_timer(&chan->ack_timer); 391 __clear_ack_timer(chan);
319 392
320 skb_queue_purge(&chan->srej_q); 393 skb_queue_purge(&chan->srej_q);
321 skb_queue_purge(&chan->busy_q);
322 394
323 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { 395 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
324 list_del(&l->list); 396 list_del(&l->list);
@@ -327,11 +399,86 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
327 } 399 }
328} 400}
329 401
330static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan) 402static void l2cap_chan_cleanup_listen(struct sock *parent)
331{ 403{
404 struct sock *sk;
405
406 BT_DBG("parent %p", parent);
407
408 /* Close not yet accepted channels */
409 while ((sk = bt_accept_dequeue(parent, NULL))) {
410 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
411 __clear_chan_timer(chan);
412 lock_sock(sk);
413 l2cap_chan_close(chan, ECONNRESET);
414 release_sock(sk);
415 chan->ops->close(chan->data);
416 }
417}
418
419void l2cap_chan_close(struct l2cap_chan *chan, int reason)
420{
421 struct l2cap_conn *conn = chan->conn;
332 struct sock *sk = chan->sk; 422 struct sock *sk = chan->sk;
333 423
334 if (sk->sk_type == SOCK_RAW) { 424 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
425
426 switch (chan->state) {
427 case BT_LISTEN:
428 l2cap_chan_cleanup_listen(sk);
429
430 l2cap_state_change(chan, BT_CLOSED);
431 sock_set_flag(sk, SOCK_ZAPPED);
432 break;
433
434 case BT_CONNECTED:
435 case BT_CONFIG:
436 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
437 conn->hcon->type == ACL_LINK) {
438 __clear_chan_timer(chan);
439 __set_chan_timer(chan, sk->sk_sndtimeo);
440 l2cap_send_disconn_req(conn, chan, reason);
441 } else
442 l2cap_chan_del(chan, reason);
443 break;
444
445 case BT_CONNECT2:
446 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
447 conn->hcon->type == ACL_LINK) {
448 struct l2cap_conn_rsp rsp;
449 __u16 result;
450
451 if (bt_sk(sk)->defer_setup)
452 result = L2CAP_CR_SEC_BLOCK;
453 else
454 result = L2CAP_CR_BAD_PSM;
455 l2cap_state_change(chan, BT_DISCONN);
456
457 rsp.scid = cpu_to_le16(chan->dcid);
458 rsp.dcid = cpu_to_le16(chan->scid);
459 rsp.result = cpu_to_le16(result);
460 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
461 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
462 sizeof(rsp), &rsp);
463 }
464
465 l2cap_chan_del(chan, reason);
466 break;
467
468 case BT_CONNECT:
469 case BT_DISCONN:
470 l2cap_chan_del(chan, reason);
471 break;
472
473 default:
474 sock_set_flag(sk, SOCK_ZAPPED);
475 break;
476 }
477}
478
479static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
480{
481 if (chan->chan_type == L2CAP_CHAN_RAW) {
335 switch (chan->sec_level) { 482 switch (chan->sec_level) {
336 case BT_SECURITY_HIGH: 483 case BT_SECURITY_HIGH:
337 return HCI_AT_DEDICATED_BONDING_MITM; 484 return HCI_AT_DEDICATED_BONDING_MITM;
@@ -371,7 +518,7 @@ static inline int l2cap_check_security(struct l2cap_chan *chan)
371 return hci_conn_security(conn->hcon, chan->sec_level, auth_type); 518 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
372} 519}
373 520
374u8 l2cap_get_ident(struct l2cap_conn *conn) 521static u8 l2cap_get_ident(struct l2cap_conn *conn)
375{ 522{
376 u8 id; 523 u8 id;
377 524
@@ -393,7 +540,7 @@ u8 l2cap_get_ident(struct l2cap_conn *conn)
393 return id; 540 return id;
394} 541}
395 542
396void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data) 543static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
397{ 544{
398 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data); 545 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
399 u8 flags; 546 u8 flags;
@@ -408,6 +555,8 @@ void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *d
408 else 555 else
409 flags = ACL_START; 556 flags = ACL_START;
410 557
558 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
559
411 hci_send_acl(conn->hcon, skb, flags); 560 hci_send_acl(conn->hcon, skb, flags);
412} 561}
413 562
@@ -415,13 +564,11 @@ static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
415{ 564{
416 struct sk_buff *skb; 565 struct sk_buff *skb;
417 struct l2cap_hdr *lh; 566 struct l2cap_hdr *lh;
418 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
419 struct l2cap_conn *conn = chan->conn; 567 struct l2cap_conn *conn = chan->conn;
420 struct sock *sk = (struct sock *)pi;
421 int count, hlen = L2CAP_HDR_SIZE + 2; 568 int count, hlen = L2CAP_HDR_SIZE + 2;
422 u8 flags; 569 u8 flags;
423 570
424 if (sk->sk_state != BT_CONNECTED) 571 if (chan->state != BT_CONNECTED)
425 return; 572 return;
426 573
427 if (chan->fcs == L2CAP_FCS_CRC16) 574 if (chan->fcs == L2CAP_FCS_CRC16)
@@ -432,15 +579,11 @@ static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
432 count = min_t(unsigned int, conn->mtu, hlen); 579 count = min_t(unsigned int, conn->mtu, hlen);
433 control |= L2CAP_CTRL_FRAME_TYPE; 580 control |= L2CAP_CTRL_FRAME_TYPE;
434 581
435 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) { 582 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
436 control |= L2CAP_CTRL_FINAL; 583 control |= L2CAP_CTRL_FINAL;
437 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
438 }
439 584
440 if (chan->conn_state & L2CAP_CONN_SEND_PBIT) { 585 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
441 control |= L2CAP_CTRL_POLL; 586 control |= L2CAP_CTRL_POLL;
442 chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
443 }
444 587
445 skb = bt_skb_alloc(count, GFP_ATOMIC); 588 skb = bt_skb_alloc(count, GFP_ATOMIC);
446 if (!skb) 589 if (!skb)
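The conf_state/conn_state rewrite running through this file replaces mask arithmetic ('state & FLAG' plus a separate clear) with single atomic bit operations, closing the window where another context could observe or modify the flag between the test and the update. Userspace equivalent using compiler atomics; the flag value is assumed:

#include <stdio.h>

#define CONN_SEND_FBIT 0

static int test_and_clear_bit(int nr, unsigned long *addr)
{
	unsigned long mask = 1UL << nr;

	return !!(__atomic_fetch_and(addr, ~mask, __ATOMIC_SEQ_CST) & mask);
}

int main(void)
{
	unsigned long conn_state = 1UL << CONN_SEND_FBIT;
	unsigned int control = 0;

	/* one atomic step instead of test + separate '&= ~' clear */
	if (test_and_clear_bit(CONN_SEND_FBIT, &conn_state))
		control |= 0x0080;	/* L2CAP_CTRL_FINAL, value assumed */

	printf("control=0x%04x conn_state=%lu\n", control, conn_state);
	return 0;
}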
@@ -461,14 +604,16 @@ static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
461 else 604 else
462 flags = ACL_START; 605 flags = ACL_START;
463 606
607 bt_cb(skb)->force_active = chan->force_active;
608
464 hci_send_acl(chan->conn->hcon, skb, flags); 609 hci_send_acl(chan->conn->hcon, skb, flags);
465} 610}
466 611
467static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control) 612static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
468{ 613{
469 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) { 614 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
470 control |= L2CAP_SUPER_RCV_NOT_READY; 615 control |= L2CAP_SUPER_RCV_NOT_READY;
471 chan->conn_state |= L2CAP_CONN_RNR_SENT; 616 set_bit(CONN_RNR_SENT, &chan->conn_state);
472 } else 617 } else
473 control |= L2CAP_SUPER_RCV_READY; 618 control |= L2CAP_SUPER_RCV_READY;
474 619
@@ -479,7 +624,7 @@ static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
479 624
480static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan) 625static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
481{ 626{
482 return !(chan->conf_state & L2CAP_CONF_CONNECT_PEND); 627 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
483} 628}
484 629
485static void l2cap_do_start(struct l2cap_chan *chan) 630static void l2cap_do_start(struct l2cap_chan *chan)
@@ -497,7 +642,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
497 req.psm = chan->psm; 642 req.psm = chan->psm;
498 643
499 chan->ident = l2cap_get_ident(conn); 644 chan->ident = l2cap_get_ident(conn);
500 chan->conf_state |= L2CAP_CONF_CONNECT_PEND; 645 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
501 646
502 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, 647 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
503 sizeof(req), &req); 648 sizeof(req), &req);
@@ -533,7 +678,7 @@ static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
533 } 678 }
534} 679}
535 680
536void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err) 681static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
537{ 682{
538 struct sock *sk; 683 struct sock *sk;
539 struct l2cap_disconn_req req; 684 struct l2cap_disconn_req req;
@@ -544,9 +689,9 @@ void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, in
544 sk = chan->sk; 689 sk = chan->sk;
545 690
546 if (chan->mode == L2CAP_MODE_ERTM) { 691 if (chan->mode == L2CAP_MODE_ERTM) {
547 del_timer(&chan->retrans_timer); 692 __clear_retrans_timer(chan);
548 del_timer(&chan->monitor_timer); 693 __clear_monitor_timer(chan);
549 del_timer(&chan->ack_timer); 694 __clear_ack_timer(chan);
550 } 695 }
551 696
552 req.dcid = cpu_to_le16(chan->dcid); 697 req.dcid = cpu_to_le16(chan->dcid);
@@ -554,7 +699,7 @@ void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, in
554 l2cap_send_cmd(conn, l2cap_get_ident(conn), 699 l2cap_send_cmd(conn, l2cap_get_ident(conn),
555 L2CAP_DISCONN_REQ, sizeof(req), &req); 700 L2CAP_DISCONN_REQ, sizeof(req), &req);
556 701
557 sk->sk_state = BT_DISCONN; 702 l2cap_state_change(chan, BT_DISCONN);
558 sk->sk_err = err; 703 sk->sk_err = err;
559} 704}
560 705
@@ -572,13 +717,12 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
572 717
573 bh_lock_sock(sk); 718 bh_lock_sock(sk);
574 719
575 if (sk->sk_type != SOCK_SEQPACKET && 720 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
576 sk->sk_type != SOCK_STREAM) {
577 bh_unlock_sock(sk); 721 bh_unlock_sock(sk);
578 continue; 722 continue;
579 } 723 }
580 724
581 if (sk->sk_state == BT_CONNECT) { 725 if (chan->state == BT_CONNECT) {
582 struct l2cap_conn_req req; 726 struct l2cap_conn_req req;
583 727
584 if (!l2cap_check_security(chan) || 728 if (!l2cap_check_security(chan) ||
@@ -587,15 +731,14 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
587 continue; 731 continue;
588 } 732 }
589 733
590 if (!l2cap_mode_supported(chan->mode, 734 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
591 conn->feat_mask) 735 && test_bit(CONF_STATE2_DEVICE,
592 && chan->conf_state & 736 &chan->conf_state)) {
593 L2CAP_CONF_STATE2_DEVICE) { 737 /* l2cap_chan_close() calls list_del(chan)
594 /* __l2cap_sock_close() calls list_del(chan)
595 * so release the lock */ 738 * so release the lock */
596 read_unlock_bh(&conn->chan_lock); 739 read_unlock(&conn->chan_lock);
597 __l2cap_sock_close(sk, ECONNRESET); 740 l2cap_chan_close(chan, ECONNRESET);
598 read_lock_bh(&conn->chan_lock); 741 read_lock(&conn->chan_lock);
599 bh_unlock_sock(sk); 742 bh_unlock_sock(sk);
600 continue; 743 continue;
601 } 744 }
@@ -604,12 +747,12 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
604 req.psm = chan->psm; 747 req.psm = chan->psm;
605 748
606 chan->ident = l2cap_get_ident(conn); 749 chan->ident = l2cap_get_ident(conn);
607 chan->conf_state |= L2CAP_CONF_CONNECT_PEND; 750 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
608 751
609 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, 752 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
610 sizeof(req), &req); 753 sizeof(req), &req);
611 754
612 } else if (sk->sk_state == BT_CONNECT2) { 755 } else if (chan->state == BT_CONNECT2) {
613 struct l2cap_conn_rsp rsp; 756 struct l2cap_conn_rsp rsp;
614 char buf[128]; 757 char buf[128];
615 rsp.scid = cpu_to_le16(chan->dcid); 758 rsp.scid = cpu_to_le16(chan->dcid);
@@ -624,7 +767,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
624 parent->sk_data_ready(parent, 0); 767 parent->sk_data_ready(parent, 0);
625 768
626 } else { 769 } else {
627 sk->sk_state = BT_CONFIG; 770 l2cap_state_change(chan, BT_CONFIG);
628 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); 771 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
629 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 772 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
630 } 773 }
@@ -636,13 +779,13 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
636 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, 779 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
637 sizeof(rsp), &rsp); 780 sizeof(rsp), &rsp);
638 781
639 if (chan->conf_state & L2CAP_CONF_REQ_SENT || 782 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
640 rsp.result != L2CAP_CR_SUCCESS) { 783 rsp.result != L2CAP_CR_SUCCESS) {
641 bh_unlock_sock(sk); 784 bh_unlock_sock(sk);
642 continue; 785 continue;
643 } 786 }
644 787
645 chan->conf_state |= L2CAP_CONF_REQ_SENT; 788 set_bit(CONF_REQ_SENT, &chan->conf_state);
646 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 789 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
647 l2cap_build_conf_req(chan, buf), buf); 790 l2cap_build_conf_req(chan, buf), buf);
648 chan->num_conf_req++; 791 chan->num_conf_req++;
@@ -666,7 +809,7 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdadd
666 list_for_each_entry(c, &chan_list, global_l) { 809 list_for_each_entry(c, &chan_list, global_l) {
667 struct sock *sk = c->sk; 810 struct sock *sk = c->sk;
668 811
669 if (state && sk->sk_state != state) 812 if (state && c->state != state)
670 continue; 813 continue;
671 814
672 if (c->scid == cid) { 815 if (c->scid == cid) {
@@ -710,24 +853,16 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
710 goto clean; 853 goto clean;
711 } 854 }
712 855
713 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC); 856 chan = pchan->ops->new_connection(pchan->data);
714 if (!sk) 857 if (!chan)
715 goto clean;
716
717 chan = l2cap_chan_create(sk);
718 if (!chan) {
719 l2cap_sock_kill(sk);
720 goto clean; 858 goto clean;
721 }
722 859
723 l2cap_pi(sk)->chan = chan; 860 sk = chan->sk;
724 861
725 write_lock_bh(&conn->chan_lock); 862 write_lock_bh(&conn->chan_lock);
726 863
727 hci_conn_hold(conn->hcon); 864 hci_conn_hold(conn->hcon);
728 865
729 l2cap_sock_init(sk, parent);
730
731 bacpy(&bt_sk(sk)->src, conn->src); 866 bacpy(&bt_sk(sk)->src, conn->src);
732 bacpy(&bt_sk(sk)->dst, conn->dst); 867 bacpy(&bt_sk(sk)->dst, conn->dst);
733 868
@@ -735,9 +870,9 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
735 870
736 __l2cap_chan_add(conn, chan); 871 __l2cap_chan_add(conn, chan);
737 872
738 l2cap_sock_set_timer(sk, sk->sk_sndtimeo); 873 __set_chan_timer(chan, sk->sk_sndtimeo);
739 874
740 sk->sk_state = BT_CONNECTED; 875 l2cap_state_change(chan, BT_CONNECTED);
741 parent->sk_data_ready(parent, 0); 876 parent->sk_data_ready(parent, 0);
742 877
743 write_unlock_bh(&conn->chan_lock); 878 write_unlock_bh(&conn->chan_lock);
@@ -746,6 +881,23 @@ clean:
746 bh_unlock_sock(parent); 881 bh_unlock_sock(parent);
747} 882}
748 883
884static void l2cap_chan_ready(struct sock *sk)
885{
886 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
887 struct sock *parent = bt_sk(sk)->parent;
888
889 BT_DBG("sk %p, parent %p", sk, parent);
890
891 chan->conf_state = 0;
892 __clear_chan_timer(chan);
893
894 l2cap_state_change(chan, BT_CONNECTED);
895 sk->sk_state_change(sk);
896
897 if (parent)
898 parent->sk_data_ready(parent, 0);
899}
900
749static void l2cap_conn_ready(struct l2cap_conn *conn) 901static void l2cap_conn_ready(struct l2cap_conn *conn)
750{ 902{
751 struct l2cap_chan *chan; 903 struct l2cap_chan *chan;
@@ -763,17 +915,15 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
763 bh_lock_sock(sk); 915 bh_lock_sock(sk);
764 916
765 if (conn->hcon->type == LE_LINK) { 917 if (conn->hcon->type == LE_LINK) {
766 l2cap_sock_clear_timer(sk); 918 if (smp_conn_security(conn, chan->sec_level))
767 sk->sk_state = BT_CONNECTED; 919 l2cap_chan_ready(sk);
768 sk->sk_state_change(sk);
769 }
770 920
771 if (sk->sk_type != SOCK_SEQPACKET && 921 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
772 sk->sk_type != SOCK_STREAM) { 922 __clear_chan_timer(chan);
773 l2cap_sock_clear_timer(sk); 923 l2cap_state_change(chan, BT_CONNECTED);
774 sk->sk_state = BT_CONNECTED;
775 sk->sk_state_change(sk); 924 sk->sk_state_change(sk);
776 } else if (sk->sk_state == BT_CONNECT) 925
926 } else if (chan->state == BT_CONNECT)
777 l2cap_do_start(chan); 927 l2cap_do_start(chan);
778 928
779 bh_unlock_sock(sk); 929 bh_unlock_sock(sk);
@@ -811,6 +961,45 @@ static void l2cap_info_timeout(unsigned long arg)
811 l2cap_conn_start(conn); 961 l2cap_conn_start(conn);
812} 962}
813 963
964static void l2cap_conn_del(struct hci_conn *hcon, int err)
965{
966 struct l2cap_conn *conn = hcon->l2cap_data;
967 struct l2cap_chan *chan, *l;
968 struct sock *sk;
969
970 if (!conn)
971 return;
972
973 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
974
975 kfree_skb(conn->rx_skb);
976
977 /* Kill channels */
978 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
979 sk = chan->sk;
980 bh_lock_sock(sk);
981 l2cap_chan_del(chan, err);
982 bh_unlock_sock(sk);
983 chan->ops->close(chan->data);
984 }
985
986 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
987 del_timer_sync(&conn->info_timer);
988
989 if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
990 del_timer(&conn->security_timer);
991
992 hcon->l2cap_data = NULL;
993 kfree(conn);
994}
995
996static void security_timeout(unsigned long arg)
997{
998 struct l2cap_conn *conn = (void *) arg;
999
1000 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1001}
1002
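Teardown now routes through chan->ops->close(), and the new l2cap_state_change() helper calls ops->state_change(), instead of poking the socket directly: the core stops assuming every channel is socket-backed, which is also why l2cap_conn_del() moved above its new security_timeout() caller. Minimal model of that indirection; the structure and names are assumed:

#include <stdio.h>

struct chan_ops {
	void (*close)(void *data);
	void (*state_change)(void *data, int state);
};

struct chan {
	const struct chan_ops *ops;
	void *data;
};

static void sock_close(void *data)
{
	printf("socket backend close(%p)\n", data);
}

static void sock_state_change(void *data, int state)
{
	printf("socket backend state -> %d\n", state);
}

static const struct chan_ops sock_ops = {
	.close = sock_close,
	.state_change = sock_state_change,
};

int main(void)
{
	int dummy;
	struct chan c = { .ops = &sock_ops, .data = &dummy };

	c.ops->state_change(c.data, 9 /* BT_CLOSED, value assumed */);
	c.ops->close(c.data);
	return 0;
}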
814static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) 1003static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
815{ 1004{
816 struct l2cap_conn *conn = hcon->l2cap_data; 1005 struct l2cap_conn *conn = hcon->l2cap_data;
@@ -842,7 +1031,10 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
842 1031
843 INIT_LIST_HEAD(&conn->chan_l); 1032 INIT_LIST_HEAD(&conn->chan_l);
844 1033
845 if (hcon->type != LE_LINK) 1034 if (hcon->type == LE_LINK)
1035 setup_timer(&conn->security_timer, security_timeout,
1036 (unsigned long) conn);
1037 else
846 setup_timer(&conn->info_timer, l2cap_info_timeout, 1038 setup_timer(&conn->info_timer, l2cap_info_timeout,
847 (unsigned long) conn); 1039 (unsigned long) conn);
848 1040
@@ -851,35 +1043,6 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
851 return conn; 1043 return conn;
852} 1044}
853 1045
854static void l2cap_conn_del(struct hci_conn *hcon, int err)
855{
856 struct l2cap_conn *conn = hcon->l2cap_data;
857 struct l2cap_chan *chan, *l;
858 struct sock *sk;
859
860 if (!conn)
861 return;
862
863 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
864
865 kfree_skb(conn->rx_skb);
866
867 /* Kill channels */
868 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
869 sk = chan->sk;
870 bh_lock_sock(sk);
871 l2cap_chan_del(chan, err);
872 bh_unlock_sock(sk);
873 l2cap_sock_kill(sk);
874 }
875
876 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
877 del_timer_sync(&conn->info_timer);
878
879 hcon->l2cap_data = NULL;
880 kfree(conn);
881}
882
883static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) 1046static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
884{ 1047{
885 write_lock_bh(&conn->chan_lock); 1048 write_lock_bh(&conn->chan_lock);
@@ -901,7 +1064,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
901 list_for_each_entry(c, &chan_list, global_l) { 1064 list_for_each_entry(c, &chan_list, global_l) {
902 struct sock *sk = c->sk; 1065 struct sock *sk = c->sk;
903 1066
904 if (state && sk->sk_state != state) 1067 if (state && c->state != state)
905 continue; 1068 continue;
906 1069
907 if (c->psm == psm) { 1070 if (c->psm == psm) {
@@ -945,10 +1108,10 @@ int l2cap_chan_connect(struct l2cap_chan *chan)
945 auth_type = l2cap_get_auth_type(chan); 1108 auth_type = l2cap_get_auth_type(chan);
946 1109
947 if (chan->dcid == L2CAP_CID_LE_DATA) 1110 if (chan->dcid == L2CAP_CID_LE_DATA)
948 hcon = hci_connect(hdev, LE_LINK, dst, 1111 hcon = hci_connect(hdev, LE_LINK, 0, dst,
949 chan->sec_level, auth_type); 1112 chan->sec_level, auth_type);
950 else 1113 else
951 hcon = hci_connect(hdev, ACL_LINK, dst, 1114 hcon = hci_connect(hdev, ACL_LINK, 0, dst,
952 chan->sec_level, auth_type); 1115 chan->sec_level, auth_type);
953 1116
954 if (IS_ERR(hcon)) { 1117 if (IS_ERR(hcon)) {
@@ -968,15 +1131,14 @@ int l2cap_chan_connect(struct l2cap_chan *chan)
968 1131
969 l2cap_chan_add(conn, chan); 1132 l2cap_chan_add(conn, chan);
970 1133
971 sk->sk_state = BT_CONNECT; 1134 l2cap_state_change(chan, BT_CONNECT);
972 l2cap_sock_set_timer(sk, sk->sk_sndtimeo); 1135 __set_chan_timer(chan, sk->sk_sndtimeo);
973 1136
974 if (hcon->state == BT_CONNECTED) { 1137 if (hcon->state == BT_CONNECTED) {
975 if (sk->sk_type != SOCK_SEQPACKET && 1138 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
976 sk->sk_type != SOCK_STREAM) { 1139 __clear_chan_timer(chan);
977 l2cap_sock_clear_timer(sk);
978 if (l2cap_check_security(chan)) 1140 if (l2cap_check_security(chan))
979 sk->sk_state = BT_CONNECTED; 1141 l2cap_state_change(chan, BT_CONNECTED);
980 } else 1142 } else
981 l2cap_do_start(chan); 1143 l2cap_do_start(chan);
982 } 1144 }
@@ -997,9 +1159,8 @@ int __l2cap_wait_ack(struct sock *sk)
997 int timeo = HZ/5; 1159 int timeo = HZ/5;
998 1160
999 add_wait_queue(sk_sleep(sk), &wait); 1161 add_wait_queue(sk_sleep(sk), &wait);
1000 while ((chan->unacked_frames > 0 && chan->conn)) { 1162 set_current_state(TASK_INTERRUPTIBLE);
1001 set_current_state(TASK_INTERRUPTIBLE); 1163 while (chan->unacked_frames > 0 && chan->conn) {
1002
1003 if (!timeo) 1164 if (!timeo)
1004 timeo = HZ/5; 1165 timeo = HZ/5;
1005 1166
@@ -1011,6 +1172,7 @@ int __l2cap_wait_ack(struct sock *sk)
1011 release_sock(sk); 1172 release_sock(sk);
1012 timeo = schedule_timeout(timeo); 1173 timeo = schedule_timeout(timeo);
1013 lock_sock(sk); 1174 lock_sock(sk);
1175 set_current_state(TASK_INTERRUPTIBLE);
1014 1176
1015 err = sock_error(sk); 1177 err = sock_error(sk);
1016 if (err) 1178 if (err)
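The __l2cap_wait_ack() reorder is the canonical sleep/wakeup discipline: publish TASK_INTERRUPTIBLE before testing the condition, and re-publish it after every wakeup, so an ack that arrives between the test and the sleep is not lost. Compilable stand-in showing the shape of the fixed loop; the helpers only model the kernel primitives:

#include <stdbool.h>
#include <stdio.h>

static bool marked_sleeping;

static void set_state_interruptible(void)
{
	marked_sleeping = true;		/* visible to a would-be waker */
}

static void schedule_timeout_model(int *unacked)
{
	if (!marked_sleeping)
		return;			/* a wakeup already raced in: don't sleep */
	printf("sleeping...\n");
	(*unacked)--;			/* the ack arrives while we sleep */
	marked_sleeping = false;
}

int main(void)
{
	int unacked = 2;

	set_state_interruptible();		/* before the first condition test */
	while (unacked > 0) {
		schedule_timeout_model(&unacked);
		set_state_interruptible();	/* re-arm before re-testing */
	}
	printf("all frames acked\n");
	return 0;
}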
@@ -1036,7 +1198,7 @@ static void l2cap_monitor_timeout(unsigned long arg)
1036 } 1198 }
1037 1199
1038 chan->retry_count++; 1200 chan->retry_count++;
1039 __mod_monitor_timer(); 1201 __set_monitor_timer(chan);
1040 1202
1041 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); 1203 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1042 bh_unlock_sock(sk); 1204 bh_unlock_sock(sk);
@@ -1051,9 +1213,9 @@ static void l2cap_retrans_timeout(unsigned long arg)
1051 1213
1052 bh_lock_sock(sk); 1214 bh_lock_sock(sk);
1053 chan->retry_count = 1; 1215 chan->retry_count = 1;
1054 __mod_monitor_timer(); 1216 __set_monitor_timer(chan);
1055 1217
1056 chan->conn_state |= L2CAP_CONN_WAIT_F; 1218 set_bit(CONN_WAIT_F, &chan->conn_state);
1057 1219
1058 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); 1220 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1059 bh_unlock_sock(sk); 1221 bh_unlock_sock(sk);
@@ -1075,7 +1237,7 @@ static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1075 } 1237 }
1076 1238
1077 if (!chan->unacked_frames) 1239 if (!chan->unacked_frames)
1078 del_timer(&chan->retrans_timer); 1240 __clear_retrans_timer(chan);
1079} 1241}
1080 1242
1081void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb) 1243void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
@@ -1090,6 +1252,7 @@ void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1090 else 1252 else
1091 flags = ACL_START; 1253 flags = ACL_START;
1092 1254
1255 bt_cb(skb)->force_active = chan->force_active;
1093 hci_send_acl(hcon, skb, flags); 1256 hci_send_acl(hcon, skb, flags);
1094} 1257}
1095 1258
@@ -1143,10 +1306,8 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1143 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); 1306 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1144 control &= L2CAP_CTRL_SAR; 1307 control &= L2CAP_CTRL_SAR;
1145 1308
1146 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) { 1309 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1147 control |= L2CAP_CTRL_FINAL; 1310 control |= L2CAP_CTRL_FINAL;
1148 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1149 }
1150 1311
1151 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT) 1312 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1152 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); 1313 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
@@ -1164,11 +1325,10 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1164int l2cap_ertm_send(struct l2cap_chan *chan) 1325int l2cap_ertm_send(struct l2cap_chan *chan)
1165{ 1326{
1166 struct sk_buff *skb, *tx_skb; 1327 struct sk_buff *skb, *tx_skb;
1167 struct sock *sk = chan->sk;
1168 u16 control, fcs; 1328 u16 control, fcs;
1169 int nsent = 0; 1329 int nsent = 0;
1170 1330
1171 if (sk->sk_state != BT_CONNECTED) 1331 if (chan->state != BT_CONNECTED)
1172 return -ENOTCONN; 1332 return -ENOTCONN;
1173 1333
1174 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) { 1334 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
@@ -1186,10 +1346,9 @@ int l2cap_ertm_send(struct l2cap_chan *chan)
1186 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); 1346 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1187 control &= L2CAP_CTRL_SAR; 1347 control &= L2CAP_CTRL_SAR;
1188 1348
1189 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) { 1349 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1190 control |= L2CAP_CTRL_FINAL; 1350 control |= L2CAP_CTRL_FINAL;
1191 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT; 1351
1192 }
1193 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT) 1352 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1194 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); 1353 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1195 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); 1354 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
@@ -1202,7 +1361,7 @@ int l2cap_ertm_send(struct l2cap_chan *chan)
1202 1361
1203 l2cap_do_send(chan, tx_skb); 1362 l2cap_do_send(chan, tx_skb);
1204 1363
1205 __mod_retrans_timer(); 1364 __set_retrans_timer(chan);
1206 1365
1207 bt_cb(skb)->tx_seq = chan->next_tx_seq; 1366 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1208 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64; 1367 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
@@ -1241,9 +1400,9 @@ static void l2cap_send_ack(struct l2cap_chan *chan)
1241 1400
1242 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 1401 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1243 1402
1244 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) { 1403 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1245 control |= L2CAP_SUPER_RCV_NOT_READY; 1404 control |= L2CAP_SUPER_RCV_NOT_READY;
1246 chan->conn_state |= L2CAP_CONN_RNR_SENT; 1405 set_bit(CONN_RNR_SENT, &chan->conn_state);
1247 l2cap_send_sframe(chan, control); 1406 l2cap_send_sframe(chan, control);
1248 return; 1407 return;
1249 } 1408 }
@@ -1451,28 +1610,83 @@ int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t le
1451 return size; 1610 return size;
1452} 1611}
1453 1612
1454static void l2cap_chan_ready(struct sock *sk) 1613int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1455{ 1614{
1456 struct sock *parent = bt_sk(sk)->parent; 1615 struct sk_buff *skb;
1457 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 1616 u16 control;
1617 int err;
1458 1618
1459 BT_DBG("sk %p, parent %p", sk, parent); 1619 /* Connectionless channel */
1620 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1621 skb = l2cap_create_connless_pdu(chan, msg, len);
1622 if (IS_ERR(skb))
1623 return PTR_ERR(skb);
1460 1624
1461 chan->conf_state = 0; 1625 l2cap_do_send(chan, skb);
1462 l2cap_sock_clear_timer(sk); 1626 return len;
1627 }
1463 1628
1464 if (!parent) { 1629 switch (chan->mode) {
1465 /* Outgoing channel. 1630 case L2CAP_MODE_BASIC:
1466 * Wake up socket sleeping on connect. 1631 /* Check outgoing MTU */
1467 */ 1632 if (len > chan->omtu)
1468 sk->sk_state = BT_CONNECTED; 1633 return -EMSGSIZE;
1469 sk->sk_state_change(sk); 1634
1470 } else { 1635 /* Create a basic PDU */
1471 /* Incoming channel. 1636 skb = l2cap_create_basic_pdu(chan, msg, len);
1472 * Wake up socket sleeping on accept. 1637 if (IS_ERR(skb))
1473 */ 1638 return PTR_ERR(skb);
1474 parent->sk_data_ready(parent, 0); 1639
1640 l2cap_do_send(chan, skb);
1641 err = len;
1642 break;
1643
1644 case L2CAP_MODE_ERTM:
1645 case L2CAP_MODE_STREAMING:
1646 /* Entire SDU fits into one PDU */
1647 if (len <= chan->remote_mps) {
1648 control = L2CAP_SDU_UNSEGMENTED;
1649 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1650 0);
1651 if (IS_ERR(skb))
1652 return PTR_ERR(skb);
1653
1654 __skb_queue_tail(&chan->tx_q, skb);
1655
1656 if (chan->tx_send_head == NULL)
1657 chan->tx_send_head = skb;
1658
1659 } else {
1660 /* Segment SDU into multiple PDUs */
1661 err = l2cap_sar_segment_sdu(chan, msg, len);
1662 if (err < 0)
1663 return err;
1664 }
1665
1666 if (chan->mode == L2CAP_MODE_STREAMING) {
1667 l2cap_streaming_send(chan);
1668 err = len;
1669 break;
1670 }
1671
1672 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1673 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1674 err = len;
1675 break;
1676 }
1677
1678 err = l2cap_ertm_send(chan);
1679 if (err >= 0)
1680 err = len;
1681
1682 break;
1683
1684 default:
1685 BT_DBG("bad state %1.1x", chan->mode);
1686 err = -EBADFD;
1475 } 1687 }
1688
1689 return err;
1476} 1690}
1477 1691
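
The long hunk above removes the socket-centric l2cap_chan_ready() helper from this spot and introduces l2cap_chan_send(), so transmission is driven from the core and dispatched on channel type and mode, with success reported as the full SDU length. A toy of that dispatch skeleton (Linux errno names; everything else invented):

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    enum mode { MODE_BASIC, MODE_ERTM, MODE_STREAMING };

    static int chan_send(enum mode mode, size_t len, size_t omtu)
    {
            switch (mode) {
            case MODE_BASIC:
                    if (len > omtu)
                            return -EMSGSIZE;   /* SDU must fit a single basic PDU */
                    return (int)len;            /* one PDU, sent immediately */
            case MODE_ERTM:
            case MODE_STREAMING:
                    return (int)len;            /* queued whole, or segmented first */
            default:
                    return -EBADFD;             /* unknown mode */
            }
    }

    int main(void)
    {
            printf("basic, oversized: %d\n", chan_send(MODE_BASIC, 100, 48));
            printf("ertm:             %d\n", chan_send(MODE_ERTM, 100, 48));
            return 0;
    }
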
1478/* Copy frame to all raw sockets on that connection */ 1692/* Copy frame to all raw sockets on that connection */
@@ -1486,7 +1700,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1486 read_lock(&conn->chan_lock); 1700 read_lock(&conn->chan_lock);
1487 list_for_each_entry(chan, &conn->chan_l, list) { 1701 list_for_each_entry(chan, &conn->chan_l, list) {
1488 struct sock *sk = chan->sk; 1702 struct sock *sk = chan->sk;
1489 if (sk->sk_type != SOCK_RAW) 1703 if (chan->chan_type != L2CAP_CHAN_RAW)
1490 continue; 1704 continue;
1491 1705
1492 /* Don't send frame to the socket it came from */ 1706 /* Don't send frame to the socket it came from */
@@ -1496,7 +1710,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1496 if (!nskb) 1710 if (!nskb)
1497 continue; 1711 continue;
1498 1712
1499 if (sock_queue_rcv_skb(sk, nskb)) 1713 if (chan->ops->recv(chan->data, nskb))
1500 kfree_skb(nskb); 1714 kfree_skb(nskb);
1501 } 1715 }
1502 read_unlock(&conn->chan_lock); 1716 read_unlock(&conn->chan_lock);
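
l2cap_raw_recv() above now delivers through chan->ops->recv() rather than calling sock_queue_rcv_skb() directly, and the same callback table (new_connection, close, recv) appears in the connect, disconnect and reassembly paths below. Reduced to essentials, the indirection looks roughly like this; the types and names are a sketch, not the kernel's definitions:

    #include <stdio.h>

    struct sk_buff;                          /* opaque in this sketch */

    struct chan_ops {
            int  (*recv)(void *data, struct sk_buff *skb);
            void (*close)(void *data);
    };

    struct chan {
            void                  *data;     /* e.g. the owning socket */
            const struct chan_ops *ops;
    };

    static int sock_recv(void *data, struct sk_buff *skb)
    {
            (void)data; (void)skb;
            puts("queued to socket");        /* stands in for sock_queue_rcv_skb() */
            return 0;
    }

    static void sock_close(void *data)
    {
            (void)data;
            puts("socket torn down");        /* stands in for l2cap_sock_kill() */
    }

    static const struct chan_ops sock_ops = { sock_recv, sock_close };

    int main(void)
    {
            struct chan c = { NULL, &sock_ops };

            c.ops->recv(c.data, NULL);       /* core never dereferences struct sock */
            c.ops->close(c.data);
            return 0;
    }
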
@@ -1655,11 +1869,9 @@ static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1655 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan); 1869 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1656 1870
1657 skb_queue_head_init(&chan->srej_q); 1871 skb_queue_head_init(&chan->srej_q);
1658 skb_queue_head_init(&chan->busy_q);
1659 1872
1660 INIT_LIST_HEAD(&chan->srej_l); 1873 INIT_LIST_HEAD(&chan->srej_l);
1661 1874
1662 INIT_WORK(&chan->busy_work, l2cap_busy_work);
1663 1875
1664 sk->sk_backlog_rcv = l2cap_ertm_data_rcv; 1876 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1665} 1877}
@@ -1691,7 +1903,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1691 switch (chan->mode) { 1903 switch (chan->mode) {
1692 case L2CAP_MODE_STREAMING: 1904 case L2CAP_MODE_STREAMING:
1693 case L2CAP_MODE_ERTM: 1905 case L2CAP_MODE_ERTM:
1694 if (chan->conf_state & L2CAP_CONF_STATE2_DEVICE) 1906 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
1695 break; 1907 break;
1696 1908
1697 /* fall through */ 1909 /* fall through */
@@ -1738,7 +1950,7 @@ done:
1738 break; 1950 break;
1739 1951
1740 if (chan->fcs == L2CAP_FCS_NONE || 1952 if (chan->fcs == L2CAP_FCS_NONE ||
1741 chan->conf_state & L2CAP_CONF_NO_FCS_RECV) { 1953 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1742 chan->fcs = L2CAP_FCS_NONE; 1954 chan->fcs = L2CAP_FCS_NONE;
1743 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs); 1955 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1744 } 1956 }
@@ -1761,7 +1973,7 @@ done:
1761 break; 1973 break;
1762 1974
1763 if (chan->fcs == L2CAP_FCS_NONE || 1975 if (chan->fcs == L2CAP_FCS_NONE ||
1764 chan->conf_state & L2CAP_CONF_NO_FCS_RECV) { 1976 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1765 chan->fcs = L2CAP_FCS_NONE; 1977 chan->fcs = L2CAP_FCS_NONE;
1766 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs); 1978 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1767 } 1979 }
@@ -1813,7 +2025,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1813 2025
1814 case L2CAP_CONF_FCS: 2026 case L2CAP_CONF_FCS:
1815 if (val == L2CAP_FCS_NONE) 2027 if (val == L2CAP_FCS_NONE)
1816 chan->conf_state |= L2CAP_CONF_NO_FCS_RECV; 2028 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
1817 2029
1818 break; 2030 break;
1819 2031
@@ -1833,7 +2045,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1833 switch (chan->mode) { 2045 switch (chan->mode) {
1834 case L2CAP_MODE_STREAMING: 2046 case L2CAP_MODE_STREAMING:
1835 case L2CAP_MODE_ERTM: 2047 case L2CAP_MODE_ERTM:
1836 if (!(chan->conf_state & L2CAP_CONF_STATE2_DEVICE)) { 2048 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
1837 chan->mode = l2cap_select_mode(rfc.mode, 2049 chan->mode = l2cap_select_mode(rfc.mode,
1838 chan->conn->feat_mask); 2050 chan->conn->feat_mask);
1839 break; 2051 break;
@@ -1866,14 +2078,14 @@ done:
1866 result = L2CAP_CONF_UNACCEPT; 2078 result = L2CAP_CONF_UNACCEPT;
1867 else { 2079 else {
1868 chan->omtu = mtu; 2080 chan->omtu = mtu;
1869 chan->conf_state |= L2CAP_CONF_MTU_DONE; 2081 set_bit(CONF_MTU_DONE, &chan->conf_state);
1870 } 2082 }
1871 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu); 2083 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
1872 2084
1873 switch (rfc.mode) { 2085 switch (rfc.mode) {
1874 case L2CAP_MODE_BASIC: 2086 case L2CAP_MODE_BASIC:
1875 chan->fcs = L2CAP_FCS_NONE; 2087 chan->fcs = L2CAP_FCS_NONE;
1876 chan->conf_state |= L2CAP_CONF_MODE_DONE; 2088 set_bit(CONF_MODE_DONE, &chan->conf_state);
1877 break; 2089 break;
1878 2090
1879 case L2CAP_MODE_ERTM: 2091 case L2CAP_MODE_ERTM:
@@ -1890,7 +2102,7 @@ done:
1890 rfc.monitor_timeout = 2102 rfc.monitor_timeout =
1891 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO); 2103 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1892 2104
1893 chan->conf_state |= L2CAP_CONF_MODE_DONE; 2105 set_bit(CONF_MODE_DONE, &chan->conf_state);
1894 2106
1895 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 2107 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1896 sizeof(rfc), (unsigned long) &rfc); 2108 sizeof(rfc), (unsigned long) &rfc);
@@ -1903,7 +2115,7 @@ done:
1903 2115
1904 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size); 2116 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1905 2117
1906 chan->conf_state |= L2CAP_CONF_MODE_DONE; 2118 set_bit(CONF_MODE_DONE, &chan->conf_state);
1907 2119
1908 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 2120 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1909 sizeof(rfc), (unsigned long) &rfc); 2121 sizeof(rfc), (unsigned long) &rfc);
@@ -1918,7 +2130,7 @@ done:
1918 } 2130 }
1919 2131
1920 if (result == L2CAP_CONF_SUCCESS) 2132 if (result == L2CAP_CONF_SUCCESS)
1921 chan->conf_state |= L2CAP_CONF_OUTPUT_DONE; 2133 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
1922 } 2134 }
1923 rsp->scid = cpu_to_le16(chan->dcid); 2135 rsp->scid = cpu_to_le16(chan->dcid);
1924 rsp->result = cpu_to_le16(result); 2136 rsp->result = cpu_to_le16(result);
@@ -1960,7 +2172,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
1960 if (olen == sizeof(rfc)) 2172 if (olen == sizeof(rfc))
1961 memcpy(&rfc, (void *)val, olen); 2173 memcpy(&rfc, (void *)val, olen);
1962 2174
1963 if ((chan->conf_state & L2CAP_CONF_STATE2_DEVICE) && 2175 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
1964 rfc.mode != chan->mode) 2176 rfc.mode != chan->mode)
1965 return -ECONNREFUSED; 2177 return -ECONNREFUSED;
1966 2178
@@ -2022,10 +2234,9 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2022 l2cap_send_cmd(conn, chan->ident, 2234 l2cap_send_cmd(conn, chan->ident,
2023 L2CAP_CONN_RSP, sizeof(rsp), &rsp); 2235 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2024 2236
2025 if (chan->conf_state & L2CAP_CONF_REQ_SENT) 2237 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2026 return; 2238 return;
2027 2239
2028 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2029 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2240 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2030 l2cap_build_conf_req(chan, buf), buf); 2241 l2cap_build_conf_req(chan, buf), buf);
2031 chan->num_conf_req++; 2242 chan->num_conf_req++;
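
__l2cap_connect_rsp_defer() above folds "check flag, then set flag" into one test_and_set_bit(), making CONF_REQ_SENT a genuine send-at-most-once guard; l2cap_connect_rsp() and l2cap_config_req() below adopt the same idiom. Userspace stand-in:

    #include <stdatomic.h>
    #include <stdio.h>

    static int test_and_set_bit(int nr, atomic_ulong *addr)
    {
            unsigned long mask = 1UL << nr;
            return (atomic_fetch_or(addr, mask) & mask) != 0;
    }

    int main(void)
    {
            atomic_ulong conf_state = 0;

            for (int i = 0; i < 3; i++)
                    if (!test_and_set_bit(0, &conf_state))  /* CONF_REQ_SENT */
                            puts("config request sent");    /* prints exactly once */
            return 0;
    }
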
@@ -2125,17 +2336,11 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2125 goto response; 2336 goto response;
2126 } 2337 }
2127 2338
2128 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC); 2339 chan = pchan->ops->new_connection(pchan->data);
2129 if (!sk) 2340 if (!chan)
2130 goto response;
2131
2132 chan = l2cap_chan_create(sk);
2133 if (!chan) {
2134 l2cap_sock_kill(sk);
2135 goto response; 2341 goto response;
2136 }
2137 2342
2138 l2cap_pi(sk)->chan = chan; 2343 sk = chan->sk;
2139 2344
2140 write_lock_bh(&conn->chan_lock); 2345 write_lock_bh(&conn->chan_lock);
2141 2346
@@ -2143,13 +2348,12 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2143 if (__l2cap_get_chan_by_dcid(conn, scid)) { 2348 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2144 write_unlock_bh(&conn->chan_lock); 2349 write_unlock_bh(&conn->chan_lock);
2145 sock_set_flag(sk, SOCK_ZAPPED); 2350 sock_set_flag(sk, SOCK_ZAPPED);
2146 l2cap_sock_kill(sk); 2351 chan->ops->close(chan->data);
2147 goto response; 2352 goto response;
2148 } 2353 }
2149 2354
2150 hci_conn_hold(conn->hcon); 2355 hci_conn_hold(conn->hcon);
2151 2356
2152 l2cap_sock_init(sk, parent);
2153 bacpy(&bt_sk(sk)->src, conn->src); 2357 bacpy(&bt_sk(sk)->src, conn->src);
2154 bacpy(&bt_sk(sk)->dst, conn->dst); 2358 bacpy(&bt_sk(sk)->dst, conn->dst);
2155 chan->psm = psm; 2359 chan->psm = psm;
@@ -2161,29 +2365,29 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2161 2365
2162 dcid = chan->scid; 2366 dcid = chan->scid;
2163 2367
2164 l2cap_sock_set_timer(sk, sk->sk_sndtimeo); 2368 __set_chan_timer(chan, sk->sk_sndtimeo);
2165 2369
2166 chan->ident = cmd->ident; 2370 chan->ident = cmd->ident;
2167 2371
2168 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) { 2372 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2169 if (l2cap_check_security(chan)) { 2373 if (l2cap_check_security(chan)) {
2170 if (bt_sk(sk)->defer_setup) { 2374 if (bt_sk(sk)->defer_setup) {
2171 sk->sk_state = BT_CONNECT2; 2375 l2cap_state_change(chan, BT_CONNECT2);
2172 result = L2CAP_CR_PEND; 2376 result = L2CAP_CR_PEND;
2173 status = L2CAP_CS_AUTHOR_PEND; 2377 status = L2CAP_CS_AUTHOR_PEND;
2174 parent->sk_data_ready(parent, 0); 2378 parent->sk_data_ready(parent, 0);
2175 } else { 2379 } else {
2176 sk->sk_state = BT_CONFIG; 2380 l2cap_state_change(chan, BT_CONFIG);
2177 result = L2CAP_CR_SUCCESS; 2381 result = L2CAP_CR_SUCCESS;
2178 status = L2CAP_CS_NO_INFO; 2382 status = L2CAP_CS_NO_INFO;
2179 } 2383 }
2180 } else { 2384 } else {
2181 sk->sk_state = BT_CONNECT2; 2385 l2cap_state_change(chan, BT_CONNECT2);
2182 result = L2CAP_CR_PEND; 2386 result = L2CAP_CR_PEND;
2183 status = L2CAP_CS_AUTHEN_PEND; 2387 status = L2CAP_CS_AUTHEN_PEND;
2184 } 2388 }
2185 } else { 2389 } else {
2186 sk->sk_state = BT_CONNECT2; 2390 l2cap_state_change(chan, BT_CONNECT2);
2187 result = L2CAP_CR_PEND; 2391 result = L2CAP_CR_PEND;
2188 status = L2CAP_CS_NO_INFO; 2392 status = L2CAP_CS_NO_INFO;
2189 } 2393 }
@@ -2214,10 +2418,10 @@ sendresp:
2214 L2CAP_INFO_REQ, sizeof(info), &info); 2418 L2CAP_INFO_REQ, sizeof(info), &info);
2215 } 2419 }
2216 2420
2217 if (chan && !(chan->conf_state & L2CAP_CONF_REQ_SENT) && 2421 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2218 result == L2CAP_CR_SUCCESS) { 2422 result == L2CAP_CR_SUCCESS) {
2219 u8 buf[128]; 2423 u8 buf[128];
2220 chan->conf_state |= L2CAP_CONF_REQ_SENT; 2424 set_bit(CONF_REQ_SENT, &chan->conf_state);
2221 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2425 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2222 l2cap_build_conf_req(chan, buf), buf); 2426 l2cap_build_conf_req(chan, buf), buf);
2223 chan->num_conf_req++; 2427 chan->num_conf_req++;
@@ -2255,31 +2459,29 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
2255 2459
2256 switch (result) { 2460 switch (result) {
2257 case L2CAP_CR_SUCCESS: 2461 case L2CAP_CR_SUCCESS:
2258 sk->sk_state = BT_CONFIG; 2462 l2cap_state_change(chan, BT_CONFIG);
2259 chan->ident = 0; 2463 chan->ident = 0;
2260 chan->dcid = dcid; 2464 chan->dcid = dcid;
2261 chan->conf_state &= ~L2CAP_CONF_CONNECT_PEND; 2465 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2262 2466
2263 if (chan->conf_state & L2CAP_CONF_REQ_SENT) 2467 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2264 break; 2468 break;
2265 2469
2266 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2267
2268 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2470 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2269 l2cap_build_conf_req(chan, req), req); 2471 l2cap_build_conf_req(chan, req), req);
2270 chan->num_conf_req++; 2472 chan->num_conf_req++;
2271 break; 2473 break;
2272 2474
2273 case L2CAP_CR_PEND: 2475 case L2CAP_CR_PEND:
2274 chan->conf_state |= L2CAP_CONF_CONNECT_PEND; 2476 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2275 break; 2477 break;
2276 2478
2277 default: 2479 default:
2278 /* don't delete l2cap channel if sk is owned by user */ 2480 /* don't delete l2cap channel if sk is owned by user */
2279 if (sock_owned_by_user(sk)) { 2481 if (sock_owned_by_user(sk)) {
2280 sk->sk_state = BT_DISCONN; 2482 l2cap_state_change(chan, BT_DISCONN);
2281 l2cap_sock_clear_timer(sk); 2483 __clear_chan_timer(chan);
2282 l2cap_sock_set_timer(sk, HZ / 5); 2484 __set_chan_timer(chan, HZ / 5);
2283 break; 2485 break;
2284 } 2486 }
2285 2487
@@ -2293,14 +2495,12 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
2293 2495
2294static inline void set_default_fcs(struct l2cap_chan *chan) 2496static inline void set_default_fcs(struct l2cap_chan *chan)
2295{ 2497{
2296 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
2297
2298 /* FCS is enabled only in ERTM or streaming mode, if one or both 2498 /* FCS is enabled only in ERTM or streaming mode, if one or both
2299 * sides request it. 2499 * sides request it.
2300 */ 2500 */
2301 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING) 2501 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2302 chan->fcs = L2CAP_FCS_NONE; 2502 chan->fcs = L2CAP_FCS_NONE;
2303 else if (!(pi->chan->conf_state & L2CAP_CONF_NO_FCS_RECV)) 2503 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2304 chan->fcs = L2CAP_FCS_CRC16; 2504 chan->fcs = L2CAP_FCS_CRC16;
2305} 2505}
2306 2506
@@ -2367,13 +2567,13 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2367 /* Reset config buffer. */ 2567 /* Reset config buffer. */
2368 chan->conf_len = 0; 2568 chan->conf_len = 0;
2369 2569
2370 if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE)) 2570 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2371 goto unlock; 2571 goto unlock;
2372 2572
2373 if (chan->conf_state & L2CAP_CONF_INPUT_DONE) { 2573 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2374 set_default_fcs(chan); 2574 set_default_fcs(chan);
2375 2575
2376 sk->sk_state = BT_CONNECTED; 2576 l2cap_state_change(chan, BT_CONNECTED);
2377 2577
2378 chan->next_tx_seq = 0; 2578 chan->next_tx_seq = 0;
2379 chan->expected_tx_seq = 0; 2579 chan->expected_tx_seq = 0;
@@ -2385,9 +2585,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2385 goto unlock; 2585 goto unlock;
2386 } 2586 }
2387 2587
2388 if (!(chan->conf_state & L2CAP_CONF_REQ_SENT)) { 2588 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2389 u8 buf[64]; 2589 u8 buf[64];
2390 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2391 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2590 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2392 l2cap_build_conf_req(chan, buf), buf); 2591 l2cap_build_conf_req(chan, buf), buf);
2393 chan->num_conf_req++; 2592 chan->num_conf_req++;
@@ -2452,7 +2651,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2452 2651
2453 default: 2652 default:
2454 sk->sk_err = ECONNRESET; 2653 sk->sk_err = ECONNRESET;
2455 l2cap_sock_set_timer(sk, HZ * 5); 2654 __set_chan_timer(chan, HZ * 5);
2456 l2cap_send_disconn_req(conn, chan, ECONNRESET); 2655 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2457 goto done; 2656 goto done;
2458 } 2657 }
@@ -2460,12 +2659,12 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2460 if (flags & 0x01) 2659 if (flags & 0x01)
2461 goto done; 2660 goto done;
2462 2661
2463 chan->conf_state |= L2CAP_CONF_INPUT_DONE; 2662 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2464 2663
2465 if (chan->conf_state & L2CAP_CONF_OUTPUT_DONE) { 2664 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2466 set_default_fcs(chan); 2665 set_default_fcs(chan);
2467 2666
2468 sk->sk_state = BT_CONNECTED; 2667 l2cap_state_change(chan, BT_CONNECTED);
2469 chan->next_tx_seq = 0; 2668 chan->next_tx_seq = 0;
2470 chan->expected_tx_seq = 0; 2669 chan->expected_tx_seq = 0;
2471 skb_queue_head_init(&chan->tx_q); 2670 skb_queue_head_init(&chan->tx_q);
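
The config hunks above preserve the two-sided handshake: a channel reaches BT_CONNECTED only after both CONF_OUTPUT_DONE (we answered the peer's request) and CONF_INPUT_DONE (the peer accepted ours) are set, in whichever order they arrive. A compilable toy of that gate:

    #include <stdio.h>

    #define CONF_OUTPUT_DONE (1 << 0)
    #define CONF_INPUT_DONE  (1 << 1)

    static void conf_done(unsigned *state, unsigned bit)
    {
            *state |= bit;
            if (*state == (CONF_INPUT_DONE | CONF_OUTPUT_DONE))
                    puts("-> BT_CONNECTED");
    }

    int main(void)
    {
            unsigned conf_state = 0;

            conf_done(&conf_state, CONF_INPUT_DONE);   /* response arrived first */
            conf_done(&conf_state, CONF_OUTPUT_DONE);  /* now connected */
            return 0;
    }
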
@@ -2507,9 +2706,9 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
2507 2706
2508 /* don't delete l2cap channel if sk is owned by user */ 2707 /* don't delete l2cap channel if sk is owned by user */
2509 if (sock_owned_by_user(sk)) { 2708 if (sock_owned_by_user(sk)) {
2510 sk->sk_state = BT_DISCONN; 2709 l2cap_state_change(chan, BT_DISCONN);
2511 l2cap_sock_clear_timer(sk); 2710 __clear_chan_timer(chan);
2512 l2cap_sock_set_timer(sk, HZ / 5); 2711 __set_chan_timer(chan, HZ / 5);
2513 bh_unlock_sock(sk); 2712 bh_unlock_sock(sk);
2514 return 0; 2713 return 0;
2515 } 2714 }
@@ -2517,7 +2716,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
2517 l2cap_chan_del(chan, ECONNRESET); 2716 l2cap_chan_del(chan, ECONNRESET);
2518 bh_unlock_sock(sk); 2717 bh_unlock_sock(sk);
2519 2718
2520 l2cap_sock_kill(sk); 2719 chan->ops->close(chan->data);
2521 return 0; 2720 return 0;
2522} 2721}
2523 2722
@@ -2541,9 +2740,9 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
2541 2740
2542 /* don't delete l2cap channel if sk is owned by user */ 2741 /* don't delete l2cap channel if sk is owned by user */
2543 if (sock_owned_by_user(sk)) { 2742 if (sock_owned_by_user(sk)) {
2544 sk->sk_state = BT_DISCONN; 2743 l2cap_state_change(chan, BT_DISCONN);
2545 l2cap_sock_clear_timer(sk); 2744 __clear_chan_timer(chan);
2546 l2cap_sock_set_timer(sk, HZ / 5); 2745 __set_chan_timer(chan, HZ / 5);
2547 bh_unlock_sock(sk); 2746 bh_unlock_sock(sk);
2548 return 0; 2747 return 0;
2549 } 2748 }
@@ -2551,7 +2750,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
2551 l2cap_chan_del(chan, 0); 2750 l2cap_chan_del(chan, 0);
2552 bh_unlock_sock(sk); 2751 bh_unlock_sock(sk);
2553 2752
2554 l2cap_sock_kill(sk); 2753 chan->ops->close(chan->data);
2555 return 0; 2754 return 0;
2556} 2755}
2557 2756
@@ -2859,18 +3058,18 @@ static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
2859 3058
2860 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 3059 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2861 3060
2862 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) { 3061 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
2863 control |= L2CAP_SUPER_RCV_NOT_READY; 3062 control |= L2CAP_SUPER_RCV_NOT_READY;
2864 l2cap_send_sframe(chan, control); 3063 l2cap_send_sframe(chan, control);
2865 chan->conn_state |= L2CAP_CONN_RNR_SENT; 3064 set_bit(CONN_RNR_SENT, &chan->conn_state);
2866 } 3065 }
2867 3066
2868 if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY) 3067 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2869 l2cap_retransmit_frames(chan); 3068 l2cap_retransmit_frames(chan);
2870 3069
2871 l2cap_ertm_send(chan); 3070 l2cap_ertm_send(chan);
2872 3071
2873 if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) && 3072 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2874 chan->frames_sent == 0) { 3073 chan->frames_sent == 0) {
2875 control |= L2CAP_SUPER_RCV_READY; 3074 control |= L2CAP_SUPER_RCV_READY;
2876 l2cap_send_sframe(chan, control); 3075 l2cap_send_sframe(chan, control);
@@ -2926,17 +3125,13 @@ static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *sk
2926 3125
2927 switch (control & L2CAP_CTRL_SAR) { 3126 switch (control & L2CAP_CTRL_SAR) {
2928 case L2CAP_SDU_UNSEGMENTED: 3127 case L2CAP_SDU_UNSEGMENTED:
2929 if (chan->conn_state & L2CAP_CONN_SAR_SDU) 3128 if (test_bit(CONN_SAR_SDU, &chan->conn_state))
2930 goto drop; 3129 goto drop;
2931 3130
2932 err = sock_queue_rcv_skb(chan->sk, skb); 3131 return chan->ops->recv(chan->data, skb);
2933 if (!err)
2934 return err;
2935
2936 break;
2937 3132
2938 case L2CAP_SDU_START: 3133 case L2CAP_SDU_START:
2939 if (chan->conn_state & L2CAP_CONN_SAR_SDU) 3134 if (test_bit(CONN_SAR_SDU, &chan->conn_state))
2940 goto drop; 3135 goto drop;
2941 3136
2942 chan->sdu_len = get_unaligned_le16(skb->data); 3137 chan->sdu_len = get_unaligned_le16(skb->data);
@@ -2955,12 +3150,12 @@ static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *sk
2955 3150
2956 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len); 3151 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
2957 3152
2958 chan->conn_state |= L2CAP_CONN_SAR_SDU; 3153 set_bit(CONN_SAR_SDU, &chan->conn_state);
2959 chan->partial_sdu_len = skb->len; 3154 chan->partial_sdu_len = skb->len;
2960 break; 3155 break;
2961 3156
2962 case L2CAP_SDU_CONTINUE: 3157 case L2CAP_SDU_CONTINUE:
2963 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU)) 3158 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
2964 goto disconnect; 3159 goto disconnect;
2965 3160
2966 if (!chan->sdu) 3161 if (!chan->sdu)
@@ -2975,39 +3170,34 @@ static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *sk
2975 break; 3170 break;
2976 3171
2977 case L2CAP_SDU_END: 3172 case L2CAP_SDU_END:
2978 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU)) 3173 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
2979 goto disconnect; 3174 goto disconnect;
2980 3175
2981 if (!chan->sdu) 3176 if (!chan->sdu)
2982 goto disconnect; 3177 goto disconnect;
2983 3178
2984 if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) { 3179 chan->partial_sdu_len += skb->len;
2985 chan->partial_sdu_len += skb->len;
2986 3180
2987 if (chan->partial_sdu_len > chan->imtu) 3181 if (chan->partial_sdu_len > chan->imtu)
2988 goto drop; 3182 goto drop;
2989 3183
2990 if (chan->partial_sdu_len != chan->sdu_len) 3184 if (chan->partial_sdu_len != chan->sdu_len)
2991 goto drop; 3185 goto drop;
2992 3186
2993 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len); 3187 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
2994 }
2995 3188
2996 _skb = skb_clone(chan->sdu, GFP_ATOMIC); 3189 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
2997 if (!_skb) { 3190 if (!_skb) {
2998 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
2999 return -ENOMEM; 3191 return -ENOMEM;
3000 } 3192 }
3001 3193
3002 err = sock_queue_rcv_skb(chan->sk, _skb); 3194 err = chan->ops->recv(chan->data, _skb);
3003 if (err < 0) { 3195 if (err < 0) {
3004 kfree_skb(_skb); 3196 kfree_skb(_skb);
3005 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
3006 return err; 3197 return err;
3007 } 3198 }
3008 3199
3009 chan->conn_state &= ~L2CAP_CONN_SAR_RETRY; 3200 clear_bit(CONN_SAR_SDU, &chan->conn_state);
3010 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3011 3201
3012 kfree_skb(chan->sdu); 3202 kfree_skb(chan->sdu);
3013 break; 3203 break;
@@ -3026,128 +3216,55 @@ disconnect:
3026 return 0; 3216 return 0;
3027} 3217}
3028 3218
3029static int l2cap_try_push_rx_skb(struct l2cap_chan *chan) 3219static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3030{ 3220{
3031 struct sk_buff *skb;
3032 u16 control; 3221 u16 control;
3033 int err;
3034 3222
3035 while ((skb = skb_dequeue(&chan->busy_q))) { 3223 BT_DBG("chan %p, Enter local busy", chan);
3036 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3037 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3038 if (err < 0) {
3039 skb_queue_head(&chan->busy_q, skb);
3040 return -EBUSY;
3041 }
3042
3043 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3044 }
3045 3224
3046 if (!(chan->conn_state & L2CAP_CONN_RNR_SENT)) 3225 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3047 goto done;
3048 3226
3049 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 3227 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3050 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL; 3228 control |= L2CAP_SUPER_RCV_NOT_READY;
3051 l2cap_send_sframe(chan, control); 3229 l2cap_send_sframe(chan, control);
3052 chan->retry_count = 1;
3053
3054 del_timer(&chan->retrans_timer);
3055 __mod_monitor_timer();
3056 3230
3057 chan->conn_state |= L2CAP_CONN_WAIT_F; 3231 set_bit(CONN_RNR_SENT, &chan->conn_state);
3058 3232
3059done: 3233 __clear_ack_timer(chan);
3060 chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3061 chan->conn_state &= ~L2CAP_CONN_RNR_SENT;
3062
3063 BT_DBG("chan %p, Exit local busy", chan);
3064
3065 return 0;
3066} 3234}
3067 3235
3068static void l2cap_busy_work(struct work_struct *work) 3236static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3069{ 3237{
3070 DECLARE_WAITQUEUE(wait, current); 3238 u16 control;
3071 struct l2cap_chan *chan =
3072 container_of(work, struct l2cap_chan, busy_work);
3073 struct sock *sk = chan->sk;
3074 int n_tries = 0, timeo = HZ/5, err;
3075 struct sk_buff *skb;
3076
3077 lock_sock(sk);
3078
3079 add_wait_queue(sk_sleep(sk), &wait);
3080 while ((skb = skb_peek(&chan->busy_q))) {
3081 set_current_state(TASK_INTERRUPTIBLE);
3082
3083 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3084 err = -EBUSY;
3085 l2cap_send_disconn_req(chan->conn, chan, EBUSY);
3086 break;
3087 }
3088
3089 if (!timeo)
3090 timeo = HZ/5;
3091 3239
3092 if (signal_pending(current)) { 3240 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3093 err = sock_intr_errno(timeo); 3241 goto done;
3094 break;
3095 }
3096 3242
3097 release_sock(sk); 3243 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3098 timeo = schedule_timeout(timeo); 3244 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3099 lock_sock(sk); 3245 l2cap_send_sframe(chan, control);
3246 chan->retry_count = 1;
3100 3247
3101 err = sock_error(sk); 3248 __clear_retrans_timer(chan);
3102 if (err) 3249 __set_monitor_timer(chan);
3103 break;
3104 3250
3105 if (l2cap_try_push_rx_skb(chan) == 0) 3251 set_bit(CONN_WAIT_F, &chan->conn_state);
3106 break;
3107 }
3108 3252
3109 set_current_state(TASK_RUNNING); 3253done:
3110 remove_wait_queue(sk_sleep(sk), &wait); 3254 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3255 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3111 3256
3112 release_sock(sk); 3257 BT_DBG("chan %p, Exit local busy", chan);
3113} 3258}
3114 3259
3115static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control) 3260void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3116{ 3261{
3117 int sctrl, err; 3262 if (chan->mode == L2CAP_MODE_ERTM) {
3118 3263 if (busy)
3119 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) { 3264 l2cap_ertm_enter_local_busy(chan);
3120 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT; 3265 else
3121 __skb_queue_tail(&chan->busy_q, skb); 3266 l2cap_ertm_exit_local_busy(chan);
3122 return l2cap_try_push_rx_skb(chan);
3123
3124
3125 }
3126
3127 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3128 if (err >= 0) {
3129 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3130 return err;
3131 } 3267 }
3132
3133 /* Busy Condition */
3134 BT_DBG("chan %p, Enter local busy", chan);
3135
3136 chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3137 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3138 __skb_queue_tail(&chan->busy_q, skb);
3139
3140 sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3141 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3142 l2cap_send_sframe(chan, sctrl);
3143
3144 chan->conn_state |= L2CAP_CONN_RNR_SENT;
3145
3146 del_timer(&chan->ack_timer);
3147
3148 queue_work(_busy_wq, &chan->busy_work);
3149
3150 return err;
3151} 3268}
3152 3269
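
The large hunk above is the centrepiece of the receive-side rework: the busy_q queue, l2cap_busy_work() worker and l2cap_push_rx_skb() retry loop are deleted, and local busy becomes two small synchronous helpers selected by l2cap_chan_busy(). Entering busy sends RNR and stops acking; leaving it sends RR with the poll bit and re-enters the WAIT_F exchange. Boiled down to a runnable toy (frame constants invented):

    #include <stdio.h>

    #define RNR  0x01
    #define RR   0x02
    #define POLL 0x10

    static void send_sframe(unsigned f) { printf("S-frame 0x%02x\n", f); }

    static void chan_busy(int *local_busy, int busy)
    {
            if (busy && !*local_busy) {
                    *local_busy = 1;
                    send_sframe(RNR);        /* tell the peer to stop sending */
            } else if (!busy && *local_busy) {
                    *local_busy = 0;
                    send_sframe(RR | POLL);  /* resume, poll for the F-bit */
            }
    }

    int main(void)
    {
            int local_busy = 0;

            chan_busy(&local_busy, 1);       /* e.g. receive queue is full */
            chan_busy(&local_busy, 0);       /* consumer drained the queue */
            return 0;
    }
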
3153static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control) 3270static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
@@ -3162,19 +3279,19 @@ static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buf
3162 3279
3163 switch (control & L2CAP_CTRL_SAR) { 3280 switch (control & L2CAP_CTRL_SAR) {
3164 case L2CAP_SDU_UNSEGMENTED: 3281 case L2CAP_SDU_UNSEGMENTED:
3165 if (chan->conn_state & L2CAP_CONN_SAR_SDU) { 3282 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3166 kfree_skb(chan->sdu); 3283 kfree_skb(chan->sdu);
3167 break; 3284 break;
3168 } 3285 }
3169 3286
3170 err = sock_queue_rcv_skb(chan->sk, skb); 3287 err = chan->ops->recv(chan->data, skb);
3171 if (!err) 3288 if (!err)
3172 return 0; 3289 return 0;
3173 3290
3174 break; 3291 break;
3175 3292
3176 case L2CAP_SDU_START: 3293 case L2CAP_SDU_START:
3177 if (chan->conn_state & L2CAP_CONN_SAR_SDU) { 3294 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3178 kfree_skb(chan->sdu); 3295 kfree_skb(chan->sdu);
3179 break; 3296 break;
3180 } 3297 }
@@ -3195,13 +3312,13 @@ static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buf
3195 3312
3196 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len); 3313 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3197 3314
3198 chan->conn_state |= L2CAP_CONN_SAR_SDU; 3315 set_bit(CONN_SAR_SDU, &chan->conn_state);
3199 chan->partial_sdu_len = skb->len; 3316 chan->partial_sdu_len = skb->len;
3200 err = 0; 3317 err = 0;
3201 break; 3318 break;
3202 3319
3203 case L2CAP_SDU_CONTINUE: 3320 case L2CAP_SDU_CONTINUE:
3204 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU)) 3321 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3205 break; 3322 break;
3206 3323
3207 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len); 3324 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
@@ -3215,12 +3332,12 @@ static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buf
3215 break; 3332 break;
3216 3333
3217 case L2CAP_SDU_END: 3334 case L2CAP_SDU_END:
3218 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU)) 3335 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3219 break; 3336 break;
3220 3337
3221 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len); 3338 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3222 3339
3223 chan->conn_state &= ~L2CAP_CONN_SAR_SDU; 3340 clear_bit(CONN_SAR_SDU, &chan->conn_state);
3224 chan->partial_sdu_len += skb->len; 3341 chan->partial_sdu_len += skb->len;
3225 3342
3226 if (chan->partial_sdu_len > chan->imtu) 3343 if (chan->partial_sdu_len > chan->imtu)
@@ -3228,7 +3345,7 @@ static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buf
3228 3345
3229 if (chan->partial_sdu_len == chan->sdu_len) { 3346 if (chan->partial_sdu_len == chan->sdu_len) {
3230 _skb = skb_clone(chan->sdu, GFP_ATOMIC); 3347 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3231 err = sock_queue_rcv_skb(chan->sk, _skb); 3348 err = chan->ops->recv(chan->data, _skb);
3232 if (err < 0) 3349 if (err < 0)
3233 kfree_skb(_skb); 3350 kfree_skb(_skb);
3234 } 3351 }
@@ -3248,13 +3365,22 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3248 struct sk_buff *skb; 3365 struct sk_buff *skb;
3249 u16 control; 3366 u16 control;
3250 3367
3251 while ((skb = skb_peek(&chan->srej_q))) { 3368 while ((skb = skb_peek(&chan->srej_q)) &&
3369 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3370 int err;
3371
3252 if (bt_cb(skb)->tx_seq != tx_seq) 3372 if (bt_cb(skb)->tx_seq != tx_seq)
3253 break; 3373 break;
3254 3374
3255 skb = skb_dequeue(&chan->srej_q); 3375 skb = skb_dequeue(&chan->srej_q);
3256 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT; 3376 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3257 l2cap_ertm_reassembly_sdu(chan, skb, control); 3377 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3378
3379 if (err < 0) {
3380 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3381 break;
3382 }
3383
3258 chan->buffer_seq_srej = 3384 chan->buffer_seq_srej =
3259 (chan->buffer_seq_srej + 1) % 64; 3385 (chan->buffer_seq_srej + 1) % 64;
3260 tx_seq = (tx_seq + 1) % 64; 3386 tx_seq = (tx_seq + 1) % 64;
@@ -3311,19 +3437,16 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
3311 tx_seq, rx_control); 3437 tx_seq, rx_control);
3312 3438
3313 if (L2CAP_CTRL_FINAL & rx_control && 3439 if (L2CAP_CTRL_FINAL & rx_control &&
3314 chan->conn_state & L2CAP_CONN_WAIT_F) { 3440 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3315 del_timer(&chan->monitor_timer); 3441 __clear_monitor_timer(chan);
3316 if (chan->unacked_frames > 0) 3442 if (chan->unacked_frames > 0)
3317 __mod_retrans_timer(); 3443 __set_retrans_timer(chan);
3318 chan->conn_state &= ~L2CAP_CONN_WAIT_F; 3444 clear_bit(CONN_WAIT_F, &chan->conn_state);
3319 } 3445 }
3320 3446
3321 chan->expected_ack_seq = req_seq; 3447 chan->expected_ack_seq = req_seq;
3322 l2cap_drop_acked_frames(chan); 3448 l2cap_drop_acked_frames(chan);
3323 3449
3324 if (tx_seq == chan->expected_tx_seq)
3325 goto expected;
3326
3327 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64; 3450 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3328 if (tx_seq_offset < 0) 3451 if (tx_seq_offset < 0)
3329 tx_seq_offset += 64; 3452 tx_seq_offset += 64;
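
The reshuffled checks above still rest on the ERTM window arithmetic: tx_seq and buffer_seq are 6-bit counters, so their distance must be normalised into 0..63, and C's % operator keeps the sign of the dividend. Compilable illustration:

    #include <stdio.h>

    static int seq_offset(int tx_seq, int buffer_seq)
    {
            int off = (tx_seq - buffer_seq) % 64;

            if (off < 0)        /* C's % follows the dividend's sign */
                    off += 64;
            return off;
    }

    int main(void)
    {
            printf("%d\n", seq_offset(2, 60));   /* wraps around: prints 6 */
            printf("%d\n", seq_offset(60, 2));   /* prints 58 */
            return 0;
    }
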
@@ -3334,10 +3457,13 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
3334 goto drop; 3457 goto drop;
3335 } 3458 }
3336 3459
3337 if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY) 3460 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3338 goto drop; 3461 goto drop;
3339 3462
3340 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) { 3463 if (tx_seq == chan->expected_tx_seq)
3464 goto expected;
3465
3466 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3341 struct srej_list *first; 3467 struct srej_list *first;
3342 3468
3343 first = list_first_entry(&chan->srej_l, 3469 first = list_first_entry(&chan->srej_l,
@@ -3351,7 +3477,7 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
3351 3477
3352 if (list_empty(&chan->srej_l)) { 3478 if (list_empty(&chan->srej_l)) {
3353 chan->buffer_seq = chan->buffer_seq_srej; 3479 chan->buffer_seq = chan->buffer_seq_srej;
3354 chan->conn_state &= ~L2CAP_CONN_SREJ_SENT; 3480 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3355 l2cap_send_ack(chan); 3481 l2cap_send_ack(chan);
3356 BT_DBG("chan %p, Exit SREJ_SENT", chan); 3482 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3357 } 3483 }
@@ -3380,7 +3506,7 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
3380 if (tx_seq_offset < expected_tx_seq_offset) 3506 if (tx_seq_offset < expected_tx_seq_offset)
3381 goto drop; 3507 goto drop;
3382 3508
3383 chan->conn_state |= L2CAP_CONN_SREJ_SENT; 3509 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3384 3510
3385 BT_DBG("chan %p, Enter SREJ", chan); 3511 BT_DBG("chan %p, Enter SREJ", chan);
3386 3512
@@ -3388,39 +3514,39 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
3388 chan->buffer_seq_srej = chan->buffer_seq; 3514 chan->buffer_seq_srej = chan->buffer_seq;
3389 3515
3390 __skb_queue_head_init(&chan->srej_q); 3516 __skb_queue_head_init(&chan->srej_q);
3391 __skb_queue_head_init(&chan->busy_q);
3392 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar); 3517 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3393 3518
3394 chan->conn_state |= L2CAP_CONN_SEND_PBIT; 3519 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3395 3520
3396 l2cap_send_srejframe(chan, tx_seq); 3521 l2cap_send_srejframe(chan, tx_seq);
3397 3522
3398 del_timer(&chan->ack_timer); 3523 __clear_ack_timer(chan);
3399 } 3524 }
3400 return 0; 3525 return 0;
3401 3526
3402expected: 3527expected:
3403 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64; 3528 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3404 3529
3405 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) { 3530 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3406 bt_cb(skb)->tx_seq = tx_seq; 3531 bt_cb(skb)->tx_seq = tx_seq;
3407 bt_cb(skb)->sar = sar; 3532 bt_cb(skb)->sar = sar;
3408 __skb_queue_tail(&chan->srej_q, skb); 3533 __skb_queue_tail(&chan->srej_q, skb);
3409 return 0; 3534 return 0;
3410 } 3535 }
3411 3536
3412 err = l2cap_push_rx_skb(chan, skb, rx_control); 3537 err = l2cap_ertm_reassembly_sdu(chan, skb, rx_control);
3413 if (err < 0) 3538 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3414 return 0; 3539 if (err < 0) {
3540 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3541 return err;
3542 }
3415 3543
3416 if (rx_control & L2CAP_CTRL_FINAL) { 3544 if (rx_control & L2CAP_CTRL_FINAL) {
3417 if (chan->conn_state & L2CAP_CONN_REJ_ACT) 3545 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3418 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3419 else
3420 l2cap_retransmit_frames(chan); 3546 l2cap_retransmit_frames(chan);
3421 } 3547 }
3422 3548
3423 __mod_ack_timer(); 3549 __set_ack_timer(chan);
3424 3550
3425 chan->num_acked = (chan->num_acked + 1) % num_to_ack; 3551 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3426 if (chan->num_acked == num_to_ack - 1) 3552 if (chan->num_acked == num_to_ack - 1)
@@ -3442,33 +3568,31 @@ static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_co
3442 l2cap_drop_acked_frames(chan); 3568 l2cap_drop_acked_frames(chan);
3443 3569
3444 if (rx_control & L2CAP_CTRL_POLL) { 3570 if (rx_control & L2CAP_CTRL_POLL) {
3445 chan->conn_state |= L2CAP_CONN_SEND_FBIT; 3571 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3446 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) { 3572 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3447 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) && 3573 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3448 (chan->unacked_frames > 0)) 3574 (chan->unacked_frames > 0))
3449 __mod_retrans_timer(); 3575 __set_retrans_timer(chan);
3450 3576
3451 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3577 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3452 l2cap_send_srejtail(chan); 3578 l2cap_send_srejtail(chan);
3453 } else { 3579 } else {
3454 l2cap_send_i_or_rr_or_rnr(chan); 3580 l2cap_send_i_or_rr_or_rnr(chan);
3455 } 3581 }
3456 3582
3457 } else if (rx_control & L2CAP_CTRL_FINAL) { 3583 } else if (rx_control & L2CAP_CTRL_FINAL) {
3458 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3584 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3459 3585
3460 if (chan->conn_state & L2CAP_CONN_REJ_ACT) 3586 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3461 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3462 else
3463 l2cap_retransmit_frames(chan); 3587 l2cap_retransmit_frames(chan);
3464 3588
3465 } else { 3589 } else {
3466 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) && 3590 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3467 (chan->unacked_frames > 0)) 3591 (chan->unacked_frames > 0))
3468 __mod_retrans_timer(); 3592 __set_retrans_timer(chan);
3469 3593
3470 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3594 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3471 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) 3595 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3472 l2cap_send_ack(chan); 3596 l2cap_send_ack(chan);
3473 else 3597 else
3474 l2cap_ertm_send(chan); 3598 l2cap_ertm_send(chan);
@@ -3481,21 +3605,19 @@ static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_c
3481 3605
3482 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control); 3606 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3483 3607
3484 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3608 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3485 3609
3486 chan->expected_ack_seq = tx_seq; 3610 chan->expected_ack_seq = tx_seq;
3487 l2cap_drop_acked_frames(chan); 3611 l2cap_drop_acked_frames(chan);
3488 3612
3489 if (rx_control & L2CAP_CTRL_FINAL) { 3613 if (rx_control & L2CAP_CTRL_FINAL) {
3490 if (chan->conn_state & L2CAP_CONN_REJ_ACT) 3614 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3491 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3492 else
3493 l2cap_retransmit_frames(chan); 3615 l2cap_retransmit_frames(chan);
3494 } else { 3616 } else {
3495 l2cap_retransmit_frames(chan); 3617 l2cap_retransmit_frames(chan);
3496 3618
3497 if (chan->conn_state & L2CAP_CONN_WAIT_F) 3619 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3498 chan->conn_state |= L2CAP_CONN_REJ_ACT; 3620 set_bit(CONN_REJ_ACT, &chan->conn_state);
3499 } 3621 }
3500} 3622}
3501static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control) 3623static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
@@ -3504,32 +3626,32 @@ static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_
3504 3626
3505 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control); 3627 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3506 3628
3507 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3629 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3508 3630
3509 if (rx_control & L2CAP_CTRL_POLL) { 3631 if (rx_control & L2CAP_CTRL_POLL) {
3510 chan->expected_ack_seq = tx_seq; 3632 chan->expected_ack_seq = tx_seq;
3511 l2cap_drop_acked_frames(chan); 3633 l2cap_drop_acked_frames(chan);
3512 3634
3513 chan->conn_state |= L2CAP_CONN_SEND_FBIT; 3635 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3514 l2cap_retransmit_one_frame(chan, tx_seq); 3636 l2cap_retransmit_one_frame(chan, tx_seq);
3515 3637
3516 l2cap_ertm_send(chan); 3638 l2cap_ertm_send(chan);
3517 3639
3518 if (chan->conn_state & L2CAP_CONN_WAIT_F) { 3640 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3519 chan->srej_save_reqseq = tx_seq; 3641 chan->srej_save_reqseq = tx_seq;
3520 chan->conn_state |= L2CAP_CONN_SREJ_ACT; 3642 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3521 } 3643 }
3522 } else if (rx_control & L2CAP_CTRL_FINAL) { 3644 } else if (rx_control & L2CAP_CTRL_FINAL) {
3523 if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) && 3645 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3524 chan->srej_save_reqseq == tx_seq) 3646 chan->srej_save_reqseq == tx_seq)
3525 chan->conn_state &= ~L2CAP_CONN_SREJ_ACT; 3647 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3526 else 3648 else
3527 l2cap_retransmit_one_frame(chan, tx_seq); 3649 l2cap_retransmit_one_frame(chan, tx_seq);
3528 } else { 3650 } else {
3529 l2cap_retransmit_one_frame(chan, tx_seq); 3651 l2cap_retransmit_one_frame(chan, tx_seq);
3530 if (chan->conn_state & L2CAP_CONN_WAIT_F) { 3652 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3531 chan->srej_save_reqseq = tx_seq; 3653 chan->srej_save_reqseq = tx_seq;
3532 chan->conn_state |= L2CAP_CONN_SREJ_ACT; 3654 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3533 } 3655 }
3534 } 3656 }
3535} 3657}
@@ -3540,15 +3662,15 @@ static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_c
3540 3662
3541 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control); 3663 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3542 3664
3543 chan->conn_state |= L2CAP_CONN_REMOTE_BUSY; 3665 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3544 chan->expected_ack_seq = tx_seq; 3666 chan->expected_ack_seq = tx_seq;
3545 l2cap_drop_acked_frames(chan); 3667 l2cap_drop_acked_frames(chan);
3546 3668
3547 if (rx_control & L2CAP_CTRL_POLL) 3669 if (rx_control & L2CAP_CTRL_POLL)
3548 chan->conn_state |= L2CAP_CONN_SEND_FBIT; 3670 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3549 3671
3550 if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) { 3672 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3551 del_timer(&chan->retrans_timer); 3673 __clear_retrans_timer(chan);
3552 if (rx_control & L2CAP_CTRL_POLL) 3674 if (rx_control & L2CAP_CTRL_POLL)
3553 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL); 3675 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3554 return; 3676 return;
@@ -3565,11 +3687,11 @@ static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_cont
3565 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len); 3687 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3566 3688
3567 if (L2CAP_CTRL_FINAL & rx_control && 3689 if (L2CAP_CTRL_FINAL & rx_control &&
3568 chan->conn_state & L2CAP_CONN_WAIT_F) { 3690 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3569 del_timer(&chan->monitor_timer); 3691 __clear_monitor_timer(chan);
3570 if (chan->unacked_frames > 0) 3692 if (chan->unacked_frames > 0)
3571 __mod_retrans_timer(); 3693 __set_retrans_timer(chan);
3572 chan->conn_state &= ~L2CAP_CONN_WAIT_F; 3694 clear_bit(CONN_WAIT_F, &chan->conn_state);
3573 } 3695 }
3574 3696
3575 switch (rx_control & L2CAP_CTRL_SUPERVISE) { 3697 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
@@ -3668,7 +3790,6 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
3668{ 3790{
3669 struct l2cap_chan *chan; 3791 struct l2cap_chan *chan;
3670 struct sock *sk = NULL; 3792 struct sock *sk = NULL;
3671 struct l2cap_pinfo *pi;
3672 u16 control; 3793 u16 control;
3673 u8 tx_seq; 3794 u8 tx_seq;
3674 int len; 3795 int len;
@@ -3680,11 +3801,10 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
3680 } 3801 }
3681 3802
3682 sk = chan->sk; 3803 sk = chan->sk;
3683 pi = l2cap_pi(sk);
3684 3804
3685 BT_DBG("chan %p, len %d", chan, skb->len); 3805 BT_DBG("chan %p, len %d", chan, skb->len);
3686 3806
3687 if (sk->sk_state != BT_CONNECTED) 3807 if (chan->state != BT_CONNECTED)
3688 goto drop; 3808 goto drop;
3689 3809
3690 switch (chan->mode) { 3810 switch (chan->mode) {
@@ -3697,7 +3817,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
3697 if (chan->imtu < skb->len) 3817 if (chan->imtu < skb->len)
3698 goto drop; 3818 goto drop;
3699 3819
3700 if (!sock_queue_rcv_skb(sk, skb)) 3820 if (!chan->ops->recv(chan->data, skb))
3701 goto done; 3821 goto done;
3702 break; 3822 break;
3703 3823
@@ -3769,13 +3889,13 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
3769 3889
3770 BT_DBG("sk %p, len %d", sk, skb->len); 3890 BT_DBG("sk %p, len %d", sk, skb->len);
3771 3891
3772 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED) 3892 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3773 goto drop; 3893 goto drop;
3774 3894
3775 if (l2cap_pi(sk)->chan->imtu < skb->len) 3895 if (chan->imtu < skb->len)
3776 goto drop; 3896 goto drop;
3777 3897
3778 if (!sock_queue_rcv_skb(sk, skb)) 3898 if (!chan->ops->recv(chan->data, skb))
3779 goto done; 3899 goto done;
3780 3900
3781drop: 3901drop:
@@ -3802,13 +3922,13 @@ static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct
3802 3922
3803 BT_DBG("sk %p, len %d", sk, skb->len); 3923 BT_DBG("sk %p, len %d", sk, skb->len);
3804 3924
3805 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED) 3925 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3806 goto drop; 3926 goto drop;
3807 3927
3808 if (l2cap_pi(sk)->chan->imtu < skb->len) 3928 if (chan->imtu < skb->len)
3809 goto drop; 3929 goto drop;
3810 3930
3811 if (!sock_queue_rcv_skb(sk, skb)) 3931 if (!chan->ops->recv(chan->data, skb))
3812 goto done; 3932 goto done;
3813 3933
3814drop: 3934drop:
@@ -3853,6 +3973,11 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3853 l2cap_att_channel(conn, cid, skb); 3973 l2cap_att_channel(conn, cid, skb);
3854 break; 3974 break;
3855 3975
3976 case L2CAP_CID_SMP:
3977 if (smp_sig_channel(conn, skb))
3978 l2cap_conn_del(conn->hcon, EACCES);
3979 break;
3980
3856 default: 3981 default:
3857 l2cap_data_channel(conn, cid, skb); 3982 l2cap_data_channel(conn, cid, skb);
3858 break; 3983 break;
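
l2cap_recv_frame() above gains a case for the SMP fixed channel, and any failure from smp_sig_channel() tears down the whole ACL link. The CID values in the sketch below are the spec-defined fixed channels; the handler is a stub that pretends SMP rejected the frame:

    #include <stdio.h>

    #define CID_SIGNALING 0x0001
    #define CID_CONN_LESS 0x0002
    #define CID_ATT       0x0004
    #define CID_LE_SIG    0x0005
    #define CID_SMP       0x0006

    static int smp_sig_channel(void) { return -1; }  /* pretend SMP says no */

    static void recv_frame(unsigned short cid)
    {
            switch (cid) {
            case CID_SMP:
                    if (smp_sig_channel())
                            puts("conn_del(EACCES)"); /* drop the ACL link */
                    break;
            default:
                    puts("data channel");
            }
    }

    int main(void)
    {
            recv_frame(CID_SMP);
            recv_frame(0x0040);   /* first dynamically allocated CID */
            return 0;
    }
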
@@ -3876,7 +4001,7 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3876 list_for_each_entry(c, &chan_list, global_l) { 4001 list_for_each_entry(c, &chan_list, global_l) {
3877 struct sock *sk = c->sk; 4002 struct sock *sk = c->sk;
3878 4003
3879 if (sk->sk_state != BT_LISTEN) 4004 if (c->state != BT_LISTEN)
3880 continue; 4005 continue;
3881 4006
3882 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) { 4007 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
@@ -3909,7 +4034,7 @@ static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3909 if (conn) 4034 if (conn)
3910 l2cap_conn_ready(conn); 4035 l2cap_conn_ready(conn);
3911 } else 4036 } else
3912 l2cap_conn_del(hcon, bt_err(status)); 4037 l2cap_conn_del(hcon, bt_to_errno(status));
3913 4038
3914 return 0; 4039 return 0;
3915} 4040}
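
bt_err() becomes bt_to_errno() here and in l2cap_disconn_cfm() below, a rename that states what the helper does: translate an HCI status byte into a POSIX error code. The table in this sketch is illustrative only; the entries are not the kernel's actual mappings:

    #include <errno.h>
    #include <stdio.h>

    static int bt_to_errno(unsigned char status)
    {
            switch (status) {
            case 0x00: return 0;          /* success */
            case 0x04: return EHOSTDOWN;  /* hypothetical: page timeout */
            case 0x05: return EACCES;     /* hypothetical: auth failure */
            default:   return EPROTO;     /* anything unrecognised */
            }
    }

    int main(void)
    {
            printf("status 0x05 -> errno %d\n", bt_to_errno(0x05));
            return 0;
    }
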
@@ -3920,7 +4045,7 @@ static int l2cap_disconn_ind(struct hci_conn *hcon)
3920 4045
3921 BT_DBG("hcon %p", hcon); 4046 BT_DBG("hcon %p", hcon);
3922 4047
3923 if (hcon->type != ACL_LINK || !conn) 4048 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
3924 return 0x13; 4049 return 0x13;
3925 4050
3926 return conn->disc_reason; 4051 return conn->disc_reason;
@@ -3933,27 +4058,25 @@ static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3933 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK)) 4058 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3934 return -EINVAL; 4059 return -EINVAL;
3935 4060
3936 l2cap_conn_del(hcon, bt_err(reason)); 4061 l2cap_conn_del(hcon, bt_to_errno(reason));
3937 4062
3938 return 0; 4063 return 0;
3939} 4064}
3940 4065
3941static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt) 4066static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
3942{ 4067{
3943 struct sock *sk = chan->sk; 4068 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
3944
3945 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3946 return; 4069 return;
3947 4070
3948 if (encrypt == 0x00) { 4071 if (encrypt == 0x00) {
3949 if (chan->sec_level == BT_SECURITY_MEDIUM) { 4072 if (chan->sec_level == BT_SECURITY_MEDIUM) {
3950 l2cap_sock_clear_timer(sk); 4073 __clear_chan_timer(chan);
3951 l2cap_sock_set_timer(sk, HZ * 5); 4074 __set_chan_timer(chan, HZ * 5);
3952 } else if (chan->sec_level == BT_SECURITY_HIGH) 4075 } else if (chan->sec_level == BT_SECURITY_HIGH)
3953 __l2cap_sock_close(sk, ECONNREFUSED); 4076 l2cap_chan_close(chan, ECONNREFUSED);
3954 } else { 4077 } else {
3955 if (chan->sec_level == BT_SECURITY_MEDIUM) 4078 if (chan->sec_level == BT_SECURITY_MEDIUM)
3956 l2cap_sock_clear_timer(sk); 4079 __clear_chan_timer(chan);
3957 } 4080 }
3958} 4081}
3959 4082
@@ -3974,34 +4097,48 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3974 4097
3975 bh_lock_sock(sk); 4098 bh_lock_sock(sk);
3976 4099
3977 if (chan->conf_state & L2CAP_CONF_CONNECT_PEND) { 4100 BT_DBG("chan->scid %d", chan->scid);
4101
4102 if (chan->scid == L2CAP_CID_LE_DATA) {
4103 if (!status && encrypt) {
4104 chan->sec_level = hcon->sec_level;
4105 del_timer(&conn->security_timer);
4106 l2cap_chan_ready(sk);
4107 smp_distribute_keys(conn, 0);
4108 }
4109
4110 bh_unlock_sock(sk);
4111 continue;
4112 }
4113
4114 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
3978 bh_unlock_sock(sk); 4115 bh_unlock_sock(sk);
3979 continue; 4116 continue;
3980 } 4117 }
3981 4118
3982 if (!status && (sk->sk_state == BT_CONNECTED || 4119 if (!status && (chan->state == BT_CONNECTED ||
3983 sk->sk_state == BT_CONFIG)) { 4120 chan->state == BT_CONFIG)) {
3984 l2cap_check_encryption(chan, encrypt); 4121 l2cap_check_encryption(chan, encrypt);
3985 bh_unlock_sock(sk); 4122 bh_unlock_sock(sk);
3986 continue; 4123 continue;
3987 } 4124 }
3988 4125
3989 if (sk->sk_state == BT_CONNECT) { 4126 if (chan->state == BT_CONNECT) {
3990 if (!status) { 4127 if (!status) {
3991 struct l2cap_conn_req req; 4128 struct l2cap_conn_req req;
3992 req.scid = cpu_to_le16(chan->scid); 4129 req.scid = cpu_to_le16(chan->scid);
3993 req.psm = chan->psm; 4130 req.psm = chan->psm;
3994 4131
3995 chan->ident = l2cap_get_ident(conn); 4132 chan->ident = l2cap_get_ident(conn);
3996 chan->conf_state |= L2CAP_CONF_CONNECT_PEND; 4133 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3997 4134
3998 l2cap_send_cmd(conn, chan->ident, 4135 l2cap_send_cmd(conn, chan->ident,
3999 L2CAP_CONN_REQ, sizeof(req), &req); 4136 L2CAP_CONN_REQ, sizeof(req), &req);
4000 } else { 4137 } else {
4001 l2cap_sock_clear_timer(sk); 4138 __clear_chan_timer(chan);
4002 l2cap_sock_set_timer(sk, HZ / 10); 4139 __set_chan_timer(chan, HZ / 10);
4003 } 4140 }
4004 } else if (sk->sk_state == BT_CONNECT2) { 4141 } else if (chan->state == BT_CONNECT2) {
4005 struct l2cap_conn_rsp rsp; 4142 struct l2cap_conn_rsp rsp;
4006 __u16 res, stat; 4143 __u16 res, stat;
4007 4144
@@ -4013,13 +4150,13 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
 			if (parent)
 				parent->sk_data_ready(parent, 0);
 		} else {
-			sk->sk_state = BT_CONFIG;
+			l2cap_state_change(chan, BT_CONFIG);
 			res = L2CAP_CR_SUCCESS;
 			stat = L2CAP_CS_NO_INFO;
 		}
 	} else {
-		sk->sk_state = BT_DISCONN;
-		l2cap_sock_set_timer(sk, HZ / 10);
+		l2cap_state_change(chan, BT_DISCONN);
+		__set_chan_timer(chan, HZ / 10);
 		res = L2CAP_CR_SEC_BLOCK;
 		stat = L2CAP_CS_NO_INFO;
 	}
@@ -4163,7 +4300,7 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)
 		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
 					batostr(&bt_sk(sk)->src),
 					batostr(&bt_sk(sk)->dst),
-					sk->sk_state, __le16_to_cpu(c->psm),
+					c->state, __le16_to_cpu(c->psm),
 					c->scid, c->dcid, c->imtu, c->omtu,
 					c->sec_level, c->mode);
 	}
@@ -4206,12 +4343,6 @@ int __init l2cap_init(void)
 	if (err < 0)
 		return err;
 
-	_busy_wq = create_singlethread_workqueue("l2cap");
-	if (!_busy_wq) {
-		err = -ENOMEM;
-		goto error;
-	}
-
 	err = hci_register_proto(&l2cap_hci_proto);
 	if (err < 0) {
 		BT_ERR("L2CAP protocol registration failed");
@@ -4229,7 +4360,6 @@ int __init l2cap_init(void)
 	return 0;
 
 error:
-	destroy_workqueue(_busy_wq);
 	l2cap_cleanup_sockets();
 	return err;
 }
@@ -4238,9 +4368,6 @@ void l2cap_exit(void)
 {
 	debugfs_remove(l2cap_debugfs);
 
-	flush_workqueue(_busy_wq);
-	destroy_workqueue(_busy_wq);
-
 	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
 		BT_ERR("L2CAP protocol unregistration failed");
 
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 8248303f44e..61f1f623091 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -29,54 +29,11 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
+#include <net/bluetooth/smp.h>
 
 static const struct proto_ops l2cap_sock_ops;
-
-/* ---- L2CAP timers ---- */
-static void l2cap_sock_timeout(unsigned long arg)
-{
-	struct sock *sk = (struct sock *) arg;
-	int reason;
-
-	BT_DBG("sock %p state %d", sk, sk->sk_state);
-
-	bh_lock_sock(sk);
-
-	if (sock_owned_by_user(sk)) {
-		/* sk is owned by user. Try again later */
-		l2cap_sock_set_timer(sk, HZ / 5);
-		bh_unlock_sock(sk);
-		sock_put(sk);
-		return;
-	}
-
-	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
-		reason = ECONNREFUSED;
-	else if (sk->sk_state == BT_CONNECT &&
-			l2cap_pi(sk)->chan->sec_level != BT_SECURITY_SDP)
-		reason = ECONNREFUSED;
-	else
-		reason = ETIMEDOUT;
-
-	__l2cap_sock_close(sk, reason);
-
-	bh_unlock_sock(sk);
-
-	l2cap_sock_kill(sk);
-	sock_put(sk);
-}
-
-void l2cap_sock_set_timer(struct sock *sk, long timeout)
-{
-	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
-	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
-}
-
-void l2cap_sock_clear_timer(struct sock *sk)
-{
-	BT_DBG("sock %p state %d", sk, sk->sk_state);
-	sk_stop_timer(sk, &sk->sk_timer);
-}
+static void l2cap_sock_init(struct sock *sk, struct sock *parent);
+static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio);
 
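
The socket-level timeout machinery deleted above moves behind channel-level helpers in the L2CAP core. As a rough sketch of the replacement shape, assuming the channel embeds its own timer (the field and exact signatures here are illustrative assumptions, not the tree's actual definitions):

/* Sketch only: channel-scoped stand-ins for the removed
 * l2cap_sock_set_timer()/l2cap_sock_clear_timer() pair. */
static inline void __set_chan_timer(struct l2cap_chan *chan, long timeout)
{
	mod_timer(&chan->chan_timer, jiffies + timeout);	/* assumed field */
}

static inline void __clear_chan_timer(struct l2cap_chan *chan)
{
	del_timer(&chan->chan_timer);
}
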
 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
 {
@@ -133,6 +90,8 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
 	chan->sec_level = BT_SECURITY_SDP;
 
 	bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
+
+	chan->state = BT_BOUND;
 	sk->sk_state = BT_BOUND;
 
 done:
@@ -162,7 +121,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
 
 	lock_sock(sk);
 
-	if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
+	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED
 						&& !(la.l2_psm || la.l2_cid)) {
 		err = -EINVAL;
 		goto done;
@@ -204,8 +163,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
 	}
 
 	/* PSM must be odd and lsb of upper byte must be 0 */
-	if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
-				sk->sk_type != SOCK_RAW && !la.l2_cid) {
+	if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 && !la.l2_cid &&
+					chan->chan_type != L2CAP_CHAN_RAW) {
 		err = -EINVAL;
 		goto done;
 	}
@@ -258,6 +217,8 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
 
 	sk->sk_max_ack_backlog = backlog;
 	sk->sk_ack_backlog = 0;
+
+	chan->state = BT_LISTEN;
 	sk->sk_state = BT_LISTEN;
 
 done:
@@ -274,30 +235,26 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
 
 	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 
-	if (sk->sk_state != BT_LISTEN) {
-		err = -EBADFD;
-		goto done;
-	}
-
 	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
 
 	BT_DBG("sk %p timeo %ld", sk, timeo);
 
 	/* Wait for an incoming connection. (wake-one). */
 	add_wait_queue_exclusive(sk_sleep(sk), &wait);
-	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
+	while (1) {
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (!timeo) {
-			err = -EAGAIN;
+
+		if (sk->sk_state != BT_LISTEN) {
+			err = -EBADFD;
 			break;
 		}
 
-		release_sock(sk);
-		timeo = schedule_timeout(timeo);
-		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+		nsk = bt_accept_dequeue(sk, newsock);
+		if (nsk)
+			break;
 
-		if (sk->sk_state != BT_LISTEN) {
-			err = -EBADFD;
+		if (!timeo) {
+			err = -EAGAIN;
 			break;
 		}
 
@@ -305,8 +262,12 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
 			err = sock_intr_errno(timeo);
 			break;
 		}
+
+		release_sock(sk);
+		timeo = schedule_timeout(timeo);
+		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 	}
-	set_current_state(TASK_RUNNING);
+	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(sk_sleep(sk), &wait);
 
 	if (err)
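
The rewritten accept loop is the standard lost-wakeup-safe wait pattern: the task state is set to TASK_INTERRUPTIBLE before any condition is tested, so a wake-up racing with the checks either finds the task already runnable or finds the condition already true on the re-check. Condensed to its skeleton (illustrative; got_connection stands in for the bt_accept_dequeue() result):

add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (1) {
	set_current_state(TASK_INTERRUPTIBLE);	/* before the checks */
	if (got_connection || bad_state || !timeo || signal_pending(current))
		break;
	release_sock(sk);
	timeo = schedule_timeout(timeo);	/* sleep with sk unlocked */
	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
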
@@ -437,6 +398,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
 	struct sock *sk = sock->sk;
 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 	struct bt_security sec;
+	struct bt_power pwr;
 	int len, err = 0;
 
 	BT_DBG("sk %p", sk);
@@ -454,14 +416,18 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
 
 	switch (optname) {
 	case BT_SECURITY:
-		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
-						&& sk->sk_type != SOCK_RAW) {
+		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
+					chan->chan_type != L2CAP_CHAN_RAW) {
 			err = -EINVAL;
 			break;
 		}
 
+		memset(&sec, 0, sizeof(sec));
 		sec.level = chan->sec_level;
 
+		if (sk->sk_state == BT_CONNECTED)
+			sec.key_size = chan->conn->hcon->enc_key_size;
+
 		len = min_t(unsigned int, len, sizeof(sec));
 		if (copy_to_user(optval, (char *) &sec, len))
 			err = -EFAULT;
@@ -485,6 +451,21 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
 
 		break;
 
+	case BT_POWER:
+		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
+				&& sk->sk_type != SOCK_RAW) {
+			err = -EINVAL;
+			break;
+		}
+
+		pwr.force_active = chan->force_active;
+
+		len = min_t(unsigned int, len, sizeof(pwr));
+		if (copy_to_user(optval, (char *) &pwr, len))
+			err = -EFAULT;
+
+		break;
+
 	default:
 		err = -ENOPROTOOPT;
 		break;
@@ -535,7 +516,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
 	chan->mode = opts.mode;
 	switch (chan->mode) {
 	case L2CAP_MODE_BASIC:
-		chan->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
+		clear_bit(CONF_STATE2_DEVICE, &chan->conf_state);
 		break;
 	case L2CAP_MODE_ERTM:
 	case L2CAP_MODE_STREAMING:
@@ -585,6 +566,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
 	struct sock *sk = sock->sk;
 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 	struct bt_security sec;
+	struct bt_power pwr;
+	struct l2cap_conn *conn;
 	int len, err = 0;
 	u32 opt;
 
@@ -600,8 +583,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
 
 	switch (optname) {
 	case BT_SECURITY:
-		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
-						&& sk->sk_type != SOCK_RAW) {
+		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
+					chan->chan_type != L2CAP_CHAN_RAW) {
 			err = -EINVAL;
 			break;
 		}
@@ -621,6 +604,20 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
 		}
 
 		chan->sec_level = sec.level;
+
+		conn = chan->conn;
+		if (conn && chan->scid == L2CAP_CID_LE_DATA) {
+			if (!conn->hcon->out) {
+				err = -EINVAL;
+				break;
+			}
+
+			if (smp_conn_security(conn, sec.level))
+				break;
+
+			err = 0;
+			sk->sk_state = BT_CONFIG;
+		}
 		break;
 
 	case BT_DEFER_SETUP:
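
From userspace this path is exercised with an ordinary setsockopt() on an L2CAP socket; a minimal sketch (error handling elided; fd is assumed to be an open BTPROTO_L2CAP socket, with SOL_BLUETOOTH and BT_SECURITY from the Bluetooth headers):

struct bt_security sec = { .level = BT_SECURITY_MEDIUM };

/* On an LE data channel this now ends up in smp_conn_security(). */
setsockopt(fd, SOL_BLUETOOTH, BT_SECURITY, &sec, sizeof(sec));
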
@@ -661,6 +658,23 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
 		chan->flushable = opt;
 		break;
 
+	case BT_POWER:
+		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
+					chan->chan_type != L2CAP_CHAN_RAW) {
+			err = -EINVAL;
+			break;
+		}
+
+		pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
+
+		len = min_t(unsigned int, sizeof(pwr), optlen);
+		if (copy_from_user((char *) &pwr, optval, len)) {
+			err = -EFAULT;
+			break;
+		}
+		chan->force_active = pwr.force_active;
+		break;
+
 	default:
 		err = -ENOPROTOOPT;
 		break;
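
BT_POWER is driven the same way from userspace; a sketch of a client allowing its link to stay in sniff mode while this socket transmits (assumes the BT_POWER constants shipped with this patchset's headers):

struct bt_power pwr = { .force_active = BT_POWER_FORCE_ACTIVE_OFF };

setsockopt(fd, SOL_BLUETOOTH, BT_POWER, &pwr, sizeof(pwr));
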
@@ -674,8 +688,6 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
 {
 	struct sock *sk = sock->sk;
 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
-	struct sk_buff *skb;
-	u16 control;
 	int err;
 
 	BT_DBG("sock %p, sk %p", sock, sk);
@@ -690,87 +702,12 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
 	lock_sock(sk);
 
 	if (sk->sk_state != BT_CONNECTED) {
-		err = -ENOTCONN;
-		goto done;
-	}
-
-	/* Connectionless channel */
-	if (sk->sk_type == SOCK_DGRAM) {
-		skb = l2cap_create_connless_pdu(chan, msg, len);
-		if (IS_ERR(skb)) {
-			err = PTR_ERR(skb);
-		} else {
-			l2cap_do_send(chan, skb);
-			err = len;
-		}
-		goto done;
+		release_sock(sk);
+		return -ENOTCONN;
 	}
 
-	switch (chan->mode) {
-	case L2CAP_MODE_BASIC:
-		/* Check outgoing MTU */
-		if (len > chan->omtu) {
-			err = -EMSGSIZE;
-			goto done;
-		}
-
-		/* Create a basic PDU */
-		skb = l2cap_create_basic_pdu(chan, msg, len);
-		if (IS_ERR(skb)) {
-			err = PTR_ERR(skb);
-			goto done;
-		}
-
-		l2cap_do_send(chan, skb);
-		err = len;
-		break;
-
-	case L2CAP_MODE_ERTM:
-	case L2CAP_MODE_STREAMING:
-		/* Entire SDU fits into one PDU */
-		if (len <= chan->remote_mps) {
-			control = L2CAP_SDU_UNSEGMENTED;
-			skb = l2cap_create_iframe_pdu(chan, msg, len, control,
-									0);
-			if (IS_ERR(skb)) {
-				err = PTR_ERR(skb);
-				goto done;
-			}
-			__skb_queue_tail(&chan->tx_q, skb);
-
-			if (chan->tx_send_head == NULL)
-				chan->tx_send_head = skb;
-
-		} else {
-			/* Segment SDU into multiples PDUs */
-			err = l2cap_sar_segment_sdu(chan, msg, len);
-			if (err < 0)
-				goto done;
-		}
-
-		if (chan->mode == L2CAP_MODE_STREAMING) {
-			l2cap_streaming_send(chan);
-			err = len;
-			break;
-		}
-
-		if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
-				(chan->conn_state & L2CAP_CONN_WAIT_F)) {
-			err = len;
-			break;
-		}
-		err = l2cap_ertm_send(chan);
-
-		if (err >= 0)
-			err = len;
-		break;
-
-	default:
-		BT_DBG("bad state %1.1x", chan->mode);
-		err = -EBADFD;
-	}
+	err = l2cap_chan_send(chan, msg, len);
 
-done:
 	release_sock(sk);
 	return err;
 }
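
sendmsg() now only validates socket state and defers to l2cap_chan_send(), which keeps the per-mode dispatch previously inlined here. A condensed sketch of the shape of that core-side helper, reconstructed from the code removed above (not the literal implementation; l2cap_segment_and_send() is a placeholder for the ERTM/streaming segmentation path):

int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;

	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {	/* was SOCK_DGRAM */
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);
		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (len > chan->omtu)
			return -EMSGSIZE;
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);
		l2cap_do_send(chan, skb);
		return len;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		return l2cap_segment_and_send(chan, msg, len);	/* placeholder */
	default:
		return -EBADFD;
	}
}
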
@@ -778,13 +715,15 @@ done:
 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
 {
 	struct sock *sk = sock->sk;
+	struct l2cap_pinfo *pi = l2cap_pi(sk);
+	int err;
 
 	lock_sock(sk);
 
 	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
 		sk->sk_state = BT_CONFIG;
 
-		__l2cap_connect_rsp_defer(l2cap_pi(sk)->chan);
+		__l2cap_connect_rsp_defer(pi->chan);
 		release_sock(sk);
 		return 0;
 	}
@@ -792,15 +731,43 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms
 	release_sock(sk);
 
 	if (sock->type == SOCK_STREAM)
-		return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
+		err = bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
+	else
+		err = bt_sock_recvmsg(iocb, sock, msg, len, flags);
+
+	if (pi->chan->mode != L2CAP_MODE_ERTM)
+		return err;
+
+	/* Attempt to put pending rx data in the socket buffer */
+
+	lock_sock(sk);
+
+	if (!test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state))
+		goto done;
+
+	if (pi->rx_busy_skb) {
+		if (!sock_queue_rcv_skb(sk, pi->rx_busy_skb))
+			pi->rx_busy_skb = NULL;
+		else
+			goto done;
+	}
 
-	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
+	/* Restore data flow when half of the receive buffer is
+	 * available.  This avoids resending large numbers of
+	 * frames.
+	 */
+	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1)
+		l2cap_chan_busy(pi->chan, 0);
+
+done:
+	release_sock(sk);
+	return err;
 }
 
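
Together with l2cap_sock_recv_cb() further down, this implements both halves of ERTM receive-side flow control; in outline (illustrative fragments):

/* Entry, in l2cap_sock_recv_cb(), when sock_queue_rcv_skb() fails: */
pi->rx_busy_skb = skb;		/* park the already-acked frame */
l2cap_chan_busy(pi->chan, 1);	/* enter LOCAL_BUSY, stop acking */

/* Exit, in l2cap_sock_recvmsg(), once the reader drains half of
 * sk_rcvbuf, chosen so recovery does not trigger one resume per skb: */
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1)
	l2cap_chan_busy(pi->chan, 0);	/* resume acking and reassembly */
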
 /* Kill socket (only if zapped and orphan)
  * Must be called on unlocked socket.
  */
-void l2cap_sock_kill(struct sock *sk)
+static void l2cap_sock_kill(struct sock *sk)
 {
 	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
 		return;
@@ -814,87 +781,6 @@ void l2cap_sock_kill(struct sock *sk)
 	sock_put(sk);
 }
 
-/* Must be called on unlocked socket. */
-static void l2cap_sock_close(struct sock *sk)
-{
-	l2cap_sock_clear_timer(sk);
-	lock_sock(sk);
-	__l2cap_sock_close(sk, ECONNRESET);
-	release_sock(sk);
-	l2cap_sock_kill(sk);
-}
-
-static void l2cap_sock_cleanup_listen(struct sock *parent)
-{
-	struct sock *sk;
-
-	BT_DBG("parent %p", parent);
-
-	/* Close not yet accepted channels */
-	while ((sk = bt_accept_dequeue(parent, NULL)))
-		l2cap_sock_close(sk);
-
-	parent->sk_state = BT_CLOSED;
-	sock_set_flag(parent, SOCK_ZAPPED);
-}
-
-void __l2cap_sock_close(struct sock *sk, int reason)
-{
-	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
-	struct l2cap_conn *conn = chan->conn;
-
-	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
-
-	switch (sk->sk_state) {
-	case BT_LISTEN:
-		l2cap_sock_cleanup_listen(sk);
-		break;
-
-	case BT_CONNECTED:
-	case BT_CONFIG:
-		if ((sk->sk_type == SOCK_SEQPACKET ||
-					sk->sk_type == SOCK_STREAM) &&
-					conn->hcon->type == ACL_LINK) {
-			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
-			l2cap_send_disconn_req(conn, chan, reason);
-		} else
-			l2cap_chan_del(chan, reason);
-		break;
-
-	case BT_CONNECT2:
-		if ((sk->sk_type == SOCK_SEQPACKET ||
-					sk->sk_type == SOCK_STREAM) &&
-					conn->hcon->type == ACL_LINK) {
-			struct l2cap_conn_rsp rsp;
-			__u16 result;
-
-			if (bt_sk(sk)->defer_setup)
-				result = L2CAP_CR_SEC_BLOCK;
-			else
-				result = L2CAP_CR_BAD_PSM;
-
-			rsp.scid = cpu_to_le16(chan->dcid);
-			rsp.dcid = cpu_to_le16(chan->scid);
-			rsp.result = cpu_to_le16(result);
-			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
-			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
-							sizeof(rsp), &rsp);
-		}
-
-		l2cap_chan_del(chan, reason);
-		break;
-
-	case BT_CONNECT:
-	case BT_DISCONN:
-		l2cap_chan_del(chan, reason);
-		break;
-
-	default:
-		sock_set_flag(sk, SOCK_ZAPPED);
-		break;
-	}
-}
-
 static int l2cap_sock_shutdown(struct socket *sock, int how)
 {
 	struct sock *sk = sock->sk;
@@ -912,8 +798,7 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
 		err = __l2cap_wait_ack(sk);
 
 	sk->sk_shutdown = SHUTDOWN_MASK;
-	l2cap_sock_clear_timer(sk);
-	__l2cap_sock_close(sk, 0);
+	l2cap_chan_close(chan, 0);
 
 	if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
 		err = bt_sock_wait_state(sk, BT_CLOSED,
@@ -944,15 +829,85 @@ static int l2cap_sock_release(struct socket *sock)
 	return err;
 }
 
+static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data)
+{
+	struct sock *sk, *parent = data;
+
+	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
+								GFP_ATOMIC);
+	if (!sk)
+		return NULL;
+
+	l2cap_sock_init(sk, parent);
+
+	return l2cap_pi(sk)->chan;
+}
+
+static int l2cap_sock_recv_cb(void *data, struct sk_buff *skb)
+{
+	int err;
+	struct sock *sk = data;
+	struct l2cap_pinfo *pi = l2cap_pi(sk);
+
+	if (pi->rx_busy_skb)
+		return -ENOMEM;
+
+	err = sock_queue_rcv_skb(sk, skb);
+
+	/* For ERTM, handle one skb that doesn't fit into the recv
+	 * buffer.  This is important to do because the data frames
+	 * have already been acked, so the skb cannot be discarded.
+	 *
+	 * Notify the l2cap core that the buffer is full, so the
+	 * LOCAL_BUSY state is entered and no more frames are
+	 * acked and reassembled until there is buffer space
+	 * available.
+	 */
+	if (err < 0 && pi->chan->mode == L2CAP_MODE_ERTM) {
+		pi->rx_busy_skb = skb;
+		l2cap_chan_busy(pi->chan, 1);
+		err = 0;
+	}
+
+	return err;
+}
+
+static void l2cap_sock_close_cb(void *data)
+{
+	struct sock *sk = data;
+
+	l2cap_sock_kill(sk);
+}
+
+static void l2cap_sock_state_change_cb(void *data, int state)
+{
+	struct sock *sk = data;
+
+	sk->sk_state = state;
+}
+
+static struct l2cap_ops l2cap_chan_ops = {
+	.name		= "L2CAP Socket Interface",
+	.new_connection	= l2cap_sock_new_connection_cb,
+	.recv		= l2cap_sock_recv_cb,
+	.close		= l2cap_sock_close_cb,
+	.state_change	= l2cap_sock_state_change_cb,
+};
 
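
The ops table decouples the channel core from the socket front-end: l2cap_core.c holds only a struct l2cap_ops pointer plus the opaque chan->data cookie set in l2cap_sock_init() below, and calls back into this file. A sketch of the call sites as the core side would use them (function names here are invented for illustration):

/* Deliver a reassembled SDU to whatever front-end owns the channel. */
static int deliver_to_frontend(struct l2cap_chan *chan, struct sk_buff *skb)
{
	return chan->ops->recv(chan->data, skb);
}

/* Incoming connection: ask the front-end for a child channel. */
static struct l2cap_chan *spawn_child(struct l2cap_chan *parent)
{
	return parent->ops->new_connection(parent->data);
}
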
 static void l2cap_sock_destruct(struct sock *sk)
 {
 	BT_DBG("sk %p", sk);
 
+	if (l2cap_pi(sk)->rx_busy_skb) {
+		kfree_skb(l2cap_pi(sk)->rx_busy_skb);
+		l2cap_pi(sk)->rx_busy_skb = NULL;
+	}
+
 	skb_queue_purge(&sk->sk_receive_queue);
 	skb_queue_purge(&sk->sk_write_queue);
 }
 
-void l2cap_sock_init(struct sock *sk, struct sock *parent)
+static void l2cap_sock_init(struct sock *sk, struct sock *parent)
 {
 	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	struct l2cap_chan *chan = pi->chan;
@@ -965,6 +920,7 @@ void l2cap_sock_init(struct sock *sk, struct sock *parent)
 		sk->sk_type = parent->sk_type;
 		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
 
+		chan->chan_type = pchan->chan_type;
 		chan->imtu = pchan->imtu;
 		chan->omtu = pchan->omtu;
 		chan->conf_state = pchan->conf_state;
@@ -976,12 +932,27 @@ void l2cap_sock_init(struct sock *sk, struct sock *parent)
 		chan->role_switch = pchan->role_switch;
 		chan->force_reliable = pchan->force_reliable;
 		chan->flushable = pchan->flushable;
+		chan->force_active = pchan->force_active;
 	} else {
+
+		switch (sk->sk_type) {
+		case SOCK_RAW:
+			chan->chan_type = L2CAP_CHAN_RAW;
+			break;
+		case SOCK_DGRAM:
+			chan->chan_type = L2CAP_CHAN_CONN_LESS;
+			break;
+		case SOCK_SEQPACKET:
+		case SOCK_STREAM:
+			chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
+			break;
+		}
+
 		chan->imtu = L2CAP_DEFAULT_MTU;
 		chan->omtu = 0;
 		if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
 			chan->mode = L2CAP_MODE_ERTM;
-			chan->conf_state |= L2CAP_CONF_STATE2_DEVICE;
+			set_bit(CONF_STATE2_DEVICE, &chan->conf_state);
 		} else {
 			chan->mode = L2CAP_MODE_BASIC;
 		}
@@ -992,10 +963,15 @@ void l2cap_sock_init(struct sock *sk, struct sock *parent)
 		chan->role_switch = 0;
 		chan->force_reliable = 0;
 		chan->flushable = BT_FLUSHABLE_OFF;
+		chan->force_active = BT_POWER_FORCE_ACTIVE_ON;
+
 	}
 
 	/* Default config options */
 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+
+	chan->data = sk;
+	chan->ops = &l2cap_chan_ops;
 }
 
 static struct proto l2cap_proto = {
@@ -1004,9 +980,10 @@ static struct proto l2cap_proto = {
 	.obj_size	= sizeof(struct l2cap_pinfo)
 };
 
-struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
+static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
 {
 	struct sock *sk;
+	struct l2cap_chan *chan;
 
 	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
 	if (!sk)
@@ -1016,14 +993,20 @@ struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, g
 	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
 
 	sk->sk_destruct = l2cap_sock_destruct;
-	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
+	sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
 
 	sock_reset_flag(sk, SOCK_ZAPPED);
 
 	sk->sk_protocol = proto;
 	sk->sk_state = BT_OPEN;
 
-	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
+	chan = l2cap_chan_create(sk);
+	if (!chan) {
+		l2cap_sock_kill(sk);
+		return NULL;
+	}
+
+	l2cap_pi(sk)->chan = chan;
 
 	return sk;
 }
@@ -1032,7 +1015,6 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
 		int kern)
 {
 	struct sock *sk;
-	struct l2cap_chan *chan;
 
 	BT_DBG("sock %p", sock);
 
@@ -1051,14 +1033,6 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
 	if (!sk)
 		return -ENOMEM;
 
-	chan = l2cap_chan_create(sk);
-	if (!chan) {
-		l2cap_sock_kill(sk);
-		return -ENOMEM;
-	}
-
-	l2cap_pi(sk)->chan = chan;
-
 	l2cap_sock_init(sk, NULL);
 	return 0;
 }
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index b826d1bf10d..86a6bed229d 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -59,7 +59,7 @@ char *batostr(bdaddr_t *ba)
 EXPORT_SYMBOL(batostr);
 
 /* Bluetooth error codes to Unix errno mapping */
-int bt_err(__u16 code)
+int bt_to_errno(__u16 code)
 {
 	switch (code) {
 	case 0:
@@ -149,4 +149,23 @@ int bt_err(__u16 code)
 		return ENOSYS;
 	}
 }
-EXPORT_SYMBOL(bt_err);
+EXPORT_SYMBOL(bt_to_errno);
+
+int bt_printk(const char *level, const char *format, ...)
+{
+	struct va_format vaf;
+	va_list args;
+	int r;
+
+	va_start(args, format);
+
+	vaf.fmt = format;
+	vaf.va = &args;
+
+	r = printk("%sBluetooth: %pV\n", level, &vaf);
+
+	va_end(args);
+
+	return r;
+}
+EXPORT_SYMBOL(bt_printk);
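
bt_printk() centralizes the "Bluetooth: " prefix via the %pV vararg-format specifier, so callers wrap it in level macros rather than calling printk() directly. A plausible form of those wrappers (the actual macros belong in include/net/bluetooth/bluetooth.h and may differ slightly):

#define BT_INFO(fmt, ...)	bt_printk(KERN_INFO, fmt, ##__VA_ARGS__)
#define BT_ERR(fmt, ...)	bt_printk(KERN_ERR, fmt, ##__VA_ARGS__)

/* BT_ERR("hci%u command failed", index) then prints
 * "Bluetooth: hci0 command failed" at KERN_ERR level. */
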
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index dae382ce702..98327213d93 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -41,7 +41,7 @@ struct pending_cmd {
41 void *user_data; 41 void *user_data;
42}; 42};
43 43
44LIST_HEAD(cmd_list); 44static LIST_HEAD(cmd_list);
45 45
46static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) 46static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
47{ 47{
@@ -179,7 +179,7 @@ static int read_controller_info(struct sock *sk, u16 index)
179 179
180 hci_del_off_timer(hdev); 180 hci_del_off_timer(hdev);
181 181
182 hci_dev_lock(hdev); 182 hci_dev_lock_bh(hdev);
183 183
184 set_bit(HCI_MGMT, &hdev->flags); 184 set_bit(HCI_MGMT, &hdev->flags);
185 185
@@ -208,7 +208,7 @@ static int read_controller_info(struct sock *sk, u16 index)
208 208
209 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name)); 209 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
210 210
211 hci_dev_unlock(hdev); 211 hci_dev_unlock_bh(hdev);
212 hci_dev_put(hdev); 212 hci_dev_put(hdev);
213 213
214 return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp)); 214 return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp));
@@ -316,7 +316,7 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
316 if (!hdev) 316 if (!hdev)
317 return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV); 317 return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV);
318 318
319 hci_dev_lock(hdev); 319 hci_dev_lock_bh(hdev);
320 320
321 up = test_bit(HCI_UP, &hdev->flags); 321 up = test_bit(HCI_UP, &hdev->flags);
322 if ((cp->val && up) || (!cp->val && !up)) { 322 if ((cp->val && up) || (!cp->val && !up)) {
@@ -343,7 +343,7 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
343 err = 0; 343 err = 0;
344 344
345failed: 345failed:
346 hci_dev_unlock(hdev); 346 hci_dev_unlock_bh(hdev);
347 hci_dev_put(hdev); 347 hci_dev_put(hdev);
348 return err; 348 return err;
349} 349}
@@ -368,7 +368,7 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
368 if (!hdev) 368 if (!hdev)
369 return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV); 369 return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV);
370 370
371 hci_dev_lock(hdev); 371 hci_dev_lock_bh(hdev);
372 372
373 if (!test_bit(HCI_UP, &hdev->flags)) { 373 if (!test_bit(HCI_UP, &hdev->flags)) {
374 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN); 374 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN);
@@ -403,7 +403,7 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
403 mgmt_pending_remove(cmd); 403 mgmt_pending_remove(cmd);
404 404
405failed: 405failed:
406 hci_dev_unlock(hdev); 406 hci_dev_unlock_bh(hdev);
407 hci_dev_put(hdev); 407 hci_dev_put(hdev);
408 408
409 return err; 409 return err;
@@ -429,7 +429,7 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
429 if (!hdev) 429 if (!hdev)
430 return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV); 430 return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV);
431 431
432 hci_dev_lock(hdev); 432 hci_dev_lock_bh(hdev);
433 433
434 if (!test_bit(HCI_UP, &hdev->flags)) { 434 if (!test_bit(HCI_UP, &hdev->flags)) {
435 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN); 435 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN);
@@ -463,7 +463,7 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
463 mgmt_pending_remove(cmd); 463 mgmt_pending_remove(cmd);
464 464
465failed: 465failed:
466 hci_dev_unlock(hdev); 466 hci_dev_unlock_bh(hdev);
467 hci_dev_put(hdev); 467 hci_dev_put(hdev);
468 468
469 return err; 469 return err;
@@ -522,7 +522,7 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
522 if (!hdev) 522 if (!hdev)
523 return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV); 523 return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV);
524 524
525 hci_dev_lock(hdev); 525 hci_dev_lock_bh(hdev);
526 526
527 if (cp->val) 527 if (cp->val)
528 set_bit(HCI_PAIRABLE, &hdev->flags); 528 set_bit(HCI_PAIRABLE, &hdev->flags);
@@ -538,7 +538,7 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
538 err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk); 538 err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk);
539 539
540failed: 540failed:
541 hci_dev_unlock(hdev); 541 hci_dev_unlock_bh(hdev);
542 hci_dev_put(hdev); 542 hci_dev_put(hdev);
543 543
544 return err; 544 return err;
@@ -739,7 +739,7 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
739 if (!hdev) 739 if (!hdev)
740 return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV); 740 return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV);
741 741
742 hci_dev_lock(hdev); 742 hci_dev_lock_bh(hdev);
743 743
744 uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC); 744 uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
745 if (!uuid) { 745 if (!uuid) {
@@ -763,7 +763,7 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
763 err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0); 763 err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0);
764 764
765failed: 765failed:
766 hci_dev_unlock(hdev); 766 hci_dev_unlock_bh(hdev);
767 hci_dev_put(hdev); 767 hci_dev_put(hdev);
768 768
769 return err; 769 return err;
@@ -788,7 +788,7 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
788 if (!hdev) 788 if (!hdev)
789 return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV); 789 return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV);
790 790
791 hci_dev_lock(hdev); 791 hci_dev_lock_bh(hdev);
792 792
793 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) { 793 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
794 err = hci_uuids_clear(hdev); 794 err = hci_uuids_clear(hdev);
@@ -823,7 +823,7 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
823 err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0); 823 err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0);
824 824
825unlock: 825unlock:
826 hci_dev_unlock(hdev); 826 hci_dev_unlock_bh(hdev);
827 hci_dev_put(hdev); 827 hci_dev_put(hdev);
828 828
829 return err; 829 return err;
@@ -847,7 +847,7 @@ static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
847 if (!hdev) 847 if (!hdev)
848 return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV); 848 return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV);
849 849
850 hci_dev_lock(hdev); 850 hci_dev_lock_bh(hdev);
851 851
852 hdev->major_class = cp->major; 852 hdev->major_class = cp->major;
853 hdev->minor_class = cp->minor; 853 hdev->minor_class = cp->minor;
@@ -857,7 +857,7 @@ static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
857 if (err == 0) 857 if (err == 0)
858 err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0); 858 err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0);
859 859
860 hci_dev_unlock(hdev); 860 hci_dev_unlock_bh(hdev);
861 hci_dev_put(hdev); 861 hci_dev_put(hdev);
862 862
863 return err; 863 return err;
@@ -879,7 +879,7 @@ static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
879 if (!hdev) 879 if (!hdev)
880 return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV); 880 return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV);
881 881
882 hci_dev_lock(hdev); 882 hci_dev_lock_bh(hdev);
883 883
884 BT_DBG("hci%u enable %d", index, cp->enable); 884 BT_DBG("hci%u enable %d", index, cp->enable);
885 885
@@ -897,7 +897,7 @@ static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
897 err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL, 897 err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL,
898 0); 898 0);
899 899
900 hci_dev_unlock(hdev); 900 hci_dev_unlock_bh(hdev);
901 hci_dev_put(hdev); 901 hci_dev_put(hdev);
902 902
903 return err; 903 return err;
@@ -908,7 +908,7 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	struct hci_dev *hdev;
 	struct mgmt_cp_load_keys *cp;
 	u16 key_count, expected_len;
-	int i;
+	int i, err;
 
 	cp = (void *) data;
 
@@ -918,9 +918,9 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	key_count = get_unaligned_le16(&cp->key_count);
 
 	expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info);
-	if (expected_len != len) {
-		BT_ERR("load_keys: expected %u bytes, got %u bytes",
-							len, expected_len);
+	if (expected_len > len) {
+		BT_ERR("load_keys: expected at least %u bytes, got %u bytes",
+							expected_len, len);
 		return -EINVAL;
 	}
 
@@ -931,7 +931,7 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
 								key_count);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	hci_link_keys_clear(hdev);
 
@@ -942,17 +942,36 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	else
 		clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
 
-	for (i = 0; i < key_count; i++) {
-		struct mgmt_key_info *key = &cp->keys[i];
+	len -= sizeof(*cp);
+	i = 0;
+
+	while (i < len) {
+		struct mgmt_key_info *key = (void *) cp->keys + i;
+
+		i += sizeof(*key) + key->dlen;
+
+		if (key->type == HCI_LK_SMP_LTK) {
+			struct key_master_id *id = (void *) key->data;
+
+			if (key->dlen != sizeof(struct key_master_id))
+				continue;
+
+			hci_add_ltk(hdev, 0, &key->bdaddr, key->pin_len,
+						id->ediv, id->rand, key->val);
+
+			continue;
+		}
 
 		hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type,
 								key->pin_len);
 	}
 
-	hci_dev_unlock(hdev);
+	err = cmd_complete(sk, index, MGMT_OP_LOAD_KEYS, NULL, 0);
+
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
-	return 0;
+	return err;
 }
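
load_keys() now consumes variable-length records instead of indexing a fixed-size array: each mgmt_key_info header announces how many payload bytes follow it, and the cursor advances by header size plus dlen. The wire layout being walked, in outline (comment sketch, per the mgmt structures used above):

/*
 * struct mgmt_cp_load_keys      fixed part: debug_keys, key_count
 *   mgmt_key_info record 0      header + dlen0 payload bytes
 *   mgmt_key_info record 1      header + dlen1 payload bytes
 *   ...
 *
 * For key->type == HCI_LK_SMP_LTK the payload is a struct
 * key_master_id carrying the ediv and rand values passed to
 * hci_add_ltk(); every other type goes to hci_add_link_key().
 */
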
 
 static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
@@ -971,7 +990,7 @@ static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	err = hci_remove_link_key(hdev, &cp->bdaddr);
 	if (err < 0) {
@@ -990,11 +1009,11 @@ static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
 
 		put_unaligned_le16(conn->handle, &dc.handle);
 		dc.reason = 0x13; /* Remote User Terminated Connection */
-		err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, 0, NULL);
+		err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
 	}
 
 unlock:
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1020,7 +1039,7 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
1020 if (!hdev) 1039 if (!hdev)
1021 return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV); 1040 return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV);
1022 1041
1023 hci_dev_lock(hdev); 1042 hci_dev_lock_bh(hdev);
1024 1043
1025 if (!test_bit(HCI_UP, &hdev->flags)) { 1044 if (!test_bit(HCI_UP, &hdev->flags)) {
1026 err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN); 1045 err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN);
@@ -1055,7 +1074,7 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
1055 mgmt_pending_remove(cmd); 1074 mgmt_pending_remove(cmd);
1056 1075
1057failed: 1076failed:
1058 hci_dev_unlock(hdev); 1077 hci_dev_unlock_bh(hdev);
1059 hci_dev_put(hdev); 1078 hci_dev_put(hdev);
1060 1079
1061 return err; 1080 return err;
@@ -1076,7 +1095,7 @@ static int get_connections(struct sock *sk, u16 index)
1076 if (!hdev) 1095 if (!hdev)
1077 return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV); 1096 return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV);
1078 1097
1079 hci_dev_lock(hdev); 1098 hci_dev_lock_bh(hdev);
1080 1099
1081 count = 0; 1100 count = 0;
1082 list_for_each(p, &hdev->conn_hash.list) { 1101 list_for_each(p, &hdev->conn_hash.list) {
@@ -1092,8 +1111,6 @@ static int get_connections(struct sock *sk, u16 index)
 
 	put_unaligned_le16(count, &rp->conn_count);
 
-	read_lock(&hci_dev_list_lock);
-
 	i = 0;
 	list_for_each(p, &hdev->conn_hash.list) {
 		struct hci_conn *c = list_entry(p, struct hci_conn, list);
@@ -1101,22 +1118,41 @@ static int get_connections(struct sock *sk, u16 index)
 		bacpy(&rp->conn[i++], &c->dst);
 	}
 
-	read_unlock(&hci_dev_list_lock);
-
 	err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len);
 
 unlock:
 	kfree(rp);
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 	return err;
 }
 
+static int send_pin_code_neg_reply(struct sock *sk, u16 index,
+		struct hci_dev *hdev, struct mgmt_cp_pin_code_neg_reply *cp)
+{
+	struct pending_cmd *cmd;
+	int err;
+
+	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index, cp,
+								sizeof(*cp));
+	if (!cmd)
+		return -ENOMEM;
+
+	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr),
+								&cp->bdaddr);
+	if (err < 0)
+		mgmt_pending_remove(cmd);
+
+	return err;
+}
+
 static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
 								u16 len)
 {
 	struct hci_dev *hdev;
+	struct hci_conn *conn;
 	struct mgmt_cp_pin_code_reply *cp;
+	struct mgmt_cp_pin_code_neg_reply ncp;
 	struct hci_cp_pin_code_reply reply;
 	struct pending_cmd *cmd;
 	int err;
@@ -1132,13 +1168,32 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	if (!test_bit(HCI_UP, &hdev->flags)) {
 		err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN);
 		goto failed;
 	}
 
+	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+	if (!conn) {
+		err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENOTCONN);
+		goto failed;
+	}
+
+	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
+		bacpy(&ncp.bdaddr, &cp->bdaddr);
+
+		BT_ERR("PIN code is not 16 bytes long");
+
+		err = send_pin_code_neg_reply(sk, index, hdev, &ncp);
+		if (err >= 0)
+			err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
+								EINVAL);
+
+		goto failed;
+	}
+
 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len);
 	if (!cmd) {
 		err = -ENOMEM;
@@ -1147,14 +1202,14 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
 
 	bacpy(&reply.bdaddr, &cp->bdaddr);
 	reply.pin_len = cp->pin_len;
-	memcpy(reply.pin_code, cp->pin_code, 16);
+	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
 
 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
 	if (err < 0)
 		mgmt_pending_remove(cmd);
 
 failed:
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1165,7 +1220,6 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
 {
 	struct hci_dev *hdev;
 	struct mgmt_cp_pin_code_neg_reply *cp;
-	struct pending_cmd *cmd;
 	int err;
 
 	BT_DBG("");
@@ -1181,7 +1235,7 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
 		return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
 								ENODEV);
 
-	hci_dev_lock(hdev);
+	hci_dev_lock_bh(hdev);
 
 	if (!test_bit(HCI_UP, &hdev->flags)) {
 		err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
@@ -1189,20 +1243,10 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
 		goto failed;
 	}
 
-	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index,
-								data, len);
-	if (!cmd) {
-		err = -ENOMEM;
-		goto failed;
-	}
-
-	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr),
-								&cp->bdaddr);
-	if (err < 0)
-		mgmt_pending_remove(cmd);
+	err = send_pin_code_neg_reply(sk, index, hdev, cp);
 
 failed:
-	hci_dev_unlock(hdev);
+	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1225,14 +1269,14 @@ static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
1225 if (!hdev) 1269 if (!hdev)
1226 return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV); 1270 return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV);
1227 1271
1228 hci_dev_lock(hdev); 1272 hci_dev_lock_bh(hdev);
1229 1273
1230 hdev->io_capability = cp->io_capability; 1274 hdev->io_capability = cp->io_capability;
1231 1275
1232 BT_DBG("%s IO capability set to 0x%02x", hdev->name, 1276 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
1233 hdev->io_capability); 1277 hdev->io_capability);
1234 1278
1235 hci_dev_unlock(hdev); 1279 hci_dev_unlock_bh(hdev);
1236 hci_dev_put(hdev); 1280 hci_dev_put(hdev);
1237 1281
1238 return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0); 1282 return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0);
@@ -1318,7 +1362,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1318 if (!hdev) 1362 if (!hdev)
1319 return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV); 1363 return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV);
1320 1364
1321 hci_dev_lock(hdev); 1365 hci_dev_lock_bh(hdev);
1322 1366
1323 if (cp->io_cap == 0x03) { 1367 if (cp->io_cap == 0x03) {
1324 sec_level = BT_SECURITY_MEDIUM; 1368 sec_level = BT_SECURITY_MEDIUM;
@@ -1328,7 +1372,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1328 auth_type = HCI_AT_DEDICATED_BONDING_MITM; 1372 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1329 } 1373 }
1330 1374
1331 conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level, auth_type); 1375 conn = hci_connect(hdev, ACL_LINK, 0, &cp->bdaddr, sec_level, auth_type);
1332 if (IS_ERR(conn)) { 1376 if (IS_ERR(conn)) {
1333 err = PTR_ERR(conn); 1377 err = PTR_ERR(conn);
1334 goto unlock; 1378 goto unlock;
@@ -1360,7 +1404,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1360 err = 0; 1404 err = 0;
1361 1405
1362unlock: 1406unlock:
1363 hci_dev_unlock(hdev); 1407 hci_dev_unlock_bh(hdev);
1364 hci_dev_put(hdev); 1408 hci_dev_put(hdev);
1365 1409
1366 return err; 1410 return err;
@@ -1392,7 +1436,7 @@ static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
1392 if (!hdev) 1436 if (!hdev)
1393 return cmd_status(sk, index, mgmt_op, ENODEV); 1437 return cmd_status(sk, index, mgmt_op, ENODEV);
1394 1438
1395 hci_dev_lock(hdev); 1439 hci_dev_lock_bh(hdev);
1396 1440
1397 if (!test_bit(HCI_UP, &hdev->flags)) { 1441 if (!test_bit(HCI_UP, &hdev->flags)) {
1398 err = cmd_status(sk, index, mgmt_op, ENETDOWN); 1442 err = cmd_status(sk, index, mgmt_op, ENETDOWN);
@@ -1410,7 +1454,7 @@ static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
1410 mgmt_pending_remove(cmd); 1454 mgmt_pending_remove(cmd);
1411 1455
1412failed: 1456failed:
1413 hci_dev_unlock(hdev); 1457 hci_dev_unlock_bh(hdev);
1414 hci_dev_put(hdev); 1458 hci_dev_put(hdev);
1415 1459
1416 return err; 1460 return err;
@@ -1434,7 +1478,7 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
1434 if (!hdev) 1478 if (!hdev)
1435 return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, ENODEV); 1479 return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, ENODEV);
1436 1480
1437 hci_dev_lock(hdev); 1481 hci_dev_lock_bh(hdev);
1438 1482
1439 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len); 1483 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len);
1440 if (!cmd) { 1484 if (!cmd) {
@@ -1449,7 +1493,7 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
1449 mgmt_pending_remove(cmd); 1493 mgmt_pending_remove(cmd);
1450 1494
1451failed: 1495failed:
1452 hci_dev_unlock(hdev); 1496 hci_dev_unlock_bh(hdev);
1453 hci_dev_put(hdev); 1497 hci_dev_put(hdev);
1454 1498
1455 return err; 1499 return err;
@@ -1468,7 +1512,7 @@ static int read_local_oob_data(struct sock *sk, u16 index)
1468 return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, 1512 return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
1469 ENODEV); 1513 ENODEV);
1470 1514
1471 hci_dev_lock(hdev); 1515 hci_dev_lock_bh(hdev);
1472 1516
1473 if (!test_bit(HCI_UP, &hdev->flags)) { 1517 if (!test_bit(HCI_UP, &hdev->flags)) {
1474 err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, 1518 err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
@@ -1498,7 +1542,7 @@ static int read_local_oob_data(struct sock *sk, u16 index)
1498 mgmt_pending_remove(cmd); 1542 mgmt_pending_remove(cmd);
1499 1543
1500unlock: 1544unlock:
1501 hci_dev_unlock(hdev); 1545 hci_dev_unlock_bh(hdev);
1502 hci_dev_put(hdev); 1546 hci_dev_put(hdev);
1503 1547
1504 return err; 1548 return err;
@@ -1522,7 +1566,7 @@ static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data,
1522 return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, 1566 return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
1523 ENODEV); 1567 ENODEV);
1524 1568
1525 hci_dev_lock(hdev); 1569 hci_dev_lock_bh(hdev);
1526 1570
1527 err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash, 1571 err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash,
1528 cp->randomizer); 1572 cp->randomizer);
@@ -1532,7 +1576,7 @@ static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data,
1532 err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL, 1576 err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL,
1533 0); 1577 0);
1534 1578
1535 hci_dev_unlock(hdev); 1579 hci_dev_unlock_bh(hdev);
1536 hci_dev_put(hdev); 1580 hci_dev_put(hdev);
1537 1581
1538 return err; 1582 return err;
@@ -1556,7 +1600,7 @@ static int remove_remote_oob_data(struct sock *sk, u16 index,
1556 return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, 1600 return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
1557 ENODEV); 1601 ENODEV);
1558 1602
1559 hci_dev_lock(hdev); 1603 hci_dev_lock_bh(hdev);
1560 1604
1561 err = hci_remove_remote_oob_data(hdev, &cp->bdaddr); 1605 err = hci_remove_remote_oob_data(hdev, &cp->bdaddr);
1562 if (err < 0) 1606 if (err < 0)
@@ -1566,7 +1610,7 @@ static int remove_remote_oob_data(struct sock *sk, u16 index,
1566 err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, 1610 err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
1567 NULL, 0); 1611 NULL, 0);
1568 1612
1569 hci_dev_unlock(hdev); 1613 hci_dev_unlock_bh(hdev);
1570 hci_dev_put(hdev); 1614 hci_dev_put(hdev);
1571 1615
1572 return err; 1616 return err;
@@ -1641,6 +1685,70 @@ failed:
1641 return err; 1685 return err;
1642} 1686}
1643 1687
1688static int block_device(struct sock *sk, u16 index, unsigned char *data,
1689 u16 len)
1690{
1691 struct hci_dev *hdev;
1692 struct mgmt_cp_block_device *cp;
1693 int err;
1694
1695 BT_DBG("hci%u", index);
1696
1697 cp = (void *) data;
1698
1699 if (len != sizeof(*cp))
1700 return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
1701 EINVAL);
1702
1703 hdev = hci_dev_get(index);
1704 if (!hdev)
1705 return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
1706 ENODEV);
1707
1708 err = hci_blacklist_add(hdev, &cp->bdaddr);
1709
1710 if (err < 0)
1711 err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, -err);
1712 else
1713 err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE,
1714 NULL, 0);
1715 hci_dev_put(hdev);
1716
1717 return err;
1718}
1719
1720static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
1721 u16 len)
1722{
1723 struct hci_dev *hdev;
1724 struct mgmt_cp_unblock_device *cp;
1725 int err;
1726
1727 BT_DBG("hci%u", index);
1728
1729 cp = (void *) data;
1730
1731 if (len != sizeof(*cp))
1732 return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
1733 EINVAL);
1734
1735 hdev = hci_dev_get(index);
1736 if (!hdev)
1737 return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
1738 ENODEV);
1739
1740 err = hci_blacklist_del(hdev, &cp->bdaddr);
1741
1742 if (err < 0)
1743 err = cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, -err);
1744 else
1745 err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE,
1746 NULL, 0);
1747 hci_dev_put(hdev);
1748
1749 return err;
1750}
1751
1644int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) 1752int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
1645{ 1753{
1646 unsigned char *buf; 1754 unsigned char *buf;
@@ -1755,6 +1863,12 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
1755 case MGMT_OP_STOP_DISCOVERY: 1863 case MGMT_OP_STOP_DISCOVERY:
1756 err = stop_discovery(sk, index); 1864 err = stop_discovery(sk, index);
1757 break; 1865 break;
1866 case MGMT_OP_BLOCK_DEVICE:
1867 err = block_device(sk, index, buf + sizeof(*hdr), len);
1868 break;
1869 case MGMT_OP_UNBLOCK_DEVICE:
1870 err = unblock_device(sk, index, buf + sizeof(*hdr), len);
1871 break;
1758 default: 1872 default:
1759 BT_DBG("Unknown op %u", opcode); 1873 BT_DBG("Unknown op %u", opcode);
1760 err = cmd_status(sk, index, opcode, 0x01); 1874 err = cmd_status(sk, index, opcode, 0x01);
@@ -1863,17 +1977,28 @@ int mgmt_connectable(u16 index, u8 connectable)
1863 1977
1864int mgmt_new_key(u16 index, struct link_key *key, u8 persistent) 1978int mgmt_new_key(u16 index, struct link_key *key, u8 persistent)
1865{ 1979{
1866 struct mgmt_ev_new_key ev; 1980 struct mgmt_ev_new_key *ev;
1981 int err, total;
1867 1982
1868 memset(&ev, 0, sizeof(ev)); 1983 total = sizeof(struct mgmt_ev_new_key) + key->dlen;
1984 ev = kzalloc(total, GFP_ATOMIC);
1985 if (!ev)
1986 return -ENOMEM;
1869 1987
1870 ev.store_hint = persistent; 1988 bacpy(&ev->key.bdaddr, &key->bdaddr);
1871 bacpy(&ev.key.bdaddr, &key->bdaddr); 1989 ev->key.type = key->type;
1872 ev.key.type = key->type; 1990 memcpy(ev->key.val, key->val, 16);
1873 memcpy(ev.key.val, key->val, 16); 1991 ev->key.pin_len = key->pin_len;
1874 ev.key.pin_len = key->pin_len; 1992 ev->key.dlen = key->dlen;
1993 ev->store_hint = persistent;
1875 1994
1876 return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL); 1995 memcpy(ev->key.data, key->data, key->dlen);
1996
1997 err = mgmt_event(MGMT_EV_NEW_KEY, index, ev, total, NULL);
1998
1999 kfree(ev);
2000
2001 return err;
1877} 2002}
1878 2003
1879int mgmt_connected(u16 index, bdaddr_t *bdaddr) 2004int mgmt_connected(u16 index, bdaddr_t *bdaddr)
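Consolidated from the two-column diff above, the reworked mgmt_new_key() follows the usual header-plus-flexible-tail event pattern: allocate the fixed event struct together with the variable key payload in one block, fill it, emit, free. A minimal sketch (field names as in the diff; error paths elided):

	struct mgmt_ev_new_key *ev;
	int err, total;

	total = sizeof(*ev) + key->dlen;	/* fixed header + trailing key data */
	ev = kzalloc(total, GFP_ATOMIC);	/* may run in atomic context */
	if (!ev)
		return -ENOMEM;

	/* fill the fixed fields, then the flexible tail */
	memcpy(ev->key.data, key->data, key->dlen);

	err = mgmt_event(MGMT_EV_NEW_KEY, index, ev, total, NULL);
	kfree(ev);	/* safe to free: the event path has taken its copy */
	return err;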
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 5759bb7054f..c2486a53714 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -62,7 +62,6 @@ static DEFINE_MUTEX(rfcomm_mutex);
62#define rfcomm_lock() mutex_lock(&rfcomm_mutex) 62#define rfcomm_lock() mutex_lock(&rfcomm_mutex)
63#define rfcomm_unlock() mutex_unlock(&rfcomm_mutex) 63#define rfcomm_unlock() mutex_unlock(&rfcomm_mutex)
64 64
65static unsigned long rfcomm_event;
66 65
67static LIST_HEAD(session_list); 66static LIST_HEAD(session_list);
68 67
@@ -120,7 +119,6 @@ static inline void rfcomm_schedule(void)
120{ 119{
121 if (!rfcomm_thread) 120 if (!rfcomm_thread)
122 return; 121 return;
123 set_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
124 wake_up_process(rfcomm_thread); 122 wake_up_process(rfcomm_thread);
125} 123}
126 124
@@ -466,7 +464,6 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
466 464
467 switch (d->state) { 465 switch (d->state) {
468 case BT_CONNECT: 466 case BT_CONNECT:
469 case BT_CONFIG:
470 if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { 467 if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
471 set_bit(RFCOMM_AUTH_REJECT, &d->flags); 468 set_bit(RFCOMM_AUTH_REJECT, &d->flags);
472 rfcomm_schedule(); 469 rfcomm_schedule();
@@ -2038,19 +2035,18 @@ static int rfcomm_run(void *unused)
2038 2035
2039 rfcomm_add_listener(BDADDR_ANY); 2036 rfcomm_add_listener(BDADDR_ANY);
2040 2037
2041 while (!kthread_should_stop()) { 2038 while (1) {
2042 set_current_state(TASK_INTERRUPTIBLE); 2039 set_current_state(TASK_INTERRUPTIBLE);
2043 if (!test_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event)) { 2040
2044 /* No pending events. Let's sleep. 2041 if (kthread_should_stop())
2045 * Incoming connections and data will wake us up. */ 2042 break;
2046 schedule();
2047 }
2048 set_current_state(TASK_RUNNING);
2049 2043
2050 /* Process stuff */ 2044 /* Process stuff */
2051 clear_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
2052 rfcomm_process_sessions(); 2045 rfcomm_process_sessions();
2046
2047 schedule();
2053 } 2048 }
2049 __set_current_state(TASK_RUNNING);
2054 2050
2055 rfcomm_kill_listener(); 2051 rfcomm_kill_listener();
2056 2052
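The rfcomm_run() rework retires the RFCOMM_SCHED_WAKEUP bit in favor of the canonical kthread sleep idiom: set the task state to TASK_INTERRUPTIBLE before testing the wake condition, so a wake_up_process() that races with the check simply leaves the thread runnable and schedule() returns immediately instead of the event being lost. The idiom in isolation (do_pending_work() is a placeholder):

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);	/* before the test */

		if (kthread_should_stop())
			break;

		do_pending_work();	/* stands in for rfcomm_process_sessions() */

		schedule();	/* sleeps only if no wakeup arrived meanwhile */
	}
	__set_current_state(TASK_RUNNING);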
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 1b10727ce52..b02f0d47ab8 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -485,11 +485,6 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
485 485
486 lock_sock(sk); 486 lock_sock(sk);
487 487
488 if (sk->sk_state != BT_LISTEN) {
489 err = -EBADFD;
490 goto done;
491 }
492
493 if (sk->sk_type != SOCK_STREAM) { 488 if (sk->sk_type != SOCK_STREAM) {
494 err = -EINVAL; 489 err = -EINVAL;
495 goto done; 490 goto done;
@@ -501,19 +496,20 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
501 496
502 /* Wait for an incoming connection. (wake-one). */ 497 /* Wait for an incoming connection. (wake-one). */
503 add_wait_queue_exclusive(sk_sleep(sk), &wait); 498 add_wait_queue_exclusive(sk_sleep(sk), &wait);
504 while (!(nsk = bt_accept_dequeue(sk, newsock))) { 499 while (1) {
505 set_current_state(TASK_INTERRUPTIBLE); 500 set_current_state(TASK_INTERRUPTIBLE);
506 if (!timeo) { 501
507 err = -EAGAIN; 502 if (sk->sk_state != BT_LISTEN) {
503 err = -EBADFD;
508 break; 504 break;
509 } 505 }
510 506
511 release_sock(sk); 507 nsk = bt_accept_dequeue(sk, newsock);
512 timeo = schedule_timeout(timeo); 508 if (nsk)
513 lock_sock(sk); 509 break;
514 510
515 if (sk->sk_state != BT_LISTEN) { 511 if (!timeo) {
516 err = -EBADFD; 512 err = -EAGAIN;
517 break; 513 break;
518 } 514 }
519 515
@@ -521,8 +517,12 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
521 err = sock_intr_errno(timeo); 517 err = sock_intr_errno(timeo);
522 break; 518 break;
523 } 519 }
520
521 release_sock(sk);
522 timeo = schedule_timeout(timeo);
523 lock_sock(sk);
524 } 524 }
525 set_current_state(TASK_RUNNING); 525 __set_current_state(TASK_RUNNING);
526 remove_wait_queue(sk_sleep(sk), &wait); 526 remove_wait_queue(sk_sleep(sk), &wait);
527 527
528 if (err) 528 if (err)
@@ -679,7 +679,8 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
679{ 679{
680 struct sock *sk = sock->sk; 680 struct sock *sk = sock->sk;
681 struct bt_security sec; 681 struct bt_security sec;
682 int len, err = 0; 682 int err = 0;
683 size_t len;
683 u32 opt; 684 u32 opt;
684 685
685 BT_DBG("sk %p", sk); 686 BT_DBG("sk %p", sk);
@@ -741,7 +742,6 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
741static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) 742static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
742{ 743{
743 struct sock *sk = sock->sk; 744 struct sock *sk = sock->sk;
744 struct sock *l2cap_sk;
745 struct rfcomm_conninfo cinfo; 745 struct rfcomm_conninfo cinfo;
746 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn; 746 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
747 int len, err = 0; 747 int len, err = 0;
@@ -786,7 +786,6 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
786 break; 786 break;
787 } 787 }
788 788
789 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
790 789
791 memset(&cinfo, 0, sizeof(cinfo)); 790 memset(&cinfo, 0, sizeof(cinfo));
792 cinfo.hci_handle = conn->hcon->handle; 791 cinfo.hci_handle = conn->hcon->handle;
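Read straight down the new column, the accept loop in rfcomm_sock_accept() now re-validates the socket on every pass, because the state can leave BT_LISTEN while the lock is dropped for sleeping; the old one-shot check before the loop could miss that. Consolidated (locals as declared earlier in the function):

	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (sk->sk_state != BT_LISTEN) {	/* re-check after sleeping */
			err = -EBADFD;
			break;
		}

		nsk = bt_accept_dequeue(sk, newsock);
		if (nsk)
			break;

		if (!timeo) {			/* non-blocking or timed out */
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);		/* drop the lock only to sleep */
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

sco_sock_accept() below receives the same restructuring.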
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index cb4fb7837e5..d3d48b5b542 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -177,6 +177,7 @@ static int sco_connect(struct sock *sk)
177{ 177{
178 bdaddr_t *src = &bt_sk(sk)->src; 178 bdaddr_t *src = &bt_sk(sk)->src;
179 bdaddr_t *dst = &bt_sk(sk)->dst; 179 bdaddr_t *dst = &bt_sk(sk)->dst;
180 __u16 pkt_type = sco_pi(sk)->pkt_type;
180 struct sco_conn *conn; 181 struct sco_conn *conn;
181 struct hci_conn *hcon; 182 struct hci_conn *hcon;
182 struct hci_dev *hdev; 183 struct hci_dev *hdev;
@@ -192,10 +193,12 @@ static int sco_connect(struct sock *sk)
192 193
193 if (lmp_esco_capable(hdev) && !disable_esco) 194 if (lmp_esco_capable(hdev) && !disable_esco)
194 type = ESCO_LINK; 195 type = ESCO_LINK;
195 else 196 else {
196 type = SCO_LINK; 197 type = SCO_LINK;
198 pkt_type &= SCO_ESCO_MASK;
199 }
197 200
198 hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING); 201 hcon = hci_connect(hdev, type, pkt_type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
199 if (IS_ERR(hcon)) { 202 if (IS_ERR(hcon)) {
200 err = PTR_ERR(hcon); 203 err = PTR_ERR(hcon);
201 goto done; 204 goto done;
@@ -460,18 +463,22 @@ static int sco_sock_create(struct net *net, struct socket *sock, int protocol,
460 return 0; 463 return 0;
461} 464}
462 465
463static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) 466static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
464{ 467{
465 struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; 468 struct sockaddr_sco sa;
466 struct sock *sk = sock->sk; 469 struct sock *sk = sock->sk;
467 bdaddr_t *src = &sa->sco_bdaddr; 470 bdaddr_t *src = &sa.sco_bdaddr;
468 int err = 0; 471 int len, err = 0;
469 472
470 BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr)); 473 BT_DBG("sk %p %s", sk, batostr(&sa.sco_bdaddr));
471 474
472 if (!addr || addr->sa_family != AF_BLUETOOTH) 475 if (!addr || addr->sa_family != AF_BLUETOOTH)
473 return -EINVAL; 476 return -EINVAL;
474 477
478 memset(&sa, 0, sizeof(sa));
479 len = min_t(unsigned int, sizeof(sa), alen);
480 memcpy(&sa, addr, len);
481
475 lock_sock(sk); 482 lock_sock(sk);
476 483
477 if (sk->sk_state != BT_OPEN) { 484 if (sk->sk_state != BT_OPEN) {
@@ -485,7 +492,8 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
485 err = -EADDRINUSE; 492 err = -EADDRINUSE;
486 } else { 493 } else {
487 /* Save source address */ 494 /* Save source address */
488 bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr); 495 bacpy(&bt_sk(sk)->src, &sa.sco_bdaddr);
496 sco_pi(sk)->pkt_type = sa.sco_pkt_type;
489 sk->sk_state = BT_BOUND; 497 sk->sk_state = BT_BOUND;
490 } 498 }
491 499
@@ -498,27 +506,34 @@ done:
498 506
499static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) 507static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
500{ 508{
501 struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
502 struct sock *sk = sock->sk; 509 struct sock *sk = sock->sk;
503 int err = 0; 510 struct sockaddr_sco sa;
504 511 int len, err = 0;
505 512
506 BT_DBG("sk %p", sk); 513 BT_DBG("sk %p", sk);
507 514
508 if (alen < sizeof(struct sockaddr_sco) || 515 if (!addr || addr->sa_family != AF_BLUETOOTH)
509 addr->sa_family != AF_BLUETOOTH)
510 return -EINVAL; 516 return -EINVAL;
511 517
512 if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) 518 memset(&sa, 0, sizeof(sa));
513 return -EBADFD; 519 len = min_t(unsigned int, sizeof(sa), alen);
514 520 memcpy(&sa, addr, len);
515 if (sk->sk_type != SOCK_SEQPACKET)
516 return -EINVAL;
517 521
518 lock_sock(sk); 522 lock_sock(sk);
519 523
524 if (sk->sk_type != SOCK_SEQPACKET) {
525 err = -EINVAL;
526 goto done;
527 }
528
529 if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
530 err = -EBADFD;
531 goto done;
532 }
533
520 /* Set destination address and psm */ 534 /* Set destination address and psm */
521 bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr); 535 bacpy(&bt_sk(sk)->dst, &sa.sco_bdaddr);
536 sco_pi(sk)->pkt_type = sa.sco_pkt_type;
522 537
523 err = sco_connect(sk); 538 err = sco_connect(sk);
524 if (err) 539 if (err)
@@ -564,30 +579,26 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
564 579
565 lock_sock(sk); 580 lock_sock(sk);
566 581
567 if (sk->sk_state != BT_LISTEN) {
568 err = -EBADFD;
569 goto done;
570 }
571
572 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 582 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
573 583
574 BT_DBG("sk %p timeo %ld", sk, timeo); 584 BT_DBG("sk %p timeo %ld", sk, timeo);
575 585
576 /* Wait for an incoming connection. (wake-one). */ 586 /* Wait for an incoming connection. (wake-one). */
577 add_wait_queue_exclusive(sk_sleep(sk), &wait); 587 add_wait_queue_exclusive(sk_sleep(sk), &wait);
578 while (!(ch = bt_accept_dequeue(sk, newsock))) { 588 while (1) {
579 set_current_state(TASK_INTERRUPTIBLE); 589 set_current_state(TASK_INTERRUPTIBLE);
580 if (!timeo) { 590
581 err = -EAGAIN; 591 if (sk->sk_state != BT_LISTEN) {
592 err = -EBADFD;
582 break; 593 break;
583 } 594 }
584 595
585 release_sock(sk); 596 ch = bt_accept_dequeue(sk, newsock);
586 timeo = schedule_timeout(timeo); 597 if (ch)
587 lock_sock(sk); 598 break;
588 599
589 if (sk->sk_state != BT_LISTEN) { 600 if (!timeo) {
590 err = -EBADFD; 601 err = -EAGAIN;
591 break; 602 break;
592 } 603 }
593 604
@@ -595,8 +606,12 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
595 err = sock_intr_errno(timeo); 606 err = sock_intr_errno(timeo);
596 break; 607 break;
597 } 608 }
609
610 release_sock(sk);
611 timeo = schedule_timeout(timeo);
612 lock_sock(sk);
598 } 613 }
599 set_current_state(TASK_RUNNING); 614 __set_current_state(TASK_RUNNING);
600 remove_wait_queue(sk_sleep(sk), &wait); 615 remove_wait_queue(sk_sleep(sk), &wait);
601 616
602 if (err) 617 if (err)
@@ -625,6 +640,7 @@ static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int *len
625 bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst); 640 bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst);
626 else 641 else
627 bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src); 642 bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src);
643 sa->sco_pkt_type = sco_pi(sk)->pkt_type;
628 644
629 return 0; 645 return 0;
630} 646}
@@ -932,7 +948,7 @@ static int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
932 if (conn) 948 if (conn)
933 sco_conn_ready(conn); 949 sco_conn_ready(conn);
934 } else 950 } else
935 sco_conn_del(hcon, bt_err(status)); 951 sco_conn_del(hcon, bt_to_errno(status));
936 952
937 return 0; 953 return 0;
938} 954}
@@ -944,7 +960,7 @@ static int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
944 if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) 960 if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
945 return -EINVAL; 961 return -EINVAL;
946 962
947 sco_conn_del(hcon, bt_err(reason)); 963 sco_conn_del(hcon, bt_to_errno(reason));
948 964
949 return 0; 965 return 0;
950} 966}
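Both sco_sock_bind() and sco_sock_connect() switch from casting the caller's sockaddr pointer to copying it into a zeroed local struct, so a short address from userspace cannot cause reads past the buffer and unset fields read as zero. The shared pattern:

	struct sockaddr_sco sa;
	int len;

	memset(&sa, 0, sizeof(sa));			/* unset fields become zero */
	len = min_t(unsigned int, sizeof(sa), alen);	/* clamp to both sizes */
	memcpy(&sa, addr, len);

	bacpy(&bt_sk(sk)->dst, &sa.sco_bdaddr);
	sco_pi(sk)->pkt_type = sa.sco_pkt_type;		/* new per-socket field */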
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
new file mode 100644
index 00000000000..391888b88a9
--- /dev/null
+++ b/net/bluetooth/smp.c
@@ -0,0 +1,702 @@
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License version 2 as
7 published by the Free Software Foundation;
8
9 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
10 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
11 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
12 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
13 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17
18 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
19 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
20 SOFTWARE IS DISCLAIMED.
21*/
22
23#include <net/bluetooth/bluetooth.h>
24#include <net/bluetooth/hci_core.h>
25#include <net/bluetooth/l2cap.h>
26#include <net/bluetooth/smp.h>
27#include <linux/crypto.h>
28#include <linux/scatterlist.h>
29#include <crypto/b128ops.h>
30
31#define SMP_TIMEOUT 30000 /* 30 seconds */
32
33static inline void swap128(u8 src[16], u8 dst[16])
34{
35 int i;
36 for (i = 0; i < 16; i++)
37 dst[15 - i] = src[i];
38}
39
40static inline void swap56(u8 src[7], u8 dst[7])
41{
42 int i;
43 for (i = 0; i < 7; i++)
44 dst[6 - i] = src[i];
45}
46
47static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
48{
49 struct blkcipher_desc desc;
50 struct scatterlist sg;
51 int err, iv_len;
52 unsigned char iv[128];
53
54 if (tfm == NULL) {
55 BT_ERR("tfm %p", tfm);
56 return -EINVAL;
57 }
58
59 desc.tfm = tfm;
60 desc.flags = 0;
61
62 err = crypto_blkcipher_setkey(tfm, k, 16);
63 if (err) {
64 BT_ERR("cipher setkey failed: %d", err);
65 return err;
66 }
67
68 sg_init_one(&sg, r, 16);
69
70 iv_len = crypto_blkcipher_ivsize(tfm);
71 if (iv_len) {
72 memset(&iv, 0xff, iv_len);
73 crypto_blkcipher_set_iv(tfm, iv, iv_len);
74 }
75
76 err = crypto_blkcipher_encrypt(&desc, &sg, &sg, 16);
77 if (err)
78 BT_ERR("Encrypt data error %d", err);
79
80 return err;
81}
82
83static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16],
84 u8 preq[7], u8 pres[7], u8 _iat, bdaddr_t *ia,
85 u8 _rat, bdaddr_t *ra, u8 res[16])
86{
87 u8 p1[16], p2[16];
88 int err;
89
90 memset(p1, 0, 16);
91
92 /* p1 = pres || preq || _rat || _iat */
93 swap56(pres, p1);
94 swap56(preq, p1 + 7);
95 p1[14] = _rat;
96 p1[15] = _iat;
97
98 memset(p2, 0, 16);
99
100 /* p2 = padding || ia || ra */
101 baswap((bdaddr_t *) (p2 + 4), ia);
102 baswap((bdaddr_t *) (p2 + 10), ra);
103
104 /* res = r XOR p1 */
105 u128_xor((u128 *) res, (u128 *) r, (u128 *) p1);
106
107 /* res = e(k, res) */
108 err = smp_e(tfm, k, res);
109 if (err) {
110 BT_ERR("Encrypt data error");
111 return err;
112 }
113
114 /* res = res XOR p2 */
115 u128_xor((u128 *) res, (u128 *) res, (u128 *) p2);
116
117 /* res = e(k, res) */
118 err = smp_e(tfm, k, res);
119 if (err)
120 BT_ERR("Encrypt data error");
121
122 return err;
123}
124
125static int smp_s1(struct crypto_blkcipher *tfm, u8 k[16],
126 u8 r1[16], u8 r2[16], u8 _r[16])
127{
128 int err;
129
130 /* Only the 8 least significant octets of r1 and r2 are used */
131 memcpy(_r, r1 + 8, 8);
132 memcpy(_r + 8, r2 + 8, 8);
133
134 err = smp_e(tfm, k, _r);
135 if (err)
136 BT_ERR("Encrypt data error");
137
138 return err;
139}
140
141static int smp_rand(u8 *buf)
142{
143 get_random_bytes(buf, 16);
144
145 return 0;
146}
147
148static struct sk_buff *smp_build_cmd(struct l2cap_conn *conn, u8 code,
149 u16 dlen, void *data)
150{
151 struct sk_buff *skb;
152 struct l2cap_hdr *lh;
153 int len;
154
155 len = L2CAP_HDR_SIZE + sizeof(code) + dlen;
156
157 if (len > conn->mtu)
158 return NULL;
159
160 skb = bt_skb_alloc(len, GFP_ATOMIC);
161 if (!skb)
162 return NULL;
163
164 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
165 lh->len = cpu_to_le16(sizeof(code) + dlen);
166 lh->cid = cpu_to_le16(L2CAP_CID_SMP);
167
168 memcpy(skb_put(skb, sizeof(code)), &code, sizeof(code));
169
170 memcpy(skb_put(skb, dlen), data, dlen);
171
172 return skb;
173}
174
175static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
176{
177 struct sk_buff *skb = smp_build_cmd(conn, code, len, data);
178
179 BT_DBG("code 0x%2.2x", code);
180
181 if (!skb)
182 return;
183
184 hci_send_acl(conn->hcon, skb, 0);
185}
186
187static __u8 seclevel_to_authreq(__u8 level)
188{
189 switch (level) {
190 case BT_SECURITY_HIGH:
191 /* Right now we don't support bonding */
192 return SMP_AUTH_MITM;
193
194 default:
195 return SMP_AUTH_NONE;
196 }
197}
198
199static void build_pairing_cmd(struct l2cap_conn *conn,
200 struct smp_cmd_pairing *req,
201 struct smp_cmd_pairing *rsp,
202 __u8 authreq)
203{
204 u8 dist_keys;
205
206 dist_keys = 0;
207 if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->flags)) {
208 dist_keys = SMP_DIST_ENC_KEY | SMP_DIST_ID_KEY | SMP_DIST_SIGN;
209 authreq |= SMP_AUTH_BONDING;
210 }
211
212 if (rsp == NULL) {
213 req->io_capability = conn->hcon->io_capability;
214 req->oob_flag = SMP_OOB_NOT_PRESENT;
215 req->max_key_size = SMP_MAX_ENC_KEY_SIZE;
216 req->init_key_dist = dist_keys;
217 req->resp_key_dist = dist_keys;
218 req->auth_req = authreq;
219 return;
220 }
221
222 rsp->io_capability = conn->hcon->io_capability;
223 rsp->oob_flag = SMP_OOB_NOT_PRESENT;
224 rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
225 rsp->init_key_dist = req->init_key_dist & dist_keys;
226 rsp->resp_key_dist = req->resp_key_dist & dist_keys;
227 rsp->auth_req = authreq;
228}
229
230static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
231{
232 if ((max_key_size > SMP_MAX_ENC_KEY_SIZE) ||
233 (max_key_size < SMP_MIN_ENC_KEY_SIZE))
234 return SMP_ENC_KEY_SIZE;
235
236 conn->smp_key_size = max_key_size;
237
238 return 0;
239}
240
241static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
242{
243 struct smp_cmd_pairing rsp, *req = (void *) skb->data;
244 u8 key_size;
245
246 BT_DBG("conn %p", conn);
247
248 conn->preq[0] = SMP_CMD_PAIRING_REQ;
249 memcpy(&conn->preq[1], req, sizeof(*req));
250 skb_pull(skb, sizeof(*req));
251
252 if (req->oob_flag)
253 return SMP_OOB_NOT_AVAIL;
254
255 /* We didn't start the pairing, so no requirements */
256 build_pairing_cmd(conn, req, &rsp, SMP_AUTH_NONE);
257
258 key_size = min(req->max_key_size, rsp.max_key_size);
259 if (check_enc_key_size(conn, key_size))
260 return SMP_ENC_KEY_SIZE;
261
262 /* Just works */
263 memset(conn->tk, 0, sizeof(conn->tk));
264
265 conn->prsp[0] = SMP_CMD_PAIRING_RSP;
266 memcpy(&conn->prsp[1], &rsp, sizeof(rsp));
267
268 smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp);
269
270 mod_timer(&conn->security_timer, jiffies +
271 msecs_to_jiffies(SMP_TIMEOUT));
272
273 return 0;
274}
275
276static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
277{
278 struct smp_cmd_pairing *req, *rsp = (void *) skb->data;
279 struct smp_cmd_pairing_confirm cp;
280 struct crypto_blkcipher *tfm = conn->hcon->hdev->tfm;
281 int ret;
282 u8 res[16], key_size;
283
284 BT_DBG("conn %p", conn);
285
286 skb_pull(skb, sizeof(*rsp));
287
288 req = (void *) &conn->preq[1];
289
290 key_size = min(req->max_key_size, rsp->max_key_size);
291 if (check_enc_key_size(conn, key_size))
292 return SMP_ENC_KEY_SIZE;
293
294 if (rsp->oob_flag)
295 return SMP_OOB_NOT_AVAIL;
296
297 /* Just works */
298 memset(conn->tk, 0, sizeof(conn->tk));
299
300 conn->prsp[0] = SMP_CMD_PAIRING_RSP;
301 memcpy(&conn->prsp[1], rsp, sizeof(*rsp));
302
303 ret = smp_rand(conn->prnd);
304 if (ret)
305 return SMP_UNSPECIFIED;
306
307 ret = smp_c1(tfm, conn->tk, conn->prnd, conn->preq, conn->prsp, 0,
308 conn->src, conn->hcon->dst_type, conn->dst, res);
309 if (ret)
310 return SMP_UNSPECIFIED;
311
312 swap128(res, cp.confirm_val);
313
314 smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
315
316 return 0;
317}
318
319static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
320{
321 struct crypto_blkcipher *tfm = conn->hcon->hdev->tfm;
322
323 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
324
325 memcpy(conn->pcnf, skb->data, sizeof(conn->pcnf));
326 skb_pull(skb, sizeof(conn->pcnf));
327
328 if (conn->hcon->out) {
329 u8 random[16];
330
331 swap128(conn->prnd, random);
332 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(random),
333 random);
334 } else {
335 struct smp_cmd_pairing_confirm cp;
336 int ret;
337 u8 res[16];
338
339 ret = smp_rand(conn->prnd);
340 if (ret)
341 return SMP_UNSPECIFIED;
342
343 ret = smp_c1(tfm, conn->tk, conn->prnd, conn->preq, conn->prsp,
344 conn->hcon->dst_type, conn->dst,
345 0, conn->src, res);
346 if (ret)
347 return SMP_CONFIRM_FAILED;
348
349 swap128(res, cp.confirm_val);
350
351 smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
352 }
353
354 mod_timer(&conn->security_timer, jiffies +
355 msecs_to_jiffies(SMP_TIMEOUT));
356
357 return 0;
358}
359
360static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
361{
362 struct hci_conn *hcon = conn->hcon;
363 struct crypto_blkcipher *tfm = hcon->hdev->tfm;
364 int ret;
365 u8 key[16], res[16], random[16], confirm[16];
366
367 swap128(skb->data, random);
368 skb_pull(skb, sizeof(random));
369
370 if (conn->hcon->out)
371 ret = smp_c1(tfm, conn->tk, random, conn->preq, conn->prsp, 0,
372 conn->src, conn->hcon->dst_type, conn->dst,
373 res);
374 else
375 ret = smp_c1(tfm, conn->tk, random, conn->preq, conn->prsp,
376 conn->hcon->dst_type, conn->dst, 0, conn->src,
377 res);
378 if (ret)
379 return SMP_UNSPECIFIED;
380
381 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
382
383 swap128(res, confirm);
384
385 if (memcmp(conn->pcnf, confirm, sizeof(conn->pcnf)) != 0) {
386 BT_ERR("Pairing failed (confirmation values mismatch)");
387 return SMP_CONFIRM_FAILED;
388 }
389
390 if (conn->hcon->out) {
391 u8 stk[16], rand[8];
392 __le16 ediv;
393
394 memset(rand, 0, sizeof(rand));
395 ediv = 0;
396
397 smp_s1(tfm, conn->tk, random, conn->prnd, key);
398 swap128(key, stk);
399
400 memset(stk + conn->smp_key_size, 0,
401 SMP_MAX_ENC_KEY_SIZE - conn->smp_key_size);
402
403 hci_le_start_enc(hcon, ediv, rand, stk);
404 hcon->enc_key_size = conn->smp_key_size;
405 } else {
406 u8 stk[16], r[16], rand[8];
407 __le16 ediv;
408
409 memset(rand, 0, sizeof(rand));
410 ediv = 0;
411
412 swap128(conn->prnd, r);
413 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(r), r);
414
415 smp_s1(tfm, conn->tk, conn->prnd, random, key);
416 swap128(key, stk);
417
418 memset(stk + conn->smp_key_size, 0,
419 SMP_MAX_ENC_KEY_SIZE - conn->smp_key_size);
420
421 hci_add_ltk(conn->hcon->hdev, 0, conn->dst, conn->smp_key_size,
422 ediv, rand, stk);
423 }
424
425 return 0;
426}
427
428static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
429{
430 struct smp_cmd_security_req *rp = (void *) skb->data;
431 struct smp_cmd_pairing cp;
432 struct hci_conn *hcon = conn->hcon;
433
434 BT_DBG("conn %p", conn);
435
436 if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
437 return 0;
438
439 skb_pull(skb, sizeof(*rp));
440
441 memset(&cp, 0, sizeof(cp));
442 build_pairing_cmd(conn, &cp, NULL, rp->auth_req);
443
444 conn->preq[0] = SMP_CMD_PAIRING_REQ;
445 memcpy(&conn->preq[1], &cp, sizeof(cp));
446
447 smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
448
449 mod_timer(&conn->security_timer, jiffies +
450 msecs_to_jiffies(SMP_TIMEOUT));
451
452 set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
453
454 return 0;
455}
456
457int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
458{
459 struct hci_conn *hcon = conn->hcon;
460 __u8 authreq;
461
462 BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level);
463
464 if (!lmp_host_le_capable(hcon->hdev))
465 return 1;
466
467 if (IS_ERR(hcon->hdev->tfm))
468 return 1;
469
470 if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
471 return 0;
472
473 if (sec_level == BT_SECURITY_LOW)
474 return 1;
475
476 if (hcon->sec_level >= sec_level)
477 return 1;
478
479 authreq = seclevel_to_authreq(sec_level);
480
481 if (hcon->link_mode & HCI_LM_MASTER) {
482 struct smp_cmd_pairing cp;
483 struct link_key *key;
484
485 key = hci_find_link_key_type(hcon->hdev, conn->dst,
486 HCI_LK_SMP_LTK);
487 if (key) {
488 struct key_master_id *master = (void *) key->data;
489
490 hci_le_start_enc(hcon, master->ediv, master->rand,
491 key->val);
492 hcon->enc_key_size = key->pin_len;
493
494 goto done;
495 }
496
497 build_pairing_cmd(conn, &cp, NULL, authreq);
498 conn->preq[0] = SMP_CMD_PAIRING_REQ;
499 memcpy(&conn->preq[1], &cp, sizeof(cp));
500
501 mod_timer(&conn->security_timer, jiffies +
502 msecs_to_jiffies(SMP_TIMEOUT));
503
504 smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
505 } else {
506 struct smp_cmd_security_req cp;
507 cp.auth_req = authreq;
508 smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
509 }
510
511done:
512 hcon->pending_sec_level = sec_level;
513 set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
514
515 return 0;
516}
517
518static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
519{
520 struct smp_cmd_encrypt_info *rp = (void *) skb->data;
521
522 skb_pull(skb, sizeof(*rp));
523
524 memcpy(conn->tk, rp->ltk, sizeof(conn->tk));
525
526 return 0;
527}
528
529static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
530{
531 struct smp_cmd_master_ident *rp = (void *) skb->data;
532
533 skb_pull(skb, sizeof(*rp));
534
535 hci_add_ltk(conn->hcon->hdev, 1, conn->src, conn->smp_key_size,
536 rp->ediv, rp->rand, conn->tk);
537
538 smp_distribute_keys(conn, 1);
539
540 return 0;
541}
542
543int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
544{
545 __u8 code = skb->data[0];
546 __u8 reason;
547 int err = 0;
548
549 if (!lmp_host_le_capable(conn->hcon->hdev)) {
550 err = -ENOTSUPP;
551 reason = SMP_PAIRING_NOTSUPP;
552 goto done;
553 }
554
555 if (IS_ERR(conn->hcon->hdev->tfm)) {
556 err = PTR_ERR(conn->hcon->hdev->tfm);
557 reason = SMP_PAIRING_NOTSUPP;
558 goto done;
559 }
560
561 skb_pull(skb, sizeof(code));
562
563 switch (code) {
564 case SMP_CMD_PAIRING_REQ:
565 reason = smp_cmd_pairing_req(conn, skb);
566 break;
567
568 case SMP_CMD_PAIRING_FAIL:
569 reason = 0;
570 err = -EPERM;
571 break;
572
573 case SMP_CMD_PAIRING_RSP:
574 reason = smp_cmd_pairing_rsp(conn, skb);
575 break;
576
577 case SMP_CMD_SECURITY_REQ:
578 reason = smp_cmd_security_req(conn, skb);
579 break;
580
581 case SMP_CMD_PAIRING_CONFIRM:
582 reason = smp_cmd_pairing_confirm(conn, skb);
583 break;
584
585 case SMP_CMD_PAIRING_RANDOM:
586 reason = smp_cmd_pairing_random(conn, skb);
587 break;
588
589 case SMP_CMD_ENCRYPT_INFO:
590 reason = smp_cmd_encrypt_info(conn, skb);
591 break;
592
593 case SMP_CMD_MASTER_IDENT:
594 reason = smp_cmd_master_ident(conn, skb);
595 break;
596
597 case SMP_CMD_IDENT_INFO:
598 case SMP_CMD_IDENT_ADDR_INFO:
599 case SMP_CMD_SIGN_INFO:
600 /* Just ignored */
601 reason = 0;
602 break;
603
604 default:
605 BT_DBG("Unknown command code 0x%2.2x", code);
606
607 reason = SMP_CMD_NOTSUPP;
608 err = -EOPNOTSUPP;
609 goto done;
610 }
611
612done:
613 if (reason)
614 smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
615 &reason);
616
617 kfree_skb(skb);
618 return err;
619}
620
621int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
622{
623 struct smp_cmd_pairing *req, *rsp;
624 __u8 *keydist;
625
626 BT_DBG("conn %p force %d", conn, force);
627
628 if (IS_ERR(conn->hcon->hdev->tfm))
629 return PTR_ERR(conn->hcon->hdev->tfm);
630
631 rsp = (void *) &conn->prsp[1];
632
633 /* The responder sends its keys first */
634 if (!force && conn->hcon->out && (rsp->resp_key_dist & 0x07))
635 return 0;
636
637 req = (void *) &conn->preq[1];
638
639 if (conn->hcon->out) {
640 keydist = &rsp->init_key_dist;
641 *keydist &= req->init_key_dist;
642 } else {
643 keydist = &rsp->resp_key_dist;
644 *keydist &= req->resp_key_dist;
645 }
646
647
648 BT_DBG("keydist 0x%x", *keydist);
649
650 if (*keydist & SMP_DIST_ENC_KEY) {
651 struct smp_cmd_encrypt_info enc;
652 struct smp_cmd_master_ident ident;
653 __le16 ediv;
654
655 get_random_bytes(enc.ltk, sizeof(enc.ltk));
656 get_random_bytes(&ediv, sizeof(ediv));
657 get_random_bytes(ident.rand, sizeof(ident.rand));
658
659 smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc);
660
661 hci_add_ltk(conn->hcon->hdev, 1, conn->dst, conn->smp_key_size,
662 ediv, ident.rand, enc.ltk);
663
664 ident.ediv = cpu_to_le16(ediv);
665
666 smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident);
667
668 *keydist &= ~SMP_DIST_ENC_KEY;
669 }
670
671 if (*keydist & SMP_DIST_ID_KEY) {
672 struct smp_cmd_ident_addr_info addrinfo;
673 struct smp_cmd_ident_info idinfo;
674
675 /* Send a dummy key */
676 get_random_bytes(idinfo.irk, sizeof(idinfo.irk));
677
678 smp_send_cmd(conn, SMP_CMD_IDENT_INFO, sizeof(idinfo), &idinfo);
679
680 /* Just public address */
681 memset(&addrinfo, 0, sizeof(addrinfo));
682 bacpy(&addrinfo.bdaddr, conn->src);
683
684 smp_send_cmd(conn, SMP_CMD_IDENT_ADDR_INFO, sizeof(addrinfo),
685 &addrinfo);
686
687 *keydist &= ~SMP_DIST_ID_KEY;
688 }
689
690 if (*keydist & SMP_DIST_SIGN) {
691 struct smp_cmd_sign_info sign;
692
693 /* Send a dummy key */
694 get_random_bytes(sign.csrk, sizeof(sign.csrk));
695
696 smp_send_cmd(conn, SMP_CMD_SIGN_INFO, sizeof(sign), &sign);
697
698 *keydist &= ~SMP_DIST_SIGN;
699 }
700
701 return 0;
702}
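The helpers above compose as in the SMP confirm calculation: c1(k, r, ...) = e(k, e(k, r XOR p1) XOR p2), where p1 packs the pairing request/response plus the two address types and p2 packs the two device addresses. A sketch of the initiator-side use, as in smp_cmd_pairing_rsp():

	struct smp_cmd_pairing_confirm cp;
	u8 res[16];

	if (smp_rand(conn->prnd))		/* fresh 128-bit random r */
		return SMP_UNSPECIFIED;

	if (smp_c1(tfm, conn->tk, conn->prnd, conn->preq, conn->prsp,
		   0, conn->src, conn->hcon->dst_type, conn->dst, res))
		return SMP_UNSPECIFIED;

	swap128(res, cp.confirm_val);		/* byte-swap into SMP wire order */
	smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);

Only Just Works pairing is implemented here, so conn->tk stays all zeroes and the confirm exchange binds the random values and pairing parameters rather than a user-verified key.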
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 32b8f9f7f79..dac6a214746 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -38,16 +38,17 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
38 } 38 }
39#endif 39#endif
40 40
41 u64_stats_update_begin(&brstats->syncp);
42 brstats->tx_packets++;
43 brstats->tx_bytes += skb->len;
44 u64_stats_update_end(&brstats->syncp);
45
46 BR_INPUT_SKB_CB(skb)->brdev = dev; 41 BR_INPUT_SKB_CB(skb)->brdev = dev;
47 42
48 skb_reset_mac_header(skb); 43 skb_reset_mac_header(skb);
49 skb_pull(skb, ETH_HLEN); 44 skb_pull(skb, ETH_HLEN);
50 45
46 u64_stats_update_begin(&brstats->syncp);
47 brstats->tx_packets++;
48 /* Exclude ETH_HLEN from byte stats for consistency with Rx chain */
49 brstats->tx_bytes += skb->len;
50 u64_stats_update_end(&brstats->syncp);
51
51 rcu_read_lock(); 52 rcu_read_lock();
52 if (is_broadcast_ether_addr(dest)) 53 if (is_broadcast_ether_addr(dest))
53 br_flood_deliver(br, skb); 54 br_flood_deliver(br, skb);
@@ -91,7 +92,6 @@ static int br_dev_open(struct net_device *dev)
91{ 92{
92 struct net_bridge *br = netdev_priv(dev); 93 struct net_bridge *br = netdev_priv(dev);
93 94
94 netif_carrier_off(dev);
95 netdev_update_features(dev); 95 netdev_update_features(dev);
96 netif_start_queue(dev); 96 netif_start_queue(dev);
97 br_stp_enable_bridge(br); 97 br_stp_enable_bridge(br);
@@ -108,8 +108,6 @@ static int br_dev_stop(struct net_device *dev)
108{ 108{
109 struct net_bridge *br = netdev_priv(dev); 109 struct net_bridge *br = netdev_priv(dev);
110 110
111 netif_carrier_off(dev);
112
113 br_stp_disable_bridge(br); 111 br_stp_disable_bridge(br);
114 br_multicast_stop(br); 112 br_multicast_stop(br);
115 113
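The transmit counters move below skb_pull(skb, ETH_HLEN), so tx_bytes now excludes the Ethernet header, matching the receive side. The begin/end pair is the standard per-cpu u64_stats seqcount, which lets 32-bit readers detect and retry a torn 64-bit read (sketch; brstats is the per-cpu bridge stats pointer obtained earlier in br_dev_xmit):

	u64_stats_update_begin(&brstats->syncp);	/* writer section */
	brstats->tx_packets++;
	brstats->tx_bytes += skb->len;		/* post-pull: no ETH_HLEN */
	u64_stats_update_end(&brstats->syncp);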
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 1bacca4cb67..eae6a4e9cbf 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -161,9 +161,10 @@ static void del_nbp(struct net_bridge_port *p)
161 call_rcu(&p->rcu, destroy_nbp_rcu); 161 call_rcu(&p->rcu, destroy_nbp_rcu);
162} 162}
163 163
164/* called with RTNL */ 164/* Delete bridge device */
165static void del_br(struct net_bridge *br, struct list_head *head) 165void br_dev_delete(struct net_device *dev, struct list_head *head)
166{ 166{
167 struct net_bridge *br = netdev_priv(dev);
167 struct net_bridge_port *p, *n; 168 struct net_bridge_port *p, *n;
168 169
169 list_for_each_entry_safe(p, n, &br->port_list, list) { 170 list_for_each_entry_safe(p, n, &br->port_list, list) {
@@ -231,6 +232,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
231int br_add_bridge(struct net *net, const char *name) 232int br_add_bridge(struct net *net, const char *name)
232{ 233{
233 struct net_device *dev; 234 struct net_device *dev;
235 int res;
234 236
235 dev = alloc_netdev(sizeof(struct net_bridge), name, 237 dev = alloc_netdev(sizeof(struct net_bridge), name,
236 br_dev_setup); 238 br_dev_setup);
@@ -239,8 +241,12 @@ int br_add_bridge(struct net *net, const char *name)
239 return -ENOMEM; 241 return -ENOMEM;
240 242
241 dev_net_set(dev, net); 243 dev_net_set(dev, net);
244 dev->rtnl_link_ops = &br_link_ops;
242 245
243 return register_netdev(dev); 246 res = register_netdev(dev);
247 if (res)
248 free_netdev(dev);
249 return res;
244} 250}
245 251
246int br_del_bridge(struct net *net, const char *name) 252int br_del_bridge(struct net *net, const char *name)
@@ -264,7 +270,7 @@ int br_del_bridge(struct net *net, const char *name)
264 } 270 }
265 271
266 else 272 else
267 del_br(netdev_priv(dev), NULL); 273 br_dev_delete(dev, NULL);
268 274
269 rtnl_unlock(); 275 rtnl_unlock();
270 return ret; 276 return ret;
@@ -441,7 +447,7 @@ void __net_exit br_net_exit(struct net *net)
441 rtnl_lock(); 447 rtnl_lock();
442 for_each_netdev(net, dev) 448 for_each_netdev(net, dev)
443 if (dev->priv_flags & IFF_EBRIDGE) 449 if (dev->priv_flags & IFF_EBRIDGE)
444 del_br(netdev_priv(dev), &list); 450 br_dev_delete(dev, &list);
445 451
446 unregister_netdevice_many(&list); 452 unregister_netdevice_many(&list);
447 rtnl_unlock(); 453 rtnl_unlock();
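br_add_bridge() previously returned register_netdev()'s error directly, leaking the net_device on failure. The corrected path, consolidated:

	dev = alloc_netdev(sizeof(struct net_bridge), name, br_dev_setup);
	if (!dev)
		return -ENOMEM;

	dev_net_set(dev, net);
	dev->rtnl_link_ops = &br_link_ops;	/* allows rtnetlink deletion too */

	res = register_netdev(dev);
	if (res)
		free_netdev(dev);		/* undo alloc_netdev() on failure */
	return res;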
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 2d85ca7111d..e78269d798c 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -241,7 +241,6 @@ static void br_multicast_group_expired(unsigned long data)
241 hlist_del_rcu(&mp->hlist[mdb->ver]); 241 hlist_del_rcu(&mp->hlist[mdb->ver]);
242 mdb->size--; 242 mdb->size--;
243 243
244 del_timer(&mp->query_timer);
245 call_rcu_bh(&mp->rcu, br_multicast_free_group); 244 call_rcu_bh(&mp->rcu, br_multicast_free_group);
246 245
247out: 246out:
@@ -271,7 +270,6 @@ static void br_multicast_del_pg(struct net_bridge *br,
271 rcu_assign_pointer(*pp, p->next); 270 rcu_assign_pointer(*pp, p->next);
272 hlist_del_init(&p->mglist); 271 hlist_del_init(&p->mglist);
273 del_timer(&p->timer); 272 del_timer(&p->timer);
274 del_timer(&p->query_timer);
275 call_rcu_bh(&p->rcu, br_multicast_free_pg); 273 call_rcu_bh(&p->rcu, br_multicast_free_pg);
276 274
277 if (!mp->ports && !mp->mglist && 275 if (!mp->ports && !mp->mglist &&
@@ -446,8 +444,11 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
446 ip6h->nexthdr = IPPROTO_HOPOPTS; 444 ip6h->nexthdr = IPPROTO_HOPOPTS;
447 ip6h->hop_limit = 1; 445 ip6h->hop_limit = 1;
448 ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); 446 ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
449 ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0, 447 if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
450 &ip6h->saddr); 448 &ip6h->saddr)) {
449 kfree_skb(skb);
450 return NULL;
451 }
451 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); 452 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
452 453
453 hopopt = (u8 *)(ip6h + 1); 454 hopopt = (u8 *)(ip6h + 1);
@@ -504,74 +505,6 @@ static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
504 return NULL; 505 return NULL;
505} 506}
506 507
507static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp)
508{
509 struct net_bridge *br = mp->br;
510 struct sk_buff *skb;
511
512 skb = br_multicast_alloc_query(br, &mp->addr);
513 if (!skb)
514 goto timer;
515
516 netif_rx(skb);
517
518timer:
519 if (++mp->queries_sent < br->multicast_last_member_count)
520 mod_timer(&mp->query_timer,
521 jiffies + br->multicast_last_member_interval);
522}
523
524static void br_multicast_group_query_expired(unsigned long data)
525{
526 struct net_bridge_mdb_entry *mp = (void *)data;
527 struct net_bridge *br = mp->br;
528
529 spin_lock(&br->multicast_lock);
530 if (!netif_running(br->dev) || !mp->mglist ||
531 mp->queries_sent >= br->multicast_last_member_count)
532 goto out;
533
534 br_multicast_send_group_query(mp);
535
536out:
537 spin_unlock(&br->multicast_lock);
538}
539
540static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg)
541{
542 struct net_bridge_port *port = pg->port;
543 struct net_bridge *br = port->br;
544 struct sk_buff *skb;
545
546 skb = br_multicast_alloc_query(br, &pg->addr);
547 if (!skb)
548 goto timer;
549
550 br_deliver(port, skb);
551
552timer:
553 if (++pg->queries_sent < br->multicast_last_member_count)
554 mod_timer(&pg->query_timer,
555 jiffies + br->multicast_last_member_interval);
556}
557
558static void br_multicast_port_group_query_expired(unsigned long data)
559{
560 struct net_bridge_port_group *pg = (void *)data;
561 struct net_bridge_port *port = pg->port;
562 struct net_bridge *br = port->br;
563
564 spin_lock(&br->multicast_lock);
565 if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
566 pg->queries_sent >= br->multicast_last_member_count)
567 goto out;
568
569 br_multicast_send_port_group_query(pg);
570
571out:
572 spin_unlock(&br->multicast_lock);
573}
574
575static struct net_bridge_mdb_entry *br_multicast_get_group( 508static struct net_bridge_mdb_entry *br_multicast_get_group(
576 struct net_bridge *br, struct net_bridge_port *port, 509 struct net_bridge *br, struct net_bridge_port *port,
577 struct br_ip *group, int hash) 510 struct br_ip *group, int hash)
@@ -687,8 +620,6 @@ rehash:
687 mp->addr = *group; 620 mp->addr = *group;
688 setup_timer(&mp->timer, br_multicast_group_expired, 621 setup_timer(&mp->timer, br_multicast_group_expired,
689 (unsigned long)mp); 622 (unsigned long)mp);
690 setup_timer(&mp->query_timer, br_multicast_group_query_expired,
691 (unsigned long)mp);
692 623
693 hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]); 624 hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
694 mdb->size++; 625 mdb->size++;
@@ -743,8 +674,6 @@ static int br_multicast_add_group(struct net_bridge *br,
743 hlist_add_head(&p->mglist, &port->mglist); 674 hlist_add_head(&p->mglist, &port->mglist);
744 setup_timer(&p->timer, br_multicast_port_group_expired, 675 setup_timer(&p->timer, br_multicast_port_group_expired,
745 (unsigned long)p); 676 (unsigned long)p);
746 setup_timer(&p->query_timer, br_multicast_port_group_query_expired,
747 (unsigned long)p);
748 677
749 rcu_assign_pointer(*pp, p); 678 rcu_assign_pointer(*pp, p);
750 679
@@ -1288,9 +1217,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
1288 time_after(mp->timer.expires, time) : 1217 time_after(mp->timer.expires, time) :
1289 try_to_del_timer_sync(&mp->timer) >= 0)) { 1218 try_to_del_timer_sync(&mp->timer) >= 0)) {
1290 mod_timer(&mp->timer, time); 1219 mod_timer(&mp->timer, time);
1291
1292 mp->queries_sent = 0;
1293 mod_timer(&mp->query_timer, now);
1294 } 1220 }
1295 1221
1296 goto out; 1222 goto out;
@@ -1307,9 +1233,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
1307 time_after(p->timer.expires, time) : 1233 time_after(p->timer.expires, time) :
1308 try_to_del_timer_sync(&p->timer) >= 0)) { 1234 try_to_del_timer_sync(&p->timer) >= 0)) {
1309 mod_timer(&p->timer, time); 1235 mod_timer(&p->timer, time);
1310
1311 p->queries_sent = 0;
1312 mod_timer(&p->query_timer, now);
1313 } 1236 }
1314 1237
1315 break; 1238 break;
@@ -1456,7 +1379,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1456{ 1379{
1457 struct sk_buff *skb2; 1380 struct sk_buff *skb2;
1458 const struct ipv6hdr *ip6h; 1381 const struct ipv6hdr *ip6h;
1459 struct icmp6hdr *icmp6h; 1382 u8 icmp6_type;
1460 u8 nexthdr; 1383 u8 nexthdr;
1461 unsigned len; 1384 unsigned len;
1462 int offset; 1385 int offset;
@@ -1502,9 +1425,9 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1502 __skb_pull(skb2, offset); 1425 __skb_pull(skb2, offset);
1503 skb_reset_transport_header(skb2); 1426 skb_reset_transport_header(skb2);
1504 1427
1505 icmp6h = icmp6_hdr(skb2); 1428 icmp6_type = icmp6_hdr(skb2)->icmp6_type;
1506 1429
1507 switch (icmp6h->icmp6_type) { 1430 switch (icmp6_type) {
1508 case ICMPV6_MGM_QUERY: 1431 case ICMPV6_MGM_QUERY:
1509 case ICMPV6_MGM_REPORT: 1432 case ICMPV6_MGM_REPORT:
1510 case ICMPV6_MGM_REDUCTION: 1433 case ICMPV6_MGM_REDUCTION:
@@ -1520,16 +1443,23 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1520 err = pskb_trim_rcsum(skb2, len); 1443 err = pskb_trim_rcsum(skb2, len);
1521 if (err) 1444 if (err)
1522 goto out; 1445 goto out;
1446 err = -EINVAL;
1523 } 1447 }
1524 1448
1449 ip6h = ipv6_hdr(skb2);
1450
1525 switch (skb2->ip_summed) { 1451 switch (skb2->ip_summed) {
1526 case CHECKSUM_COMPLETE: 1452 case CHECKSUM_COMPLETE:
1527 if (!csum_fold(skb2->csum)) 1453 if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb2->len,
1454 IPPROTO_ICMPV6, skb2->csum))
1528 break; 1455 break;
1529 /*FALLTHROUGH*/ 1456 /*FALLTHROUGH*/
1530 case CHECKSUM_NONE: 1457 case CHECKSUM_NONE:
1531 skb2->csum = 0; 1458 skb2->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
1532 if (skb_checksum_complete(skb2)) 1459 &ip6h->daddr,
1460 skb2->len,
1461 IPPROTO_ICMPV6, 0));
1462 if (__skb_checksum_complete(skb2))
1533 goto out; 1463 goto out;
1534 } 1464 }
1535 1465
@@ -1537,7 +1467,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1537 1467
1538 BR_INPUT_SKB_CB(skb)->igmp = 1; 1468 BR_INPUT_SKB_CB(skb)->igmp = 1;
1539 1469
1540 switch (icmp6h->icmp6_type) { 1470 switch (icmp6_type) {
1541 case ICMPV6_MGM_REPORT: 1471 case ICMPV6_MGM_REPORT:
1542 { 1472 {
1543 struct mld_msg *mld; 1473 struct mld_msg *mld;
@@ -1668,7 +1598,6 @@ void br_multicast_stop(struct net_bridge *br)
1668 hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i], 1598 hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
1669 hlist[ver]) { 1599 hlist[ver]) {
1670 del_timer(&mp->timer); 1600 del_timer(&mp->timer);
1671 del_timer(&mp->query_timer);
1672 call_rcu_bh(&mp->rcu, br_multicast_free_group); 1601 call_rcu_bh(&mp->rcu, br_multicast_free_group);
1673 } 1602 }
1674 } 1603 }
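The old MLD receive path folded skb->csum as if the checksum covered only the ICMPv6 payload, but ICMPv6 checksums include an IPv6 pseudo-header. Consolidated, the corrected validation (goto out is the function's drop path):

	ip6h = ipv6_hdr(skb2);

	switch (skb2->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb2->len,
				     IPPROTO_ICMPV6, skb2->csum))
			break;			/* device checksum verifies */
		/* fall through: treat as unverified */
	case CHECKSUM_NONE:
		skb2->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
							  &ip6h->daddr,
							  skb2->len,
							  IPPROTO_ICMPV6, 0));
		if (__skb_checksum_complete(skb2))
			goto out;		/* bad checksum: drop */
	}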
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 56149ec36d7..3dc7f5446a9 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -343,24 +343,26 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
343static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb) 343static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
344{ 344{
345 struct nf_bridge_info *nf_bridge = skb->nf_bridge; 345 struct nf_bridge_info *nf_bridge = skb->nf_bridge;
346 struct neighbour *neigh;
346 struct dst_entry *dst; 347 struct dst_entry *dst;
347 348
348 skb->dev = bridge_parent(skb->dev); 349 skb->dev = bridge_parent(skb->dev);
349 if (!skb->dev) 350 if (!skb->dev)
350 goto free_skb; 351 goto free_skb;
351 dst = skb_dst(skb); 352 dst = skb_dst(skb);
353 neigh = dst_get_neighbour(dst);
352 if (dst->hh) { 354 if (dst->hh) {
353 neigh_hh_bridge(dst->hh, skb); 355 neigh_hh_bridge(dst->hh, skb);
354 skb->dev = nf_bridge->physindev; 356 skb->dev = nf_bridge->physindev;
355 return br_handle_frame_finish(skb); 357 return br_handle_frame_finish(skb);
356 } else if (dst->neighbour) { 358 } else if (neigh) {
357 /* the neighbour function below overwrites the complete 359 /* the neighbour function below overwrites the complete
358 * MAC header, so we save the Ethernet source address and 360 * MAC header, so we save the Ethernet source address and
359 * protocol number. */ 361 * protocol number. */
360 skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN), skb->nf_bridge->data, ETH_HLEN-ETH_ALEN); 362 skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN), skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
361 /* tell br_dev_xmit to continue with forwarding */ 363 /* tell br_dev_xmit to continue with forwarding */
362 nf_bridge->mask |= BRNF_BRIDGED_DNAT; 364 nf_bridge->mask |= BRNF_BRIDGED_DNAT;
363 return dst->neighbour->output(skb); 365 return neigh->output(skb);
364 } 366 }
365free_skb: 367free_skb:
366 kfree_skb(skb); 368 kfree_skb(skb);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index ffb0dc4cc0e..71861a9c400 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -203,11 +203,12 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
203 return 0; 203 return 0;
204} 204}
205 205
206static struct rtnl_link_ops br_link_ops __read_mostly = { 206struct rtnl_link_ops br_link_ops __read_mostly = {
207 .kind = "bridge", 207 .kind = "bridge",
208 .priv_size = sizeof(struct net_bridge), 208 .priv_size = sizeof(struct net_bridge),
209 .setup = br_dev_setup, 209 .setup = br_dev_setup,
210 .validate = br_validate, 210 .validate = br_validate,
211 .dellink = br_dev_delete,
211}; 212};
212 213
213int __init br_netlink_init(void) 214int __init br_netlink_init(void)
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 54578f274d8..7c1f3a09712 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -77,9 +77,7 @@ struct net_bridge_port_group {
77 struct hlist_node mglist; 77 struct hlist_node mglist;
78 struct rcu_head rcu; 78 struct rcu_head rcu;
79 struct timer_list timer; 79 struct timer_list timer;
80 struct timer_list query_timer;
81 struct br_ip addr; 80 struct br_ip addr;
82 u32 queries_sent;
83}; 81};
84 82
85struct net_bridge_mdb_entry 83struct net_bridge_mdb_entry
@@ -89,10 +87,8 @@ struct net_bridge_mdb_entry
89 struct net_bridge_port_group __rcu *ports; 87 struct net_bridge_port_group __rcu *ports;
90 struct rcu_head rcu; 88 struct rcu_head rcu;
91 struct timer_list timer; 89 struct timer_list timer;
92 struct timer_list query_timer;
93 struct br_ip addr; 90 struct br_ip addr;
94 bool mglist; 91 bool mglist;
95 u32 queries_sent;
96}; 92};
97 93
98struct net_bridge_mdb_htable 94struct net_bridge_mdb_htable
@@ -124,6 +120,7 @@ struct net_bridge_port
124 bridge_id designated_bridge; 120 bridge_id designated_bridge;
125 u32 path_cost; 121 u32 path_cost;
126 u32 designated_cost; 122 u32 designated_cost;
123 unsigned long designated_age;
127 124
128 struct timer_list forward_delay_timer; 125 struct timer_list forward_delay_timer;
129 struct timer_list hold_timer; 126 struct timer_list hold_timer;
@@ -293,6 +290,7 @@ static inline int br_is_root_bridge(const struct net_bridge *br)
293 290
294/* br_device.c */ 291/* br_device.c */
295extern void br_dev_setup(struct net_device *dev); 292extern void br_dev_setup(struct net_device *dev);
293extern void br_dev_delete(struct net_device *dev, struct list_head *list);
296extern netdev_tx_t br_dev_xmit(struct sk_buff *skb, 294extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
297 struct net_device *dev); 295 struct net_device *dev);
298#ifdef CONFIG_NET_POLL_CONTROLLER 296#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -531,6 +529,7 @@ extern int (*br_fdb_test_addr_hook)(struct net_device *dev, unsigned char *addr)
531#endif 529#endif
532 530
533/* br_netlink.c */ 531/* br_netlink.c */
532extern struct rtnl_link_ops br_link_ops;
534extern int br_netlink_init(void); 533extern int br_netlink_init(void);
535extern void br_netlink_fini(void); 534extern void br_netlink_fini(void);
536extern void br_ifinfo_notify(int event, struct net_bridge_port *port); 535extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index bb4383e84de..fcff6225154 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -164,8 +164,7 @@ void br_transmit_config(struct net_bridge_port *p)
164 else { 164 else {
165 struct net_bridge_port *root 165 struct net_bridge_port *root
166 = br_get_port(br, br->root_port); 166 = br_get_port(br, br->root_port);
167 bpdu.message_age = br->max_age 167 bpdu.message_age = (jiffies - root->designated_age)
168 - (root->message_age_timer.expires - jiffies)
169 + MESSAGE_AGE_INCR; 168 + MESSAGE_AGE_INCR;
170 } 169 }
171 bpdu.max_age = br->max_age; 170 bpdu.max_age = br->max_age;
@@ -189,6 +188,7 @@ static inline void br_record_config_information(struct net_bridge_port *p,
189 p->designated_cost = bpdu->root_path_cost; 188 p->designated_cost = bpdu->root_path_cost;
190 p->designated_bridge = bpdu->bridge_id; 189 p->designated_bridge = bpdu->bridge_id;
191 p->designated_port = bpdu->port_id; 190 p->designated_port = bpdu->port_id;
191 p->designated_age = jiffies + bpdu->message_age;
192 192
193 mod_timer(&p->message_age_timer, jiffies 193 mod_timer(&p->message_age_timer, jiffies
194 + (p->br->max_age - bpdu->message_age)); 194 + (p->br->max_age - bpdu->message_age));
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 682c0fedf36..804e50f18a5 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -53,7 +53,6 @@ struct cfcnfg *get_cfcnfg(struct net *net)
53 struct caif_net *caifn; 53 struct caif_net *caifn;
54 BUG_ON(!net); 54 BUG_ON(!net);
55 caifn = net_generic(net, caif_net_id); 55 caifn = net_generic(net, caif_net_id);
56 BUG_ON(!caifn);
57 return caifn->cfg; 56 return caifn->cfg;
58} 57}
59EXPORT_SYMBOL(get_cfcnfg); 58EXPORT_SYMBOL(get_cfcnfg);
@@ -63,7 +62,6 @@ static struct caif_device_entry_list *caif_device_list(struct net *net)
63 struct caif_net *caifn; 62 struct caif_net *caifn;
64 BUG_ON(!net); 63 BUG_ON(!net);
65 caifn = net_generic(net, caif_net_id); 64 caifn = net_generic(net, caif_net_id);
66 BUG_ON(!caifn);
67 return &caifn->caifdevs; 65 return &caifn->caifdevs;
68} 66}
69 67
@@ -92,7 +90,6 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
92 struct caif_device_entry *caifd; 90 struct caif_device_entry *caifd;
93 91
94 caifdevs = caif_device_list(dev_net(dev)); 92 caifdevs = caif_device_list(dev_net(dev));
95 BUG_ON(!caifdevs);
96 93
97 caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC); 94 caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
98 if (!caifd) 95 if (!caifd)
@@ -108,7 +105,7 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
108 struct caif_device_entry_list *caifdevs = 105 struct caif_device_entry_list *caifdevs =
109 caif_device_list(dev_net(dev)); 106 caif_device_list(dev_net(dev));
110 struct caif_device_entry *caifd; 107 struct caif_device_entry *caifd;
111 BUG_ON(!caifdevs); 108
112 list_for_each_entry_rcu(caifd, &caifdevs->list, list) { 109 list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
113 if (caifd->netdev == dev) 110 if (caifd->netdev == dev)
114 return caifd; 111 return caifd;
@@ -209,8 +206,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
209 enum cfcnfg_phy_preference pref; 206 enum cfcnfg_phy_preference pref;
210 enum cfcnfg_phy_type phy_type; 207 enum cfcnfg_phy_type phy_type;
211 struct cfcnfg *cfg; 208 struct cfcnfg *cfg;
212 struct caif_device_entry_list *caifdevs = 209 struct caif_device_entry_list *caifdevs;
213 caif_device_list(dev_net(dev));
214 210
215 if (dev->type != ARPHRD_CAIF) 211 if (dev->type != ARPHRD_CAIF)
216 return 0; 212 return 0;
@@ -219,6 +215,8 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
219 if (cfg == NULL) 215 if (cfg == NULL)
220 return 0; 216 return 0;
221 217
218 caifdevs = caif_device_list(dev_net(dev));
219
222 switch (what) { 220 switch (what) {
223 case NETDEV_REGISTER: 221 case NETDEV_REGISTER:
224 caifd = caif_device_alloc(dev); 222 caifd = caif_device_alloc(dev);
@@ -348,7 +346,7 @@ static struct notifier_block caif_device_notifier = {
348static int caif_init_net(struct net *net) 346static int caif_init_net(struct net *net)
349{ 347{
350 struct caif_net *caifn = net_generic(net, caif_net_id); 348 struct caif_net *caifn = net_generic(net, caif_net_id);
351 BUG_ON(!caifn); 349
352 INIT_LIST_HEAD(&caifn->caifdevs.list); 350 INIT_LIST_HEAD(&caifn->caifdevs.list);
353 mutex_init(&caifn->caifdevs.lock); 351 mutex_init(&caifn->caifdevs.lock);
354 352
@@ -413,7 +411,7 @@ static int __init caif_device_init(void)
413{ 411{
414 int result; 412 int result;
415 413
416 result = register_pernet_device(&caif_net_ops); 414 result = register_pernet_subsys(&caif_net_ops);
417 415
418 if (result) 416 if (result)
419 return result; 417 return result;
@@ -426,9 +424,9 @@ static int __init caif_device_init(void)
426 424
427static void __exit caif_device_exit(void) 425static void __exit caif_device_exit(void)
428{ 426{
429 unregister_pernet_device(&caif_net_ops);
430 unregister_netdevice_notifier(&caif_device_notifier); 427 unregister_netdevice_notifier(&caif_device_notifier);
431 dev_remove_pack(&caif_packet_type); 428 dev_remove_pack(&caif_packet_type);
429 unregister_pernet_subsys(&caif_net_ops);
432} 430}
433 431
434module_init(caif_device_init); 432module_init(caif_device_init);
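caif's per-namespace state becomes a pernet subsystem rather than a pernet device, and the exit path now unwinds in reverse order of setup: the netdevice notifier and packet handler go away before the pernet state they rely on. With registration ordered this way, net_generic(net, caif_net_id) can no longer observe missing per-net data, which is presumably why the BUG_ON(!caifn) checks are dropped:

	static void __exit caif_device_exit(void)
	{
		unregister_netdevice_notifier(&caif_device_notifier);
		dev_remove_pack(&caif_packet_type);
		unregister_pernet_subsys(&caif_net_ops);	/* last: no users left */
	}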
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 52fe33bee02..bca32d7c15c 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -313,7 +313,6 @@ int caif_connect_client(struct net *net, struct caif_connect_request *conn_req,
313 int err; 313 int err;
314 struct cfctrl_link_param param; 314 struct cfctrl_link_param param;
315 struct cfcnfg *cfg = get_cfcnfg(net); 315 struct cfcnfg *cfg = get_cfcnfg(net);
316 caif_assert(cfg != NULL);
317 316
318 rcu_read_lock(); 317 rcu_read_lock();
319 err = caif_connect_req_to_link_param(cfg, conn_req, &param); 318 err = caif_connect_req_to_link_param(cfg, conn_req, &param);
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 184a6572b67..c6cc66f7286 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -343,6 +343,18 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
343 } 343 }
344} 344}
345 345
346static void bcm_tx_start_timer(struct bcm_op *op)
347{
348 if (op->kt_ival1.tv64 && op->count)
349 hrtimer_start(&op->timer,
350 ktime_add(ktime_get(), op->kt_ival1),
351 HRTIMER_MODE_ABS);
352 else if (op->kt_ival2.tv64)
353 hrtimer_start(&op->timer,
354 ktime_add(ktime_get(), op->kt_ival2),
355 HRTIMER_MODE_ABS);
356}
357
346static void bcm_tx_timeout_tsklet(unsigned long data) 358static void bcm_tx_timeout_tsklet(unsigned long data)
347{ 359{
348 struct bcm_op *op = (struct bcm_op *)data; 360 struct bcm_op *op = (struct bcm_op *)data;
@@ -364,26 +376,12 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
364 376
365 bcm_send_to_user(op, &msg_head, NULL, 0); 377 bcm_send_to_user(op, &msg_head, NULL, 0);
366 } 378 }
367 }
368
369 if (op->kt_ival1.tv64 && (op->count > 0)) {
370
371 /* send (next) frame */
372 bcm_can_tx(op); 379 bcm_can_tx(op);
373 hrtimer_start(&op->timer,
374 ktime_add(ktime_get(), op->kt_ival1),
375 HRTIMER_MODE_ABS);
376 380
377 } else { 381 } else if (op->kt_ival2.tv64)
378 if (op->kt_ival2.tv64) { 382 bcm_can_tx(op);
379 383
380 /* send (next) frame */ 384 bcm_tx_start_timer(op);
381 bcm_can_tx(op);
382 hrtimer_start(&op->timer,
383 ktime_add(ktime_get(), op->kt_ival2),
384 HRTIMER_MODE_ABS);
385 }
386 }
387} 385}
388 386
389/* 387/*
@@ -963,23 +961,20 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
963 hrtimer_cancel(&op->timer); 961 hrtimer_cancel(&op->timer);
964 } 962 }
965 963
966 if ((op->flags & STARTTIMER) && 964 if (op->flags & STARTTIMER) {
967 ((op->kt_ival1.tv64 && op->count) || op->kt_ival2.tv64)) { 965 hrtimer_cancel(&op->timer);
968
969 /* spec: send can_frame when starting timer */ 966 /* spec: send can_frame when starting timer */
970 op->flags |= TX_ANNOUNCE; 967 op->flags |= TX_ANNOUNCE;
971
972 if (op->kt_ival1.tv64 && (op->count > 0)) {
973 /* op->count-- is done in bcm_tx_timeout_handler */
974 hrtimer_start(&op->timer, op->kt_ival1,
975 HRTIMER_MODE_REL);
976 } else
977 hrtimer_start(&op->timer, op->kt_ival2,
978 HRTIMER_MODE_REL);
979 } 968 }
980 969
981 if (op->flags & TX_ANNOUNCE) 970 if (op->flags & TX_ANNOUNCE) {
982 bcm_can_tx(op); 971 bcm_can_tx(op);
972 if (op->count)
973 op->count--;
974 }
975
976 if (op->flags & STARTTIMER)
977 bcm_tx_start_timer(op);
983 978
984 return msg_head->nframes * CFSIZ + MHSIZ; 979 return msg_head->nframes * CFSIZ + MHSIZ;
985} 980}
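
The helper introduced above puts the interval selection (counted kt_ival1 phase, then steady kt_ival2 phase) in one place, so the setup path and the expiry path cannot drift apart. A sketch of the pattern, assuming a hypothetical op type carrying the same fields as bcm_op:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

struct example_op {                     /* hypothetical, mirrors bcm_op */
        struct hrtimer timer;
        ktime_t kt_ival1, kt_ival2;
        u32 count;
};

static void example_rearm(struct example_op *op)
{
        ktime_t ival;

        if (op->kt_ival1.tv64 && op->count)
                ival = op->kt_ival1;    /* still in the counted phase */
        else if (op->kt_ival2.tv64)
                ival = op->kt_ival2;    /* steady-state phase */
        else
                return;                 /* nothing to rearm */

        hrtimer_start(&op->timer, ktime_add(ktime_get(), ival),
                      HRTIMER_MODE_ABS);
}
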
diff --git a/net/core/Makefile b/net/core/Makefile
index 8a04dd22cf7..0d357b1c4e5 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \ 5obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
6 gen_stats.o gen_estimator.o net_namespace.o 6 gen_stats.o gen_estimator.o net_namespace.o secure_seq.o
7 7
8obj-$(CONFIG_SYSCTL) += sysctl_net_core.o 8obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
9 9
diff --git a/net/core/dev.c b/net/core/dev.c
index 9c58c1ec41a..8235b81a7db 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1163,6 +1163,7 @@ static int __dev_open(struct net_device *dev)
1163 net_dmaengine_get(); 1163 net_dmaengine_get();
1164 dev_set_rx_mode(dev); 1164 dev_set_rx_mode(dev);
1165 dev_activate(dev); 1165 dev_activate(dev);
1166 add_device_randomness(dev->dev_addr, dev->addr_len);
1166 } 1167 }
1167 1168
1168 return ret; 1169 return ret;
@@ -1406,14 +1407,34 @@ EXPORT_SYMBOL(register_netdevice_notifier);
1406 * register_netdevice_notifier(). The notifier is unlinked from the 1407 * register_netdevice_notifier(). The notifier is unlinked from the
1407 * kernel structures and may then be reused. A negative errno code 1408 * kernel structures and may then be reused. A negative errno code
1408 * is returned on a failure. 1409 * is returned on a failure.
1410 *
 1411 * After unregistering, unregister and down device events are synthesized
 1412 * for all devices on the device list to the removed notifier, removing
 1413 * the need for special-case cleanup code.
1409 */ 1414 */
1410 1415
1411int unregister_netdevice_notifier(struct notifier_block *nb) 1416int unregister_netdevice_notifier(struct notifier_block *nb)
1412{ 1417{
1418 struct net_device *dev;
1419 struct net *net;
1413 int err; 1420 int err;
1414 1421
1415 rtnl_lock(); 1422 rtnl_lock();
1416 err = raw_notifier_chain_unregister(&netdev_chain, nb); 1423 err = raw_notifier_chain_unregister(&netdev_chain, nb);
1424 if (err)
1425 goto unlock;
1426
1427 for_each_net(net) {
1428 for_each_netdev(net, dev) {
1429 if (dev->flags & IFF_UP) {
1430 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1431 nb->notifier_call(nb, NETDEV_DOWN, dev);
1432 }
1433 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1434 nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
1435 }
1436 }
1437unlock:
1417 rtnl_unlock(); 1438 rtnl_unlock();
1418 return err; 1439 return err;
1419} 1440}
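
The synthesized events above mean a departing notifier sees GOING_DOWN/DOWN for every running device plus UNREGISTER for all of them, so a subsystem can keep its per-device cleanup in the callback and skip the manual device sweep at module exit. A hedged sketch of a consumer relying on this guarantee (all example_* names are hypothetical):

#include <linux/netdevice.h>
#include <linux/notifier.h>

static void example_stop(struct net_device *dev)    { /* hypothetical */ }
static void example_release(struct net_device *dev) { /* hypothetical */ }

static int example_event(struct notifier_block *nb,
                         unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;   /* ptr is the net_device here */

        switch (event) {
        case NETDEV_DOWN:
                example_stop(dev);
                break;
        case NETDEV_UNREGISTER:
                example_release(dev);
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
        .notifier_call = example_event,
};

/* module exit: no manual for_each_netdev() sweep is needed any more */
static void __exit example_exit(void)
{
        unregister_netdevice_notifier(&example_nb);
}
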
@@ -1513,10 +1534,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1513 kfree_skb(skb); 1534 kfree_skb(skb);
1514 return NET_RX_DROP; 1535 return NET_RX_DROP;
1515 } 1536 }
1516 skb_set_dev(skb, dev); 1537 skb->dev = dev;
1538 skb_dst_drop(skb);
1517 skb->tstamp.tv64 = 0; 1539 skb->tstamp.tv64 = 0;
1518 skb->pkt_type = PACKET_HOST; 1540 skb->pkt_type = PACKET_HOST;
1519 skb->protocol = eth_type_trans(skb, dev); 1541 skb->protocol = eth_type_trans(skb, dev);
1542 skb->mark = 0;
1543 secpath_reset(skb);
1544 nf_reset(skb);
1520 return netif_rx(skb); 1545 return netif_rx(skb);
1521} 1546}
1522EXPORT_SYMBOL_GPL(dev_forward_skb); 1547EXPORT_SYMBOL_GPL(dev_forward_skb);
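
dev_forward_skb() now open-codes the scrub that skb_set_dev() used to perform (the helper is deleted in a later hunk). For readability, here is the same sequence gathered into one hypothetical helper; this is an editorial sketch, not code from the patch:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_scrub_and_own(struct sk_buff *skb, struct net_device *dev)
{
        skb->dev = dev;                 /* new owner */
        skb_dst_drop(skb);              /* the old routing decision is stale */
        skb->tstamp.tv64 = 0;           /* timestamps do not transfer */
        skb->pkt_type = PACKET_HOST;
        skb->protocol = eth_type_trans(skb, dev);
        skb->mark = 0;                  /* marks are namespace-local policy */
        secpath_reset(skb);             /* so is IPsec state... */
        nf_reset(skb);                  /* ...and conntrack state */
}
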
@@ -1771,36 +1796,6 @@ void netif_device_attach(struct net_device *dev)
1771} 1796}
1772EXPORT_SYMBOL(netif_device_attach); 1797EXPORT_SYMBOL(netif_device_attach);
1773 1798
1774/**
1775 * skb_dev_set -- assign a new device to a buffer
1776 * @skb: buffer for the new device
1777 * @dev: network device
1778 *
1779 * If an skb is owned by a device already, we have to reset
1780 * all data private to the namespace a device belongs to
1781 * before assigning it a new device.
1782 */
1783#ifdef CONFIG_NET_NS
1784void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
1785{
1786 skb_dst_drop(skb);
1787 if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
1788 secpath_reset(skb);
1789 nf_reset(skb);
1790 skb_init_secmark(skb);
1791 skb->mark = 0;
1792 skb->priority = 0;
1793 skb->nf_trace = 0;
1794 skb->ipvs_property = 0;
1795#ifdef CONFIG_NET_SCHED
1796 skb->tc_index = 0;
1797#endif
1798 }
1799 skb->dev = dev;
1800}
1801EXPORT_SYMBOL(skb_set_dev);
1802#endif /* CONFIG_NET_NS */
1803
1804/* 1799/*
1805 * Invalidate hardware checksum when packet is to be mangled, and 1800 * Invalidate hardware checksum when packet is to be mangled, and
1806 * complete checksum manually on outgoing path. 1801 * complete checksum manually on outgoing path.
@@ -3434,14 +3429,20 @@ static inline gro_result_t
3434__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3429__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3435{ 3430{
3436 struct sk_buff *p; 3431 struct sk_buff *p;
3432 unsigned int maclen = skb->dev->hard_header_len;
3437 3433
3438 for (p = napi->gro_list; p; p = p->next) { 3434 for (p = napi->gro_list; p; p = p->next) {
3439 unsigned long diffs; 3435 unsigned long diffs;
3440 3436
3441 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; 3437 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3442 diffs |= p->vlan_tci ^ skb->vlan_tci; 3438 diffs |= p->vlan_tci ^ skb->vlan_tci;
3443 diffs |= compare_ether_header(skb_mac_header(p), 3439 if (maclen == ETH_HLEN)
3444 skb_gro_mac_header(skb)); 3440 diffs |= compare_ether_header(skb_mac_header(p),
3441 skb_gro_mac_header(skb));
3442 else if (!diffs)
3443 diffs = memcmp(skb_mac_header(p),
3444 skb_gro_mac_header(skb),
3445 maclen);
3445 NAPI_GRO_CB(p)->same_flow = !diffs; 3446 NAPI_GRO_CB(p)->same_flow = !diffs;
3446 NAPI_GRO_CB(p)->flush = 0; 3447 NAPI_GRO_CB(p)->flush = 0;
3447 } 3448 }
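
The GRO change keeps the optimized comparison for the common 14-byte Ethernet header and falls back to memcmp() only for devices with a longer hard_header_len; the else-if-!diffs guard skips the memcmp when earlier fields already mismatch. For reference, compare_ether_header() behaves roughly like this unrolled 14-byte compare (a sketch, assuming 2-byte alignment like the real helper):

static inline unsigned long example_eth_cmp(const void *a, const void *b)
{
        const u16 *a16 = a, *b16 = b;

        return (a16[0] ^ b16[0]) | (a16[1] ^ b16[1]) | (a16[2] ^ b16[2]) |
               (a16[3] ^ b16[3]) | (a16[4] ^ b16[4]) | (a16[5] ^ b16[5]) |
               (a16[6] ^ b16[6]);       /* 7 * sizeof(u16) == ETH_HLEN */
}
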
@@ -3498,7 +3499,8 @@ EXPORT_SYMBOL(napi_gro_receive);
3498static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) 3499static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3499{ 3500{
3500 __skb_pull(skb, skb_headlen(skb)); 3501 __skb_pull(skb, skb_headlen(skb));
3501 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb)); 3502 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
3503 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
3502 skb->vlan_tci = 0; 3504 skb->vlan_tci = 0;
3503 skb->dev = napi->dev; 3505 skb->dev = napi->dev;
3504 skb->skb_iif = 0; 3506 skb->skb_iif = 0;
@@ -4729,6 +4731,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4729 err = ops->ndo_set_mac_address(dev, sa); 4731 err = ops->ndo_set_mac_address(dev, sa);
4730 if (!err) 4732 if (!err)
4731 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 4733 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4734 add_device_randomness(dev->dev_addr, dev->addr_len);
4732 return err; 4735 return err;
4733} 4736}
4734EXPORT_SYMBOL(dev_set_mac_address); 4737EXPORT_SYMBOL(dev_set_mac_address);
@@ -5506,6 +5509,7 @@ int register_netdevice(struct net_device *dev)
5506 dev_init_scheduler(dev); 5509 dev_init_scheduler(dev);
5507 dev_hold(dev); 5510 dev_hold(dev);
5508 list_netdevice(dev); 5511 list_netdevice(dev);
5512 add_device_randomness(dev->dev_addr, dev->addr_len);
5509 5513
5510 /* Notify protocols, that a new device appeared. */ 5514 /* Notify protocols, that a new device appeared. */
5511 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); 5515 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
@@ -6105,6 +6109,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
6105 */ 6109 */
6106 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 6110 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6107 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); 6111 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
6112 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
6108 6113
6109 /* 6114 /*
6110 * Flush the unicast and multicast chains 6115 * Flush the unicast and multicast chains
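
The dev_change_net_namespace() hunk above also emits RTM_DELLINK, so rtnetlink listeners in the old namespace learn that the device has gone. A minimal userspace sketch of such a listener; this program is illustrative and not part of the patch:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
        struct sockaddr_nl sa = { .nl_family = AF_NETLINK,
                                  .nl_groups = RTMGRP_LINK };
        char buf[8192];
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

        if (fd < 0 || bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
                return 1;
        for (;;) {
                int len = recv(fd, buf, sizeof(buf), 0);
                struct nlmsghdr *nh;

                if (len <= 0)
                        break;
                for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
                     nh = NLMSG_NEXT(nh, len))
                        if (nh->nlmsg_type == RTM_DELLINK)
                                printf("a link left this namespace\n");
        }
        return 0;
}
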
diff --git a/net/core/dst.c b/net/core/dst.c
index 6135f367169..8246d47a218 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -171,7 +171,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
171 dst_init_metrics(dst, dst_default_metrics, true); 171 dst_init_metrics(dst, dst_default_metrics, true);
172 dst->expires = 0UL; 172 dst->expires = 0UL;
173 dst->path = dst; 173 dst->path = dst;
174 dst->neighbour = NULL; 174 RCU_INIT_POINTER(dst->_neighbour, NULL);
175 dst->hh = NULL; 175 dst->hh = NULL;
176#ifdef CONFIG_XFRM 176#ifdef CONFIG_XFRM
177 dst->xfrm = NULL; 177 dst->xfrm = NULL;
@@ -231,7 +231,7 @@ struct dst_entry *dst_destroy(struct dst_entry * dst)
231 smp_rmb(); 231 smp_rmb();
232 232
233again: 233again:
234 neigh = dst->neighbour; 234 neigh = rcu_dereference_protected(dst->_neighbour, 1);
235 hh = dst->hh; 235 hh = dst->hh;
236 child = dst->child; 236 child = dst->child;
237 237
@@ -240,7 +240,7 @@ again:
240 hh_cache_put(hh); 240 hh_cache_put(hh);
241 241
242 if (neigh) { 242 if (neigh) {
243 dst->neighbour = NULL; 243 RCU_INIT_POINTER(dst->_neighbour, NULL);
244 neigh_release(neigh); 244 neigh_release(neigh);
245 } 245 }
246 246
@@ -367,14 +367,19 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
367 if (!unregister) { 367 if (!unregister) {
368 dst->input = dst->output = dst_discard; 368 dst->input = dst->output = dst_discard;
369 } else { 369 } else {
370 struct neighbour *neigh;
371
370 dst->dev = dev_net(dst->dev)->loopback_dev; 372 dst->dev = dev_net(dst->dev)->loopback_dev;
371 dev_hold(dst->dev); 373 dev_hold(dst->dev);
372 dev_put(dev); 374 dev_put(dev);
373 if (dst->neighbour && dst->neighbour->dev == dev) { 375 rcu_read_lock();
374 dst->neighbour->dev = dst->dev; 376 neigh = dst_get_neighbour(dst);
377 if (neigh && neigh->dev == dev) {
378 neigh->dev = dst->dev;
375 dev_hold(dst->dev); 379 dev_hold(dst->dev);
376 dev_put(dev); 380 dev_put(dev);
377 } 381 }
382 rcu_read_unlock();
378 } 383 }
379} 384}
380 385
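
This conversion is one piece of a series that moves dst->neighbour behind RCU. The dst_get_neighbour()/dst_set_neighbour() accessors themselves are not in this diff; assuming the shape the series gives them in include/net/dst.h, they amount to:

/* assumed accessor shape; reads need rcu_read_lock() or exclusive access */
static inline struct neighbour *dst_get_neighbour(struct dst_entry *dst)
{
        return rcu_dereference(dst->_neighbour);
}

static inline void dst_set_neighbour(struct dst_entry *dst,
                                     struct neighbour *neigh)
{
        rcu_assign_pointer(dst->_neighbour, neigh);
}
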
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index fd14116ad7f..891b19f2c00 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1227,7 +1227,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
1227 regs.len = reglen; 1227 regs.len = reglen;
1228 1228
1229 regbuf = vzalloc(reglen); 1229 regbuf = vzalloc(reglen);
1230 if (!regbuf) 1230 if (reglen && !regbuf)
1231 return -ENOMEM; 1231 return -ENOMEM;
1232 1232
1233 ops->get_regs(dev, &regs, regbuf); 1233 ops->get_regs(dev, &regs, regbuf);
@@ -1236,7 +1236,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
1236 if (copy_to_user(useraddr, &regs, sizeof(regs))) 1236 if (copy_to_user(useraddr, &regs, sizeof(regs)))
1237 goto out; 1237 goto out;
1238 useraddr += offsetof(struct ethtool_regs, data); 1238 useraddr += offsetof(struct ethtool_regs, data);
1239 if (copy_to_user(useraddr, regbuf, regs.len)) 1239 if (regbuf && copy_to_user(useraddr, regbuf, regs.len))
1240 goto out; 1240 goto out;
1241 ret = 0; 1241 ret = 0;
1242 1242
@@ -1964,6 +1964,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1964 case ETHTOOL_GRXCSUM: 1964 case ETHTOOL_GRXCSUM:
1965 case ETHTOOL_GTXCSUM: 1965 case ETHTOOL_GTXCSUM:
1966 case ETHTOOL_GSG: 1966 case ETHTOOL_GSG:
1967 case ETHTOOL_GSSET_INFO:
1967 case ETHTOOL_GSTRINGS: 1968 case ETHTOOL_GSTRINGS:
1968 case ETHTOOL_GTSO: 1969 case ETHTOOL_GTSO:
1969 case ETHTOOL_GPERMADDR: 1970 case ETHTOOL_GPERMADDR:
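
The two ethtool guards above handle drivers that report a zero-length register window: vzalloc(0) yields NULL, so a NULL buffer is an error only when reglen was non-zero, and the copy-out must tolerate the NULL. The same guard pattern in isolation (hypothetical function):

static int example_get_blob(char __user *useraddr, size_t len)
{
        void *buf = vzalloc(len);       /* NULL on failure and when len == 0 */

        if (len && !buf)
                return -ENOMEM;         /* a failure only if we asked for bytes */
        /* ... fill buf here when len != 0 ... */
        if (buf && copy_to_user(useraddr, buf, len)) {
                vfree(buf);
                return -EFAULT;
        }
        vfree(buf);                     /* vfree(NULL) is a no-op */
        return 0;
}
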
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 008dc70b064..f39ef5c6084 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -384,8 +384,8 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
384 */ 384 */
385 list_for_each_entry(r, &ops->rules_list, list) { 385 list_for_each_entry(r, &ops->rules_list, list) {
386 if (r->action == FR_ACT_GOTO && 386 if (r->action == FR_ACT_GOTO &&
387 r->target == rule->pref) { 387 r->target == rule->pref &&
388 BUG_ON(rtnl_dereference(r->ctarget) != NULL); 388 rtnl_dereference(r->ctarget) == NULL) {
389 rcu_assign_pointer(r->ctarget, rule); 389 rcu_assign_pointer(r->ctarget, rule);
390 if (--ops->unresolved_rules == 0) 390 if (--ops->unresolved_rules == 0)
391 break; 391 break;
diff --git a/net/core/flow.c b/net/core/flow.c
index 990703b8863..a6bda2a514f 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -172,29 +172,26 @@ static void flow_new_hash_rnd(struct flow_cache *fc,
172 172
173static u32 flow_hash_code(struct flow_cache *fc, 173static u32 flow_hash_code(struct flow_cache *fc,
174 struct flow_cache_percpu *fcp, 174 struct flow_cache_percpu *fcp,
175 const struct flowi *key) 175 const struct flowi *key,
176 size_t keysize)
176{ 177{
177 const u32 *k = (const u32 *) key; 178 const u32 *k = (const u32 *) key;
179 const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);
178 180
179 return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd) 181 return jhash2(k, length, fcp->hash_rnd)
180 & (flow_cache_hash_size(fc) - 1); 182 & (flow_cache_hash_size(fc) - 1);
181} 183}
182 184
183typedef unsigned long flow_compare_t;
184
185/* I hear what you're saying, use memcmp. But memcmp cannot make 185/* I hear what you're saying, use memcmp. But memcmp cannot make
186 * important assumptions that we can here, such as alignment and 186 * important assumptions that we can here, such as alignment.
187 * constant size.
188 */ 187 */
189static int flow_key_compare(const struct flowi *key1, const struct flowi *key2) 188static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
189 size_t keysize)
190{ 190{
191 const flow_compare_t *k1, *k1_lim, *k2; 191 const flow_compare_t *k1, *k1_lim, *k2;
192 const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);
193
194 BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));
195 192
196 k1 = (const flow_compare_t *) key1; 193 k1 = (const flow_compare_t *) key1;
197 k1_lim = k1 + n_elem; 194 k1_lim = k1 + keysize;
198 195
199 k2 = (const flow_compare_t *) key2; 196 k2 = (const flow_compare_t *) key2;
200 197
@@ -215,6 +212,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
215 struct flow_cache_entry *fle, *tfle; 212 struct flow_cache_entry *fle, *tfle;
216 struct hlist_node *entry; 213 struct hlist_node *entry;
217 struct flow_cache_object *flo; 214 struct flow_cache_object *flo;
215 size_t keysize;
218 unsigned int hash; 216 unsigned int hash;
219 217
220 local_bh_disable(); 218 local_bh_disable();
@@ -222,6 +220,11 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
222 220
223 fle = NULL; 221 fle = NULL;
224 flo = NULL; 222 flo = NULL;
223
224 keysize = flow_key_size(family);
225 if (!keysize)
226 goto nocache;
227
225 /* Packet really early in init? Making flow_cache_init a 228 /* Packet really early in init? Making flow_cache_init a
226 * pre-smp initcall would solve this. --RR */ 229 * pre-smp initcall would solve this. --RR */
227 if (!fcp->hash_table) 230 if (!fcp->hash_table)
@@ -230,11 +233,11 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
230 if (fcp->hash_rnd_recalc) 233 if (fcp->hash_rnd_recalc)
231 flow_new_hash_rnd(fc, fcp); 234 flow_new_hash_rnd(fc, fcp);
232 235
233 hash = flow_hash_code(fc, fcp, key); 236 hash = flow_hash_code(fc, fcp, key, keysize);
234 hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) { 237 hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
235 if (tfle->family == family && 238 if (tfle->family == family &&
236 tfle->dir == dir && 239 tfle->dir == dir &&
237 flow_key_compare(key, &tfle->key) == 0) { 240 flow_key_compare(key, &tfle->key, keysize) == 0) {
238 fle = tfle; 241 fle = tfle;
239 break; 242 break;
240 } 243 }
@@ -248,7 +251,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
248 if (fle) { 251 if (fle) {
249 fle->family = family; 252 fle->family = family;
250 fle->dir = dir; 253 fle->dir = dir;
251 memcpy(&fle->key, key, sizeof(*key)); 254 memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
252 fle->object = NULL; 255 fle->object = NULL;
253 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]); 256 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
254 fcp->hash_count++; 257 fcp->hash_count++;
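
flow_hash_code() now hashes only the bytes the address family actually uses; note the unit conversion, since keysize counts flow_compare_t (unsigned long) words while jhash2() wants a length in u32 words. The conversion as a standalone sketch:

#include <linux/jhash.h>

static u32 example_flow_hash(const u32 *key, size_t keysize, u32 hash_rnd,
                             unsigned int hash_size)
{
        /* keysize is in unsigned-long units; jhash2() wants u32 words */
        const u32 nwords = keysize * sizeof(unsigned long) / sizeof(u32);

        /* hash_size must be a power of two for the mask to be a modulus */
        return jhash2(key, nwords, hash_rnd) & (hash_size - 1);
}
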
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index a7b34213186..357bd4ee4ba 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -126,7 +126,7 @@ static void linkwatch_schedule_work(int urgent)
126 return; 126 return;
127 127
128 /* It's already running which is good enough. */ 128 /* It's already running which is good enough. */
129 if (!cancel_delayed_work(&linkwatch_work)) 129 if (!__cancel_delayed_work(&linkwatch_work))
130 return; 130 return;
131 131
132 /* Otherwise we reschedule it again for immediate execution. */ 132 /* Otherwise we reschedule it again for immediate execution. */
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 799f06e03a2..96bb0a33f86 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -823,6 +823,8 @@ next_elt:
823 write_unlock_bh(&tbl->lock); 823 write_unlock_bh(&tbl->lock);
824 cond_resched(); 824 cond_resched();
825 write_lock_bh(&tbl->lock); 825 write_lock_bh(&tbl->lock);
826 nht = rcu_dereference_protected(tbl->nht,
827 lockdep_is_held(&tbl->lock));
826 } 828 }
827 /* Cycle through all hash buckets every base_reachable_time/2 ticks. 829 /* Cycle through all hash buckets every base_reachable_time/2 ticks.
828 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2 830 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
@@ -1173,12 +1175,17 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1173 1175
1174 while (neigh->nud_state & NUD_VALID && 1176 while (neigh->nud_state & NUD_VALID &&
1175 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) { 1177 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1176 struct neighbour *n1 = neigh; 1178 struct dst_entry *dst = skb_dst(skb);
1179 struct neighbour *n2, *n1 = neigh;
1177 write_unlock_bh(&neigh->lock); 1180 write_unlock_bh(&neigh->lock);
1181
1182 rcu_read_lock();
1178 /* On shaper/eql skb->dst->neighbour != neigh :( */ 1183 /* On shaper/eql skb->dst->neighbour != neigh :( */
1179 if (skb_dst(skb) && skb_dst(skb)->neighbour) 1184 if (dst && (n2 = dst_get_neighbour(dst)) != NULL)
1180 n1 = skb_dst(skb)->neighbour; 1185 n1 = n2;
1181 n1->output(skb); 1186 n1->output(skb);
1187 rcu_read_unlock();
1188
1182 write_lock_bh(&neigh->lock); 1189 write_lock_bh(&neigh->lock);
1183 } 1190 }
1184 skb_queue_purge(&neigh->arp_queue); 1191 skb_queue_purge(&neigh->arp_queue);
@@ -1300,10 +1307,10 @@ EXPORT_SYMBOL(neigh_compat_output);
1300int neigh_resolve_output(struct sk_buff *skb) 1307int neigh_resolve_output(struct sk_buff *skb)
1301{ 1308{
1302 struct dst_entry *dst = skb_dst(skb); 1309 struct dst_entry *dst = skb_dst(skb);
1303 struct neighbour *neigh; 1310 struct neighbour *neigh = dst_get_neighbour(dst);
1304 int rc = 0; 1311 int rc = 0;
1305 1312
1306 if (!dst || !(neigh = dst->neighbour)) 1313 if (!dst)
1307 goto discard; 1314 goto discard;
1308 1315
1309 __skb_pull(skb, skb_network_offset(skb)); 1316 __skb_pull(skb, skb_network_offset(skb));
@@ -1333,7 +1340,7 @@ out:
1333 return rc; 1340 return rc;
1334discard: 1341discard:
1335 NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n", 1342 NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
1336 dst, dst ? dst->neighbour : NULL); 1343 dst, neigh);
1337out_kfree_skb: 1344out_kfree_skb:
1338 rc = -EINVAL; 1345 rc = -EINVAL;
1339 kfree_skb(skb); 1346 kfree_skb(skb);
@@ -1347,7 +1354,7 @@ int neigh_connected_output(struct sk_buff *skb)
1347{ 1354{
1348 int err; 1355 int err;
1349 struct dst_entry *dst = skb_dst(skb); 1356 struct dst_entry *dst = skb_dst(skb);
1350 struct neighbour *neigh = dst->neighbour; 1357 struct neighbour *neigh = dst_get_neighbour(dst);
1351 struct net_device *dev = neigh->dev; 1358 struct net_device *dev = neigh->dev;
1352 unsigned int seq; 1359 unsigned int seq;
1353 1360
@@ -1383,11 +1390,15 @@ static void neigh_proxy_process(unsigned long arg)
1383 1390
1384 if (tdif <= 0) { 1391 if (tdif <= 0) {
1385 struct net_device *dev = skb->dev; 1392 struct net_device *dev = skb->dev;
1393
1386 __skb_unlink(skb, &tbl->proxy_queue); 1394 __skb_unlink(skb, &tbl->proxy_queue);
1387 if (tbl->proxy_redo && netif_running(dev)) 1395 if (tbl->proxy_redo && netif_running(dev)) {
1396 rcu_read_lock();
1388 tbl->proxy_redo(skb); 1397 tbl->proxy_redo(skb);
1389 else 1398 rcu_read_unlock();
1399 } else {
1390 kfree_skb(skb); 1400 kfree_skb(skb);
1401 }
1391 1402
1392 dev_put(dev); 1403 dev_put(dev);
1393 } else if (!sched_next || tdif < sched_next) 1404 } else if (!sched_next || tdif < sched_next)
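
Several hunks here wrap dst_get_neighbour() callers in rcu_read_lock(): the returned neighbour is only guaranteed alive inside the read-side section. The pattern reduced to a sketch (example_xmit is hypothetical):

static int example_xmit(struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct neighbour *n;
        int rc;

        rcu_read_lock();
        n = dst ? dst_get_neighbour(dst) : NULL;
        if (!n) {
                rcu_read_unlock();
                kfree_skb(skb);         /* no neighbour: drop */
                return -EINVAL;
        }
        rc = n->output(skb);            /* n must not escape the section */
        rcu_read_unlock();
        return rc;
}
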
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 33d2a1fba13..ec4eae35330 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -781,7 +781,7 @@ net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
781#endif 781#endif
782} 782}
783 783
784#ifdef CONFIG_XPS 784#ifdef CONFIG_SYSFS
785/* 785/*
786 * netdev_queue sysfs structures and functions. 786 * netdev_queue sysfs structures and functions.
787 */ 787 */
@@ -827,6 +827,23 @@ static const struct sysfs_ops netdev_queue_sysfs_ops = {
827 .store = netdev_queue_attr_store, 827 .store = netdev_queue_attr_store,
828}; 828};
829 829
830static ssize_t show_trans_timeout(struct netdev_queue *queue,
831 struct netdev_queue_attribute *attribute,
832 char *buf)
833{
834 unsigned long trans_timeout;
835
836 spin_lock_irq(&queue->_xmit_lock);
837 trans_timeout = queue->trans_timeout;
838 spin_unlock_irq(&queue->_xmit_lock);
839
840 return sprintf(buf, "%lu", trans_timeout);
841}
842
843static struct netdev_queue_attribute queue_trans_timeout =
844 __ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);
845
846#ifdef CONFIG_XPS
830static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue) 847static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
831{ 848{
832 struct net_device *dev = queue->dev; 849 struct net_device *dev = queue->dev;
@@ -1021,12 +1038,17 @@ error:
1021 1038
1022static struct netdev_queue_attribute xps_cpus_attribute = 1039static struct netdev_queue_attribute xps_cpus_attribute =
1023 __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map); 1040 __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
1041#endif /* CONFIG_XPS */
1024 1042
1025static struct attribute *netdev_queue_default_attrs[] = { 1043static struct attribute *netdev_queue_default_attrs[] = {
1044 &queue_trans_timeout.attr,
1045#ifdef CONFIG_XPS
1026 &xps_cpus_attribute.attr, 1046 &xps_cpus_attribute.attr,
1047#endif
1027 NULL 1048 NULL
1028}; 1049};
1029 1050
1051#ifdef CONFIG_XPS
1030static void netdev_queue_release(struct kobject *kobj) 1052static void netdev_queue_release(struct kobject *kobj)
1031{ 1053{
1032 struct netdev_queue *queue = to_netdev_queue(kobj); 1054 struct netdev_queue *queue = to_netdev_queue(kobj);
@@ -1077,10 +1099,13 @@ static void netdev_queue_release(struct kobject *kobj)
1077 memset(kobj, 0, sizeof(*kobj)); 1099 memset(kobj, 0, sizeof(*kobj));
1078 dev_put(queue->dev); 1100 dev_put(queue->dev);
1079} 1101}
1102#endif /* CONFIG_XPS */
1080 1103
1081static struct kobj_type netdev_queue_ktype = { 1104static struct kobj_type netdev_queue_ktype = {
1082 .sysfs_ops = &netdev_queue_sysfs_ops, 1105 .sysfs_ops = &netdev_queue_sysfs_ops,
1106#ifdef CONFIG_XPS
1083 .release = netdev_queue_release, 1107 .release = netdev_queue_release,
1108#endif
1084 .default_attrs = netdev_queue_default_attrs, 1109 .default_attrs = netdev_queue_default_attrs,
1085}; 1110};
1086 1111
@@ -1103,12 +1128,12 @@ static int netdev_queue_add_kobject(struct net_device *net, int index)
1103 1128
1104 return error; 1129 return error;
1105} 1130}
1106#endif /* CONFIG_XPS */ 1131#endif /* CONFIG_SYSFS */
1107 1132
1108int 1133int
1109netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num) 1134netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
1110{ 1135{
1111#ifdef CONFIG_XPS 1136#ifdef CONFIG_SYSFS
1112 int i; 1137 int i;
1113 int error = 0; 1138 int error = 0;
1114 1139
@@ -1126,14 +1151,14 @@ netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
1126 return error; 1151 return error;
1127#else 1152#else
1128 return 0; 1153 return 0;
1129#endif 1154#endif /* CONFIG_SYSFS */
1130} 1155}
1131 1156
1132static int register_queue_kobjects(struct net_device *net) 1157static int register_queue_kobjects(struct net_device *net)
1133{ 1158{
1134 int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0; 1159 int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
1135 1160
1136#if defined(CONFIG_RPS) || defined(CONFIG_XPS) 1161#ifdef CONFIG_SYSFS
1137 net->queues_kset = kset_create_and_add("queues", 1162 net->queues_kset = kset_create_and_add("queues",
1138 NULL, &net->dev.kobj); 1163 NULL, &net->dev.kobj);
1139 if (!net->queues_kset) 1164 if (!net->queues_kset)
@@ -1174,7 +1199,7 @@ static void remove_queue_kobjects(struct net_device *net)
1174 1199
1175 net_rx_queue_update_kobjects(net, real_rx, 0); 1200 net_rx_queue_update_kobjects(net, real_rx, 0);
1176 netdev_queue_update_kobjects(net, real_tx, 0); 1201 netdev_queue_update_kobjects(net, real_tx, 0);
1177#if defined(CONFIG_RPS) || defined(CONFIG_XPS) 1202#ifdef CONFIG_SYSFS
1178 kset_unregister(net->queues_kset); 1203 kset_unregister(net->queues_kset);
1179#endif 1204#endif
1180} 1205}
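
The new tx_timeout attribute is the standard recipe for a read-only per-queue sysfs file: a show() method plus an __ATTR() descriptor wired into default_attrs, surfacing as /sys/class/net/<dev>/queues/tx-<n>/tx_timeout. Adding another such attribute would look like this sketch (hypothetical names, using the file-local netdev_queue_attribute type seen above):

static ssize_t show_example(struct netdev_queue *queue,
                            struct netdev_queue_attribute *attribute,
                            char *buf)
{
        return sprintf(buf, "%u\n", 42);        /* hypothetical per-queue value */
}

static struct netdev_queue_attribute queue_example_attribute =
        __ATTR(example, S_IRUGO, show_example, NULL);

/* then list &queue_example_attribute.attr in netdev_queue_default_attrs[] */
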
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index ea489db1bc2..2772ed11bec 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -29,6 +29,20 @@ EXPORT_SYMBOL(init_net);
29 29
30#define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */ 30#define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
31 31
32static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
33
34static struct net_generic *net_alloc_generic(void)
35{
36 struct net_generic *ng;
37 size_t generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);
38
39 ng = kzalloc(generic_size, GFP_KERNEL);
40 if (ng)
41 ng->len = max_gen_ptrs;
42
43 return ng;
44}
45
32static int net_assign_generic(struct net *net, int id, void *data) 46static int net_assign_generic(struct net *net, int id, void *data)
33{ 47{
34 struct net_generic *ng, *old_ng; 48 struct net_generic *ng, *old_ng;
@@ -42,8 +56,7 @@ static int net_assign_generic(struct net *net, int id, void *data)
42 if (old_ng->len >= id) 56 if (old_ng->len >= id)
43 goto assign; 57 goto assign;
44 58
45 ng = kzalloc(sizeof(struct net_generic) + 59 ng = net_alloc_generic();
46 id * sizeof(void *), GFP_KERNEL);
47 if (ng == NULL) 60 if (ng == NULL)
48 return -ENOMEM; 61 return -ENOMEM;
49 62
@@ -58,7 +71,6 @@ static int net_assign_generic(struct net *net, int id, void *data)
58 * the old copy for kfree after a grace period. 71 * the old copy for kfree after a grace period.
59 */ 72 */
60 73
61 ng->len = id;
62 memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*)); 74 memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
63 75
64 rcu_assign_pointer(net->gen, ng); 76 rcu_assign_pointer(net->gen, ng);
@@ -70,21 +82,29 @@ assign:
70 82
71static int ops_init(const struct pernet_operations *ops, struct net *net) 83static int ops_init(const struct pernet_operations *ops, struct net *net)
72{ 84{
73 int err; 85 int err = -ENOMEM;
86 void *data = NULL;
87
74 if (ops->id && ops->size) { 88 if (ops->id && ops->size) {
75 void *data = kzalloc(ops->size, GFP_KERNEL); 89 data = kzalloc(ops->size, GFP_KERNEL);
76 if (!data) 90 if (!data)
77 return -ENOMEM; 91 goto out;
78 92
79 err = net_assign_generic(net, *ops->id, data); 93 err = net_assign_generic(net, *ops->id, data);
80 if (err) { 94 if (err)
81 kfree(data); 95 goto cleanup;
82 return err;
83 }
84 } 96 }
97 err = 0;
85 if (ops->init) 98 if (ops->init)
86 return ops->init(net); 99 err = ops->init(net);
87 return 0; 100 if (!err)
101 return 0;
102
103cleanup:
104 kfree(data);
105
106out:
107 return err;
88} 108}
89 109
90static void ops_free(const struct pernet_operations *ops, struct net *net) 110static void ops_free(const struct pernet_operations *ops, struct net *net)
@@ -159,18 +179,6 @@ out_undo:
159 goto out; 179 goto out;
160} 180}
161 181
162static struct net_generic *net_alloc_generic(void)
163{
164 struct net_generic *ng;
165 size_t generic_size = sizeof(struct net_generic) +
166 INITIAL_NET_GEN_PTRS * sizeof(void *);
167
168 ng = kzalloc(generic_size, GFP_KERNEL);
169 if (ng)
170 ng->len = INITIAL_NET_GEN_PTRS;
171
172 return ng;
173}
174 182
175#ifdef CONFIG_NET_NS 183#ifdef CONFIG_NET_NS
176static struct kmem_cache *net_cachep; 184static struct kmem_cache *net_cachep;
@@ -446,12 +454,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
446static int __register_pernet_operations(struct list_head *list, 454static int __register_pernet_operations(struct list_head *list,
447 struct pernet_operations *ops) 455 struct pernet_operations *ops)
448{ 456{
449 int err = 0; 457 return ops_init(ops, &init_net);
450 err = ops_init(ops, &init_net);
451 if (err)
452 ops_free(ops, &init_net);
453 return err;
454
455} 458}
456 459
457static void __unregister_pernet_operations(struct pernet_operations *ops) 460static void __unregister_pernet_operations(struct pernet_operations *ops)
@@ -481,6 +484,7 @@ again:
481 } 484 }
482 return error; 485 return error;
483 } 486 }
487 max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id);
484 } 488 }
485 error = __register_pernet_operations(list, ops); 489 error = __register_pernet_operations(list, ops);
486 if (error) { 490 if (error) {
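
net_alloc_generic() is rewritten to size the allocation with offsetof() over the trailing pointer array, and max_gen_ptrs grows with the largest registered id so new namespaces start with room for every existing subsystem. The sizing idiom, sketched with hypothetical names:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_generic {                /* hypothetical, mirrors net_generic */
        unsigned int len;
        struct rcu_head rcu;
        void *ptr[0];                   /* trailing flexible array */
};

static struct example_generic *example_alloc(unsigned int n)
{
        struct example_generic *g;

        /* offsetof over ptr[n] == header size + n pointers, exactly */
        g = kzalloc(offsetof(struct example_generic, ptr[n]), GFP_KERNEL);
        if (g)
                g->len = n;
        return g;
}
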
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 18d9cbda3a3..207a178f73b 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -193,7 +193,7 @@ void netpoll_poll_dev(struct net_device *dev)
193 193
194 poll_napi(dev); 194 poll_napi(dev);
195 195
196 if (dev->priv_flags & IFF_SLAVE) { 196 if (dev->flags & IFF_SLAVE) {
197 if (dev->npinfo) { 197 if (dev->npinfo) {
198 struct net_device *bond_dev = dev->master; 198 struct net_device *bond_dev = dev->master;
199 struct sk_buff *skb; 199 struct sk_buff *skb;
@@ -357,22 +357,23 @@ EXPORT_SYMBOL(netpoll_send_skb_on_dev);
357 357
358void netpoll_send_udp(struct netpoll *np, const char *msg, int len) 358void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
359{ 359{
360 int total_len, eth_len, ip_len, udp_len; 360 int total_len, ip_len, udp_len;
361 struct sk_buff *skb; 361 struct sk_buff *skb;
362 struct udphdr *udph; 362 struct udphdr *udph;
363 struct iphdr *iph; 363 struct iphdr *iph;
364 struct ethhdr *eth; 364 struct ethhdr *eth;
365 365
366 udp_len = len + sizeof(*udph); 366 udp_len = len + sizeof(*udph);
367 ip_len = eth_len = udp_len + sizeof(*iph); 367 ip_len = udp_len + sizeof(*iph);
368 total_len = eth_len + ETH_HLEN + NET_IP_ALIGN; 368 total_len = ip_len + LL_RESERVED_SPACE(np->dev);
369 369
370 skb = find_skb(np, total_len, total_len - len); 370 skb = find_skb(np, total_len + np->dev->needed_tailroom,
371 total_len - len);
371 if (!skb) 372 if (!skb)
372 return; 373 return;
373 374
374 skb_copy_to_linear_data(skb, msg, len); 375 skb_copy_to_linear_data(skb, msg, len);
375 skb->len += len; 376 skb_put(skb, len);
376 377
377 skb_push(skb, sizeof(*udph)); 378 skb_push(skb, sizeof(*udph));
378 skb_reset_transport_header(skb); 379 skb_reset_transport_header(skb);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index f76079cd750..c0e0f7679e7 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1070,7 +1070,9 @@ static ssize_t pktgen_if_write(struct file *file,
1070 len = num_arg(&user_buffer[i], 10, &value); 1070 len = num_arg(&user_buffer[i], 10, &value);
1071 if (len < 0) 1071 if (len < 0)
1072 return len; 1072 return len;
1073 1073 if ((value > 0) &&
1074 (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
1075 return -ENOTSUPP;
1074 i += len; 1076 i += len;
1075 pkt_dev->clone_skb = value; 1077 pkt_dev->clone_skb = value;
1076 1078
@@ -1930,7 +1932,7 @@ static int pktgen_device_event(struct notifier_block *unused,
1930{ 1932{
1931 struct net_device *dev = ptr; 1933 struct net_device *dev = ptr;
1932 1934
1933 if (!net_eq(dev_net(dev), &init_net)) 1935 if (!net_eq(dev_net(dev), &init_net) || pktgen_exiting)
1934 return NOTIFY_DONE; 1936 return NOTIFY_DONE;
1935 1937
1936 /* It is OK that we do not hold the group lock right now, 1938 /* It is OK that we do not hold the group lock right now,
@@ -3555,7 +3557,6 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
3555 pkt_dev->min_pkt_size = ETH_ZLEN; 3557 pkt_dev->min_pkt_size = ETH_ZLEN;
3556 pkt_dev->max_pkt_size = ETH_ZLEN; 3558 pkt_dev->max_pkt_size = ETH_ZLEN;
3557 pkt_dev->nfrags = 0; 3559 pkt_dev->nfrags = 0;
3558 pkt_dev->clone_skb = pg_clone_skb_d;
3559 pkt_dev->delay = pg_delay_d; 3560 pkt_dev->delay = pg_delay_d;
3560 pkt_dev->count = pg_count_d; 3561 pkt_dev->count = pg_count_d;
3561 pkt_dev->sofar = 0; 3562 pkt_dev->sofar = 0;
@@ -3563,7 +3564,6 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
3563 pkt_dev->udp_src_max = 9; 3564 pkt_dev->udp_src_max = 9;
3564 pkt_dev->udp_dst_min = 9; 3565 pkt_dev->udp_dst_min = 9;
3565 pkt_dev->udp_dst_max = 9; 3566 pkt_dev->udp_dst_max = 9;
3566
3567 pkt_dev->vlan_p = 0; 3567 pkt_dev->vlan_p = 0;
3568 pkt_dev->vlan_cfi = 0; 3568 pkt_dev->vlan_cfi = 0;
3569 pkt_dev->vlan_id = 0xffff; 3569 pkt_dev->vlan_id = 0xffff;
@@ -3575,6 +3575,8 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
3575 err = pktgen_setup_dev(pkt_dev, ifname); 3575 err = pktgen_setup_dev(pkt_dev, ifname);
3576 if (err) 3576 if (err)
3577 goto out1; 3577 goto out1;
3578 if (pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)
3579 pkt_dev->clone_skb = pg_clone_skb_d;
3578 3580
3579 pkt_dev->entry = proc_create_data(ifname, 0600, pg_proc_dir, 3581 pkt_dev->entry = proc_create_data(ifname, 0600, pg_proc_dir,
3580 &pktgen_if_fops, pkt_dev); 3582 &pktgen_if_fops, pkt_dev);
@@ -3753,12 +3755,18 @@ static void __exit pg_cleanup(void)
3753{ 3755{
3754 struct pktgen_thread *t; 3756 struct pktgen_thread *t;
3755 struct list_head *q, *n; 3757 struct list_head *q, *n;
3758 LIST_HEAD(list);
3756 3759
3757 /* Stop all interfaces & threads */ 3760 /* Stop all interfaces & threads */
3758 pktgen_exiting = true; 3761 pktgen_exiting = true;
3759 3762
3760 list_for_each_safe(q, n, &pktgen_threads) { 3763 mutex_lock(&pktgen_thread_lock);
3764 list_splice_init(&pktgen_threads, &list);
3765 mutex_unlock(&pktgen_thread_lock);
3766
3767 list_for_each_safe(q, n, &list) {
3761 t = list_entry(q, struct pktgen_thread, th_list); 3768 t = list_entry(q, struct pktgen_thread, th_list);
3769 list_del(&t->th_list);
3762 kthread_stop(t->tsk); 3770 kthread_stop(t->tsk);
3763 kfree(t); 3771 kfree(t);
3764 } 3772 }
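
pg_cleanup() now detaches the whole thread list under pktgen_thread_lock and walks a private copy, so the mutex is never held across a sleeping kthread_stop(). The same teardown pattern, sketched with hypothetical example_* names:

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct example_thread {
        struct list_head th_list;
        struct task_struct *tsk;
};

static DEFINE_MUTEX(example_lock);
static LIST_HEAD(example_threads);

static void example_cleanup(void)
{
        struct example_thread *t, *tmp;
        LIST_HEAD(list);

        mutex_lock(&example_lock);
        list_splice_init(&example_threads, &list);      /* detach under the lock */
        mutex_unlock(&example_lock);

        list_for_each_entry_safe(t, tmp, &list, th_list) {
                list_del(&t->th_list);
                kthread_stop(t->tsk);   /* may sleep; the lock is not held */
                kfree(t);
        }
}
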
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index abd936d8a71..ac49ad519ca 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -647,6 +647,12 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
647 } 647 }
648} 648}
649 649
650static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
651{
652 return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
653 (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
654}
655
650static unsigned int rtnl_dev_combine_flags(const struct net_device *dev, 656static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
651 const struct ifinfomsg *ifm) 657 const struct ifinfomsg *ifm)
652{ 658{
@@ -655,7 +661,7 @@ static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
655 /* bugwards compatibility: ifi_change == 0 is treated as ~0 */ 661 /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
656 if (ifm->ifi_change) 662 if (ifm->ifi_change)
657 flags = (flags & ifm->ifi_change) | 663 flags = (flags & ifm->ifi_change) |
658 (dev->flags & ~ifm->ifi_change); 664 (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
659 665
660 return flags; 666 return flags;
661} 667}
@@ -1298,6 +1304,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
1298 goto errout; 1304 goto errout;
1299 send_addr_notify = 1; 1305 send_addr_notify = 1;
1300 modified = 1; 1306 modified = 1;
1307 add_device_randomness(dev->dev_addr, dev->addr_len);
1301 } 1308 }
1302 1309
1303 if (tb[IFLA_MTU]) { 1310 if (tb[IFLA_MTU]) {
diff --git a/net/core/scm.c b/net/core/scm.c
index 4c1ef026d69..811b53fb330 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -192,7 +192,7 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
192 goto error; 192 goto error;
193 193
194 cred->uid = cred->euid = p->creds.uid; 194 cred->uid = cred->euid = p->creds.uid;
195 cred->gid = cred->egid = p->creds.uid; 195 cred->gid = cred->egid = p->creds.gid;
196 put_cred(p->cred); 196 put_cred(p->cred);
197 p->cred = cred; 197 p->cred = cred;
198 } 198 }
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
new file mode 100644
index 00000000000..45329d7c9dd
--- /dev/null
+++ b/net/core/secure_seq.c
@@ -0,0 +1,184 @@
1#include <linux/kernel.h>
2#include <linux/init.h>
3#include <linux/cryptohash.h>
4#include <linux/module.h>
5#include <linux/cache.h>
6#include <linux/random.h>
7#include <linux/hrtimer.h>
8#include <linux/ktime.h>
9#include <linux/string.h>
10
11#include <net/secure_seq.h>
12
13static u32 net_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
14
15static int __init net_secret_init(void)
16{
17 get_random_bytes(net_secret, sizeof(net_secret));
18 return 0;
19}
20late_initcall(net_secret_init);
21
22static u32 seq_scale(u32 seq)
23{
24 /*
25 * As close as possible to RFC 793, which
26 * suggests using a 250 kHz clock.
27 * Further reading shows this assumes 2 Mb/s networks.
28 * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
29 * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but
30 * we also need to limit the resolution so that the u32 seq
31 * overlaps less than one time per MSL (2 minutes).
32 * Choosing a clock of 64 ns period is OK. (period of 274 s)
33 */
34 return seq + (ktime_to_ns(ktime_get_real()) >> 6);
35}
36
37#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
38__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
39 __be16 sport, __be16 dport)
40{
41 u32 secret[MD5_MESSAGE_BYTES / 4];
42 u32 hash[MD5_DIGEST_WORDS];
43 u32 i;
44
45 memcpy(hash, saddr, 16);
46 for (i = 0; i < 4; i++)
47 secret[i] = net_secret[i] + daddr[i];
48 secret[4] = net_secret[4] +
49 (((__force u16)sport << 16) + (__force u16)dport);
50 for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
51 secret[i] = net_secret[i];
52
53 md5_transform(hash, secret);
54
55 return seq_scale(hash[0]);
56}
57EXPORT_SYMBOL(secure_tcpv6_sequence_number);
58
59u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
60 __be16 dport)
61{
62 u32 secret[MD5_MESSAGE_BYTES / 4];
63 u32 hash[MD5_DIGEST_WORDS];
64 u32 i;
65
66 memcpy(hash, saddr, 16);
67 for (i = 0; i < 4; i++)
68 secret[i] = net_secret[i] + (__force u32) daddr[i];
69 secret[4] = net_secret[4] + (__force u32)dport;
70 for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
71 secret[i] = net_secret[i];
72
73 md5_transform(hash, secret);
74
75 return hash[0];
76}
77#endif
78
79#ifdef CONFIG_INET
80__u32 secure_ip_id(__be32 daddr)
81{
82 u32 hash[MD5_DIGEST_WORDS];
83
84 hash[0] = (__force __u32) daddr;
85 hash[1] = net_secret[13];
86 hash[2] = net_secret[14];
87 hash[3] = net_secret[15];
88
89 md5_transform(hash, net_secret);
90
91 return hash[0];
92}
93
94__u32 secure_ipv6_id(const __be32 daddr[4])
95{
96 __u32 hash[4];
97
98 memcpy(hash, daddr, 16);
99 md5_transform(hash, net_secret);
100
101 return hash[0];
102}
103
104__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
105 __be16 sport, __be16 dport)
106{
107 u32 hash[MD5_DIGEST_WORDS];
108
109 hash[0] = (__force u32)saddr;
110 hash[1] = (__force u32)daddr;
111 hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
112 hash[3] = net_secret[15];
113
114 md5_transform(hash, net_secret);
115
116 return seq_scale(hash[0]);
117}
118
119u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
120{
121 u32 hash[MD5_DIGEST_WORDS];
122
123 hash[0] = (__force u32)saddr;
124 hash[1] = (__force u32)daddr;
125 hash[2] = (__force u32)dport ^ net_secret[14];
126 hash[3] = net_secret[15];
127
128 md5_transform(hash, net_secret);
129
130 return hash[0];
131}
132EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
133#endif
134
135#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
136u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
137 __be16 sport, __be16 dport)
138{
139 u32 hash[MD5_DIGEST_WORDS];
140 u64 seq;
141
142 hash[0] = (__force u32)saddr;
143 hash[1] = (__force u32)daddr;
144 hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
145 hash[3] = net_secret[15];
146
147 md5_transform(hash, net_secret);
148
149 seq = hash[0] | (((u64)hash[1]) << 32);
150 seq += ktime_to_ns(ktime_get_real());
151 seq &= (1ull << 48) - 1;
152
153 return seq;
154}
155EXPORT_SYMBOL(secure_dccp_sequence_number);
156
157#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
158u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
159 __be16 sport, __be16 dport)
160{
161 u32 secret[MD5_MESSAGE_BYTES / 4];
162 u32 hash[MD5_DIGEST_WORDS];
163 u64 seq;
164 u32 i;
165
166 memcpy(hash, saddr, 16);
167 for (i = 0; i < 4; i++)
168 secret[i] = net_secret[i] + daddr[i];
169 secret[4] = net_secret[4] +
170 (((__force u16)sport << 16) + (__force u16)dport);
171 for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
172 secret[i] = net_secret[i];
173
174 md5_transform(hash, secret);
175
176 seq = hash[0] | (((u64)hash[1]) << 32);
177 seq += ktime_to_ns(ktime_get_real());
178 seq &= (1ull << 48) - 1;
179
180 return seq;
181}
182EXPORT_SYMBOL(secure_dccpv6_sequence_number);
183#endif
184#endif
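
A quick check of the seq_scale() comment in the new file: one increment per 64 ns wraps a 32-bit sequence space every 2^32 * 64 ns, about 274.9 s, comfortably beyond the 2-minute MSL it must exceed. The arithmetic as a standalone program (illustrative only):

#include <stdio.h>

int main(void)
{
        /* 32-bit sequence space, one increment per 64 ns */
        unsigned long long wrap_ns = (1ULL << 32) * 64;

        printf("wrap period: %.1f s\n", wrap_ns / 1e9); /* ~274.9 s */
        return 0;
}
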
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 46cbd28f40f..4821df84eba 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2985,6 +2985,8 @@ static void sock_rmem_free(struct sk_buff *skb)
2985 */ 2985 */
2986int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 2986int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
2987{ 2987{
2988 int len = skb->len;
2989
2988 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 2990 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
2989 (unsigned)sk->sk_rcvbuf) 2991 (unsigned)sk->sk_rcvbuf)
2990 return -ENOMEM; 2992 return -ENOMEM;
@@ -2999,7 +3001,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
2999 3001
3000 skb_queue_tail(&sk->sk_error_queue, skb); 3002 skb_queue_tail(&sk->sk_error_queue, skb);
3001 if (!sock_flag(sk, SOCK_DEAD)) 3003 if (!sock_flag(sk, SOCK_DEAD))
3002 sk->sk_data_ready(sk, skb->len); 3004 sk->sk_data_ready(sk, len);
3003 return 0; 3005 return 0;
3004} 3006}
3005EXPORT_SYMBOL(sock_queue_err_skb); 3007EXPORT_SYMBOL(sock_queue_err_skb);
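
The sock_queue_err_skb() change closes a use-after-free: once the skb is queued on sk_error_queue a concurrent reader can dequeue and free it, so skb->len has to be snapshotted while the caller still owns the buffer. The discipline in isolation (hypothetical wrapper):

static int example_queue_err(struct sock *sk, struct sk_buff *skb)
{
        int len = skb->len;     /* snapshot while we still own the skb */

        skb_queue_tail(&sk->sk_error_queue, skb);
        /* a concurrent reader may already have freed skb at this point */
        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_data_ready(sk, len);
        return 0;
}
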
diff --git a/net/core/sock.c b/net/core/sock.c
index 6e819780c23..b4bb59a9245 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1257,6 +1257,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
1257 /* It is still raw copy of parent, so invalidate 1257 /* It is still raw copy of parent, so invalidate
1258 * destructor and make plain sk_free() */ 1258 * destructor and make plain sk_free() */
1259 newsk->sk_destruct = NULL; 1259 newsk->sk_destruct = NULL;
1260 bh_unlock_sock(newsk);
1260 sk_free(newsk); 1261 sk_free(newsk);
1261 newsk = NULL; 1262 newsk = NULL;
1262 goto out; 1263 goto out;
@@ -1500,6 +1501,11 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1500 gfp_t gfp_mask; 1501 gfp_t gfp_mask;
1501 long timeo; 1502 long timeo;
1502 int err; 1503 int err;
1504 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1505
1506 err = -EMSGSIZE;
1507 if (npages > MAX_SKB_FRAGS)
1508 goto failure;
1503 1509
1504 gfp_mask = sk->sk_allocation; 1510 gfp_mask = sk->sk_allocation;
1505 if (gfp_mask & __GFP_WAIT) 1511 if (gfp_mask & __GFP_WAIT)
@@ -1518,14 +1524,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1518 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { 1524 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1519 skb = alloc_skb(header_len, gfp_mask); 1525 skb = alloc_skb(header_len, gfp_mask);
1520 if (skb) { 1526 if (skb) {
1521 int npages;
1522 int i; 1527 int i;
1523 1528
1524 /* No pages, we're done... */ 1529 /* No pages, we're done... */
1525 if (!data_len) 1530 if (!data_len)
1526 break; 1531 break;
1527 1532
1528 npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1529 skb->truesize += data_len; 1533 skb->truesize += data_len;
1530 skb_shinfo(skb)->nr_frags = npages; 1534 skb_shinfo(skb)->nr_frags = npages;
1531 for (i = 0; i < npages; i++) { 1535 for (i = 0; i < npages; i++) {
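
Two independent fixes above: sk_clone() must bh_unlock_sock() the half-built clone before sk_free(), and sock_alloc_send_pskb() now rejects a data length needing more than MAX_SKB_FRAGS pages before doing any allocation work. The new up-front bound, using the hunk's own rounding (hypothetical helper):

static int example_frag_check(unsigned long data_len)
{
        /* same rounding as the hunk: (len + PAGE_SIZE - 1) >> PAGE_SHIFT */
        int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

        return npages > MAX_SKB_FRAGS ? -EMSGSIZE : 0;
}
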
diff --git a/net/core/timestamping.c b/net/core/timestamping.c
index 7e7ca375d43..97d036a6b89 100644
--- a/net/core/timestamping.c
+++ b/net/core/timestamping.c
@@ -57,9 +57,13 @@ void skb_clone_tx_timestamp(struct sk_buff *skb)
57 case PTP_CLASS_V2_VLAN: 57 case PTP_CLASS_V2_VLAN:
58 phydev = skb->dev->phydev; 58 phydev = skb->dev->phydev;
59 if (likely(phydev->drv->txtstamp)) { 59 if (likely(phydev->drv->txtstamp)) {
60 if (!atomic_inc_not_zero(&sk->sk_refcnt))
61 return;
60 clone = skb_clone(skb, GFP_ATOMIC); 62 clone = skb_clone(skb, GFP_ATOMIC);
61 if (!clone) 63 if (!clone) {
64 sock_put(sk);
62 return; 65 return;
66 }
63 clone->sk = sk; 67 clone->sk = sk;
64 phydev->drv->txtstamp(phydev, clone, type); 68 phydev->drv->txtstamp(phydev, clone, type);
65 } 69 }
@@ -76,8 +80,11 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
76 struct sock_exterr_skb *serr; 80 struct sock_exterr_skb *serr;
77 int err; 81 int err;
78 82
79 if (!hwtstamps) 83 if (!hwtstamps) {
84 sock_put(sk);
85 kfree_skb(skb);
80 return; 86 return;
87 }
81 88
82 *skb_hwtstamps(skb) = *hwtstamps; 89 *skb_hwtstamps(skb) = *hwtstamps;
83 serr = SKB_EXT_ERR(skb); 90 serr = SKB_EXT_ERR(skb);
@@ -86,6 +93,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
86 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 93 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
87 skb->sk = NULL; 94 skb->sk = NULL;
88 err = sock_queue_err_skb(sk, skb); 95 err = sock_queue_err_skb(sk, skb);
96 sock_put(sk);
89 if (err) 97 if (err)
90 kfree_skb(skb); 98 kfree_skb(skb);
91} 99}
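
The timestamping fixes pin the socket across the asynchronous PHY completion: the reference is taken only if the socket is not already dying, and every exit path afterwards drops exactly one reference. The bare pattern (example_clone_for_phy is hypothetical):

static void example_clone_for_phy(struct sk_buff *skb, struct sock *sk)
{
        struct sk_buff *clone;

        if (!atomic_inc_not_zero(&sk->sk_refcnt))
                return;                 /* socket already being torn down */

        clone = skb_clone(skb, GFP_ATOMIC);
        if (!clone) {
                sock_put(sk);           /* error path drops our reference */
                return;
        }
        clone->sk = sk;                 /* completion path must sock_put(sk) */
        /* hand clone to the PHY driver here */
}
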
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 8c36adfd191..332639b56f4 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -26,6 +26,7 @@
26#include <net/timewait_sock.h> 26#include <net/timewait_sock.h>
27#include <net/tcp_states.h> 27#include <net/tcp_states.h>
28#include <net/xfrm.h> 28#include <net/xfrm.h>
29#include <net/secure_seq.h>
29 30
30#include "ackvec.h" 31#include "ackvec.h"
31#include "ccid.h" 32#include "ccid.h"
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 8dc4348774a..b74f76117dc 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -29,6 +29,7 @@
29#include <net/transp_v6.h> 29#include <net/transp_v6.h>
30#include <net/ip6_checksum.h> 30#include <net/ip6_checksum.h>
31#include <net/xfrm.h> 31#include <net/xfrm.h>
32#include <net/secure_seq.h>
32 33
33#include "dccp.h" 34#include "dccp.h"
34#include "ipv6.h" 35#include "ipv6.h"
@@ -69,13 +70,7 @@ static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
69 dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr); 70 dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
70} 71}
71 72
72static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr, 73static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
73 __be16 sport, __be16 dport )
74{
75 return secure_tcpv6_sequence_number(saddr, daddr, sport, dport);
76}
77
78static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
79{ 74{
80 return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32, 75 return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
81 ipv6_hdr(skb)->saddr.s6_addr32, 76 ipv6_hdr(skb)->saddr.s6_addr32,
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 602dade7e9a..9810610d26c 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -208,7 +208,7 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
208{ 208{
209 struct dst_entry *dst = skb_dst(skb); 209 struct dst_entry *dst = skb_dst(skb);
210 struct dn_route *rt = (struct dn_route *)dst; 210 struct dn_route *rt = (struct dn_route *)dst;
211 struct neighbour *neigh = dst->neighbour; 211 struct neighbour *neigh = dst_get_neighbour(dst);
212 struct net_device *dev = neigh->dev; 212 struct net_device *dev = neigh->dev;
213 char mac_addr[ETH_ALEN]; 213 char mac_addr[ETH_ALEN];
214 214
@@ -227,7 +227,7 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
227static int dn_long_output(struct sk_buff *skb) 227static int dn_long_output(struct sk_buff *skb)
228{ 228{
229 struct dst_entry *dst = skb_dst(skb); 229 struct dst_entry *dst = skb_dst(skb);
230 struct neighbour *neigh = dst->neighbour; 230 struct neighbour *neigh = dst_get_neighbour(dst);
231 struct net_device *dev = neigh->dev; 231 struct net_device *dev = neigh->dev;
232 int headroom = dev->hard_header_len + sizeof(struct dn_long_packet) + 3; 232 int headroom = dev->hard_header_len + sizeof(struct dn_long_packet) + 3;
233 unsigned char *data; 233 unsigned char *data;
@@ -274,7 +274,7 @@ static int dn_long_output(struct sk_buff *skb)
274static int dn_short_output(struct sk_buff *skb) 274static int dn_short_output(struct sk_buff *skb)
275{ 275{
276 struct dst_entry *dst = skb_dst(skb); 276 struct dst_entry *dst = skb_dst(skb);
277 struct neighbour *neigh = dst->neighbour; 277 struct neighbour *neigh = dst_get_neighbour(dst);
278 struct net_device *dev = neigh->dev; 278 struct net_device *dev = neigh->dev;
279 int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2; 279 int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2;
280 struct dn_short_packet *sp; 280 struct dn_short_packet *sp;
@@ -318,7 +318,7 @@ static int dn_short_output(struct sk_buff *skb)
318static int dn_phase3_output(struct sk_buff *skb) 318static int dn_phase3_output(struct sk_buff *skb)
319{ 319{
320 struct dst_entry *dst = skb_dst(skb); 320 struct dst_entry *dst = skb_dst(skb);
321 struct neighbour *neigh = dst->neighbour; 321 struct neighbour *neigh = dst_get_neighbour(dst);
322 struct net_device *dev = neigh->dev; 322 struct net_device *dev = neigh->dev;
323 int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2; 323 int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2;
324 struct dn_short_packet *sp; 324 struct dn_short_packet *sp;
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 74544bc6fde..b91b60363c3 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -241,9 +241,11 @@ static int dn_dst_gc(struct dst_ops *ops)
241 */ 241 */
242static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu) 242static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
243{ 243{
244 struct neighbour *n = dst_get_neighbour(dst);
244 u32 min_mtu = 230; 245 u32 min_mtu = 230;
245 struct dn_dev *dn = dst->neighbour ? 246 struct dn_dev *dn;
246 rcu_dereference_raw(dst->neighbour->dev->dn_ptr) : NULL; 247
248 dn = n ? rcu_dereference_raw(n->dev->dn_ptr) : NULL;
247 249
248 if (dn && dn->use_long == 0) 250 if (dn && dn->use_long == 0)
249 min_mtu -= 6; 251 min_mtu -= 6;
@@ -715,7 +717,7 @@ static int dn_output(struct sk_buff *skb)
715 717
716 int err = -EINVAL; 718 int err = -EINVAL;
717 719
718 if ((neigh = dst->neighbour) == NULL) 720 if ((neigh = dst_get_neighbour(dst)) == NULL)
719 goto error; 721 goto error;
720 722
721 skb->dev = dev; 723 skb->dev = dev;
@@ -750,7 +752,7 @@ static int dn_forward(struct sk_buff *skb)
750 struct dst_entry *dst = skb_dst(skb); 752 struct dst_entry *dst = skb_dst(skb);
751 struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr); 753 struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr);
752 struct dn_route *rt; 754 struct dn_route *rt;
753 struct neighbour *neigh = dst->neighbour; 755 struct neighbour *neigh = dst_get_neighbour(dst);
754 int header_len; 756 int header_len;
755#ifdef CONFIG_NETFILTER 757#ifdef CONFIG_NETFILTER
756 struct net_device *dev = skb->dev; 758 struct net_device *dev = skb->dev;
@@ -833,11 +835,11 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
833 } 835 }
834 rt->rt_type = res->type; 836 rt->rt_type = res->type;
835 837
836 if (dev != NULL && rt->dst.neighbour == NULL) { 838 if (dev != NULL && dst_get_neighbour(&rt->dst) == NULL) {
837 n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev); 839 n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
838 if (IS_ERR(n)) 840 if (IS_ERR(n))
839 return PTR_ERR(n); 841 return PTR_ERR(n);
840 rt->dst.neighbour = n; 842 dst_set_neighbour(&rt->dst, n);
841 } 843 }
842 844
843 if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu) 845 if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
@@ -1144,7 +1146,7 @@ make_route:
1144 rt->rt_dst_map = fld.daddr; 1146 rt->rt_dst_map = fld.daddr;
1145 rt->rt_src_map = fld.saddr; 1147 rt->rt_src_map = fld.saddr;
1146 1148
1147 rt->dst.neighbour = neigh; 1149 dst_set_neighbour(&rt->dst, neigh);
1148 neigh = NULL; 1150 neigh = NULL;
1149 1151
1150 rt->dst.lastuse = jiffies; 1152 rt->dst.lastuse = jiffies;
@@ -1416,7 +1418,7 @@ make_route:
1416 rt->fld.flowidn_iif = in_dev->ifindex; 1418 rt->fld.flowidn_iif = in_dev->ifindex;
1417 rt->fld.flowidn_mark = fld.flowidn_mark; 1419 rt->fld.flowidn_mark = fld.flowidn_mark;
1418 1420
1419 rt->dst.neighbour = neigh; 1421 dst_set_neighbour(&rt->dst, neigh);
1420 rt->dst.lastuse = jiffies; 1422 rt->dst.lastuse = jiffies;
1421 rt->dst.output = dn_rt_bug; 1423 rt->dst.output = dn_rt_bug;
1422 switch(res.type) { 1424 switch(res.type) {
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 44d2b42fda5..2780e9b2b1c 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -340,6 +340,7 @@ void ether_setup(struct net_device *dev)
340 dev->addr_len = ETH_ALEN; 340 dev->addr_len = ETH_ALEN;
341 dev->tx_queue_len = 1000; /* Ethernet wants good queues */ 341 dev->tx_queue_len = 1000; /* Ethernet wants good queues */
342 dev->flags = IFF_BROADCAST|IFF_MULTICAST; 342 dev->flags = IFF_BROADCAST|IFF_MULTICAST;
343 dev->priv_flags = IFF_TX_SKB_SHARING;
343 344
344 memset(dev->broadcast, 0xFF, ETH_ALEN); 345 memset(dev->broadcast, 0xFF, ETH_ALEN);
345 346
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index f2dc69cffb5..681084d76a9 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -14,6 +14,7 @@ obj-y := route.o inetpeer.o protocol.o \
14 inet_fragment.o ping.o 14 inet_fragment.o ping.o
15 15
16obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o 16obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
17obj-$(CONFIG_SYSFS) += sysfs_net_ipv4.o
17obj-$(CONFIG_PROC_FS) += proc.o 18obj-$(CONFIG_PROC_FS) += proc.o
18obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o 19obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
19obj-$(CONFIG_IP_MROUTE) += ipmr.o 20obj-$(CONFIG_IP_MROUTE) += ipmr.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index ef1528af7ab..4d60f12c7b6 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -118,6 +118,19 @@
 #include <linux/mroute.h>
 #endif
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+
+static inline int current_has_network(void)
+{
+	return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
+}
+#else
+static inline int current_has_network(void)
+{
+	return 1;
+}
+#endif
 
 /* The inetsw table contains everything that inet_create needs to
  * build a new socket.
@@ -258,6 +271,7 @@ static inline int inet_netns_ok(struct net *net, int protocol)
 	return ipprot->netns_ok;
 }
 
+
 /*
  *	Create an inet socket.
  */
@@ -274,6 +288,9 @@ static int inet_create(struct net *net, struct socket *sock, int protocol,
 	int try_loading_module = 0;
 	int err;
 
+	if (!current_has_network())
+		return -EACCES;
+
 	if (unlikely(!inet_ehash_secret))
 		if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
 			build_ehash_secret();
@@ -874,6 +891,7 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 	case SIOCSIFPFLAGS:
 	case SIOCGIFPFLAGS:
 	case SIOCSIFFLAGS:
+	case SIOCKILLADDR:
 		err = devinet_ioctl(net, cmd, (void __user *)arg);
 		break;
 	default:
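With CONFIG_ANDROID_PARANOID_NETWORK enabled, the current_has_network() check added to inet_create() above makes AF_INET socket creation fail with EACCES unless the caller is in the AID_INET group or holds CAP_NET_RAW. A minimal userspace sketch of the expected behavior follows; it assumes such a kernel, and the conventional Android mapping of AID_INET to gid 3003 is an assumption, not part of this diff.

    /* Hedged sketch: expected userspace view of the inet_create() gate above.
     * Assumes a kernel built with CONFIG_ANDROID_PARANOID_NETWORK; the gid
     * 3003 mapping for AID_INET is an Android convention, assumed here. */
    #include <errno.h>
    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
    	int fd = socket(AF_INET, SOCK_STREAM, 0);

    	if (fd < 0 && errno == EACCES)
    		printf("blocked: not in AID_INET and no CAP_NET_RAW\n");
    	else if (fd >= 0)
    		printf("allowed: AID_INET member or CAP_NET_RAW holder\n");
    	return 0;
    }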
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index c1f4154552f..36d14406261 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -136,8 +136,6 @@ static void ah_output_done(struct crypto_async_request *base, int err)
 		memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
 	}
 
-	err = ah->nexthdr;
-
 	kfree(AH_SKB_CB(skb)->tmp);
 	xfrm_output_resume(skb, err);
 }
@@ -264,12 +262,12 @@ static void ah_input_done(struct crypto_async_request *base, int err)
 	if (err)
 		goto out;
 
+	err = ah->nexthdr;
+
 	skb->network_header += ah_hlen;
 	memcpy(skb_network_header(skb), work_iph, ihl);
 	__skb_pull(skb, ah_hlen + ihl);
 	skb_set_transport_header(skb, -ihl);
-
-	err = ah->nexthdr;
 out:
 	kfree(AH_SKB_CB(skb)->tmp);
 	xfrm_input_resume(skb, err);
@@ -371,8 +369,6 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
 		if (err == -EINPROGRESS)
 			goto out;
 
-		if (err == -EBUSY)
-			err = NET_XMIT_DROP;
 		goto out_free;
 	}
 
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 1b74d3b6437..d8f852dbf66 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -518,26 +518,32 @@ EXPORT_SYMBOL(arp_find);
 
 /* END OF OBSOLETE FUNCTIONS */
 
+struct neighbour *__arp_bind_neighbour(struct dst_entry *dst, __be32 nexthop)
+{
+	struct net_device *dev = dst->dev;
+
+	if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
+		nexthop = 0;
+	return __neigh_lookup_errno(
+#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
+		dev->type == ARPHRD_ATM ?
+		clip_tbl_hook :
+#endif
+		&arp_tbl, &nexthop, dev);
+}
+
 int arp_bind_neighbour(struct dst_entry *dst)
 {
 	struct net_device *dev = dst->dev;
-	struct neighbour *n = dst->neighbour;
+	struct neighbour *n = dst_get_neighbour(dst);
 
 	if (dev == NULL)
 		return -EINVAL;
 	if (n == NULL) {
-		__be32 nexthop = ((struct rtable *)dst)->rt_gateway;
-		if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
-			nexthop = 0;
-		n = __neigh_lookup_errno(
-#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
-			    dev->type == ARPHRD_ATM ?
-			    clip_tbl_hook :
-#endif
-			    &arp_tbl, &nexthop, dev);
+		n = __arp_bind_neighbour(dst, ((struct rtable *)dst)->rt_gateway);
 		if (IS_ERR(n))
 			return PTR_ERR(n);
-		dst->neighbour = n;
+		dst_set_neighbour(dst, n);
 	}
 	return 0;
 }
@@ -900,7 +906,8 @@ static int arp_process(struct sk_buff *skb)
 		if (addr_type == RTN_UNICAST &&
 		    (arp_fwd_proxy(in_dev, dev, rt) ||
 		     arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
-		     pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) {
+		     (rt->dst.dev != dev &&
+		      pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))) {
 			n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
 			if (n)
 				neigh_release(n);
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 2b3c23c287c..062876b7730 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1725,8 +1725,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
 		case CIPSO_V4_TAG_LOCAL:
 			/* This is a non-standard tag that we only allow for
 			 * local connections, so if the incoming interface is
-			 * not the loopback device drop the packet. */
-			if (!(skb->dev->flags & IFF_LOOPBACK)) {
+			 * not the loopback device drop the packet. Further,
+			 * there is no legitimate reason for setting this from
+			 * userspace so reject it if skb is NULL. */
+			if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) {
 				err_offset = opt_iter;
 				goto validate_return_locked;
 			}
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 0d4a184af16..c48323ad268 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -59,6 +59,7 @@
 
 #include <net/arp.h>
 #include <net/ip.h>
+#include <net/tcp.h>
 #include <net/route.h>
 #include <net/ip_fib.h>
 #include <net/rtnetlink.h>
@@ -735,6 +736,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 	case SIOCSIFBRDADDR:	/* Set the broadcast address */
 	case SIOCSIFDSTADDR:	/* Set the destination address */
 	case SIOCSIFNETMASK: 	/* Set the netmask for the interface */
+	case SIOCKILLADDR:	/* Nuke all sockets on this address */
 		ret = -EACCES;
 		if (!capable(CAP_NET_ADMIN))
 			goto out;
@@ -786,7 +788,8 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 	}
 
 	ret = -EADDRNOTAVAIL;
-	if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
+	if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS
+	    && cmd != SIOCKILLADDR)
 		goto done;
 
 	switch (cmd) {
@@ -912,6 +915,9 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 			inet_insert_ifa(ifa);
 		}
 		break;
+	case SIOCKILLADDR:	/* Nuke all connections on this address */
+		ret = tcp_nuke_addr(net, (struct sockaddr *) sin);
+		break;
 	}
 done:
 	rtnl_unlock();
@@ -1134,15 +1140,15 @@ static void inetdev_send_gratuitous_arp(struct net_device *dev,
 					struct in_device *in_dev)
 
 {
-	struct in_ifaddr *ifa = in_dev->ifa_list;
-
-	if (!ifa)
-		return;
+	struct in_ifaddr *ifa;
 
-	arp_send(ARPOP_REQUEST, ETH_P_ARP,
-		 ifa->ifa_local, dev,
-		 ifa->ifa_local, NULL,
-		 dev->dev_addr, NULL);
+	for (ifa = in_dev->ifa_list; ifa;
+	     ifa = ifa->ifa_next) {
+		arp_send(ARPOP_REQUEST, ETH_P_ARP,
+			 ifa->ifa_local, dev,
+			 ifa->ifa_local, NULL,
+			 dev->dev_addr, NULL);
+	}
 }
 
 /* Called only under RTNL semaphore */
@@ -1490,7 +1496,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
 			     void __user *buffer,
 			     size_t *lenp, loff_t *ppos)
 {
+	int old_value = *(int *)ctl->data;
 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+	int new_value = *(int *)ctl->data;
 
 	if (write) {
 		struct ipv4_devconf *cnf = ctl->extra1;
@@ -1501,6 +1509,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
 
 		if (cnf == net->ipv4.devconf_dflt)
 			devinet_copy_dflt_conf(net, i);
+		if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1)
+			if ((new_value == 0) && (old_value != 0))
+				rt_cache_flush(net, 0);
 	}
 
 	return ret;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index a5b413416da..530787bc199 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -457,28 +457,22 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
 	struct esp_data *esp = x->data;
 	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
 	u32 align = max_t(u32, blksize, esp->padlen);
-	u32 rem;
-
-	mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
-	rem = mtu & (align - 1);
-	mtu &= ~(align - 1);
+	unsigned int net_adj;
 
 	switch (x->props.mode) {
-	case XFRM_MODE_TUNNEL:
-		break;
-	default:
 	case XFRM_MODE_TRANSPORT:
-		/* The worst case */
-		mtu -= blksize - 4;
-		mtu += min_t(u32, blksize - 4, rem);
-		break;
 	case XFRM_MODE_BEET:
-		/* The worst case. */
-		mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem);
+		net_adj = sizeof(struct iphdr);
 		break;
+	case XFRM_MODE_TUNNEL:
+		net_adj = 0;
+		break;
+	default:
+		BUG();
 	}
 
-	return mtu - 2;
+	return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
+		 net_adj) & ~(align - 1)) + (net_adj - 2);
 }
 
 static void esp4_err(struct sk_buff *skb, u32 info)
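The rewritten esp4_get_mtu() above collapses the old rem/align bookkeeping into one expression: subtract the ESP header, the ICV, and the mode-dependent net_adj from the MTU, round down to the cipher alignment, then add back net_adj - 2. A worked sketch of that arithmetic follows; the MTU, header, ICV, and alignment values are illustrative assumptions, not taken from the diff.

    /* Hedged sketch of the esp4_get_mtu() arithmetic above. All numeric
     * inputs are assumed example values, not mandated by the patch. */
    #include <stdio.h>

    static unsigned int esp_payload_mtu(unsigned int mtu, unsigned int header_len,
    				    unsigned int authsize, unsigned int align,
    				    unsigned int net_adj)
    {
    	return ((mtu - header_len - authsize - net_adj) & ~(align - 1))
    		+ (net_adj - 2);
    }

    int main(void)
    {
    	/* transport mode: net_adj = sizeof(struct iphdr) = 20; assume a
    	 * 1500-byte MTU, 24-byte ESP header, 8-byte ICV, 16-byte blocks */
    	printf("%u\n", esp_payload_mtu(1500, 24, 8, 16, 20)); /* prints 1458 */
    	return 0;
    }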
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 33e2c35b74b..7e454ba8e85 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -142,6 +142,18 @@ const struct fib_prop fib_props[RTN_MAX + 1] = {
 };
 
 /* Release a nexthop info record */
+static void free_fib_info_rcu(struct rcu_head *head)
+{
+	struct fib_info *fi = container_of(head, struct fib_info, rcu);
+
+	change_nexthops(fi) {
+		if (nexthop_nh->nh_dev)
+			dev_put(nexthop_nh->nh_dev);
+	} endfor_nexthops(fi);
+
+	release_net(fi->fib_net);
+	kfree(fi);
+}
 
 void free_fib_info(struct fib_info *fi)
 {
@@ -149,14 +161,8 @@ void free_fib_info(struct fib_info *fi)
 		pr_warning("Freeing alive fib_info %p\n", fi);
 		return;
 	}
-	change_nexthops(fi) {
-		if (nexthop_nh->nh_dev)
-			dev_put(nexthop_nh->nh_dev);
-		nexthop_nh->nh_dev = NULL;
-	} endfor_nexthops(fi);
 	fib_info_cnt--;
-	release_net(fi->fib_net);
-	kfree_rcu(fi, rcu);
+	call_rcu(&fi->rcu, free_fib_info_rcu);
 }
 
 void fib_release_info(struct fib_info *fi)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 58c25ea5a5c..0d884eb2b14 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1371,6 +1371,8 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
 
 			if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
 				continue;
+			if (fi->fib_dead)
+				continue;
 			if (fa->fa_info->fib_scope < flp->flowi4_scope)
 				continue;
 			fib_alias_accessed(fa);
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
index c6933f2ea31..3e3f75d96be 100644
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -15,6 +15,7 @@
 #include <linux/kmod.h>
 #include <linux/skbuff.h>
 #include <linux/in.h>
+#include <linux/ip.h>
 #include <linux/netdevice.h>
 #include <linux/version.h>
 #include <linux/spinlock.h>
@@ -97,27 +98,17 @@ drop:
 static void gre_err(struct sk_buff *skb, u32 info)
 {
 	const struct gre_protocol *proto;
-	u8 ver;
-
-	if (!pskb_may_pull(skb, 12))
-		goto drop;
+	const struct iphdr *iph = (const struct iphdr *)skb->data;
+	u8 ver = skb->data[(iph->ihl<<2) + 1]&0x7f;
 
-	ver = skb->data[1]&0x7f;
 	if (ver >= GREPROTO_MAX)
-		goto drop;
+		return;
 
 	rcu_read_lock();
 	proto = rcu_dereference(gre_proto[ver]);
-	if (!proto || !proto->err_handler)
-		goto drop_unlock;
-	proto->err_handler(skb, info);
-	rcu_read_unlock();
-	return;
-
-drop_unlock:
+	if (proto && proto->err_handler)
+		proto->err_handler(skb, info);
 	rcu_read_unlock();
-drop:
-	kfree_skb(skb);
 }
 
 static const struct net_protocol net_gre_protocol = {
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 5395e45dcce..23ef31baa1a 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -380,6 +380,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
 					struct icmp_bxm *param)
 {
 	struct rtable *rt, *rt2;
+	struct flowi4 fl4_dec;
 	int err;
 
 	memset(fl4, 0, sizeof(*fl4));
@@ -408,19 +409,19 @@ static struct rtable *icmp_route_lookup(struct net *net,
 	} else
 		return rt;
 
-	err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(fl4), AF_INET);
+	err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(&fl4_dec), AF_INET);
 	if (err)
 		goto relookup_failed;
 
-	if (inet_addr_type(net, fl4->saddr) == RTN_LOCAL) {
-		rt2 = __ip_route_output_key(net, fl4);
+	if (inet_addr_type(net, fl4_dec.saddr) == RTN_LOCAL) {
+		rt2 = __ip_route_output_key(net, &fl4_dec);
 		if (IS_ERR(rt2))
 			err = PTR_ERR(rt2);
 	} else {
 		struct flowi4 fl4_2 = {};
 		unsigned long orefdst;
 
-		fl4_2.daddr = fl4->saddr;
+		fl4_2.daddr = fl4_dec.saddr;
 		rt2 = ip_route_output_key(net, &fl4_2);
 		if (IS_ERR(rt2)) {
 			err = PTR_ERR(rt2);
@@ -428,7 +429,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
 		}
 		/* Ugh! */
 		orefdst = skb_in->_skb_refdst; /* save old refdst */
-		err = ip_route_input(skb_in, fl4->daddr, fl4->saddr,
+		err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr,
 				     RT_TOS(tos), rt2->dst.dev);
 
 		dst_release(&rt2->dst);
@@ -440,10 +441,11 @@ static struct rtable *icmp_route_lookup(struct net *net,
 		goto relookup_failed;
 
 	rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst,
-					    flowi4_to_flowi(fl4), NULL,
+					    flowi4_to_flowi(&fl4_dec), NULL,
 					    XFRM_LOOKUP_ICMP);
 	if (!IS_ERR(rt2)) {
 		dst_release(&rt->dst);
+		memcpy(fl4, &fl4_dec, sizeof(*fl4));
 		rt = rt2;
 	} else if (PTR_ERR(rt2) == -EPERM) {
 		if (rt)
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index f1d27f6c935..e0d42dbb33f 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -767,7 +767,7 @@ static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
 			break;
 		for (i=0; i<nsrcs; i++) {
 			/* skip inactive filters */
-			if (pmc->sfcount[MCAST_INCLUDE] ||
+			if (psf->sf_count[MCAST_INCLUDE] ||
 			    pmc->sfcount[MCAST_EXCLUDE] !=
 			    psf->sf_count[MCAST_EXCLUDE])
 				continue;
@@ -875,6 +875,8 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
 		 * to be intended in a v3 query.
 		 */
 		max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
+		if (!max_delay)
+			max_delay = 1;	/* can't mod w/ 0 */
 	} else { /* v3 */
 		if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
 			return;
@@ -1718,7 +1720,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
 
 		pmc->sfcount[sfmode]--;
 		for (j=0; j<i; j++)
-			(void) ip_mc_del1_src(pmc, sfmode, &psfsrc[i]);
+			(void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
 	} else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
 #ifdef CONFIG_IP_MULTICAST
 		struct ip_sf_list *psf;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 3c0369a3a66..984ec656b03 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -21,6 +21,7 @@
 
 #include <net/inet_connection_sock.h>
 #include <net/inet_hashtables.h>
+#include <net/secure_seq.h>
 #include <net/ip.h>
 
 /*
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index ce616d92cc5..687764544af 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -19,6 +19,7 @@
 #include <linux/net.h>
 #include <net/ip.h>
 #include <net/inetpeer.h>
+#include <net/secure_seq.h>
 
 /*
  * Theory of operations.
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 3b34d1c8627..29a07b6c716 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -84,7 +84,7 @@ int ip_forward(struct sk_buff *skb)
 
 	rt = skb_rtable(skb);
 
-	if (opt->is_strictroute && ip_hdr(skb)->daddr != rt->rt_gateway)
+	if (opt->is_strictroute && opt->nexthop != rt->rt_gateway)
 		goto sr_failed;
 
 	if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 8871067560d..d7bb94c4834 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -731,9 +731,9 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	}
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 	else if (skb->protocol == htons(ETH_P_IPV6)) {
+		struct neighbour *neigh = dst_get_neighbour(skb_dst(skb));
 		const struct in6_addr *addr6;
 		int addr_type;
-		struct neighbour *neigh = skb_dst(skb)->neighbour;
 
 		if (neigh == NULL)
 			goto tx_error;
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index ec93335901d..42dd1a90ede 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -568,11 +568,12 @@ void ip_forward_options(struct sk_buff *skb)
 		     ) {
 			if (srrptr + 3 > srrspace)
 				break;
-			if (memcmp(&ip_hdr(skb)->daddr, &optptr[srrptr-1], 4) == 0)
+			if (memcmp(&opt->nexthop, &optptr[srrptr-1], 4) == 0)
 				break;
 		}
 		if (srrptr + 3 <= srrspace) {
 			opt->is_changed = 1;
+			ip_hdr(skb)->daddr = opt->nexthop;
 			ip_rt_get_source(&optptr[srrptr-1], skb, rt);
 			optptr[2] = srrptr+4;
 		} else if (net_ratelimit())
@@ -640,6 +641,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
 	}
 	if (srrptr <= srrspace) {
 		opt->srr_is_hit = 1;
+		opt->nexthop = nexthop;
 		opt->is_changed = 1;
 	}
 	return 0;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 84f26e8e6c6..51a3eec2c70 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -182,6 +182,8 @@ static inline int ip_finish_output2(struct sk_buff *skb)
 	struct rtable *rt = (struct rtable *)dst;
 	struct net_device *dev = dst->dev;
 	unsigned int hh_len = LL_RESERVED_SPACE(dev);
+	struct neighbour *neigh;
+	int res;
 
 	if (rt->rt_type == RTN_MULTICAST) {
 		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
@@ -203,10 +205,22 @@ static inline int ip_finish_output2(struct sk_buff *skb)
 		skb = skb2;
 	}
 
-	if (dst->hh)
-		return neigh_hh_output(dst->hh, skb);
-	else if (dst->neighbour)
-		return dst->neighbour->output(skb);
+	rcu_read_lock();
+	if (dst->hh) {
+		int res = neigh_hh_output(dst->hh, skb);
+
+		rcu_read_unlock();
+		return res;
+	} else {
+		neigh = dst_get_neighbour(dst);
+		if (neigh) {
+			res = neigh->output(skb);
+
+			rcu_read_unlock();
+			return res;
+		}
+		rcu_read_unlock();
+	}
 
 	if (net_ratelimit())
 		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
@@ -734,7 +748,7 @@ static inline int ip_ufo_append_data(struct sock *sk,
 			int getfrag(void *from, char *to, int offset, int len,
 			       int odd, struct sk_buff *skb),
 			void *from, int length, int hh_len, int fragheaderlen,
-			int transhdrlen, int mtu, unsigned int flags)
+			int transhdrlen, int maxfraglen, unsigned int flags)
 {
 	struct sk_buff *skb;
 	int err;
@@ -767,7 +781,7 @@ static inline int ip_ufo_append_data(struct sock *sk,
 		skb->csum = 0;
 
 		/* specify the length of each IP datagram fragment */
-		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
+		skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
 		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
 		__skb_queue_tail(queue, skb);
 	}
@@ -831,7 +845,7 @@ static int __ip_append_data(struct sock *sk,
 	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
 		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
 					 hh_len, fragheaderlen, transhdrlen,
-					 mtu, flags);
+					 maxfraglen, flags);
 		if (err)
 			goto error;
 		return 0;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index ab7e5542c1c..7fbcabafa29 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -252,6 +252,10 @@ static int __init ic_open_devs(void)
 		}
 	}
 
+	/* no point in waiting if we could not bring up at least one device */
+	if (!ic_first_dev)
+		goto have_carrier;
+
 	/* wait for a carrier on at least one device */
 	start = jiffies;
 	while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 378b20b7ca6..6f06f7f39ea 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -285,6 +285,8 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
 	if (register_netdevice(dev) < 0)
 		goto failed_free;
 
+	strcpy(nt->parms.name, dev->name);
+
 	dev_hold(dev);
 	ipip_tunnel_link(ipn, nt);
 	return nt;
@@ -759,7 +761,6 @@ static int ipip_tunnel_init(struct net_device *dev)
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 
 	tunnel->dev = dev;
-	strcpy(tunnel->parms.name, dev->name);
 
 	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
 	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
@@ -825,6 +826,7 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
 static int __net_init ipip_init_net(struct net *net)
 {
 	struct ipip_net *ipn = net_generic(net, ipip_net_id);
+	struct ip_tunnel *t;
 	int err;
 
 	ipn->tunnels[0] = ipn->tunnels_wc;
@@ -848,6 +850,9 @@ static int __net_init ipip_init_net(struct net *net)
 	if ((err = register_netdev(ipn->fb_tunnel_dev)))
 		goto err_reg_dev;
 
+	t = netdev_priv(ipn->fb_tunnel_dev);
+
+	strcpy(t->parms.name, ipn->fb_tunnel_dev->name);
 	return 0;
 
 err_reg_dev:
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 30a7763c400..f81af8dd2de 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1796,7 +1796,7 @@ static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
 	struct flowi4 fl4 = {
 		.daddr = iph->daddr,
 		.saddr = iph->saddr,
-		.flowi4_tos = iph->tos,
+		.flowi4_tos = RT_TOS(iph->tos),
 		.flowi4_oif = rt->rt_oif,
 		.flowi4_iif = rt->rt_iif,
 		.flowi4_mark = rt->rt_mark,
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 2e97e3ec1eb..929b27bdeb7 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -18,17 +18,15 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
 	struct rtable *rt;
 	struct flowi4 fl4 = {};
 	__be32 saddr = iph->saddr;
-	__u8 flags = 0;
+	__u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
 	unsigned int hh_len;
 
-	if (!skb->sk && addr_type != RTN_LOCAL) {
-		if (addr_type == RTN_UNSPEC)
-			addr_type = inet_addr_type(net, saddr);
-		if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
-			flags |= FLOWI_FLAG_ANYSRC;
-		else
-			saddr = 0;
-	}
+	if (addr_type == RTN_UNSPEC)
+		addr_type = inet_addr_type(net, saddr);
+	if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
+		flags |= FLOWI_FLAG_ANYSRC;
+	else
+		saddr = 0;
 
 	/* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
 	 * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook.
@@ -38,7 +36,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
 	fl4.flowi4_tos = RT_TOS(iph->tos);
 	fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
 	fl4.flowi4_mark = skb->mark;
-	fl4.flowi4_flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : flags;
+	fl4.flowi4_flags = flags;
 	rt = ip_route_output_key(net, &fl4);
 	if (IS_ERR(rt))
 		return -1;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 1dfc18a03fd..73b4e91a87e 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -113,6 +113,18 @@ config IP_NF_TARGET_REJECT
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config IP_NF_TARGET_REJECT_SKERR
+	bool "Force socket error when rejecting with icmp*"
+	depends on IP_NF_TARGET_REJECT
+	default n
+	help
+	  This option additionally raises a matching error on the local
+	  socket when a packet is rejected with "--reject-with icmp*".
+	  The REJECT target normally only sends an ICMP message and leaves
+	  the local socket unaware of any ingress rejects.
+
+	  If unsure, say N.
+
 config IP_NF_TARGET_LOG
 	tristate "LOG target support"
 	default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 51f13f8ec72..9dd754c7f2b 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -128,6 +128,14 @@ static void send_reset(struct sk_buff *oldskb, int hook)
 static inline void send_unreach(struct sk_buff *skb_in, int code)
 {
 	icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
+#ifdef CONFIG_IP_NF_TARGET_REJECT_SKERR
+	if (skb_in->sk) {
+		skb_in->sk->sk_err = icmp_err_convert[code].errno;
+		skb_in->sk->sk_error_report(skb_in->sk);
+		pr_debug("ipt_REJECT: sk_err=%d for skb=%p sk=%p\n",
+			skb_in->sk->sk_err, skb_in, skb_in->sk);
+	}
+#endif
 }
 
 static unsigned int
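When CONFIG_IP_NF_TARGET_REJECT_SKERR is set, the hunk above copies the ICMP code's errno onto the local socket, so a sender whose packet hits a local REJECT rule sees the failure on its own socket rather than only an ICMP message. A hedged userspace sketch of that observable behavior follows; the rule, address, and port are assumptions, and delivery of the pending error on the next send is how sock_error() is expected to surface it.

    /* Hedged sketch: observing the socket error raised by the REJECT_SKERR
     * hunk above. Assumes a rule such as
     *   iptables -A OUTPUT -p udp --dport 9999 -j REJECT --reject-with icmp-port-unreachable
     * on a kernel built with CONFIG_IP_NF_TARGET_REJECT_SKERR. */
    #include <arpa/inet.h>
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    int main(void)
    {
    	struct sockaddr_in dst = { .sin_family = AF_INET,
    				   .sin_port = htons(9999) };
    	int fd = socket(AF_INET, SOCK_DGRAM, 0);
    	char byte = 0;

    	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
    	sendto(fd, &byte, 1, 0, (struct sockaddr *)&dst, sizeof(dst));
    	/* the reject should now be pending as sk_err; the next operation
    	 * is expected to fail (ECONNREFUSED for icmp-port-unreachable) */
    	if (sendto(fd, &byte, 1, 0, (struct sockaddr *)&dst, sizeof(dst)) < 0)
    		printf("socket error: %s\n", strerror(errno));
    	return 0;
    }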
diff --git a/net/ipv4/netfilter/nf_nat_proto_common.c b/net/ipv4/netfilter/nf_nat_proto_common.c
index 3e61faf23a9..f52d41ea069 100644
--- a/net/ipv4/netfilter/nf_nat_proto_common.c
+++ b/net/ipv4/netfilter/nf_nat_proto_common.c
@@ -12,6 +12,7 @@
 #include <linux/ip.h>
 
 #include <linux/netfilter.h>
+#include <net/secure_seq.h>
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_core.h>
 #include <net/netfilter/nf_nat_rule.h>
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index aa13ef10511..6b95f74a91d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -91,6 +91,7 @@
 #include <linux/rcupdate.h>
 #include <linux/times.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 #include <net/dst.h>
 #include <net/net_namespace.h>
 #include <net/protocol.h>
@@ -108,6 +109,7 @@
 #ifdef CONFIG_SYSCTL
 #include <linux/sysctl.h>
 #endif
+#include <net/secure_seq.h>
 
 #define RT_FL_TOS(oldflp4) \
     ((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
@@ -131,6 +133,9 @@ static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
 static int ip_rt_min_advmss __read_mostly	= 256;
 static int rt_chain_length_max __read_mostly	= 20;
 
+static struct delayed_work expires_work;
+static unsigned long expires_ljiffies;
+
 /*
  *	Interface to generic destination cache.
  */
@@ -411,7 +416,13 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
 			   "HHUptod\tSpecDst");
 	else {
 		struct rtable *r = v;
-		int len;
+		struct neighbour *n;
+		int len, HHUptod;
+
+		rcu_read_lock();
+		n = dst_get_neighbour(&r->dst);
+		HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
+		rcu_read_unlock();
 
 		seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
 			      "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
@@ -426,8 +437,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
 			      dst_metric(&r->dst, RTAX_RTTVAR)),
 			r->rt_key_tos,
 			r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1,
-			r->dst.hh ? (r->dst.hh->hh_output ==
-				       dev_queue_xmit) : 0,
+			HHUptod,
 			r->rt_spec_dst, &len);
 
 		seq_printf(seq, "%*s\n", 127 - len, "");
@@ -716,7 +726,7 @@ static inline bool compare_hash_inputs(const struct rtable *rt1,
 {
 	return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
 		((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
-		(rt1->rt_iif ^ rt2->rt_iif)) == 0);
+		(rt1->rt_route_iif ^ rt2->rt_route_iif)) == 0);
 }
 
 static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
@@ -725,8 +735,8 @@ static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
 		((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
 		(rt1->rt_mark ^ rt2->rt_mark) |
 		(rt1->rt_key_tos ^ rt2->rt_key_tos) |
-		(rt1->rt_oif ^ rt2->rt_oif) |
-		(rt1->rt_iif ^ rt2->rt_iif)) == 0;
+		(rt1->rt_route_iif ^ rt2->rt_route_iif) |
+		(rt1->rt_oif ^ rt2->rt_oif)) == 0;
 }
 
 static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
@@ -820,6 +830,97 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
 	return ONE;
 }
 
+static void rt_check_expire(void)
+{
+	static unsigned int rover;
+	unsigned int i = rover, goal;
+	struct rtable *rth;
+	struct rtable __rcu **rthp;
+	unsigned long samples = 0;
+	unsigned long sum = 0, sum2 = 0;
+	unsigned long delta;
+	u64 mult;
+
+	delta = jiffies - expires_ljiffies;
+	expires_ljiffies = jiffies;
+	mult = ((u64)delta) << rt_hash_log;
+	if (ip_rt_gc_timeout > 1)
+		do_div(mult, ip_rt_gc_timeout);
+	goal = (unsigned int)mult;
+	if (goal > rt_hash_mask)
+		goal = rt_hash_mask + 1;
+	for (; goal > 0; goal--) {
+		unsigned long tmo = ip_rt_gc_timeout;
+		unsigned long length;
+
+		i = (i + 1) & rt_hash_mask;
+		rthp = &rt_hash_table[i].chain;
+
+		if (need_resched())
+			cond_resched();
+
+		samples++;
+
+		if (rcu_dereference_raw(*rthp) == NULL)
+			continue;
+		length = 0;
+		spin_lock_bh(rt_hash_lock_addr(i));
+		while ((rth = rcu_dereference_protected(*rthp,
+			lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
+			prefetch(rth->dst.rt_next);
+			if (rt_is_expired(rth)) {
+				*rthp = rth->dst.rt_next;
+				rt_free(rth);
+				continue;
+			}
+			if (rth->dst.expires) {
+				/* Entry is expired even if it is in use */
+				if (time_before_eq(jiffies, rth->dst.expires)) {
+nofree:
+					tmo >>= 1;
+					rthp = &rth->dst.rt_next;
+					/*
+					 * We only count entries on
+					 * a chain with equal hash inputs once
+					 * so that entries for different QOS
+					 * levels, and other non-hash input
+					 * attributes don't unfairly skew
+					 * the length computation
+					 */
+					length += has_noalias(rt_hash_table[i].chain, rth);
+					continue;
+				}
+			} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
+				goto nofree;
+
+			/* Cleanup aged off entries. */
+			*rthp = rth->dst.rt_next;
+			rt_free(rth);
+		}
+		spin_unlock_bh(rt_hash_lock_addr(i));
+		sum += length;
+		sum2 += length*length;
+	}
+	if (samples) {
+		unsigned long avg = sum / samples;
+		unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
+		rt_chain_length_max = max_t(unsigned long,
+					    ip_rt_gc_elasticity,
+					    (avg + 4*sd) >> FRACT_BITS);
+	}
+	rover = i;
+}
+
+/*
+ * rt_worker_func() is run in process context.
+ * we call rt_check_expire() to scan part of the hash table
+ */
+static void rt_worker_func(struct work_struct *work)
+{
+	rt_check_expire();
+	schedule_delayed_work(&expires_work, ip_rt_gc_interval);
+}
+
 /*
  * Perturbation of rt_genid by a small quantity [1..256]
  * Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
@@ -1268,11 +1369,41 @@ static void rt_del(unsigned hash, struct rtable *rt)
 	spin_unlock_bh(rt_hash_lock_addr(hash));
 }
 
+static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
+{
+	struct rtable *rt = (struct rtable *) dst;
+	__be32 orig_gw = rt->rt_gateway;
+	struct neighbour *n, *old_n;
+
+	dst_confirm(&rt->dst);
+
+	rt->rt_gateway = peer->redirect_learned.a4;
+	n = __arp_bind_neighbour(&rt->dst, rt->rt_gateway);
+	if (IS_ERR(n))
+		return PTR_ERR(n);
+	old_n = xchg(&rt->dst._neighbour, n);
+	if (old_n)
+		neigh_release(old_n);
+	if (!n || !(n->nud_state & NUD_VALID)) {
+		if (n)
+			neigh_event_send(n, NULL);
+		rt->rt_gateway = orig_gw;
+		return -EAGAIN;
+	} else {
+		rt->rt_flags |= RTCF_REDIRECTED;
+		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
+	}
+	return 0;
+}
+
 /* called in rcu_read_lock() section */
 void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 		    __be32 saddr, struct net_device *dev)
 {
+	int s, i;
 	struct in_device *in_dev = __in_dev_get_rcu(dev);
+	__be32 skeys[2] = { saddr, 0 };
+	int ikeys[2] = { dev->ifindex, 0 };
 	struct inet_peer *peer;
 	struct net *net;
 
@@ -1295,13 +1426,43 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 		goto reject_redirect;
 	}
 
-	peer = inet_getpeer_v4(daddr, 1);
-	if (peer) {
-		peer->redirect_learned.a4 = new_gw;
+	for (s = 0; s < 2; s++) {
+		for (i = 0; i < 2; i++) {
+			unsigned int hash;
+			struct rtable __rcu **rthp;
+			struct rtable *rt;
 
-		inet_putpeer(peer);
+			hash = rt_hash(daddr, skeys[s], ikeys[i], rt_genid(net));
 
-		atomic_inc(&__rt_peer_genid);
+			rthp = &rt_hash_table[hash].chain;
+
+			while ((rt = rcu_dereference(*rthp)) != NULL) {
+				rthp = &rt->dst.rt_next;
+
+				if (rt->rt_key_dst != daddr ||
+				    rt->rt_key_src != skeys[s] ||
+				    rt->rt_oif != ikeys[i] ||
+				    rt_is_input_route(rt) ||
+				    rt_is_expired(rt) ||
+				    !net_eq(dev_net(rt->dst.dev), net) ||
+				    rt->dst.error ||
+				    rt->dst.dev != dev ||
+				    rt->rt_gateway != old_gw)
+					continue;
+
+				if (!rt->peer)
+					rt_bind_peer(rt, rt->rt_dst, 1);
+
+				peer = rt->peer;
+				if (peer) {
+					if (peer->redirect_learned.a4 != new_gw) {
+						peer->redirect_learned.a4 = new_gw;
+						atomic_inc(&__rt_peer_genid);
+					}
+					check_peer_redir(&rt->dst, peer);
+				}
+			}
+		}
 	}
 	return;
 
@@ -1588,31 +1749,6 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
 	}
 }
 
-static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
-{
-	struct rtable *rt = (struct rtable *) dst;
-	__be32 orig_gw = rt->rt_gateway;
-
-	dst_confirm(&rt->dst);
-
-	neigh_release(rt->dst.neighbour);
-	rt->dst.neighbour = NULL;
-
-	rt->rt_gateway = peer->redirect_learned.a4;
-	if (arp_bind_neighbour(&rt->dst) ||
-	    !(rt->dst.neighbour->nud_state & NUD_VALID)) {
-		if (rt->dst.neighbour)
-			neigh_event_send(rt->dst.neighbour, NULL);
-		rt->rt_gateway = orig_gw;
-		return -EAGAIN;
-	} else {
-		rt->rt_flags |= RTCF_REDIRECTED;
-		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE,
-					rt->dst.neighbour);
-	}
-	return 0;
-}
-
 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
 {
 	struct rtable *rt = (struct rtable *) dst;
@@ -1703,7 +1839,7 @@ void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
 		memset(&fl4, 0, sizeof(fl4));
 		fl4.daddr = iph->daddr;
 		fl4.saddr = iph->saddr;
-		fl4.flowi4_tos = iph->tos;
+		fl4.flowi4_tos = RT_TOS(iph->tos);
 		fl4.flowi4_oif = rt->dst.dev->ifindex;
 		fl4.flowi4_iif = skb->dev->ifindex;
 		fl4.flowi4_mark = skb->mark;
@@ -2280,8 +2416,7 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	     rth = rcu_dereference(rth->dst.rt_next)) {
 		if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
 		     ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
-		     (rth->rt_iif ^ iif) |
-		     rth->rt_oif |
+		     (rth->rt_route_iif ^ iif) |
 		     (rth->rt_key_tos ^ tos)) == 0 &&
 		    rth->rt_mark == skb->mark &&
 		    net_eq(dev_net(rth->dst.dev), net) &&
@@ -3088,6 +3223,13 @@ static ctl_table ipv4_route_table[] = {
 		.proc_handler	= proc_dointvec_jiffies,
 	},
 	{
+		.procname	= "gc_interval",
+		.data		= &ip_rt_gc_interval,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_jiffies,
+	},
+	{
 		.procname	= "redirect_load",
 		.data		= &ip_rt_redirect_load,
 		.maxlen		= sizeof(int),
@@ -3297,6 +3439,11 @@ int __init ip_rt_init(void)
 	devinet_init();
 	ip_fib_init();
 
+	INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
+	expires_ljiffies = jiffies;
+	schedule_delayed_work(&expires_work,
+		net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
+
 	if (ip_rt_proc_init())
 		printk(KERN_ERR "Unable to create route proc files\n");
 #ifdef CONFIG_XFRM
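rt_check_expire() above derives rt_chain_length_max from the sampled chain lengths: mean plus four standard deviations, right-shifted by FRACT_BITS and clamped below by ip_rt_gc_elasticity. A small sketch of that computation follows; the sample lengths are made up, and FRACT_BITS = 3 with an elasticity of 8 are assumed to mirror the kernel's values (build with -lm).

    /* Hedged sketch of the chain-length statistics in rt_check_expire()
     * above; all sample values and constants are illustrative assumptions. */
    #include <math.h>
    #include <stdio.h>

    #define FRACT_BITS 3		/* assumed to match the kernel's value */

    int main(void)
    {
    	unsigned long lengths[] = { 12, 20, 8, 30, 16, 24, 10, 40 };
    	unsigned long sum = 0, sum2 = 0, samples = 8, elasticity = 8, i;
    	unsigned long avg, sd, max_len;

    	for (i = 0; i < samples; i++) {
    		sum += lengths[i];
    		sum2 += lengths[i] * lengths[i];
    	}
    	avg = sum / samples;			/* 20 */
    	sd = (unsigned long)sqrt((double)(sum2 / samples - avg * avg));
    	max_len = (avg + 4 * sd) >> FRACT_BITS;	/* (20 + 4*10) >> 3 = 7 */
    	if (max_len < elasticity)
    		max_len = elasticity;	/* clamped by ip_rt_gc_elasticity */
    	printf("rt_chain_length_max = %lu\n", max_len);
    	return 0;
    }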
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 26461492a84..895f2157e13 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -276,7 +276,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	int mss;
 	struct rtable *rt;
 	__u8 rcv_wscale;
-	bool ecn_ok;
+	bool ecn_ok = false;
+	struct flowi4 fl4;
 
 	if (!sysctl_tcp_syncookies || !th->ack || th->rst)
 		goto out;
@@ -344,20 +345,16 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	 * hasn't changed since we received the original syn, but I see
 	 * no easy way to do this.
 	 */
-	{
-		struct flowi4 fl4;
-
-		flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
-				   RT_SCOPE_UNIVERSE, IPPROTO_TCP,
-				   inet_sk_flowi_flags(sk),
-				   (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
-				   ireq->loc_addr, th->source, th->dest);
-		security_req_classify_flow(req, flowi4_to_flowi(&fl4));
-		rt = ip_route_output_key(sock_net(sk), &fl4);
-		if (IS_ERR(rt)) {
-			reqsk_free(req);
-			goto out;
-		}
+	flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
+			   RT_SCOPE_UNIVERSE, IPPROTO_TCP,
+			   inet_sk_flowi_flags(sk),
+			   (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
+			   ireq->loc_addr, th->source, th->dest);
+	security_req_classify_flow(req, flowi4_to_flowi(&fl4));
+	rt = ip_route_output_key(sock_net(sk), &fl4);
+	if (IS_ERR(rt)) {
+		reqsk_free(req);
+		goto out;
 	}
 
 	/* Try to redo what tcp_v4_send_synack did. */
@@ -371,5 +368,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	ireq->rcv_wscale  = rcv_wscale;
 
 	ret = get_cookie_sock(sk, skb, req, &rt->dst);
+	/* ip_queue_xmit() depends on our flow being setup
+	 * Normal sockets get it right from inet_csk_route_child_sock()
+	 */
+	if (ret)
+		inet_sk(ret)->cork.fl.u.ip4 = fl4;
 out:	return ret;
 }
diff --git a/net/ipv4/sysfs_net_ipv4.c b/net/ipv4/sysfs_net_ipv4.c
new file mode 100644
index 00000000000..0cbbf10026a
--- /dev/null
+++ b/net/ipv4/sysfs_net_ipv4.c
@@ -0,0 +1,88 @@
+/*
+ * net/ipv4/sysfs_net_ipv4.c
+ *
+ * sysfs-based networking knobs (so we can, unlike with sysctl, control perms)
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <net/tcp.h>
+
+#define CREATE_IPV4_FILE(_name, _var)					\
+static ssize_t _name##_show(struct kobject *kobj,			\
+			    struct kobj_attribute *attr, char *buf)	\
+{									\
+	return sprintf(buf, "%d\n", _var);				\
+}									\
+static ssize_t _name##_store(struct kobject *kobj,			\
+			     struct kobj_attribute *attr,		\
+			     const char *buf, size_t count)		\
+{									\
+	int val, ret;							\
+	ret = sscanf(buf, "%d", &val);					\
+	if (ret != 1)							\
+		return -EINVAL;						\
+	if (val < 0)							\
+		return -EINVAL;						\
+	_var = val;							\
+	return count;							\
+}									\
+static struct kobj_attribute _name##_attr =				\
+	__ATTR(_name, 0644, _name##_show, _name##_store)
+
+CREATE_IPV4_FILE(tcp_wmem_min, sysctl_tcp_wmem[0]);
+CREATE_IPV4_FILE(tcp_wmem_def, sysctl_tcp_wmem[1]);
+CREATE_IPV4_FILE(tcp_wmem_max, sysctl_tcp_wmem[2]);
+
+CREATE_IPV4_FILE(tcp_rmem_min, sysctl_tcp_rmem[0]);
+CREATE_IPV4_FILE(tcp_rmem_def, sysctl_tcp_rmem[1]);
+CREATE_IPV4_FILE(tcp_rmem_max, sysctl_tcp_rmem[2]);
+
+static struct attribute *ipv4_attrs[] = {
+	&tcp_wmem_min_attr.attr,
+	&tcp_wmem_def_attr.attr,
+	&tcp_wmem_max_attr.attr,
+	&tcp_rmem_min_attr.attr,
+	&tcp_rmem_def_attr.attr,
+	&tcp_rmem_max_attr.attr,
+	NULL
+};
+
+static struct attribute_group ipv4_attr_group = {
+	.attrs = ipv4_attrs,
+};
+
+static __init int sysfs_ipv4_init(void)
+{
+	struct kobject *ipv4_kobject;
+	int ret;
+
+	ipv4_kobject = kobject_create_and_add("ipv4", kernel_kobj);
+	if (!ipv4_kobject)
+		return -ENOMEM;
+
+	ret = sysfs_create_group(ipv4_kobject, &ipv4_attr_group);
+	if (ret) {
+		kobject_put(ipv4_kobject);
+		return ret;
+	}
+
+	return 0;
+}
+
+subsys_initcall(sysfs_ipv4_init);
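The new file above exposes the TCP memory sysctls as sysfs attributes under kernel_kobj, i.e. /sys/kernel/ipv4/, so their permissions can be managed like any other sysfs file. A hedged userspace sketch of reading and raising one of the knobs follows; the path exists only on kernels carrying this patch with CONFIG_SYSFS, and the written value is an arbitrary example.

    /* Hedged sketch: exercising /sys/kernel/ipv4/tcp_wmem_max as created
     * by sysfs_net_ipv4.c above. Path presence assumes this patch. */
    #include <stdio.h>

    int main(void)
    {
    	FILE *f = fopen("/sys/kernel/ipv4/tcp_wmem_max", "r+");
    	int val;

    	if (!f)
    		return 1;
    	if (fscanf(f, "%d", &val) == 1)
    		printf("tcp_wmem_max = %d\n", val);
    	rewind(f);
    	fprintf(f, "%d\n", 4194304);	/* example: raise the send-buffer cap */
    	fclose(f);
    	return 0;
    }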
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 46febcacb72..31741cf9bb6 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -266,11 +266,15 @@
 #include <linux/crypto.h>
 #include <linux/time.h>
 #include <linux/slab.h>
+#include <linux/uid_stat.h>
 
 #include <net/icmp.h>
 #include <net/tcp.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
+#include <net/ip6_route.h>
+#include <net/ipv6.h>
+#include <net/transp_v6.h>
 #include <net/netdma.h>
 #include <net/sock.h>
 
@@ -850,8 +854,7 @@ new_segment:
wait_for_sndbuf:
 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
-		if (copied)
-			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
+		tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
 
 		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
 			goto do_error;
@@ -860,7 +863,7 @@ wait_for_memory:
 	}
 
out:
-	if (copied)
+	if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
 		tcp_push(sk, flags, mss_now, tp->nonagle);
 	return copied;
 
@@ -1112,6 +1115,9 @@ out:
 	if (copied)
 		tcp_push(sk, flags, mss_now, tp->nonagle);
 	release_sock(sk);
+
+	if (copied > 0)
+		uid_stat_tcp_snd(current_uid(), copied);
 	return copied;
 
do_fault:
@@ -1388,8 +1394,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 	tcp_rcv_space_adjust(sk);
 
 	/* Clean up data we have read: This will do ACK frames. */
-	if (copied > 0)
+	if (copied > 0) {
 		tcp_cleanup_rbuf(sk, copied);
+		uid_stat_tcp_rcv(current_uid(), copied);
+	}
+
 	return copied;
 }
 EXPORT_SYMBOL(tcp_read_sock);
@@ -1771,6 +1780,9 @@ skip_copy:
 	tcp_cleanup_rbuf(sk, copied);
 
 	release_sock(sk);
+
+	if (copied > 0)
+		uid_stat_tcp_rcv(current_uid(), copied);
 	return copied;
 
out:
@@ -1779,6 +1791,8 @@ out:
 
recv_urg:
 	err = tcp_recv_urg(sk, msg, len, flags);
+	if (err > 0)
+		uid_stat_tcp_rcv(current_uid(), err);
 	goto out;
 }
 EXPORT_SYMBOL(tcp_recvmsg);
@@ -2395,7 +2409,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		/* Cap the max timeout in ms TCP will retry/retrans
 		 * before giving up and aborting (ETIMEDOUT) a connection.
 		 */
-		icsk->icsk_user_timeout = msecs_to_jiffies(val);
+		if (val < 0)
+			err = -EINVAL;
+		else
+			icsk->icsk_user_timeout = msecs_to_jiffies(val);
 		break;
 	default:
 		err = -ENOPROTOOPT;
@@ -3221,7 +3238,7 @@ void __init tcp_init(void)
3221{ 3238{
3222 struct sk_buff *skb = NULL; 3239 struct sk_buff *skb = NULL;
3223 unsigned long limit; 3240 unsigned long limit;
3224 int i, max_share, cnt; 3241 int i, max_rshare, max_wshare, cnt;
3225 unsigned long jiffy = jiffies; 3242 unsigned long jiffy = jiffies;
3226 3243
3227 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb)); 3244 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
@@ -3285,15 +3302,16 @@ void __init tcp_init(void)
3285 3302
3286 /* Set per-socket limits to no more than 1/128 the pressure threshold */ 3303 /* Set per-socket limits to no more than 1/128 the pressure threshold */
3287 limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7); 3304 limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
3288 max_share = min(4UL*1024*1024, limit); 3305 max_wshare = min(4UL*1024*1024, limit);
3306 max_rshare = min(6UL*1024*1024, limit);
3289 3307
3290 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; 3308 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
3291 sysctl_tcp_wmem[1] = 16*1024; 3309 sysctl_tcp_wmem[1] = 16*1024;
3292 sysctl_tcp_wmem[2] = max(64*1024, max_share); 3310 sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
3293 3311
3294 sysctl_tcp_rmem[0] = SK_MEM_QUANTUM; 3312 sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
3295 sysctl_tcp_rmem[1] = 87380; 3313 sysctl_tcp_rmem[1] = 87380;
3296 sysctl_tcp_rmem[2] = max(87380, max_share); 3314 sysctl_tcp_rmem[2] = max(87380, max_rshare);
3297 3315
3298 printk(KERN_INFO "TCP: Hash tables configured " 3316 printk(KERN_INFO "TCP: Hash tables configured "
3299 "(established %u bind %u)\n", 3317 "(established %u bind %u)\n",
@@ -3310,3 +3328,107 @@ void __init tcp_init(void)
3310 tcp_secret_retiring = &tcp_secret_two; 3328 tcp_secret_retiring = &tcp_secret_two;
3311 tcp_secret_secondary = &tcp_secret_two; 3329 tcp_secret_secondary = &tcp_secret_two;
3312} 3330}
3331
3332static int tcp_is_local(struct net *net, __be32 addr) {
3333 struct rtable *rt;
3334 struct flowi4 fl4 = { .daddr = addr };
3335 rt = ip_route_output_key(net, &fl4);
3336 if (IS_ERR_OR_NULL(rt))
3337 return 0;
3338 return rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK);
3339}
3340
3341#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3342static int tcp_is_local6(struct net *net, struct in6_addr *addr) {
3343 struct rt6_info *rt6 = rt6_lookup(net, addr, addr, 0, 0);
3344 return rt6 && rt6->rt6i_dev && (rt6->rt6i_dev->flags & IFF_LOOPBACK);
3345}
3346#endif
3347
3348/*
3349 * tcp_nuke_addr - destroy all sockets on the given local address
3350 * If the local address is the unspecified address (0.0.0.0 or ::), destroy
3351 * all sockets with local addresses that are not configured.
3352 */
3353int tcp_nuke_addr(struct net *net, struct sockaddr *addr)
3354{
3355 int family = addr->sa_family;
3356 unsigned int bucket;
3357
3358 struct in_addr *in;
3359#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3360 struct in6_addr *in6;
3361#endif
3362 if (family == AF_INET) {
3363 in = &((struct sockaddr_in *)addr)->sin_addr;
3364#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3365 } else if (family == AF_INET6) {
3366 in6 = &((struct sockaddr_in6 *)addr)->sin6_addr;
3367#endif
3368 } else {
3369 return -EAFNOSUPPORT;
3370 }
3371
3372 for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
3373 struct hlist_nulls_node *node;
3374 struct sock *sk;
3375 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
3376
3377restart:
3378 spin_lock_bh(lock);
3379 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
3380 struct inet_sock *inet = inet_sk(sk);
3381
3382 if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
3383 continue;
3384 if (sock_flag(sk, SOCK_DEAD))
3385 continue;
3386
3387 if (family == AF_INET) {
3388 __be32 s4 = inet->inet_rcv_saddr;
3389 if (s4 == LOOPBACK4_IPV6)
3390 continue;
3391
3392 if (in->s_addr != s4 &&
3393 !(in->s_addr == INADDR_ANY &&
3394 !tcp_is_local(net, s4)))
3395 continue;
3396 }
3397
3398#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3399 if (family == AF_INET6) {
3400 struct in6_addr *s6;
3401 if (!inet->pinet6)
3402 continue;
3403
3404 s6 = &inet->pinet6->rcv_saddr;
3405 if (ipv6_addr_type(s6) == IPV6_ADDR_MAPPED)
3406 continue;
3407
3408 if (!ipv6_addr_equal(in6, s6) &&
3409 !(ipv6_addr_equal(in6, &in6addr_any) &&
3410 !tcp_is_local6(net, s6)))
3411 continue;
3412 }
3413#endif
3414
3415 sock_hold(sk);
3416 spin_unlock_bh(lock);
3417
3418 local_bh_disable();
3419 bh_lock_sock(sk);
3420 sk->sk_err = ETIMEDOUT;
3421 sk->sk_error_report(sk);
3422
3423 tcp_done(sk);
3424 bh_unlock_sock(sk);
3425 local_bh_enable();
3426 sock_put(sk);
3427
3428 goto restart;
3429 }
3430 spin_unlock_bh(lock);
3431 }
3432
3433 return 0;
3434}
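
Two details of this walk are easy to miss. The loop restarts from the head of the bucket after every kill because tcp_done() must run with the bucket lock dropped, and the hlist_nulls cursor cannot be trusted once the lock is retaken; sockets already reset are skipped on the next pass by the SOCK_DEAD test. Note also that tcp_is_local() as committed never releases the rtable returned by ip_route_output_key(), so each wildcard-address pass appears to leak a dst reference.
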
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index bef9f04c22b..b76aa2d9624 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -83,7 +83,7 @@ int sysctl_tcp_ecn __read_mostly = 2;
83EXPORT_SYMBOL(sysctl_tcp_ecn); 83EXPORT_SYMBOL(sysctl_tcp_ecn);
84int sysctl_tcp_dsack __read_mostly = 1; 84int sysctl_tcp_dsack __read_mostly = 1;
85int sysctl_tcp_app_win __read_mostly = 31; 85int sysctl_tcp_app_win __read_mostly = 31;
86int sysctl_tcp_adv_win_scale __read_mostly = 2; 86int sysctl_tcp_adv_win_scale __read_mostly = 1;
87EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); 87EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
88 88
89int sysctl_tcp_stdurg __read_mostly; 89int sysctl_tcp_stdurg __read_mostly;
@@ -328,6 +328,7 @@ static void tcp_grow_window(struct sock *sk, struct sk_buff *skb)
328 incr = __tcp_grow_window(sk, skb); 328 incr = __tcp_grow_window(sk, skb);
329 329
330 if (incr) { 330 if (incr) {
331 incr = max_t(int, incr, 2 * skb->len);
331 tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, 332 tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
332 tp->window_clamp); 333 tp->window_clamp);
333 inet_csk(sk)->icsk_ack.quick |= 1; 334 inet_csk(sk)->icsk_ack.quick |= 1;
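
The new max_t() clamp is aimed at aggregated receives (GRO/LRO): __tcp_grow_window() can return an increment sized for a single MSS, so a merged skb of, say, 16 KiB would previously grow rcv_ssthresh by only about one MSS per event; raising the increment to at least 2 * skb->len lets the advertised window keep pace with the actual arrival rate.
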
@@ -460,8 +461,11 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
460 if (!win_dep) { 461 if (!win_dep) {
461 m -= (new_sample >> 3); 462 m -= (new_sample >> 3);
462 new_sample += m; 463 new_sample += m;
463 } else if (m < new_sample) 464 } else {
464 new_sample = m << 3; 465 m <<= 3;
466 if (m < new_sample)
467 new_sample = m;
468 }
465 } else { 469 } else {
466 /* No previous measure. */ 470 /* No previous measure. */
467 new_sample = m << 3; 471 new_sample = m << 3;
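
The fix keeps the comparison in consistent units: new_sample stores the RTT scaled by 8, while m is a raw sample. Previously the raw m was compared against the scaled value, so in the win_dep (take-the-minimum) path a raw m = 12 compared below a scaled new_sample = 80 (true RTT 10) and the estimate wrongly jumped to 96; scaling m first makes the comparison fail and the minimum correctly stays at 80.
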
@@ -1115,7 +1119,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
1115 return 0; 1119 return 0;
1116 1120
1117 /* ...Then it's D-SACK, and must reside below snd_una completely */ 1121 /* ...Then it's D-SACK, and must reside below snd_una completely */
1118 if (!after(end_seq, tp->snd_una)) 1122 if (after(end_seq, tp->snd_una))
1119 return 0; 1123 return 0;
1120 1124
1121 if (!before(start_seq, tp->undo_marker)) 1125 if (!before(start_seq, tp->undo_marker))
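
The sense of this test was inverted: per the comment, a D-SACK block is only valid if it lies entirely below snd_una, so the block must be rejected when end_seq is after snd_una, not when it is not.
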
@@ -1289,25 +1293,26 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
1289 return in_sack; 1293 return in_sack;
1290} 1294}
1291 1295
1292static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk, 1296/* Mark the given newly-SACKed range as such, adjusting counters and hints. */
1293 struct tcp_sacktag_state *state, 1297static u8 tcp_sacktag_one(struct sock *sk,
1298 struct tcp_sacktag_state *state, u8 sacked,
1299 u32 start_seq, u32 end_seq,
1294 int dup_sack, int pcount) 1300 int dup_sack, int pcount)
1295{ 1301{
1296 struct tcp_sock *tp = tcp_sk(sk); 1302 struct tcp_sock *tp = tcp_sk(sk);
1297 u8 sacked = TCP_SKB_CB(skb)->sacked;
1298 int fack_count = state->fack_count; 1303 int fack_count = state->fack_count;
1299 1304
1300 /* Account D-SACK for retransmitted packet. */ 1305 /* Account D-SACK for retransmitted packet. */
1301 if (dup_sack && (sacked & TCPCB_RETRANS)) { 1306 if (dup_sack && (sacked & TCPCB_RETRANS)) {
1302 if (tp->undo_marker && tp->undo_retrans && 1307 if (tp->undo_marker && tp->undo_retrans &&
1303 after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker)) 1308 after(end_seq, tp->undo_marker))
1304 tp->undo_retrans--; 1309 tp->undo_retrans--;
1305 if (sacked & TCPCB_SACKED_ACKED) 1310 if (sacked & TCPCB_SACKED_ACKED)
1306 state->reord = min(fack_count, state->reord); 1311 state->reord = min(fack_count, state->reord);
1307 } 1312 }
1308 1313
1309 /* Nothing to do; acked frame is about to be dropped (was ACKed). */ 1314 /* Nothing to do; acked frame is about to be dropped (was ACKed). */
1310 if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 1315 if (!after(end_seq, tp->snd_una))
1311 return sacked; 1316 return sacked;
1312 1317
1313 if (!(sacked & TCPCB_SACKED_ACKED)) { 1318 if (!(sacked & TCPCB_SACKED_ACKED)) {
@@ -1326,13 +1331,13 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
1326 /* New sack for not retransmitted frame, 1331 /* New sack for not retransmitted frame,
1327 * which was in hole. It is reordering. 1332 * which was in hole. It is reordering.
1328 */ 1333 */
1329 if (before(TCP_SKB_CB(skb)->seq, 1334 if (before(start_seq,
1330 tcp_highest_sack_seq(tp))) 1335 tcp_highest_sack_seq(tp)))
1331 state->reord = min(fack_count, 1336 state->reord = min(fack_count,
1332 state->reord); 1337 state->reord);
1333 1338
1334 /* SACK enhanced F-RTO (RFC4138; Appendix B) */ 1339 /* SACK enhanced F-RTO (RFC4138; Appendix B) */
1335 if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark)) 1340 if (!after(end_seq, tp->frto_highmark))
1336 state->flag |= FLAG_ONLY_ORIG_SACKED; 1341 state->flag |= FLAG_ONLY_ORIG_SACKED;
1337 } 1342 }
1338 1343
@@ -1350,8 +1355,7 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
1350 1355
1351 /* Lost marker hint past SACKed? Tweak RFC3517 cnt */ 1356 /* Lost marker hint past SACKed? Tweak RFC3517 cnt */
1352 if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) && 1357 if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
1353 before(TCP_SKB_CB(skb)->seq, 1358 before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
1354 TCP_SKB_CB(tp->lost_skb_hint)->seq))
1355 tp->lost_cnt_hint += pcount; 1359 tp->lost_cnt_hint += pcount;
1356 1360
1357 if (fack_count > tp->fackets_out) 1361 if (fack_count > tp->fackets_out)
@@ -1370,6 +1374,9 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
1370 return sacked; 1374 return sacked;
1371} 1375}
1372 1376
1377/* Shift newly-SACKed bytes from this skb to the immediately previous
1378 * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
1379 */
1373static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, 1380static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1374 struct tcp_sacktag_state *state, 1381 struct tcp_sacktag_state *state,
1375 unsigned int pcount, int shifted, int mss, 1382 unsigned int pcount, int shifted, int mss,
@@ -1377,12 +1384,21 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1377{ 1384{
1378 struct tcp_sock *tp = tcp_sk(sk); 1385 struct tcp_sock *tp = tcp_sk(sk);
1379 struct sk_buff *prev = tcp_write_queue_prev(sk, skb); 1386 struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
1387 u32 start_seq = TCP_SKB_CB(skb)->seq; /* start of newly-SACKed */
1388 u32 end_seq = start_seq + shifted; /* end of newly-SACKed */
1380 1389
1381 BUG_ON(!pcount); 1390 BUG_ON(!pcount);
1382 1391
1383 /* Tweak before seqno plays */ 1392 /* Adjust counters and hints for the newly sacked sequence
1384 if (!tcp_is_fack(tp) && tcp_is_sack(tp) && tp->lost_skb_hint && 1393 * range but discard the return value since prev is already
1385 !before(TCP_SKB_CB(tp->lost_skb_hint)->seq, TCP_SKB_CB(skb)->seq)) 1394 * marked. We must tag the range first because the seq
1395 * advancement below implicitly advances
1396 * tcp_highest_sack_seq() when skb is highest_sack.
1397 */
1398 tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
1399 start_seq, end_seq, dup_sack, pcount);
1400
1401 if (skb == tp->lost_skb_hint)
1386 tp->lost_cnt_hint += pcount; 1402 tp->lost_cnt_hint += pcount;
1387 1403
1388 TCP_SKB_CB(prev)->end_seq += shifted; 1404 TCP_SKB_CB(prev)->end_seq += shifted;
@@ -1408,9 +1424,6 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1408 skb_shinfo(skb)->gso_type = 0; 1424 skb_shinfo(skb)->gso_type = 0;
1409 } 1425 }
1410 1426
1411 /* We discard results */
1412 tcp_sacktag_one(skb, sk, state, dup_sack, pcount);
1413
1414 /* Difference in this won't matter, both ACKed by the same cumul. ACK */ 1427 /* Difference in this won't matter, both ACKed by the same cumul. ACK */
1415 TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS); 1428 TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
1416 1429
@@ -1558,6 +1571,10 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
1558 } 1571 }
1559 } 1572 }
1560 1573
1574 /* tcp_sacktag_one() won't SACK-tag ranges below snd_una */
1575 if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
1576 goto fallback;
1577
1561 if (!skb_shift(prev, skb, len)) 1578 if (!skb_shift(prev, skb, len))
1562 goto fallback; 1579 goto fallback;
1563 if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack)) 1580 if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
@@ -1648,10 +1665,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
1648 break; 1665 break;
1649 1666
1650 if (in_sack) { 1667 if (in_sack) {
1651 TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(skb, sk, 1668 TCP_SKB_CB(skb)->sacked =
1652 state, 1669 tcp_sacktag_one(sk,
1653 dup_sack, 1670 state,
1654 tcp_skb_pcount(skb)); 1671 TCP_SKB_CB(skb)->sacked,
1672 TCP_SKB_CB(skb)->seq,
1673 TCP_SKB_CB(skb)->end_seq,
1674 dup_sack,
1675 tcp_skb_pcount(skb));
1655 1676
1656 if (!before(TCP_SKB_CB(skb)->seq, 1677 if (!before(TCP_SKB_CB(skb)->seq,
1657 tcp_highest_sack_seq(tp))) 1678 tcp_highest_sack_seq(tp)))
@@ -2536,6 +2557,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
2536 2557
2537 if (cnt > packets) { 2558 if (cnt > packets) {
2538 if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) || 2559 if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
2560 (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
2539 (oldcnt >= packets)) 2561 (oldcnt >= packets))
2540 break; 2562 break;
2541 2563
@@ -5318,7 +5340,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
5318 if (tp->copied_seq == tp->rcv_nxt && 5340 if (tp->copied_seq == tp->rcv_nxt &&
5319 len - tcp_header_len <= tp->ucopy.len) { 5341 len - tcp_header_len <= tp->ucopy.len) {
5320#ifdef CONFIG_NET_DMA 5342#ifdef CONFIG_NET_DMA
5321 if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) { 5343 if (tp->ucopy.task == current &&
5344 sock_owned_by_user(sk) &&
5345 tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
5322 copied_early = 1; 5346 copied_early = 1;
5323 eaten = 1; 5347 eaten = 1;
5324 } 5348 }
@@ -5739,6 +5763,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5739 goto discard; 5763 goto discard;
5740 5764
5741 if (th->syn) { 5765 if (th->syn) {
5766 if (th->fin)
5767 goto discard;
5742 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) 5768 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
5743 return 1; 5769 return 1;
5744 5770
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 708dc203b03..53a5af66c0b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -72,6 +72,7 @@
72#include <net/timewait_sock.h> 72#include <net/timewait_sock.h>
73#include <net/xfrm.h> 73#include <net/xfrm.h>
74#include <net/netdma.h> 74#include <net/netdma.h>
75#include <net/secure_seq.h>
75 76
76#include <linux/inet.h> 77#include <linux/inet.h>
77#include <linux/ipv6.h> 78#include <linux/ipv6.h>
@@ -629,7 +630,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
629 arg.iov[0].iov_len = sizeof(rep.th); 630 arg.iov[0].iov_len = sizeof(rep.th);
630 631
631#ifdef CONFIG_TCP_MD5SIG 632#ifdef CONFIG_TCP_MD5SIG
632 key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL; 633 key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->saddr) : NULL;
633 if (key) { 634 if (key) {
634 rep.opt[0] = htonl((TCPOPT_NOP << 24) | 635 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
635 (TCPOPT_NOP << 16) | 636 (TCPOPT_NOP << 16) |
@@ -649,6 +650,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
649 arg.iov[0].iov_len, IPPROTO_TCP, 0); 650 arg.iov[0].iov_len, IPPROTO_TCP, 0);
650 arg.csumoffset = offsetof(struct tcphdr, check) / 2; 651 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
651 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0; 652 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
653 /* When the socket is gone, all binding information is lost,
654 * and routing might fail. Use iif as oif to make sure the
655 * reply can still be delivered.
656 */
657 arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
652 658
653 net = dev_net(skb_dst(skb)->dev); 659 net = dev_net(skb_dst(skb)->dev);
654 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr, 660 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
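
Both changes in this hunk deal with replies sent when no full socket may exist. The MD5 lookup now keys on saddr because the RST is addressed to the sender of the offending segment, and that peer's key is the one it will verify the signature with; looking up daddr selected the key for our own local address instead. Binding the reply to the ingress interface likewise keeps routing deterministic when no socket bind information survives.
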
@@ -908,18 +914,21 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
908 } 914 }
909 sk_nocaps_add(sk, NETIF_F_GSO_MASK); 915 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
910 } 916 }
911 if (tcp_alloc_md5sig_pool(sk) == NULL) { 917
918 md5sig = tp->md5sig_info;
919 if (md5sig->entries4 == 0 &&
920 tcp_alloc_md5sig_pool(sk) == NULL) {
912 kfree(newkey); 921 kfree(newkey);
913 return -ENOMEM; 922 return -ENOMEM;
914 } 923 }
915 md5sig = tp->md5sig_info;
916 924
917 if (md5sig->alloced4 == md5sig->entries4) { 925 if (md5sig->alloced4 == md5sig->entries4) {
918 keys = kmalloc((sizeof(*keys) * 926 keys = kmalloc((sizeof(*keys) *
919 (md5sig->entries4 + 1)), GFP_ATOMIC); 927 (md5sig->entries4 + 1)), GFP_ATOMIC);
920 if (!keys) { 928 if (!keys) {
921 kfree(newkey); 929 kfree(newkey);
922 tcp_free_md5sig_pool(); 930 if (md5sig->entries4 == 0)
931 tcp_free_md5sig_pool();
923 return -ENOMEM; 932 return -ENOMEM;
924 } 933 }
925 934
@@ -963,6 +972,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
963 kfree(tp->md5sig_info->keys4); 972 kfree(tp->md5sig_info->keys4);
964 tp->md5sig_info->keys4 = NULL; 973 tp->md5sig_info->keys4 = NULL;
965 tp->md5sig_info->alloced4 = 0; 974 tp->md5sig_info->alloced4 = 0;
975 tcp_free_md5sig_pool();
966 } else if (tp->md5sig_info->entries4 != i) { 976 } else if (tp->md5sig_info->entries4 != i) {
967 /* Need to do some manipulation */ 977 /* Need to do some manipulation */
968 memmove(&tp->md5sig_info->keys4[i], 978 memmove(&tp->md5sig_info->keys4[i],
@@ -970,7 +980,6 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
970 (tp->md5sig_info->entries4 - i) * 980 (tp->md5sig_info->entries4 - i) *
971 sizeof(struct tcp4_md5sig_key)); 981 sizeof(struct tcp4_md5sig_key));
972 } 982 }
973 tcp_free_md5sig_pool();
974 return 0; 983 return 0;
975 } 984 }
976 } 985 }
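
The reshuffled alloc/free calls tie the MD5 pool get/put to the first key added and the last key removed on a socket, avoiding imbalanced puts of the global pool. For context, the keys these paths manage are installed from userspace roughly as follows (TCP_MD5SIG and struct tcp_md5sig are the standard interface used by BGP daemons; glibc exposes them via netinet/tcp.h, otherwise they live in linux/tcp.h — the address and key here are placeholders):

    #include <string.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    static int add_md5_key(int fd, const struct sockaddr_in *peer)
    {
            struct tcp_md5sig sig;

            memset(&sig, 0, sizeof(sig));
            memcpy(&sig.tcpm_addr, peer, sizeof(*peer)); /* peer to sign for */
            sig.tcpm_keylen = 6;
            memcpy(sig.tcpm_key, "secret", 6);           /* placeholder key */

            /* For an IPv4 peer this ends up in tcp_v4_md5_do_add() */
            return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &sig, sizeof(sig));
    }
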
@@ -1445,9 +1454,13 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1445 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen; 1454 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1446 newinet->inet_id = newtp->write_seq ^ jiffies; 1455 newinet->inet_id = newtp->write_seq ^ jiffies;
1447 1456
1448 if (!dst && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL) 1457 if (!dst) {
1449 goto put_and_exit; 1458 dst = inet_csk_route_child_sock(sk, newsk, req);
1450 1459 if (!dst)
1460 goto put_and_exit;
1461 } else {
1462 /* syncookie case : see end of cookie_v4_check() */
1463 }
1451 sk_setup_caps(newsk, dst); 1464 sk_setup_caps(newsk, dst);
1452 1465
1453 tcp_mtup_init(newsk); 1466 tcp_mtup_init(newsk);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 882e0b0964d..faf257b9415 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1134,11 +1134,9 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
1134 sk_mem_uncharge(sk, len); 1134 sk_mem_uncharge(sk, len);
1135 sock_set_flag(sk, SOCK_QUEUE_SHRUNK); 1135 sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
1136 1136
1137 /* Any change of skb->len requires recalculation of tso 1137 /* Any change of skb->len requires recalculation of tso factor. */
1138 * factor and mss.
1139 */
1140 if (tcp_skb_pcount(skb) > 1) 1138 if (tcp_skb_pcount(skb) > 1)
1141 tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk)); 1139 tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
1142 1140
1143 return 0; 1141 return 0;
1144} 1142}
diff --git a/net/ipv4/xfrm4_mode_beet.c b/net/ipv4/xfrm4_mode_beet.c
index 63418185f52..e3db3f91511 100644
--- a/net/ipv4/xfrm4_mode_beet.c
+++ b/net/ipv4/xfrm4_mode_beet.c
@@ -110,10 +110,7 @@ static int xfrm4_beet_input(struct xfrm_state *x, struct sk_buff *skb)
110 110
111 skb_push(skb, sizeof(*iph)); 111 skb_push(skb, sizeof(*iph));
112 skb_reset_network_header(skb); 112 skb_reset_network_header(skb);
113 113 skb_mac_header_rebuild(skb);
114 memmove(skb->data - skb->mac_len, skb_mac_header(skb),
115 skb->mac_len);
116 skb_set_mac_header(skb, -skb->mac_len);
117 114
118 xfrm4_beet_make_header(skb); 115 xfrm4_beet_make_header(skb);
119 116
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index 534972e114a..ed4bf11ef9f 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -66,7 +66,6 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
66 66
67static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) 67static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
68{ 68{
69 const unsigned char *old_mac;
70 int err = -EINVAL; 69 int err = -EINVAL;
71 70
72 if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP) 71 if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
@@ -84,10 +83,9 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
84 if (!(x->props.flags & XFRM_STATE_NOECN)) 83 if (!(x->props.flags & XFRM_STATE_NOECN))
85 ipip_ecn_decapsulate(skb); 84 ipip_ecn_decapsulate(skb);
86 85
87 old_mac = skb_mac_header(skb);
88 skb_set_mac_header(skb, -skb->mac_len);
89 memmove(skb_mac_header(skb), old_mac, skb->mac_len);
90 skb_reset_network_header(skb); 86 skb_reset_network_header(skb);
87 skb_mac_header_rebuild(skb);
88
91 err = 0; 89 err = 0;
92 90
93out: 91out:
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 981e43eaf70..581fe0ab409 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -79,13 +79,13 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
79 struct rtable *rt = (struct rtable *)xdst->route; 79 struct rtable *rt = (struct rtable *)xdst->route;
80 const struct flowi4 *fl4 = &fl->u.ip4; 80 const struct flowi4 *fl4 = &fl->u.ip4;
81 81
82 rt->rt_key_dst = fl4->daddr; 82 xdst->u.rt.rt_key_dst = fl4->daddr;
83 rt->rt_key_src = fl4->saddr; 83 xdst->u.rt.rt_key_src = fl4->saddr;
84 rt->rt_key_tos = fl4->flowi4_tos; 84 xdst->u.rt.rt_key_tos = fl4->flowi4_tos;
85 rt->rt_route_iif = fl4->flowi4_iif; 85 xdst->u.rt.rt_route_iif = fl4->flowi4_iif;
86 rt->rt_iif = fl4->flowi4_iif; 86 xdst->u.rt.rt_iif = fl4->flowi4_iif;
87 rt->rt_oif = fl4->flowi4_oif; 87 xdst->u.rt.rt_oif = fl4->flowi4_oif;
88 rt->rt_mark = fl4->flowi4_mark; 88 xdst->u.rt.rt_mark = fl4->flowi4_mark;
89 89
90 xdst->u.dst.dev = dev; 90 xdst->u.dst.dev = dev;
91 dev_hold(dev); 91 dev_hold(dev);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 498b927f68b..8a4bf719c25 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -433,6 +433,10 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
433 /* Join all-node multicast group */ 433 /* Join all-node multicast group */
434 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes); 434 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
435 435
436 /* Join all-router multicast group if forwarding is set */
437 if (ndev->cnf.forwarding && dev && (dev->flags & IFF_MULTICAST))
438 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
439
436 return ndev; 440 return ndev;
437} 441}
438 442
@@ -656,7 +660,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
656 * layer address of our nexthop router 660 * layer address of our nexthop router
657 */ 661 */
658 662
659 if (rt->rt6i_nexthop == NULL) 663 if (dst_get_neighbour_raw(&rt->dst) == NULL)
660 ifa->flags &= ~IFA_F_OPTIMISTIC; 664 ifa->flags &= ~IFA_F_OPTIMISTIC;
661 665
662 ifa->idev = idev; 666 ifa->idev = idev;
@@ -824,12 +828,13 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
824{ 828{
825 struct inet6_dev *idev = ifp->idev; 829 struct inet6_dev *idev = ifp->idev;
826 struct in6_addr addr, *tmpaddr; 830 struct in6_addr addr, *tmpaddr;
827 unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_cstamp, tmp_tstamp, age; 831 unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_tstamp, age;
828 unsigned long regen_advance; 832 unsigned long regen_advance;
829 int tmp_plen; 833 int tmp_plen;
830 int ret = 0; 834 int ret = 0;
831 int max_addresses; 835 int max_addresses;
832 u32 addr_flags; 836 u32 addr_flags;
837 unsigned long now = jiffies;
833 838
834 write_lock(&idev->lock); 839 write_lock(&idev->lock);
835 if (ift) { 840 if (ift) {
@@ -874,7 +879,7 @@ retry:
874 goto out; 879 goto out;
875 } 880 }
876 memcpy(&addr.s6_addr[8], idev->rndid, 8); 881 memcpy(&addr.s6_addr[8], idev->rndid, 8);
877 age = (jiffies - ifp->tstamp) / HZ; 882 age = (now - ifp->tstamp) / HZ;
878 tmp_valid_lft = min_t(__u32, 883 tmp_valid_lft = min_t(__u32,
879 ifp->valid_lft, 884 ifp->valid_lft,
880 idev->cnf.temp_valid_lft + age); 885 idev->cnf.temp_valid_lft + age);
@@ -884,7 +889,6 @@ retry:
884 idev->cnf.max_desync_factor); 889 idev->cnf.max_desync_factor);
885 tmp_plen = ifp->prefix_len; 890 tmp_plen = ifp->prefix_len;
886 max_addresses = idev->cnf.max_addresses; 891 max_addresses = idev->cnf.max_addresses;
887 tmp_cstamp = ifp->cstamp;
888 tmp_tstamp = ifp->tstamp; 892 tmp_tstamp = ifp->tstamp;
889 spin_unlock_bh(&ifp->lock); 893 spin_unlock_bh(&ifp->lock);
890 894
@@ -929,7 +933,7 @@ retry:
929 ift->ifpub = ifp; 933 ift->ifpub = ifp;
930 ift->valid_lft = tmp_valid_lft; 934 ift->valid_lft = tmp_valid_lft;
931 ift->prefered_lft = tmp_prefered_lft; 935 ift->prefered_lft = tmp_prefered_lft;
932 ift->cstamp = tmp_cstamp; 936 ift->cstamp = now;
933 ift->tstamp = tmp_tstamp; 937 ift->tstamp = tmp_tstamp;
934 spin_unlock_bh(&ift->lock); 938 spin_unlock_bh(&ift->lock);
935 939
@@ -1988,25 +1992,50 @@ ok:
1988#ifdef CONFIG_IPV6_PRIVACY 1992#ifdef CONFIG_IPV6_PRIVACY
1989 read_lock_bh(&in6_dev->lock); 1993 read_lock_bh(&in6_dev->lock);
1990 /* update all temporary addresses in the list */ 1994 /* update all temporary addresses in the list */
1991 list_for_each_entry(ift, &in6_dev->tempaddr_list, tmp_list) { 1995 list_for_each_entry(ift, &in6_dev->tempaddr_list,
1992 /* 1996 tmp_list) {
1993 * When adjusting the lifetimes of an existing 1997 int age, max_valid, max_prefered;
1994 * temporary address, only lower the lifetimes. 1998
1995 * Implementations must not increase the
1996 * lifetimes of an existing temporary address
1997 * when processing a Prefix Information Option.
1998 */
1999 if (ifp != ift->ifpub) 1999 if (ifp != ift->ifpub)
2000 continue; 2000 continue;
2001 2001
2002 /*
2003 * RFC 4941 section 3.3:
2004 * If a received option will extend the lifetime
2005 * of a public address, the lifetimes of
2006 * temporary addresses should be extended,
2007 * subject to the overall constraint that no
2008 * temporary addresses should ever remain
2009 * "valid" or "preferred" for a time longer than
2010 * (TEMP_VALID_LIFETIME) or
2011 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR),
2012 * respectively.
2013 */
2014 age = (now - ift->cstamp) / HZ;
2015 max_valid = in6_dev->cnf.temp_valid_lft - age;
2016 if (max_valid < 0)
2017 max_valid = 0;
2018
2019 max_prefered = in6_dev->cnf.temp_prefered_lft -
2020 in6_dev->cnf.max_desync_factor -
2021 age;
2022 if (max_prefered < 0)
2023 max_prefered = 0;
2024
2025 if (valid_lft > max_valid)
2026 valid_lft = max_valid;
2027
2028 if (prefered_lft > max_prefered)
2029 prefered_lft = max_prefered;
2030
2002 spin_lock(&ift->lock); 2031 spin_lock(&ift->lock);
2003 flags = ift->flags; 2032 flags = ift->flags;
2004 if (ift->valid_lft > valid_lft && 2033 ift->valid_lft = valid_lft;
2005 ift->valid_lft - valid_lft > (jiffies - ift->tstamp) / HZ) 2034 ift->prefered_lft = prefered_lft;
2006 ift->valid_lft = valid_lft + (jiffies - ift->tstamp) / HZ; 2035 ift->tstamp = now;
2007 if (ift->prefered_lft > prefered_lft && 2036 if (prefered_lft > 0)
2008 ift->prefered_lft - prefered_lft > (jiffies - ift->tstamp) / HZ) 2037 ift->flags &= ~IFA_F_DEPRECATED;
2009 ift->prefered_lft = prefered_lft + (jiffies - ift->tstamp) / HZ; 2038
2010 spin_unlock(&ift->lock); 2039 spin_unlock(&ift->lock);
2011 if (!(flags&IFA_F_TENTATIVE)) 2040 if (!(flags&IFA_F_TENTATIVE))
2012 ipv6_ifa_notify(0, ift); 2041 ipv6_ifa_notify(0, ift);
@@ -2014,9 +2043,11 @@ ok:
2014 2043
2015 if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) { 2044 if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) {
2016 /* 2045 /*
2017 * When a new public address is created as described in [ADDRCONF], 2046 * When a new public address is created as
2018 * also create a new temporary address. Also create a temporary 2047 * described in [ADDRCONF], also create a new
2019 * address if it's enabled but no temporary address currently exists. 2048 * temporary address. Also create a temporary
2049 * address if it's enabled but no temporary
2050 * address currently exists.
2020 */ 2051 */
2021 read_unlock_bh(&in6_dev->lock); 2052 read_unlock_bh(&in6_dev->lock);
2022 ipv6_create_tempaddr(ifp, NULL); 2053 ipv6_create_tempaddr(ifp, NULL);
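
A worked example of the new clamping, using the stock defaults (temp_valid_lft = 604800 s, temp_prefered_lft = 86400 s, max_desync_factor = 600 s): for a temporary address created a day ago, age = 86400, so max_valid = 604800 - 86400 = 518400 while max_prefered = 86400 - 600 - 86400 is negative and clamps to 0. A router advertising valid/preferred lifetimes of 2592000/604800 therefore extends the temporary address to valid_lft = 518400 but prefered_lft = 0, leaving it deprecated — exactly the TEMP_*_LIFETIME ceilings RFC 4941 section 3.3 asks for.
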
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 3b5669a2582..7e8340ef5a2 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -63,6 +63,20 @@
63#include <asm/system.h> 63#include <asm/system.h>
64#include <linux/mroute6.h> 64#include <linux/mroute6.h>
65 65
66#ifdef CONFIG_ANDROID_PARANOID_NETWORK
67#include <linux/android_aid.h>
68
69static inline int current_has_network(void)
70{
71 return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
72}
73#else
74static inline int current_has_network(void)
75{
76 return 1;
77}
78#endif
79
66MODULE_AUTHOR("Cast of dozens"); 80MODULE_AUTHOR("Cast of dozens");
67MODULE_DESCRIPTION("IPv6 protocol stack for Linux"); 81MODULE_DESCRIPTION("IPv6 protocol stack for Linux");
68MODULE_LICENSE("GPL"); 82MODULE_LICENSE("GPL");
@@ -109,6 +123,9 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
109 int try_loading_module = 0; 123 int try_loading_module = 0;
110 int err; 124 int err;
111 125
126 if (!current_has_network())
127 return -EACCES;
128
112 if (sock->type != SOCK_RAW && 129 if (sock->type != SOCK_RAW &&
113 sock->type != SOCK_DGRAM && 130 sock->type != SOCK_DGRAM &&
114 !inet_ehash_secret) 131 !inet_ehash_secret)
@@ -477,6 +494,21 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
477 494
478EXPORT_SYMBOL(inet6_getname); 495EXPORT_SYMBOL(inet6_getname);
479 496
497int inet6_killaddr_ioctl(struct net *net, void __user *arg) {
498 struct in6_ifreq ireq;
499 struct sockaddr_in6 sin6;
500
501 if (!capable(CAP_NET_ADMIN))
502 return -EACCES;
503
504 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
505 return -EFAULT;
506
507 sin6.sin6_family = AF_INET6;
508 ipv6_addr_copy(&sin6.sin6_addr, &ireq.ifr6_addr);
509 return tcp_nuke_addr(net, (struct sockaddr *) &sin6);
510}
511
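
A sketch of how a connectivity manager might invoke this from userspace (the SIOCKILLADDR request code and its 0x8939 value are Android-specific assumptions from this patch set, and struct in6_ifreq is mirrored locally to sidestep kernel/libc header clashes; the address is a placeholder):

    #include <string.h>
    #include <unistd.h>
    #include <arpa/inet.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>

    #ifndef SIOCKILLADDR
    #define SIOCKILLADDR 0x8939     /* assumed: Android kernel headers */
    #endif

    /* mirrors struct in6_ifreq from linux/ipv6.h */
    struct in6_ifreq_compat {
            struct in6_addr ifr6_addr;
            unsigned int    ifr6_prefixlen;
            int             ifr6_ifindex;
    };

    static int kill_sockets_on(const char *addr6)
    {
            struct in6_ifreq_compat ireq;
            int ret, fd = socket(AF_INET6, SOCK_DGRAM, 0);

            if (fd < 0)
                    return -1;
            memset(&ireq, 0, sizeof(ireq));
            inet_pton(AF_INET6, addr6, &ireq.ifr6_addr);

            /* Lands in inet6_killaddr_ioctl(), which feeds tcp_nuke_addr() */
            ret = ioctl(fd, SIOCKILLADDR, &ireq);
            close(fd);
            return ret;
    }

Passing "::" (the unspecified address) tears down every socket whose local address is no longer configured, per the tcp_nuke_addr() contract above.
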
480int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 512int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
481{ 513{
482 struct sock *sk = sock->sk; 514 struct sock *sk = sock->sk;
@@ -501,6 +533,8 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
501 return addrconf_del_ifaddr(net, (void __user *) arg); 533 return addrconf_del_ifaddr(net, (void __user *) arg);
502 case SIOCSIFDSTADDR: 534 case SIOCSIFDSTADDR:
503 return addrconf_set_dstaddr(net, (void __user *) arg); 535 return addrconf_set_dstaddr(net, (void __user *) arg);
536 case SIOCKILLADDR:
537 return inet6_killaddr_ioctl(net, (void __user *) arg);
504 default: 538 default:
505 if (!sk->sk_prot->ioctl) 539 if (!sk->sk_prot->ioctl)
506 return -ENOIOCTLCMD; 540 return -ENOIOCTLCMD;
@@ -1078,6 +1112,8 @@ static int __init inet6_init(void)
1078 goto out; 1112 goto out;
1079 } 1113 }
1080 1114
1115 initialize_hashidentrnd();
1116
1081 err = proto_register(&tcpv6_prot, 1); 1117 err = proto_register(&tcpv6_prot, 1);
1082 if (err) 1118 if (err)
1083 goto out; 1119 goto out;
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 2195ae65192..4c0f894d084 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -324,8 +324,6 @@ static void ah6_output_done(struct crypto_async_request *base, int err)
324#endif 324#endif
325 } 325 }
326 326
327 err = ah->nexthdr;
328
329 kfree(AH_SKB_CB(skb)->tmp); 327 kfree(AH_SKB_CB(skb)->tmp);
330 xfrm_output_resume(skb, err); 328 xfrm_output_resume(skb, err);
331} 329}
@@ -466,12 +464,12 @@ static void ah6_input_done(struct crypto_async_request *base, int err)
466 if (err) 464 if (err)
467 goto out; 465 goto out;
468 466
467 err = ah->nexthdr;
468
469 skb->network_header += ah_hlen; 469 skb->network_header += ah_hlen;
470 memcpy(skb_network_header(skb), work_iph, hdr_len); 470 memcpy(skb_network_header(skb), work_iph, hdr_len);
471 __skb_pull(skb, ah_hlen + hdr_len); 471 __skb_pull(skb, ah_hlen + hdr_len);
472 skb_set_transport_header(skb, -hdr_len); 472 skb_set_transport_header(skb, -hdr_len);
473
474 err = ah->nexthdr;
475out: 473out:
476 kfree(AH_SKB_CB(skb)->tmp); 474 kfree(AH_SKB_CB(skb)->tmp);
477 xfrm_input_resume(skb, err); 475 xfrm_input_resume(skb, err);
@@ -583,8 +581,6 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
583 if (err == -EINPROGRESS) 581 if (err == -EINPROGRESS)
584 goto out; 582 goto out;
585 583
586 if (err == -EBUSY)
587 err = NET_XMIT_DROP;
588 goto out_free; 584 goto out_free;
589 } 585 }
590 586
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 1ac7938dd9e..65dd5433f08 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -411,19 +411,15 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
411 struct esp_data *esp = x->data; 411 struct esp_data *esp = x->data;
412 u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4); 412 u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
413 u32 align = max_t(u32, blksize, esp->padlen); 413 u32 align = max_t(u32, blksize, esp->padlen);
414 u32 rem; 414 unsigned int net_adj;
415 415
416 mtu -= x->props.header_len + crypto_aead_authsize(esp->aead); 416 if (x->props.mode != XFRM_MODE_TUNNEL)
417 rem = mtu & (align - 1); 417 net_adj = sizeof(struct ipv6hdr);
418 mtu &= ~(align - 1); 418 else
419 419 net_adj = 0;
420 if (x->props.mode != XFRM_MODE_TUNNEL) {
421 u32 padsize = ((blksize - 1) & 7) + 1;
422 mtu -= blksize - padsize;
423 mtu += min_t(u32, blksize - padsize, rem);
424 }
425 420
426 return mtu - 2; 421 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
422 net_adj) & ~(align - 1)) + (net_adj - 2);
427} 423}
428 424
429static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 425static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
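
Plugging in typical numbers shows what the simplified expression computes (assuming AES-CBC with a 16-byte block, so align = 16; x->props.header_len = 24 for SPI, sequence number and IV; a 12-byte HMAC-SHA1-96 ICV; and transport mode, so net_adj = 40 for the IPv6 header): for mtu = 1500 the result is ((1500 - 24 - 12 - 40) & ~15) + (40 - 2) = 1424 + 38 = 1462 bytes. The old code tried to credit back part of the rounding remainder and could overestimate; the new form simply rounds the ESP payload down to a whole number of cipher blocks.
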
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index b5319723370..73f1a00a96a 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -20,6 +20,7 @@
20#include <net/inet_connection_sock.h> 20#include <net/inet_connection_sock.h>
21#include <net/inet_hashtables.h> 21#include <net/inet_hashtables.h>
22#include <net/inet6_hashtables.h> 22#include <net/inet6_hashtables.h>
23#include <net/secure_seq.h>
23#include <net/ip.h> 24#include <net/ip.h>
24 25
25int __inet6_hash(struct sock *sk, struct inet_timewait_sock *tw) 26int __inet6_hash(struct sock *sk, struct inet_timewait_sock *tw)
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 4076a0b14b2..0f9b37a1c1d 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1455,7 +1455,7 @@ static int fib6_age(struct rt6_info *rt, void *arg)
1455 RT6_TRACE("aging clone %p\n", rt); 1455 RT6_TRACE("aging clone %p\n", rt);
1456 return -1; 1456 return -1;
1457 } else if ((rt->rt6i_flags & RTF_GATEWAY) && 1457 } else if ((rt->rt6i_flags & RTF_GATEWAY) &&
1458 (!(rt->rt6i_nexthop->flags & NTF_ROUTER))) { 1458 (!(dst_get_neighbour_raw(&rt->dst)->flags & NTF_ROUTER))) {
1459 RT6_TRACE("purging route %p via non-router but gateway\n", 1459 RT6_TRACE("purging route %p via non-router but gateway\n",
1460 rt); 1460 rt);
1461 return -1; 1461 return -1;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 9d4b165837d..ae9f6d43617 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -100,6 +100,8 @@ static int ip6_finish_output2(struct sk_buff *skb)
100{ 100{
101 struct dst_entry *dst = skb_dst(skb); 101 struct dst_entry *dst = skb_dst(skb);
102 struct net_device *dev = dst->dev; 102 struct net_device *dev = dst->dev;
103 struct neighbour *neigh;
104 int res;
103 105
104 skb->protocol = htons(ETH_P_IPV6); 106 skb->protocol = htons(ETH_P_IPV6);
105 skb->dev = dev; 107 skb->dev = dev;
@@ -134,10 +136,22 @@ static int ip6_finish_output2(struct sk_buff *skb)
134 skb->len); 136 skb->len);
135 } 137 }
136 138
137 if (dst->hh) 139 rcu_read_lock();
138 return neigh_hh_output(dst->hh, skb); 140 if (dst->hh) {
139 else if (dst->neighbour) 141 res = neigh_hh_output(dst->hh, skb);
140 return dst->neighbour->output(skb); 142
143 rcu_read_unlock();
144 return res;
145 } else {
146 neigh = dst_get_neighbour(dst);
147 if (neigh) {
148 res = neigh->output(skb);
149
150 rcu_read_unlock();
151 return res;
152 }
153 rcu_read_unlock();
154 }
141 155
142 IP6_INC_STATS_BH(dev_net(dst->dev), 156 IP6_INC_STATS_BH(dev_net(dst->dev),
143 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); 157 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
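
The rcu_read_lock() bracketing reflects the concurrent change that moved the neighbour pointer behind dst_get_neighbour(), which returns an RCU-protected pointer: the neighbour may be torn down once the read side is left, so the output call must complete (and its result be captured in res) before rcu_read_unlock().
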
@@ -385,6 +399,7 @@ int ip6_forward(struct sk_buff *skb)
385 struct ipv6hdr *hdr = ipv6_hdr(skb); 399 struct ipv6hdr *hdr = ipv6_hdr(skb);
386 struct inet6_skb_parm *opt = IP6CB(skb); 400 struct inet6_skb_parm *opt = IP6CB(skb);
387 struct net *net = dev_net(dst->dev); 401 struct net *net = dev_net(dst->dev);
402 struct neighbour *n;
388 u32 mtu; 403 u32 mtu;
389 404
390 if (net->ipv6.devconf_all->forwarding == 0) 405 if (net->ipv6.devconf_all->forwarding == 0)
@@ -459,11 +474,10 @@ int ip6_forward(struct sk_buff *skb)
459 send redirects to source routed frames. 474 send redirects to source routed frames.
460 We don't send redirects to frames decapsulated from IPsec. 475 We don't send redirects to frames decapsulated from IPsec.
461 */ 476 */
462 if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0 && 477 n = dst_get_neighbour(dst);
463 !skb_sec_path(skb)) { 478 if (skb->dev == dst->dev && n && opt->srcrt == 0 && !skb_sec_path(skb)) {
464 struct in6_addr *target = NULL; 479 struct in6_addr *target = NULL;
465 struct rt6_info *rt; 480 struct rt6_info *rt;
466 struct neighbour *n = dst->neighbour;
467 481
468 /* 482 /*
469 * incoming and outgoing devices are the same 483 * incoming and outgoing devices are the same
@@ -596,6 +610,35 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
596 return offset; 610 return offset;
597} 611}
598 612
613static u32 hashidentrnd __read_mostly;
614#define FID_HASH_SZ 16
615static u32 ipv6_fragmentation_id[FID_HASH_SZ];
616
617void __init initialize_hashidentrnd(void)
618{
619 get_random_bytes(&hashidentrnd, sizeof(hashidentrnd));
620}
621
622static u32 __ipv6_select_ident(const struct in6_addr *addr)
623{
624 u32 newid, oldid, hash = jhash2((u32 *)addr, 4, hashidentrnd);
625 u32 *pid = &ipv6_fragmentation_id[hash % FID_HASH_SZ];
626
627 do {
628 oldid = *pid;
629 newid = oldid + 1;
630 if (!(hash + newid))
631 newid++;
632 } while (cmpxchg(pid, oldid, newid) != oldid);
633
634 return hash + newid;
635}
636
637void ipv6_select_ident(struct frag_hdr *fhdr, struct in6_addr *addr)
638{
639 fhdr->identification = htonl(__ipv6_select_ident(addr));
640}
641
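
The replacement generator deserves a note: fragment IDs are no longer one global counter but sixteen per-destination-hash counters, each advanced locklessly with cmpxchg() and reported offset by the keyed hash, so the IDs seen by one destination reveal nothing about the rate of fragments sent elsewhere. The `if (!(hash + newid)) newid++` step skips the value 0, which the slow path in ip6_fragment() below uses as its "no ID chosen yet" sentinel.
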
599int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) 642int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
600{ 643{
601 struct sk_buff *frag; 644 struct sk_buff *frag;
@@ -680,7 +723,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
680 skb_reset_network_header(skb); 723 skb_reset_network_header(skb);
681 memcpy(skb_network_header(skb), tmp_hdr, hlen); 724 memcpy(skb_network_header(skb), tmp_hdr, hlen);
682 725
683 ipv6_select_ident(fh); 726 ipv6_select_ident(fh, &rt->rt6i_dst.addr);
684 fh->nexthdr = nexthdr; 727 fh->nexthdr = nexthdr;
685 fh->reserved = 0; 728 fh->reserved = 0;
686 fh->frag_off = htons(IP6_MF); 729 fh->frag_off = htons(IP6_MF);
@@ -826,7 +869,7 @@ slow_path:
826 fh->nexthdr = nexthdr; 869 fh->nexthdr = nexthdr;
827 fh->reserved = 0; 870 fh->reserved = 0;
828 if (!frag_id) { 871 if (!frag_id) {
829 ipv6_select_ident(fh); 872 ipv6_select_ident(fh, &rt->rt6i_dst.addr);
830 frag_id = fh->identification; 873 frag_id = fh->identification;
831 } else 874 } else
832 fh->identification = frag_id; 875 fh->identification = frag_id;
@@ -920,8 +963,11 @@ out:
920static int ip6_dst_lookup_tail(struct sock *sk, 963static int ip6_dst_lookup_tail(struct sock *sk,
921 struct dst_entry **dst, struct flowi6 *fl6) 964 struct dst_entry **dst, struct flowi6 *fl6)
922{ 965{
923 int err;
924 struct net *net = sock_net(sk); 966 struct net *net = sock_net(sk);
967#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
968 struct neighbour *n;
969#endif
970 int err;
925 971
926 if (*dst == NULL) 972 if (*dst == NULL)
927 *dst = ip6_route_output(net, sk, fl6); 973 *dst = ip6_route_output(net, sk, fl6);
@@ -947,11 +993,14 @@ static int ip6_dst_lookup_tail(struct sock *sk,
947 * dst entry and replace it instead with the 993 * dst entry and replace it instead with the
948 * dst entry of the nexthop router 994 * dst entry of the nexthop router
949 */ 995 */
950 if ((*dst)->neighbour && !((*dst)->neighbour->nud_state & NUD_VALID)) { 996 rcu_read_lock();
997 n = dst_get_neighbour(*dst);
998 if (n && !(n->nud_state & NUD_VALID)) {
951 struct inet6_ifaddr *ifp; 999 struct inet6_ifaddr *ifp;
952 struct flowi6 fl_gw6; 1000 struct flowi6 fl_gw6;
953 int redirect; 1001 int redirect;
954 1002
1003 rcu_read_unlock();
955 ifp = ipv6_get_ifaddr(net, &fl6->saddr, 1004 ifp = ipv6_get_ifaddr(net, &fl6->saddr,
956 (*dst)->dev, 1); 1005 (*dst)->dev, 1);
957 1006
@@ -971,6 +1020,8 @@ static int ip6_dst_lookup_tail(struct sock *sk,
971 if ((err = (*dst)->error)) 1020 if ((err = (*dst)->error))
972 goto out_err_release; 1021 goto out_err_release;
973 } 1022 }
1023 } else {
1024 rcu_read_unlock();
974 } 1025 }
975#endif 1026#endif
976 1027
@@ -1072,7 +1123,8 @@ static inline int ip6_ufo_append_data(struct sock *sk,
1072 int getfrag(void *from, char *to, int offset, int len, 1123 int getfrag(void *from, char *to, int offset, int len,
1073 int odd, struct sk_buff *skb), 1124 int odd, struct sk_buff *skb),
1074 void *from, int length, int hh_len, int fragheaderlen, 1125 void *from, int length, int hh_len, int fragheaderlen,
1075 int transhdrlen, int mtu,unsigned int flags) 1126 int transhdrlen, int mtu,unsigned int flags,
1127 struct rt6_info *rt)
1076 1128
1077{ 1129{
1078 struct sk_buff *skb; 1130 struct sk_buff *skb;
@@ -1116,7 +1168,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
1116 skb_shinfo(skb)->gso_size = (mtu - fragheaderlen - 1168 skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
1117 sizeof(struct frag_hdr)) & ~7; 1169 sizeof(struct frag_hdr)) & ~7;
1118 skb_shinfo(skb)->gso_type = SKB_GSO_UDP; 1170 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1119 ipv6_select_ident(&fhdr); 1171 ipv6_select_ident(&fhdr, &rt->rt6i_dst.addr);
1120 skb_shinfo(skb)->ip6_frag_id = fhdr.identification; 1172 skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
1121 __skb_queue_tail(&sk->sk_write_queue, skb); 1173 __skb_queue_tail(&sk->sk_write_queue, skb);
1122 1174
@@ -1142,6 +1194,29 @@ static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
1142 return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL; 1194 return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1143} 1195}
1144 1196
1197static void ip6_append_data_mtu(int *mtu,
1198 int *maxfraglen,
1199 unsigned int fragheaderlen,
1200 struct sk_buff *skb,
1201 struct rt6_info *rt)
1202{
1203 if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
1204 if (skb == NULL) {
1205 /* first fragment, reserve header_len */
1206 *mtu = *mtu - rt->dst.header_len;
1207
1208 } else {
1209 /*
1210 * this fragment is not first, the headers
1211 * space is regarded as data space.
1212 */
1213 *mtu = dst_mtu(rt->dst.path);
1214 }
1215 *maxfraglen = ((*mtu - fragheaderlen) & ~7)
1216 + fragheaderlen - sizeof(struct frag_hdr);
1217 }
1218}
1219
1145int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, 1220int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1146 int offset, int len, int odd, struct sk_buff *skb), 1221 int offset, int len, int odd, struct sk_buff *skb),
1147 void *from, int length, int transhdrlen, 1222 void *from, int length, int transhdrlen,
@@ -1151,7 +1226,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1151 struct inet_sock *inet = inet_sk(sk); 1226 struct inet_sock *inet = inet_sk(sk);
1152 struct ipv6_pinfo *np = inet6_sk(sk); 1227 struct ipv6_pinfo *np = inet6_sk(sk);
1153 struct inet_cork *cork; 1228 struct inet_cork *cork;
1154 struct sk_buff *skb; 1229 struct sk_buff *skb, *skb_prev = NULL;
1155 unsigned int maxfraglen, fragheaderlen; 1230 unsigned int maxfraglen, fragheaderlen;
1156 int exthdrlen; 1231 int exthdrlen;
1157 int hh_len; 1232 int hh_len;
@@ -1208,8 +1283,12 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1208 inet->cork.fl.u.ip6 = *fl6; 1283 inet->cork.fl.u.ip6 = *fl6;
1209 np->cork.hop_limit = hlimit; 1284 np->cork.hop_limit = hlimit;
1210 np->cork.tclass = tclass; 1285 np->cork.tclass = tclass;
1211 mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ? 1286 if (rt->dst.flags & DST_XFRM_TUNNEL)
1212 rt->dst.dev->mtu : dst_mtu(rt->dst.path); 1287 mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
1288 rt->dst.dev->mtu : dst_mtu(&rt->dst);
1289 else
1290 mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
1291 rt->dst.dev->mtu : dst_mtu(rt->dst.path);
1213 if (np->frag_size < mtu) { 1292 if (np->frag_size < mtu) {
1214 if (np->frag_size) 1293 if (np->frag_size)
1215 mtu = np->frag_size; 1294 mtu = np->frag_size;
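
The distinction being drawn here: for tunnel-mode IPsec (DST_XFRM_TUNNEL) the dst's own MTU already accounts for the encapsulation overhead, so it can be used directly, while in the non-tunnel case the first fragment must still reserve rt->dst.header_len (handled by ip6_append_data_mtu() above) and later fragments are sized to the path MTU, with the xfrm trailer counted as data rather than overallocated on every fragment.
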
@@ -1282,7 +1361,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1282 1361
1283 err = ip6_ufo_append_data(sk, getfrag, from, length, 1362 err = ip6_ufo_append_data(sk, getfrag, from, length,
1284 hh_len, fragheaderlen, 1363 hh_len, fragheaderlen,
1285 transhdrlen, mtu, flags); 1364 transhdrlen, mtu, flags, rt);
1286 if (err) 1365 if (err)
1287 goto error; 1366 goto error;
1288 return 0; 1367 return 0;
@@ -1304,38 +1383,43 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1304 unsigned int fraglen; 1383 unsigned int fraglen;
1305 unsigned int fraggap; 1384 unsigned int fraggap;
1306 unsigned int alloclen; 1385 unsigned int alloclen;
1307 struct sk_buff *skb_prev;
1308alloc_new_skb: 1386alloc_new_skb:
1309 skb_prev = skb;
1310
1311 /* There's no room in the current skb */ 1387 /* There's no room in the current skb */
1312 if (skb_prev) 1388 if (skb)
1313 fraggap = skb_prev->len - maxfraglen; 1389 fraggap = skb->len - maxfraglen;
1314 else 1390 else
1315 fraggap = 0; 1391 fraggap = 0;
1392 /* update mtu and maxfraglen if necessary */
1393 if (skb == NULL || skb_prev == NULL)
1394 ip6_append_data_mtu(&mtu, &maxfraglen,
1395 fragheaderlen, skb, rt);
1396
1397 skb_prev = skb;
1316 1398
1317 /* 1399 /*
1318 * If remaining data exceeds the mtu, 1400 * If remaining data exceeds the mtu,
1319 * we know we need more fragment(s). 1401 * we know we need more fragment(s).
1320 */ 1402 */
1321 datalen = length + fraggap; 1403 datalen = length + fraggap;
1322 if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1323 datalen = maxfraglen - fragheaderlen;
1324 1404
1325 fraglen = datalen + fragheaderlen; 1405 if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1406 datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
1326 if ((flags & MSG_MORE) && 1407 if ((flags & MSG_MORE) &&
1327 !(rt->dst.dev->features&NETIF_F_SG)) 1408 !(rt->dst.dev->features&NETIF_F_SG))
1328 alloclen = mtu; 1409 alloclen = mtu;
1329 else 1410 else
1330 alloclen = datalen + fragheaderlen; 1411 alloclen = datalen + fragheaderlen;
1331 1412
1332 /* 1413 if (datalen != length + fraggap) {
1333 * The last fragment gets additional space at tail. 1414 /*
1334 * Note: we overallocate on fragments with MSG_MODE 1415 * this is not the last fragment, the trailer
1335 * because we have no idea if we're the last one. 1416 * space is regarded as data space.
1336 */ 1417 */
1337 if (datalen == length + fraggap) 1418 datalen += rt->dst.trailer_len;
1338 alloclen += rt->dst.trailer_len; 1419 }
1420
1421 alloclen += rt->dst.trailer_len;
1422 fraglen = datalen + fragheaderlen;
1339 1423
1340 /* 1424 /*
1341 * We just reserve space for fragment header. 1425 * We just reserve space for fragment header.
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 36c2842a86b..848e494fa3c 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -289,6 +289,8 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
289 if ((err = register_netdevice(dev)) < 0) 289 if ((err = register_netdevice(dev)) < 0)
290 goto failed_free; 290 goto failed_free;
291 291
292 strcpy(t->parms.name, dev->name);
293
292 dev_hold(dev); 294 dev_hold(dev);
293 ip6_tnl_link(ip6n, t); 295 ip6_tnl_link(ip6n, t);
294 return t; 296 return t;
@@ -1397,7 +1399,6 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
1397 struct ip6_tnl *t = netdev_priv(dev); 1399 struct ip6_tnl *t = netdev_priv(dev);
1398 1400
1399 t->dev = dev; 1401 t->dev = dev;
1400 strcpy(t->parms.name, dev->name);
1401 dev->tstats = alloc_percpu(struct pcpu_tstats); 1402 dev->tstats = alloc_percpu(struct pcpu_tstats);
1402 if (!dev->tstats) 1403 if (!dev->tstats)
1403 return -ENOMEM; 1404 return -ENOMEM;
@@ -1477,6 +1478,7 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
1477static int __net_init ip6_tnl_init_net(struct net *net) 1478static int __net_init ip6_tnl_init_net(struct net *net)
1478{ 1479{
1479 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 1480 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1481 struct ip6_tnl *t = NULL;
1480 int err; 1482 int err;
1481 1483
1482 ip6n->tnls[0] = ip6n->tnls_wc; 1484 ip6n->tnls[0] = ip6n->tnls_wc;
@@ -1497,6 +1499,10 @@ static int __net_init ip6_tnl_init_net(struct net *net)
1497 err = register_netdev(ip6n->fb_tnl_dev); 1499 err = register_netdev(ip6n->fb_tnl_dev);
1498 if (err < 0) 1500 if (err < 0)
1499 goto err_register; 1501 goto err_register;
1502
1503 t = netdev_priv(ip6n->fb_tnl_dev);
1504
1505 strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
1500 return 0; 1506 return 0;
1501 1507
1502err_register: 1508err_register:
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 82a809901f8..86e3cc10fc2 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -696,8 +696,10 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
696 int err; 696 int err;
697 697
698 err = ip6mr_fib_lookup(net, &fl6, &mrt); 698 err = ip6mr_fib_lookup(net, &fl6, &mrt);
699 if (err < 0) 699 if (err < 0) {
700 kfree_skb(skb);
700 return err; 701 return err;
702 }
701 703
702 read_lock(&mrt_lock); 704 read_lock(&mrt_lock);
703 dev->stats.tx_bytes += skb->len; 705 dev->stats.tx_bytes += skb->len;
@@ -2051,8 +2053,10 @@ int ip6_mr_input(struct sk_buff *skb)
2051 int err; 2053 int err;
2052 2054
2053 err = ip6mr_fib_lookup(net, &fl6, &mrt); 2055 err = ip6mr_fib_lookup(net, &fl6, &mrt);
2054 if (err < 0) 2056 if (err < 0) {
2057 kfree_skb(skb);
2055 return err; 2058 return err;
2059 }
2056 2060
2057 read_lock(&mrt_lock); 2061 read_lock(&mrt_lock);
2058 cache = ip6mr_cache_find(mrt, 2062 cache = ip6mr_cache_find(mrt,
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 9cb191ecaba..147ede38ab4 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -913,7 +913,7 @@ static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt,
913} 913}
914 914
915static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, 915static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
916 char __user *optval, int __user *optlen) 916 char __user *optval, int __user *optlen, unsigned flags)
917{ 917{
918 struct ipv6_pinfo *np = inet6_sk(sk); 918 struct ipv6_pinfo *np = inet6_sk(sk);
919 int len; 919 int len;
@@ -962,7 +962,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
962 962
963 msg.msg_control = optval; 963 msg.msg_control = optval;
964 msg.msg_controllen = len; 964 msg.msg_controllen = len;
965 msg.msg_flags = 0; 965 msg.msg_flags = flags;
966 966
967 lock_sock(sk); 967 lock_sock(sk);
968 skb = np->pktoptions; 968 skb = np->pktoptions;
@@ -1222,7 +1222,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname,
1222 if(level != SOL_IPV6) 1222 if(level != SOL_IPV6)
1223 return -ENOPROTOOPT; 1223 return -ENOPROTOOPT;
1224 1224
1225 err = do_ipv6_getsockopt(sk, level, optname, optval, optlen); 1225 err = do_ipv6_getsockopt(sk, level, optname, optval, optlen, 0);
1226#ifdef CONFIG_NETFILTER 1226#ifdef CONFIG_NETFILTER
1227 /* we need to exclude all possible ENOPROTOOPTs except default case */ 1227 /* we need to exclude all possible ENOPROTOOPTs except default case */
1228 if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) { 1228 if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) {
@@ -1264,7 +1264,8 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
1264 return compat_mc_getsockopt(sk, level, optname, optval, optlen, 1264 return compat_mc_getsockopt(sk, level, optname, optval, optlen,
1265 ipv6_getsockopt); 1265 ipv6_getsockopt);
1266 1266
1267 err = do_ipv6_getsockopt(sk, level, optname, optval, optlen); 1267 err = do_ipv6_getsockopt(sk, level, optname, optval, optlen,
1268 MSG_CMSG_COMPAT);
1268#ifdef CONFIG_NETFILTER 1269#ifdef CONFIG_NETFILTER
1269 /* we need to exclude all possible ENOPROTOOPTs except default case */ 1270 /* we need to exclude all possible ENOPROTOOPTs except default case */
1270 if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) { 1271 if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) {
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 3e6ebcdb477..f2d74ea19a7 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -257,7 +257,6 @@ static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
257 257
258 if (rt) { 258 if (rt) {
259 dev = rt->rt6i_dev; 259 dev = rt->rt6i_dev;
260 dev_hold(dev);
261 dst_release(&rt->dst); 260 dst_release(&rt->dst);
262 } 261 }
263 } else 262 } else
@@ -1059,7 +1058,7 @@ static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1059 break; 1058 break;
1060 for (i=0; i<nsrcs; i++) { 1059 for (i=0; i<nsrcs; i++) {
1061 /* skip inactive filters */ 1060 /* skip inactive filters */
1062 if (pmc->mca_sfcount[MCAST_INCLUDE] || 1061 if (psf->sf_count[MCAST_INCLUDE] ||
1063 pmc->mca_sfcount[MCAST_EXCLUDE] != 1062 pmc->mca_sfcount[MCAST_EXCLUDE] !=
1064 psf->sf_count[MCAST_EXCLUDE]) 1063 psf->sf_count[MCAST_EXCLUDE])
1065 continue; 1064 continue;
@@ -2055,7 +2054,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2055 if (!delta) 2054 if (!delta)
2056 pmc->mca_sfcount[sfmode]--; 2055 pmc->mca_sfcount[sfmode]--;
2057 for (j=0; j<i; j++) 2056 for (j=0; j<i; j++)
2058 (void) ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]); 2057 ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
2059 } else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) { 2058 } else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
2060 struct ip6_sf_list *psf; 2059 struct ip6_sf_list *psf;
2061 2060
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 7596f071d30..10a8d411707 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1244,7 +1244,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1244 rt = rt6_get_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev); 1244 rt = rt6_get_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev);
1245 1245
1246 if (rt) 1246 if (rt)
1247 neigh = rt->rt6i_nexthop; 1247 neigh = dst_get_neighbour(&rt->dst);
1248 1248
1249 if (rt && lifetime == 0) { 1249 if (rt && lifetime == 0) {
1250 neigh_clone(neigh); 1250 neigh_clone(neigh);
@@ -1265,7 +1265,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1265 return; 1265 return;
1266 } 1266 }
1267 1267
1268 neigh = rt->rt6i_nexthop; 1268 neigh = dst_get_neighbour(&rt->dst);
1269 if (neigh == NULL) { 1269 if (neigh == NULL) {
1270 ND_PRINTK0(KERN_ERR 1270 ND_PRINTK0(KERN_ERR
1271 "ICMPv6 RA: %s() got default router without neighbour.\n", 1271 "ICMPv6 RA: %s() got default router without neighbour.\n",
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 448464844a2..5bbf5316920 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -174,6 +174,18 @@ config IP6_NF_TARGET_REJECT
174 174
175 To compile it as a module, choose M here. If unsure, say N. 175 To compile it as a module, choose M here. If unsure, say N.
176 176
177config IP6_NF_TARGET_REJECT_SKERR
178 bool "Force socket error when rejecting with icmp*"
179 depends on IP6_NF_TARGET_REJECT
180 default n
181 help
182 This option additionally turns a "--reject-with icmp*" into a
183 matching error on the local socket.
184 The REJECT target normally only sends an ICMP message and leaves
185 the local socket unaware of any ingress rejects.
186
187 If unsure, say N.
188
177config IP6_NF_MANGLE 189config IP6_NF_MANGLE
178 tristate "Packet mangling" 190 tristate "Packet mangling"
179 default m if NETFILTER_ADVANCED=n 191 default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 94874b0bdcd..14cb310064f 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -2292,16 +2292,15 @@ static void __exit ip6_tables_fini(void)
2292 * "No next header". 2292 * "No next header".
2293 * 2293 *
2294 * If target header is found, its offset is set in *offset and return protocol 2294 * If target header is found, its offset is set in *offset and return protocol
2295 * number. Otherwise, return -1. 2295 * number. Otherwise, return -ENOENT or -EBADMSG.
2296 * 2296 *
2297 * If the first fragment doesn't contain the final protocol header or 2297 * If the first fragment doesn't contain the final protocol header or
2298 * NEXTHDR_NONE it is considered invalid. 2298 * NEXTHDR_NONE it is considered invalid.
2299 * 2299 *
2300 * Note that non-1st fragment is special case that "the protocol number 2300 * Note that non-1st fragment is special case that "the protocol number
2301 * of last header" is "next header" field in Fragment header. In this case, 2301 * of last header" is "next header" field in Fragment header. In this case,
2302 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff 2302 * *offset is meaningless. If fragoff is not NULL, the fragment offset is
2303 * isn't NULL. 2303 * stored in *fragoff; if it is NULL, return -EINVAL.
2304 *
2305 */ 2304 */
2306int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, 2305int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2307 int target, unsigned short *fragoff) 2306 int target, unsigned short *fragoff)
@@ -2342,9 +2341,12 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2342 if (target < 0 && 2341 if (target < 0 &&
2343 ((!ipv6_ext_hdr(hp->nexthdr)) || 2342 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2344 hp->nexthdr == NEXTHDR_NONE)) { 2343 hp->nexthdr == NEXTHDR_NONE)) {
2345 if (fragoff) 2344 if (fragoff) {
2346 *fragoff = _frag_off; 2345 *fragoff = _frag_off;
2347 return hp->nexthdr; 2346 return hp->nexthdr;
2347 } else {
2348 return -EINVAL;
2349 }
2348 } 2350 }
2349 return -ENOENT; 2351 return -ENOENT;
2350 } 2352 }
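
The revised return contract is easiest to read from the caller's side. Below is a minimal sketch of a hypothetical caller (not one of the kernel's actual call sites) that wants the transport protocol of a possibly fragmented packet; the helper name handle_transport is illustrative only.

	/* Hedged sketch: honouring the revised ipv6_find_hdr() contract.
	 * "handle_transport" is a hypothetical helper, not kernel code. */
	static int handle_transport(const struct sk_buff *skb)
	{
		unsigned int thoff = 0;
		unsigned short frag_off = 0;
		int proto;

		/* Passing a non-NULL fragoff means a non-first fragment
		 * reports its "next header" value instead of failing
		 * with -EINVAL. */
		proto = ipv6_find_hdr(skb, &thoff, -1, &frag_off);
		if (proto < 0)
			return proto;	/* -ENOENT, -EBADMSG or -EINVAL */

		if (frag_off)
			return 0;	/* non-first fragment: thoff is meaningless */

		/* proto is the transport protocol, thoff its offset */
		return proto;
	}
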
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index a5a4c5dd539..09d30498c92 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -177,6 +177,15 @@ send_unreach(struct net *net, struct sk_buff *skb_in, unsigned char code,
177 skb_in->dev = net->loopback_dev; 177 skb_in->dev = net->loopback_dev;
178 178
179 icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0); 179 icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
180#ifdef CONFIG_IP6_NF_TARGET_REJECT_SKERR
181 if (skb_in->sk) {
182 icmpv6_err_convert(ICMPV6_DEST_UNREACH, code,
183 &skb_in->sk->sk_err);
184 skb_in->sk->sk_error_report(skb_in->sk);
185 pr_debug("ip6t_REJECT: sk_err=%d for skb=%p sk=%p\n",
186 skb_in->sk->sk_err, skb_in, skb_in->sk);
187 }
188#endif
180} 189}
181 190
182static unsigned int 191static unsigned int
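
With the SKERR path compiled in, the rejecting host's own sockets see the ICMPv6 error directly. A hedged userspace sketch: it assumes a matching ip6tables REJECT rule on this host and CONFIG_IP6_NF_TARGET_REJECT_SKERR enabled; the address and port are placeholders.

	/* Hedged sketch: observing the forced socket error from userspace.
	 * Assumes a matching local REJECT rule and the SKERR option;
	 * address and port below are placeholders. */
	#include <stdio.h>
	#include <string.h>
	#include <errno.h>
	#include <unistd.h>
	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <sys/socket.h>

	int main(void)
	{
		struct sockaddr_in6 dst = { .sin6_family = AF_INET6,
					    .sin6_port = htons(9) };
		int fd = socket(AF_INET6, SOCK_DGRAM, 0);
		char buf[1];

		inet_pton(AF_INET6, "2001:db8::1", &dst.sin6_addr);
		if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0 ||
		    send(fd, "x", 1, 0) < 0)
			perror("connect/send");
		/* Without SKERR the reject is invisible here; with it, a
		 * later call fails with the errno icmpv6_err_convert()
		 * derived, e.g. ECONNREFUSED for port-unreachable. */
		else if (recv(fd, buf, sizeof(buf), 0) < 0)
			printf("recv: %s\n", strerror(errno));

		close(fd);
		return 0;
	}
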
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 0ef1f086feb..7c5b4cb8838 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -356,7 +356,7 @@ out:
356#ifdef CONFIG_IPV6_ROUTER_PREF 356#ifdef CONFIG_IPV6_ROUTER_PREF
357static void rt6_probe(struct rt6_info *rt) 357static void rt6_probe(struct rt6_info *rt)
358{ 358{
359 struct neighbour *neigh = rt ? rt->rt6i_nexthop : NULL; 359 struct neighbour *neigh;
360 /* 360 /*
361 * Okay, this does not seem to be appropriate 361 * Okay, this does not seem to be appropriate
362 * for now, however, we need to check if it 362 * for now, however, we need to check if it
@@ -365,8 +365,10 @@ static void rt6_probe(struct rt6_info *rt)
365 * Router Reachability Probe MUST be rate-limited 365 * Router Reachability Probe MUST be rate-limited
366 * to no more than one per minute. 366 * to no more than one per minute.
367 */ 367 */
368 rcu_read_lock();
369 neigh = rt ? dst_get_neighbour(&rt->dst) : NULL;
368 if (!neigh || (neigh->nud_state & NUD_VALID)) 370 if (!neigh || (neigh->nud_state & NUD_VALID))
369 return; 371 goto out;
370 read_lock_bh(&neigh->lock); 372 read_lock_bh(&neigh->lock);
371 if (!(neigh->nud_state & NUD_VALID) && 373 if (!(neigh->nud_state & NUD_VALID) &&
372 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) { 374 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
@@ -379,8 +381,11 @@ static void rt6_probe(struct rt6_info *rt)
379 target = (struct in6_addr *)&neigh->primary_key; 381 target = (struct in6_addr *)&neigh->primary_key;
380 addrconf_addr_solict_mult(target, &mcaddr); 382 addrconf_addr_solict_mult(target, &mcaddr);
381 ndisc_send_ns(rt->rt6i_dev, NULL, target, &mcaddr, NULL); 383 ndisc_send_ns(rt->rt6i_dev, NULL, target, &mcaddr, NULL);
382 } else 384 } else {
383 read_unlock_bh(&neigh->lock); 385 read_unlock_bh(&neigh->lock);
386 }
387out:
388 rcu_read_unlock();
384} 389}
385#else 390#else
386static inline void rt6_probe(struct rt6_info *rt) 391static inline void rt6_probe(struct rt6_info *rt)
@@ -404,8 +409,11 @@ static inline int rt6_check_dev(struct rt6_info *rt, int oif)
404 409
405static inline int rt6_check_neigh(struct rt6_info *rt) 410static inline int rt6_check_neigh(struct rt6_info *rt)
406{ 411{
407 struct neighbour *neigh = rt->rt6i_nexthop; 412 struct neighbour *neigh;
408 int m; 413 int m;
414
415 rcu_read_lock();
416 neigh = dst_get_neighbour(&rt->dst);
409 if (rt->rt6i_flags & RTF_NONEXTHOP || 417 if (rt->rt6i_flags & RTF_NONEXTHOP ||
410 !(rt->rt6i_flags & RTF_GATEWAY)) 418 !(rt->rt6i_flags & RTF_GATEWAY))
411 m = 1; 419 m = 1;
@@ -422,6 +430,7 @@ static inline int rt6_check_neigh(struct rt6_info *rt)
422 read_unlock_bh(&neigh->lock); 430 read_unlock_bh(&neigh->lock);
423 } else 431 } else
424 m = 0; 432 m = 0;
433 rcu_read_unlock();
425 return m; 434 return m;
426} 435}
427 436
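
The conversion from rt->rt6i_nexthop to the dst neighbour accessors follows one pattern throughout this file: dst_get_neighbour() must be called under rcu_read_lock(), while dst_get_neighbour_raw() is reserved for sites that only test whether a neighbour is attached. A minimal sketch of the read-side pattern used by rt6_probe() and rt6_check_neigh() above; rt6_nexthop_valid is a hypothetical helper, not part of this patch.

	/* Hedged sketch of the RCU read-side pattern; "rt6_nexthop_valid"
	 * is a hypothetical helper, not part of this patch. */
	static bool rt6_nexthop_valid(struct rt6_info *rt)
	{
		struct neighbour *neigh;
		bool valid = false;

		rcu_read_lock();
		neigh = dst_get_neighbour(&rt->dst);	/* only valid under RCU */
		if (neigh)
			valid = !!(neigh->nud_state & NUD_VALID);
		rcu_read_unlock();	/* neigh must not be used past this point */

		return valid;
	}
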
@@ -745,8 +754,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, const struct in6_add
745 dst_free(&rt->dst); 754 dst_free(&rt->dst);
746 return NULL; 755 return NULL;
747 } 756 }
748 rt->rt6i_nexthop = neigh; 757 dst_set_neighbour(&rt->dst, neigh);
749
750 } 758 }
751 759
752 return rt; 760 return rt;
@@ -760,7 +768,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, const struct in6_a
760 rt->rt6i_dst.plen = 128; 768 rt->rt6i_dst.plen = 128;
761 rt->rt6i_flags |= RTF_CACHE; 769 rt->rt6i_flags |= RTF_CACHE;
762 rt->dst.flags |= DST_HOST; 770 rt->dst.flags |= DST_HOST;
763 rt->rt6i_nexthop = neigh_clone(ort->rt6i_nexthop); 771 dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst)));
764 } 772 }
765 return rt; 773 return rt;
766} 774}
@@ -794,7 +802,7 @@ restart:
794 dst_hold(&rt->dst); 802 dst_hold(&rt->dst);
795 read_unlock_bh(&table->tb6_lock); 803 read_unlock_bh(&table->tb6_lock);
796 804
797 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP)) 805 if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
798 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr); 806 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
799 else if (!(rt->dst.flags & DST_HOST)) 807 else if (!(rt->dst.flags & DST_HOST))
800 nrt = rt6_alloc_clone(rt, &fl6->daddr); 808 nrt = rt6_alloc_clone(rt, &fl6->daddr);
@@ -1058,7 +1066,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1058 } 1066 }
1059 1067
1060 rt->rt6i_idev = idev; 1068 rt->rt6i_idev = idev;
1061 rt->rt6i_nexthop = neigh; 1069 dst_set_neighbour(&rt->dst, neigh);
1062 atomic_set(&rt->dst.__refcnt, 1); 1070 atomic_set(&rt->dst.__refcnt, 1);
1063 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255); 1071 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
1064 rt->dst.output = ip6_output; 1072 rt->dst.output = ip6_output;
@@ -1338,12 +1346,12 @@ int ip6_route_add(struct fib6_config *cfg)
1338 rt->rt6i_prefsrc.plen = 0; 1346 rt->rt6i_prefsrc.plen = 0;
1339 1347
1340 if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) { 1348 if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
1341 rt->rt6i_nexthop = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev); 1349 struct neighbour *neigh = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
1342 if (IS_ERR(rt->rt6i_nexthop)) { 1350 if (IS_ERR(neigh)) {
1343 err = PTR_ERR(rt->rt6i_nexthop); 1351 err = PTR_ERR(neigh);
1344 rt->rt6i_nexthop = NULL;
1345 goto out; 1352 goto out;
1346 } 1353 }
1354 dst_set_neighbour(&rt->dst, neigh);
1347 } 1355 }
1348 1356
1349 rt->rt6i_flags = cfg->fc_flags; 1357 rt->rt6i_flags = cfg->fc_flags;
@@ -1574,7 +1582,7 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
1574 dst_confirm(&rt->dst); 1582 dst_confirm(&rt->dst);
1575 1583
1576 /* Duplicate redirect: silently ignore. */ 1584 /* Duplicate redirect: silently ignore. */
1577 if (neigh == rt->dst.neighbour) 1585 if (neigh == dst_get_neighbour_raw(&rt->dst))
1578 goto out; 1586 goto out;
1579 1587
1580 nrt = ip6_rt_copy(rt); 1588 nrt = ip6_rt_copy(rt);
@@ -1590,7 +1598,7 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
1590 nrt->dst.flags |= DST_HOST; 1598 nrt->dst.flags |= DST_HOST;
1591 1599
1592 ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key); 1600 ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
1593 nrt->rt6i_nexthop = neigh_clone(neigh); 1601 dst_set_neighbour(&nrt->dst, neigh_clone(neigh));
1594 1602
1595 if (ip6_ins_rt(nrt)) 1603 if (ip6_ins_rt(nrt))
1596 goto out; 1604 goto out;
@@ -1670,7 +1678,7 @@ again:
1670 1. It is connected route. Action: COW 1678 1. It is connected route. Action: COW
1671 2. It is gatewayed route or NONEXTHOP route. Action: clone it. 1679 2. It is gatewayed route or NONEXTHOP route. Action: clone it.
1672 */ 1680 */
1673 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP)) 1681 if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
1674 nrt = rt6_alloc_cow(rt, daddr, saddr); 1682 nrt = rt6_alloc_cow(rt, daddr, saddr);
1675 else 1683 else
1676 nrt = rt6_alloc_clone(rt, daddr); 1684 nrt = rt6_alloc_clone(rt, daddr);
@@ -2035,7 +2043,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2035 2043
2036 return ERR_CAST(neigh); 2044 return ERR_CAST(neigh);
2037 } 2045 }
2038 rt->rt6i_nexthop = neigh; 2046 dst_set_neighbour(&rt->dst, neigh);
2039 2047
2040 ipv6_addr_copy(&rt->rt6i_dst.addr, addr); 2048 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
2041 rt->rt6i_dst.plen = 128; 2049 rt->rt6i_dst.plen = 128;
@@ -2312,6 +2320,7 @@ static int rt6_fill_node(struct net *net,
2312 struct nlmsghdr *nlh; 2320 struct nlmsghdr *nlh;
2313 long expires; 2321 long expires;
2314 u32 table; 2322 u32 table;
2323 struct neighbour *n;
2315 2324
2316 if (prefix) { /* user wants prefix routes only */ 2325 if (prefix) { /* user wants prefix routes only */
2317 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) { 2326 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
@@ -2400,8 +2409,15 @@ static int rt6_fill_node(struct net *net,
2400 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) 2409 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2401 goto nla_put_failure; 2410 goto nla_put_failure;
2402 2411
2403 if (rt->dst.neighbour) 2412 rcu_read_lock();
2404 NLA_PUT(skb, RTA_GATEWAY, 16, &rt->dst.neighbour->primary_key); 2413 n = dst_get_neighbour(&rt->dst);
2414 if (n) {
2415 if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) {
2416 rcu_read_unlock();
2417 goto nla_put_failure;
2418 }
2419 }
2420 rcu_read_unlock();
2405 2421
2406 if (rt->dst.dev) 2422 if (rt->dst.dev)
2407 NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex); 2423 NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);
@@ -2585,6 +2601,7 @@ struct rt6_proc_arg
2585static int rt6_info_route(struct rt6_info *rt, void *p_arg) 2601static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2586{ 2602{
2587 struct seq_file *m = p_arg; 2603 struct seq_file *m = p_arg;
2604 struct neighbour *n;
2588 2605
2589 seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen); 2606 seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
2590 2607
@@ -2593,12 +2610,14 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2593#else 2610#else
2594 seq_puts(m, "00000000000000000000000000000000 00 "); 2611 seq_puts(m, "00000000000000000000000000000000 00 ");
2595#endif 2612#endif
2596 2613 rcu_read_lock();
2597 if (rt->rt6i_nexthop) { 2614 n = dst_get_neighbour(&rt->dst);
2598 seq_printf(m, "%pi6", rt->rt6i_nexthop->primary_key); 2615 if (n) {
2616 seq_printf(m, "%pi6", n->primary_key);
2599 } else { 2617 } else {
2600 seq_puts(m, "00000000000000000000000000000000"); 2618 seq_puts(m, "00000000000000000000000000000000");
2601 } 2619 }
2620 rcu_read_unlock();
2602 seq_printf(m, " %08x %08x %08x %08x %8s\n", 2621 seq_printf(m, " %08x %08x %08x %08x %8s\n",
2603 rt->rt6i_metric, atomic_read(&rt->dst.__refcnt), 2622 rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
2604 rt->dst.__use, rt->rt6i_flags, 2623 rt->dst.__use, rt->rt6i_flags,
@@ -2827,10 +2846,6 @@ static int __net_init ip6_route_net_init(struct net *net)
2827 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ; 2846 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
2828 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40; 2847 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
2829 2848
2830#ifdef CONFIG_PROC_FS
2831 proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
2832 proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
2833#endif
2834 net->ipv6.ip6_rt_gc_expire = 30*HZ; 2849 net->ipv6.ip6_rt_gc_expire = 30*HZ;
2835 2850
2836 ret = 0; 2851 ret = 0;
@@ -2851,10 +2866,6 @@ out_ip6_dst_ops:
2851 2866
2852static void __net_exit ip6_route_net_exit(struct net *net) 2867static void __net_exit ip6_route_net_exit(struct net *net)
2853{ 2868{
2854#ifdef CONFIG_PROC_FS
2855 proc_net_remove(net, "ipv6_route");
2856 proc_net_remove(net, "rt6_stats");
2857#endif
2858 kfree(net->ipv6.ip6_null_entry); 2869 kfree(net->ipv6.ip6_null_entry);
2859#ifdef CONFIG_IPV6_MULTIPLE_TABLES 2870#ifdef CONFIG_IPV6_MULTIPLE_TABLES
2860 kfree(net->ipv6.ip6_prohibit_entry); 2871 kfree(net->ipv6.ip6_prohibit_entry);
@@ -2863,11 +2874,33 @@ static void __net_exit ip6_route_net_exit(struct net *net)
2863 dst_entries_destroy(&net->ipv6.ip6_dst_ops); 2874 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
2864} 2875}
2865 2876
2877static int __net_init ip6_route_net_init_late(struct net *net)
2878{
2879#ifdef CONFIG_PROC_FS
2880 proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
2881 proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
2882#endif
2883 return 0;
2884}
2885
2886static void __net_exit ip6_route_net_exit_late(struct net *net)
2887{
2888#ifdef CONFIG_PROC_FS
2889 proc_net_remove(net, "ipv6_route");
2890 proc_net_remove(net, "rt6_stats");
2891#endif
2892}
2893
2866static struct pernet_operations ip6_route_net_ops = { 2894static struct pernet_operations ip6_route_net_ops = {
2867 .init = ip6_route_net_init, 2895 .init = ip6_route_net_init,
2868 .exit = ip6_route_net_exit, 2896 .exit = ip6_route_net_exit,
2869}; 2897};
2870 2898
2899static struct pernet_operations ip6_route_net_late_ops = {
2900 .init = ip6_route_net_init_late,
2901 .exit = ip6_route_net_exit_late,
2902};
2903
2871static struct notifier_block ip6_route_dev_notifier = { 2904static struct notifier_block ip6_route_dev_notifier = {
2872 .notifier_call = ip6_route_dev_notify, 2905 .notifier_call = ip6_route_dev_notify,
2873 .priority = 0, 2906 .priority = 0,
@@ -2917,19 +2950,25 @@ int __init ip6_route_init(void)
2917 if (ret) 2950 if (ret)
2918 goto xfrm6_init; 2951 goto xfrm6_init;
2919 2952
2953 ret = register_pernet_subsys(&ip6_route_net_late_ops);
2954 if (ret)
2955 goto fib6_rules_init;
2956
2920 ret = -ENOBUFS; 2957 ret = -ENOBUFS;
2921 if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL) || 2958 if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL) ||
2922 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL) || 2959 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL) ||
2923 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL)) 2960 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL))
2924 goto fib6_rules_init; 2961 goto out_register_late_subsys;
2925 2962
2926 ret = register_netdevice_notifier(&ip6_route_dev_notifier); 2963 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
2927 if (ret) 2964 if (ret)
2928 goto fib6_rules_init; 2965 goto out_register_late_subsys;
2929 2966
2930out: 2967out:
2931 return ret; 2968 return ret;
2932 2969
2970out_register_late_subsys:
2971 unregister_pernet_subsys(&ip6_route_net_late_ops);
2933fib6_rules_init: 2972fib6_rules_init:
2934 fib6_rules_cleanup(); 2973 fib6_rules_cleanup();
2935xfrm6_init: 2974xfrm6_init:
@@ -2948,6 +2987,7 @@ out_kmem_cache:
2948void ip6_route_cleanup(void) 2987void ip6_route_cleanup(void)
2949{ 2988{
2950 unregister_netdevice_notifier(&ip6_route_dev_notifier); 2989 unregister_netdevice_notifier(&ip6_route_dev_notifier);
2990 unregister_pernet_subsys(&ip6_route_net_late_ops);
2951 fib6_rules_cleanup(); 2991 fib6_rules_cleanup();
2952 xfrm6_fini(); 2992 xfrm6_fini();
2953 fib6_gc_cleanup(); 2993 fib6_gc_cleanup();
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 1cca5761aea..f56acd09659 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -263,6 +263,8 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
263 if (register_netdevice(dev) < 0) 263 if (register_netdevice(dev) < 0)
264 goto failed_free; 264 goto failed_free;
265 265
266 strcpy(nt->parms.name, dev->name);
267
266 dev_hold(dev); 268 dev_hold(dev);
267 269
268 ipip6_tunnel_link(sitn, nt); 270 ipip6_tunnel_link(sitn, nt);
@@ -677,7 +679,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
677 struct neighbour *neigh = NULL; 679 struct neighbour *neigh = NULL;
678 680
679 if (skb_dst(skb)) 681 if (skb_dst(skb))
680 neigh = skb_dst(skb)->neighbour; 682 neigh = dst_get_neighbour(skb_dst(skb));
681 683
682 if (neigh == NULL) { 684 if (neigh == NULL) {
683 if (net_ratelimit()) 685 if (net_ratelimit())
@@ -702,7 +704,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
702 struct neighbour *neigh = NULL; 704 struct neighbour *neigh = NULL;
703 705
704 if (skb_dst(skb)) 706 if (skb_dst(skb))
705 neigh = skb_dst(skb)->neighbour; 707 neigh = dst_get_neighbour(skb_dst(skb));
706 708
707 if (neigh == NULL) { 709 if (neigh == NULL) {
708 if (net_ratelimit()) 710 if (net_ratelimit())
@@ -1141,7 +1143,6 @@ static int ipip6_tunnel_init(struct net_device *dev)
1141 struct ip_tunnel *tunnel = netdev_priv(dev); 1143 struct ip_tunnel *tunnel = netdev_priv(dev);
1142 1144
1143 tunnel->dev = dev; 1145 tunnel->dev = dev;
1144 strcpy(tunnel->parms.name, dev->name);
1145 1146
1146 memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); 1147 memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
1147 memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); 1148 memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
@@ -1204,6 +1205,7 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea
1204static int __net_init sit_init_net(struct net *net) 1205static int __net_init sit_init_net(struct net *net)
1205{ 1206{
1206 struct sit_net *sitn = net_generic(net, sit_net_id); 1207 struct sit_net *sitn = net_generic(net, sit_net_id);
1208 struct ip_tunnel *t;
1207 int err; 1209 int err;
1208 1210
1209 sitn->tunnels[0] = sitn->tunnels_wc; 1211 sitn->tunnels[0] = sitn->tunnels_wc;
@@ -1228,6 +1230,9 @@ static int __net_init sit_init_net(struct net *net)
1228 if ((err = register_netdev(sitn->fb_tunnel_dev))) 1230 if ((err = register_netdev(sitn->fb_tunnel_dev)))
1229 goto err_reg_dev; 1231 goto err_reg_dev;
1230 1232
1233 t = netdev_priv(sitn->fb_tunnel_dev);
1234
1235 strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
1231 return 0; 1236 return 0;
1232 1237
1233err_reg_dev: 1238err_reg_dev:
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 8b9644a8b69..14b83395eed 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -165,7 +165,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
165 int mss; 165 int mss;
166 struct dst_entry *dst; 166 struct dst_entry *dst;
167 __u8 rcv_wscale; 167 __u8 rcv_wscale;
168 bool ecn_ok; 168 bool ecn_ok = false;
169 169
170 if (!sysctl_tcp_syncookies || !th->ack || th->rst) 170 if (!sysctl_tcp_syncookies || !th->ack || th->rst)
171 goto out; 171 goto out;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 87551ca568c..848f9634bbd 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -61,6 +61,7 @@
61#include <net/timewait_sock.h> 61#include <net/timewait_sock.h>
62#include <net/netdma.h> 62#include <net/netdma.h>
63#include <net/inet_common.h> 63#include <net/inet_common.h>
64#include <net/secure_seq.h>
64 65
65#include <asm/uaccess.h> 66#include <asm/uaccess.h>
66 67
@@ -604,7 +605,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
604 } 605 }
605 sk_nocaps_add(sk, NETIF_F_GSO_MASK); 606 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
606 } 607 }
607 if (tcp_alloc_md5sig_pool(sk) == NULL) { 608 if (tp->md5sig_info->entries6 == 0 &&
609 tcp_alloc_md5sig_pool(sk) == NULL) {
608 kfree(newkey); 610 kfree(newkey);
609 return -ENOMEM; 611 return -ENOMEM;
610 } 612 }
@@ -613,8 +615,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
613 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC); 615 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
614 616
615 if (!keys) { 617 if (!keys) {
616 tcp_free_md5sig_pool();
617 kfree(newkey); 618 kfree(newkey);
619 if (tp->md5sig_info->entries6 == 0)
620 tcp_free_md5sig_pool();
618 return -ENOMEM; 621 return -ENOMEM;
619 } 622 }
620 623
@@ -660,6 +663,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
660 kfree(tp->md5sig_info->keys6); 663 kfree(tp->md5sig_info->keys6);
661 tp->md5sig_info->keys6 = NULL; 664 tp->md5sig_info->keys6 = NULL;
662 tp->md5sig_info->alloced6 = 0; 665 tp->md5sig_info->alloced6 = 0;
666 tcp_free_md5sig_pool();
663 } else { 667 } else {
664 /* shrink the database */ 668 /* shrink the database */
665 if (tp->md5sig_info->entries6 != i) 669 if (tp->md5sig_info->entries6 != i)
@@ -668,7 +672,6 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
668 (tp->md5sig_info->entries6 - i) 672 (tp->md5sig_info->entries6 - i)
669 * sizeof (tp->md5sig_info->keys6[0])); 673 * sizeof (tp->md5sig_info->keys6[0]));
670 } 674 }
671 tcp_free_md5sig_pool();
672 return 0; 675 return 0;
673 } 676 }
674 } 677 }
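
These add/del paths are driven by the TCP_MD5SIG socket option: installing a key for an IPv6 peer reaches tcp_v6_md5_do_add() (which now allocates the signing pool only for the first key), and clearing it reaches tcp_v6_md5_do_del() (which now frees the pool only with the last key). A hedged userspace sketch; the peer address and key value are placeholders.

	/* Hedged sketch: the userspace API whose add/del paths are patched
	 * above. Peer address and key are placeholders. */
	#include <string.h>
	#include <arpa/inet.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/tcp.h>	/* TCP_MD5SIG, struct tcp_md5sig */

	static int set_md5_key(int fd, const char *peer6, const char *key)
	{
		struct tcp_md5sig md5 = { 0 };
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&md5.tcpm_addr;

		sin6->sin6_family = AF_INET6;
		inet_pton(AF_INET6, peer6, &sin6->sin6_addr);
		md5.tcpm_keylen = strlen(key);	/* a length of 0 deletes the key */
		memcpy(md5.tcpm_key, key, md5.tcpm_keylen);

		return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG,
				  &md5, sizeof(md5));
	}
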
@@ -1093,7 +1096,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1093 1096
1094#ifdef CONFIG_TCP_MD5SIG 1097#ifdef CONFIG_TCP_MD5SIG
1095 if (sk) 1098 if (sk)
1096 key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr); 1099 key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr);
1097#endif 1100#endif
1098 1101
1099 if (th->ack) 1102 if (th->ack)
@@ -1406,6 +1409,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1406 newtp->af_specific = &tcp_sock_ipv6_mapped_specific; 1409 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1407#endif 1410#endif
1408 1411
1412 newnp->ipv6_ac_list = NULL;
1413 newnp->ipv6_fl_list = NULL;
1409 newnp->pktoptions = NULL; 1414 newnp->pktoptions = NULL;
1410 newnp->opt = NULL; 1415 newnp->opt = NULL;
1411 newnp->mcast_oif = inet6_iif(skb); 1416 newnp->mcast_oif = inet6_iif(skb);
@@ -1470,6 +1475,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1470 First: no IPv4 options. 1475 First: no IPv4 options.
1471 */ 1476 */
1472 newinet->inet_opt = NULL; 1477 newinet->inet_opt = NULL;
1478 newnp->ipv6_ac_list = NULL;
1473 newnp->ipv6_fl_list = NULL; 1479 newnp->ipv6_fl_list = NULL;
1474 1480
1475 /* Clone RX bits */ 1481 /* Clone RX bits */
@@ -1508,6 +1514,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1508 tcp_mtup_init(newsk); 1514 tcp_mtup_init(newsk);
1509 tcp_sync_mss(newsk, dst_mtu(dst)); 1515 tcp_sync_mss(newsk, dst_mtu(dst));
1510 newtp->advmss = dst_metric_advmss(dst); 1516 newtp->advmss = dst_metric_advmss(dst);
1517 if (tcp_sk(sk)->rx_opt.user_mss &&
1518 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1519 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1520
1511 tcp_initialize_rcv_mss(newsk); 1521 tcp_initialize_rcv_mss(newsk);
1512 1522
1513 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6; 1523 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 328985c4088..0d920c58de6 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1309,6 +1309,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
1309 u8 frag_hdr_sz = sizeof(struct frag_hdr); 1309 u8 frag_hdr_sz = sizeof(struct frag_hdr);
1310 int offset; 1310 int offset;
1311 __wsum csum; 1311 __wsum csum;
1312 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
1312 1313
1313 mss = skb_shinfo(skb)->gso_size; 1314 mss = skb_shinfo(skb)->gso_size;
1314 if (unlikely(skb->len <= mss)) 1315 if (unlikely(skb->len <= mss))
@@ -1359,7 +1360,8 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
1359 fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); 1360 fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
1360 fptr->nexthdr = nexthdr; 1361 fptr->nexthdr = nexthdr;
1361 fptr->reserved = 0; 1362 fptr->reserved = 0;
1362 ipv6_select_ident(fptr); 1363 ipv6_select_ident(fptr,
1364 rt ? &rt->rt6i_dst.addr : &ipv6_hdr(skb)->daddr);
1363 1365
1364 /* Fragment the skb. ipv6 header and the remaining fields of the 1366 /* Fragment the skb. ipv6 header and the remaining fields of the
1365 * fragment header are updated in ipv6_gso_segment() 1367 * fragment header are updated in ipv6_gso_segment()
diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c
index 3437d7d4eed..f37cba9e689 100644
--- a/net/ipv6/xfrm6_mode_beet.c
+++ b/net/ipv6/xfrm6_mode_beet.c
@@ -80,7 +80,6 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb)
80static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb) 80static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
81{ 81{
82 struct ipv6hdr *ip6h; 82 struct ipv6hdr *ip6h;
83 const unsigned char *old_mac;
84 int size = sizeof(struct ipv6hdr); 83 int size = sizeof(struct ipv6hdr);
85 int err; 84 int err;
86 85
@@ -90,10 +89,7 @@ static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
90 89
91 __skb_push(skb, size); 90 __skb_push(skb, size);
92 skb_reset_network_header(skb); 91 skb_reset_network_header(skb);
93 92 skb_mac_header_rebuild(skb);
94 old_mac = skb_mac_header(skb);
95 skb_set_mac_header(skb, -skb->mac_len);
96 memmove(skb_mac_header(skb), old_mac, skb->mac_len);
97 93
98 xfrm6_beet_make_header(skb); 94 xfrm6_beet_make_header(skb);
99 95
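
This input path (and the tunnel-mode one below) now uses a common helper instead of the removed open-coded sequence. A sketch of the helper's shape, assuming it simply wraps the removed three lines behind a check that a mac header was set at all (see skb_mac_header_rebuild() in skbuff.h):

	/* Hedged sketch of the helper's shape; it mirrors the removed
	 * open-coded rebuild, skipped when no mac header was set. */
	static inline void skb_mac_header_rebuild(struct sk_buff *skb)
	{
		if (skb_mac_header_was_set(skb)) {
			const unsigned char *old_mac = skb_mac_header(skb);

			skb_set_mac_header(skb, -skb->mac_len);
			memmove(skb_mac_header(skb), old_mac, skb->mac_len);
		}
	}
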
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 4d6edff0498..23ecd68a5e6 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -63,7 +63,6 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
63static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) 63static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
64{ 64{
65 int err = -EINVAL; 65 int err = -EINVAL;
66 const unsigned char *old_mac;
67 66
68 if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6) 67 if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6)
69 goto out; 68 goto out;
@@ -80,10 +79,9 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
80 if (!(x->props.flags & XFRM_STATE_NOECN)) 79 if (!(x->props.flags & XFRM_STATE_NOECN))
81 ipip6_ecn_decapsulate(skb); 80 ipip6_ecn_decapsulate(skb);
82 81
83 old_mac = skb_mac_header(skb);
84 skb_set_mac_header(skb, -skb->mac_len);
85 memmove(skb_mac_header(skb), old_mac, skb->mac_len);
86 skb_reset_network_header(skb); 82 skb_reset_network_header(skb);
83 skb_mac_header_rebuild(skb);
84
87 err = 0; 85 err = 0;
88 86
89out: 87out:
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index ed8a2335442..71c292e3e03 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1045,8 +1045,10 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1045 headroom = NET_SKB_PAD + sizeof(struct iphdr) + 1045 headroom = NET_SKB_PAD + sizeof(struct iphdr) +
1046 uhlen + hdr_len; 1046 uhlen + hdr_len;
1047 old_headroom = skb_headroom(skb); 1047 old_headroom = skb_headroom(skb);
1048 if (skb_cow_head(skb, headroom)) 1048 if (skb_cow_head(skb, headroom)) {
1049 dev_kfree_skb(skb);
1049 goto abort; 1050 goto abort;
1051 }
1050 1052
1051 new_headroom = skb_headroom(skb); 1053 new_headroom = skb_headroom(skb);
1052 skb_orphan(skb); 1054 skb_orphan(skb);
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index a8193f52c13..3c55f633928 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -103,7 +103,7 @@ static struct net_device_ops l2tp_eth_netdev_ops = {
103static void l2tp_eth_dev_setup(struct net_device *dev) 103static void l2tp_eth_dev_setup(struct net_device *dev)
104{ 104{
105 ether_setup(dev); 105 ether_setup(dev);
106 106 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
107 dev->netdev_ops = &l2tp_eth_netdev_ops; 107 dev->netdev_ops = &l2tp_eth_netdev_ops;
108 dev->destructor = free_netdev; 108 dev->destructor = free_netdev;
109} 109}
@@ -167,6 +167,7 @@ static void l2tp_eth_delete(struct l2tp_session *session)
167 if (dev) { 167 if (dev) {
168 unregister_netdev(dev); 168 unregister_netdev(dev);
169 spriv->dev = NULL; 169 spriv->dev = NULL;
170 module_put(THIS_MODULE);
170 } 171 }
171 } 172 }
172} 173}
@@ -254,6 +255,7 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
254 if (rc < 0) 255 if (rc < 0)
255 goto out_del_dev; 256 goto out_del_dev;
256 257
258 __module_get(THIS_MODULE);
257 /* Must be done after register_netdev() */ 259 /* Must be done after register_netdev() */
258 strlcpy(session->ifname, dev->name, IFNAMSIZ); 260 strlcpy(session->ifname, dev->name, IFNAMSIZ);
259 261
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index b6466e71f5e..78bc442b2b6 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -251,9 +251,16 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
251{ 251{
252 struct inet_sock *inet = inet_sk(sk); 252 struct inet_sock *inet = inet_sk(sk);
253 struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr; 253 struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
254 int ret = -EINVAL; 254 int ret;
255 int chk_addr_ret; 255 int chk_addr_ret;
256 256
257 if (!sock_flag(sk, SOCK_ZAPPED))
258 return -EINVAL;
259 if (addr_len < sizeof(struct sockaddr_l2tpip))
260 return -EINVAL;
261 if (addr->l2tp_family != AF_INET)
262 return -EINVAL;
263
257 ret = -EADDRINUSE; 264 ret = -EADDRINUSE;
258 read_lock_bh(&l2tp_ip_lock); 265 read_lock_bh(&l2tp_ip_lock);
259 if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id)) 266 if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id))
@@ -283,6 +290,8 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
283 sk_del_node_init(sk); 290 sk_del_node_init(sk);
284 write_unlock_bh(&l2tp_ip_lock); 291 write_unlock_bh(&l2tp_ip_lock);
285 ret = 0; 292 ret = 0;
293 sock_reset_flag(sk, SOCK_ZAPPED);
294
286out: 295out:
287 release_sock(sk); 296 release_sock(sk);
288 297
@@ -303,13 +312,14 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
303 __be32 saddr; 312 __be32 saddr;
304 int oif, rc; 313 int oif, rc;
305 314
306 rc = -EINVAL; 315 if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
316 return -EINVAL;
317
307 if (addr_len < sizeof(*lsa)) 318 if (addr_len < sizeof(*lsa))
308 goto out; 319 return -EINVAL;
309 320
310 rc = -EAFNOSUPPORT;
311 if (lsa->l2tp_family != AF_INET) 321 if (lsa->l2tp_family != AF_INET)
312 goto out; 322 return -EAFNOSUPPORT;
313 323
314 lock_sock(sk); 324 lock_sock(sk);
315 325
@@ -363,6 +373,14 @@ out:
363 return rc; 373 return rc;
364} 374}
365 375
376static int l2tp_ip_disconnect(struct sock *sk, int flags)
377{
378 if (sock_flag(sk, SOCK_ZAPPED))
379 return 0;
380
381 return udp_disconnect(sk, flags);
382}
383
366static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr, 384static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
367 int *uaddr_len, int peer) 385 int *uaddr_len, int peer)
368{ 386{
@@ -393,11 +411,6 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
393{ 411{
394 int rc; 412 int rc;
395 413
396 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
397 goto drop;
398
399 nf_reset(skb);
400
401 /* Charge it to the socket, dropping if the queue is full. */ 414 /* Charge it to the socket, dropping if the queue is full. */
402 rc = sock_queue_rcv_skb(sk, skb); 415 rc = sock_queue_rcv_skb(sk, skb);
403 if (rc < 0) 416 if (rc < 0)
@@ -446,8 +459,9 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
446 459
447 daddr = lip->l2tp_addr.s_addr; 460 daddr = lip->l2tp_addr.s_addr;
448 } else { 461 } else {
462 rc = -EDESTADDRREQ;
449 if (sk->sk_state != TCP_ESTABLISHED) 463 if (sk->sk_state != TCP_ESTABLISHED)
450 return -EDESTADDRREQ; 464 goto out;
451 465
452 daddr = inet->inet_daddr; 466 daddr = inet->inet_daddr;
453 connected = 1; 467 connected = 1;
@@ -595,7 +609,7 @@ static struct proto l2tp_ip_prot = {
595 .close = l2tp_ip_close, 609 .close = l2tp_ip_close,
596 .bind = l2tp_ip_bind, 610 .bind = l2tp_ip_bind,
597 .connect = l2tp_ip_connect, 611 .connect = l2tp_ip_connect,
598 .disconnect = udp_disconnect, 612 .disconnect = l2tp_ip_disconnect,
599 .ioctl = udp_ioctl, 613 .ioctl = udp_ioctl,
600 .destroy = l2tp_ip_destroy_sock, 614 .destroy = l2tp_ip_destroy_sock,
601 .setsockopt = ip_setsockopt, 615 .setsockopt = ip_setsockopt,
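
The SOCK_ZAPPED checks encode a strict ordering for L2TP/IP sockets: bind() must succeed before connect() (autobinding does not work), and disconnecting a never-bound socket is now a no-op. A hedged userspace sketch of that ordering; the addresses and the connection ID (42) are placeholders.

	/* Hedged sketch: the bind-before-connect ordering the SOCK_ZAPPED
	 * checks enforce. Addresses and the connection ID are placeholders. */
	#include <stdio.h>
	#include <unistd.h>
	#include <arpa/inet.h>
	#include <sys/socket.h>
	#include <linux/l2tp.h>		/* struct sockaddr_l2tpip, IPPROTO_L2TP */

	int main(void)
	{
		struct sockaddr_l2tpip local = { 0 }, peer = { 0 };
		int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);

		local.l2tp_family = AF_INET;
		local.l2tp_addr.s_addr = htonl(INADDR_ANY);
		local.l2tp_conn_id = 42;

		peer.l2tp_family = AF_INET;
		inet_pton(AF_INET, "192.0.2.1", &peer.l2tp_addr);
		peer.l2tp_conn_id = 42;

		/* connect() before bind() now fails with EINVAL */
		if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0)
			perror("bind");
		else if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0)
			perror("connect");

		close(fd);
		return 0;
	}
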
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 39a21d0c61c..13f9868e694 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -908,7 +908,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
908 goto end_put_sess; 908 goto end_put_sess;
909 } 909 }
910 910
911 inet = inet_sk(sk); 911 inet = inet_sk(tunnel->sock);
912 if (tunnel->version == 2) { 912 if (tunnel->version == 2) {
913 struct sockaddr_pppol2tp sp; 913 struct sockaddr_pppol2tp sp;
914 len = sizeof(sp); 914 len = sizeof(sp);
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index dfd3a648a55..a18e6c3d36e 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -833,15 +833,15 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
833 copied += used; 833 copied += used;
834 len -= used; 834 len -= used;
835 835
836 /* For non stream protocols we get one packet per recvmsg call */
837 if (sk->sk_type != SOCK_STREAM)
838 goto copy_uaddr;
839
836 if (!(flags & MSG_PEEK)) { 840 if (!(flags & MSG_PEEK)) {
837 sk_eat_skb(sk, skb, 0); 841 sk_eat_skb(sk, skb, 0);
838 *seq = 0; 842 *seq = 0;
839 } 843 }
840 844
841 /* For non stream protocols we get one packet per recvmsg call */
842 if (sk->sk_type != SOCK_STREAM)
843 goto copy_uaddr;
844
845 /* Partial read */ 845 /* Partial read */
846 if (used + offset < skb->len) 846 if (used + offset < skb->len)
847 continue; 847 continue;
@@ -857,6 +857,12 @@ copy_uaddr:
857 } 857 }
858 if (llc_sk(sk)->cmsg_flags) 858 if (llc_sk(sk)->cmsg_flags)
859 llc_cmsg_rcv(msg, skb); 859 llc_cmsg_rcv(msg, skb);
860
861 if (!(flags & MSG_PEEK)) {
862 sk_eat_skb(sk, skb, 0);
863 *seq = 0;
864 }
865
860 goto out; 866 goto out;
861} 867}
862 868
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 9c0d76cdca9..1a41b1423d2 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -48,6 +48,8 @@ static void ieee80211_free_tid_rx(struct rcu_head *h)
48 container_of(h, struct tid_ampdu_rx, rcu_head); 48 container_of(h, struct tid_ampdu_rx, rcu_head);
49 int i; 49 int i;
50 50
51 del_timer_sync(&tid_rx->reorder_timer);
52
51 for (i = 0; i < tid_rx->buf_size; i++) 53 for (i = 0; i < tid_rx->buf_size; i++)
52 dev_kfree_skb(tid_rx->reorder_buf[i]); 54 dev_kfree_skb(tid_rx->reorder_buf[i]);
53 kfree(tid_rx->reorder_buf); 55 kfree(tid_rx->reorder_buf);
@@ -87,7 +89,6 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
87 tid, 0, reason); 89 tid, 0, reason);
88 90
89 del_timer_sync(&tid_rx->session_timer); 91 del_timer_sync(&tid_rx->session_timer);
90 del_timer_sync(&tid_rx->reorder_timer);
91 92
92 call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx); 93 call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
93} 94}
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index c8be8eff70d..b7f4f5c1f69 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -162,6 +162,12 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
162 return -ENOENT; 162 return -ENOENT;
163 } 163 }
164 164
165 /* if we're already stopping, ignore any new requests to stop */
166 if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
167 spin_unlock_bh(&sta->lock);
168 return -EALREADY;
169 }
170
165 if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) { 171 if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
166 /* not even started yet! */ 172 /* not even started yet! */
167 ieee80211_assign_tid_tx(sta, tid, NULL); 173 ieee80211_assign_tid_tx(sta, tid, NULL);
@@ -170,6 +176,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
170 return 0; 176 return 0;
171 } 177 }
172 178
179 set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
180
173 spin_unlock_bh(&sta->lock); 181 spin_unlock_bh(&sta->lock);
174 182
175#ifdef CONFIG_MAC80211_HT_DEBUG 183#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -177,8 +185,6 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
177 sta->sta.addr, tid); 185 sta->sta.addr, tid);
178#endif /* CONFIG_MAC80211_HT_DEBUG */ 186#endif /* CONFIG_MAC80211_HT_DEBUG */
179 187
180 set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
181
182 del_timer_sync(&tid_tx->addba_resp_timer); 188 del_timer_sync(&tid_tx->addba_resp_timer);
183 189
184 /* 190 /*
@@ -188,6 +194,20 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
188 */ 194 */
189 clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state); 195 clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
190 196
197 /*
198 * There might be a few packets being processed right now (on
199 * another CPU) that have already gotten past the aggregation
200 * check when it was still OPERATIONAL and consequently have
201 * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
202 * call into the driver at the same time or even before the
203 * TX path calls into it, which could confuse the driver.
204 *
205 * Wait for all currently running TX paths to finish before
206 * telling the driver. New packets will not go through since
207 * the aggregation session is no longer OPERATIONAL.
208 */
209 synchronize_net();
210
191 tid_tx->stop_initiator = initiator; 211 tid_tx->stop_initiator = initiator;
192 tid_tx->tx_stop = tx; 212 tid_tx->tx_stop = tx;
193 213
@@ -284,6 +304,38 @@ ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
284 __release(agg_queue); 304 __release(agg_queue);
285} 305}
286 306
307/*
308 * splice packets from the STA's pending to the local pending,
309 * requires a call to ieee80211_agg_splice_finish later
310 */
311static void __acquires(agg_queue)
312ieee80211_agg_splice_packets(struct ieee80211_local *local,
313 struct tid_ampdu_tx *tid_tx, u16 tid)
314{
315 int queue = ieee80211_ac_from_tid(tid);
316 unsigned long flags;
317
318 ieee80211_stop_queue_agg(local, tid);
319
320 if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
321 " from the pending queue\n", tid))
322 return;
323
324 if (!skb_queue_empty(&tid_tx->pending)) {
325 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
326 /* copy over remaining packets */
327 skb_queue_splice_tail_init(&tid_tx->pending,
328 &local->pending[queue]);
329 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
330 }
331}
332
333static void __releases(agg_queue)
334ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
335{
336 ieee80211_wake_queue_agg(local, tid);
337}
338
287void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) 339void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
288{ 340{
289 struct tid_ampdu_tx *tid_tx; 341 struct tid_ampdu_tx *tid_tx;
@@ -295,19 +347,17 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
295 tid_tx = rcu_dereference_protected_tid_tx(sta, tid); 347 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
296 348
297 /* 349 /*
298 * While we're asking the driver about the aggregation, 350 * Start queuing up packets for this aggregation session.
299 * stop the AC queue so that we don't have to worry 351 * We're going to release them once the driver is OK with
300 * about frames that came in while we were doing that, 352 * that.
301 * which would require us to put them to the AC pending
302 * afterwards which just makes the code more complex.
303 */ 353 */
304 ieee80211_stop_queue_agg(local, tid);
305
306 clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state); 354 clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
307 355
308 /* 356 /*
309 * make sure no packets are being processed to get 357 * Make sure no packets are being processed. This ensures that
310 * valid starting sequence number 358 * we have a valid starting sequence number and that in-flight
359 * packets have been flushed out and no packets for this TID
360 * will go into the driver during the ampdu_action call.
311 */ 361 */
312 synchronize_net(); 362 synchronize_net();
313 363
@@ -321,17 +371,15 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
321 " tid %d\n", tid); 371 " tid %d\n", tid);
322#endif 372#endif
323 spin_lock_bh(&sta->lock); 373 spin_lock_bh(&sta->lock);
374 ieee80211_agg_splice_packets(local, tid_tx, tid);
324 ieee80211_assign_tid_tx(sta, tid, NULL); 375 ieee80211_assign_tid_tx(sta, tid, NULL);
376 ieee80211_agg_splice_finish(local, tid);
325 spin_unlock_bh(&sta->lock); 377 spin_unlock_bh(&sta->lock);
326 378
327 ieee80211_wake_queue_agg(local, tid);
328 kfree_rcu(tid_tx, rcu_head); 379 kfree_rcu(tid_tx, rcu_head);
329 return; 380 return;
330 } 381 }
331 382
332 /* we can take packets again now */
333 ieee80211_wake_queue_agg(local, tid);
334
335 /* activate the timer for the recipient's addBA response */ 383 /* activate the timer for the recipient's addBA response */
336 mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL); 384 mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
337#ifdef CONFIG_MAC80211_HT_DEBUG 385#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -451,38 +499,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
451} 499}
452EXPORT_SYMBOL(ieee80211_start_tx_ba_session); 500EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
453 501
454/*
455 * splice packets from the STA's pending to the local pending,
456 * requires a call to ieee80211_agg_splice_finish later
457 */
458static void __acquires(agg_queue)
459ieee80211_agg_splice_packets(struct ieee80211_local *local,
460 struct tid_ampdu_tx *tid_tx, u16 tid)
461{
462 int queue = ieee80211_ac_from_tid(tid);
463 unsigned long flags;
464
465 ieee80211_stop_queue_agg(local, tid);
466
467 if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
468 " from the pending queue\n", tid))
469 return;
470
471 if (!skb_queue_empty(&tid_tx->pending)) {
472 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
473 /* copy over remaining packets */
474 skb_queue_splice_tail_init(&tid_tx->pending,
475 &local->pending[queue]);
476 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
477 }
478}
479
480static void __releases(agg_queue)
481ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
482{
483 ieee80211_wake_queue_agg(local, tid);
484}
485
486static void ieee80211_agg_tx_operational(struct ieee80211_local *local, 502static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
487 struct sta_info *sta, u16 tid) 503 struct sta_info *sta, u16 tid)
488{ 504{
@@ -772,12 +788,27 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
772 goto out; 788 goto out;
773 } 789 }
774 790
775 del_timer(&tid_tx->addba_resp_timer); 791 del_timer_sync(&tid_tx->addba_resp_timer);
776 792
777#ifdef CONFIG_MAC80211_HT_DEBUG 793#ifdef CONFIG_MAC80211_HT_DEBUG
778 printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid); 794 printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
779#endif 795#endif
780 796
797 /*
798 * addba_resp_timer may have fired before we got here, and
799 * caused WANT_STOP to be set. If the stop then was already
800 * processed further, STOPPING might be set.
801 */
802 if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
803 test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
804#ifdef CONFIG_MAC80211_HT_DEBUG
805 printk(KERN_DEBUG
806 "got addBA resp for tid %d but we already gave up\n",
807 tid);
808#endif
809 goto out;
810 }
811
781 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) 812 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
782 == WLAN_STATUS_SUCCESS) { 813 == WLAN_STATUS_SUCCESS) {
783 /* 814 /*
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index be70c70d3f5..143a0064348 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1798,7 +1798,7 @@ ieee80211_offchan_tx_done(struct ieee80211_work *wk, struct sk_buff *skb)
1798 * so in that case userspace will have to deal with it. 1798 * so in that case userspace will have to deal with it.
1799 */ 1799 */
1800 1800
1801 if (wk->offchan_tx.wait && wk->offchan_tx.frame) 1801 if (wk->offchan_tx.wait && !wk->offchan_tx.status)
1802 cfg80211_mgmt_tx_status(wk->sdata->dev, 1802 cfg80211_mgmt_tx_status(wk->sdata->dev,
1803 (unsigned long) wk->offchan_tx.frame, 1803 (unsigned long) wk->offchan_tx.frame,
1804 wk->ie, wk->ie_len, false, GFP_KERNEL); 1804 wk->ie, wk->ie_len, false, GFP_KERNEL);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 090b0ec1e05..3fdac77b9cc 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -328,6 +328,7 @@ struct ieee80211_work {
328 struct { 328 struct {
329 struct sk_buff *frame; 329 struct sk_buff *frame;
330 u32 wait; 330 u32 wait;
331 bool status;
331 } offchan_tx; 332 } offchan_tx;
332 }; 333 };
333 334
@@ -372,6 +373,7 @@ struct ieee80211_if_managed {
372 373
373 unsigned long timers_running; /* used for quiesce/restart */ 374 unsigned long timers_running; /* used for quiesce/restart */
374 bool powersave; /* powersave requested for this iface */ 375 bool powersave; /* powersave requested for this iface */
376 bool broken_ap; /* AP is broken -- turn off powersave */
375 enum ieee80211_smps_mode req_smps, /* requested smps mode */ 377 enum ieee80211_smps_mode req_smps, /* requested smps mode */
376 ap_smps, /* smps mode AP thinks we're in */ 378 ap_smps, /* smps mode AP thinks we're in */
377 driver_smps_mode; /* smps mode request */ 379 driver_smps_mode; /* smps mode request */
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index dee30aea9ab..65f3764c5aa 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -498,6 +498,18 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
498 ieee80211_configure_filter(local); 498 ieee80211_configure_filter(local);
499 break; 499 break;
500 default: 500 default:
501 mutex_lock(&local->mtx);
502 if (local->hw_roc_dev == sdata->dev &&
503 local->hw_roc_channel) {
504 /* ignore return value since this is racy */
505 drv_cancel_remain_on_channel(local);
506 ieee80211_queue_work(&local->hw, &local->hw_roc_done);
507 }
508 mutex_unlock(&local->mtx);
509
510 flush_work(&local->hw_roc_start);
511 flush_work(&local->hw_roc_done);
512
501 flush_work(&sdata->work); 513 flush_work(&sdata->work);
502 /* 514 /*
503 * When we get here, the interface is marked down. 515 * When we get here, the interface is marked down.
@@ -699,6 +711,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
699static void ieee80211_if_setup(struct net_device *dev) 711static void ieee80211_if_setup(struct net_device *dev)
700{ 712{
701 ether_setup(dev); 713 ether_setup(dev);
714 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
702 dev->netdev_ops = &ieee80211_dataif_ops; 715 dev->netdev_ops = &ieee80211_dataif_ops;
703 dev->destructor = free_netdev; 716 dev->destructor = free_netdev;
704} 717}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 866f269183c..1e36fb3318c 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -910,6 +910,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
910 wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n", 910 wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
911 result); 911 result);
912 912
913 ieee80211_led_init(local);
914
913 rtnl_lock(); 915 rtnl_lock();
914 916
915 result = ieee80211_init_rate_ctrl_alg(local, 917 result = ieee80211_init_rate_ctrl_alg(local,
@@ -931,8 +933,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
931 933
932 rtnl_unlock(); 934 rtnl_unlock();
933 935
934 ieee80211_led_init(local);
935
936 local->network_latency_notifier.notifier_call = 936 local->network_latency_notifier.notifier_call =
937 ieee80211_max_network_latency; 937 ieee80211_max_network_latency;
938 result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY, 938 result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 29e9980c8e6..370aa94ead1 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -490,6 +490,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
490 490
491 del_timer_sync(&sdata->u.mesh.housekeeping_timer); 491 del_timer_sync(&sdata->u.mesh.housekeeping_timer);
492 del_timer_sync(&sdata->u.mesh.mesh_path_root_timer); 492 del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
493 del_timer_sync(&sdata->u.mesh.mesh_path_timer);
493 /* 494 /*
494 * If the timer fired while we waited for it, it will have 495 * If the timer fired while we waited for it, it will have
495 * requeued the work. Now the work will be running again 496 * requeued the work. Now the work will be running again
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index d595265d6c2..1563250a557 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -613,6 +613,9 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
613 if (!mgd->powersave) 613 if (!mgd->powersave)
614 return false; 614 return false;
615 615
616 if (mgd->broken_ap)
617 return false;
618
616 if (!mgd->associated) 619 if (!mgd->associated)
617 return false; 620 return false;
618 621
@@ -1450,10 +1453,21 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1450 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); 1453 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
1451 1454
1452 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) 1455 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
1453 printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not " 1456 printk(KERN_DEBUG
1454 "set\n", sdata->name, aid); 1457 "%s: invalid AID value 0x%x; bits 15:14 not set\n",
1458 sdata->name, aid);
1455 aid &= ~(BIT(15) | BIT(14)); 1459 aid &= ~(BIT(15) | BIT(14));
1456 1460
1461 ifmgd->broken_ap = false;
1462
1463 if (aid == 0 || aid > IEEE80211_MAX_AID) {
1464 printk(KERN_DEBUG
1465 "%s: invalid AID value %d (out of range), turn off PS\n",
1466 sdata->name, aid);
1467 aid = 0;
1468 ifmgd->broken_ap = true;
1469 }
1470
1457 pos = mgmt->u.assoc_resp.variable; 1471 pos = mgmt->u.assoc_resp.variable;
1458 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); 1472 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1459 1473
@@ -2200,6 +2214,9 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
2200{ 2214{
2201 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2215 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2202 2216
2217 if (!ifmgd->associated)
2218 return;
2219
2203 if (test_and_clear_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running)) 2220 if (test_and_clear_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running))
2204 add_timer(&ifmgd->timer); 2221 add_timer(&ifmgd->timer);
2205 if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running)) 2222 if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running))
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 13427b194ce..c55eb9d8ea5 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -251,6 +251,22 @@ static void ieee80211_hw_roc_done(struct work_struct *work)
251 return; 251 return;
252 } 252 }
253 253
254 /* was never transmitted */
255 if (local->hw_roc_skb) {
256 u64 cookie;
257
258 cookie = local->hw_roc_cookie ^ 2;
259
260 cfg80211_mgmt_tx_status(local->hw_roc_dev, cookie,
261 local->hw_roc_skb->data,
262 local->hw_roc_skb->len, false,
263 GFP_KERNEL);
264
265 kfree_skb(local->hw_roc_skb);
266 local->hw_roc_skb = NULL;
267 local->hw_roc_skb_for_status = NULL;
268 }
269
254 if (!local->hw_roc_for_tx) 270 if (!local->hw_roc_for_tx)
255 cfg80211_remain_on_channel_expired(local->hw_roc_dev, 271 cfg80211_remain_on_channel_expired(local->hw_roc_dev,
256 local->hw_roc_cookie, 272 local->hw_roc_cookie,
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 3d5a2cb835c..816590b0d7f 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -314,7 +314,7 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
314 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { 314 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
315 info->control.rates[i].idx = -1; 315 info->control.rates[i].idx = -1;
316 info->control.rates[i].flags = 0; 316 info->control.rates[i].flags = 0;
317 info->control.rates[i].count = 1; 317 info->control.rates[i].count = 0;
318 } 318 }
319 319
320 if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) 320 if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 7fa8c6be7bf..667f5590576 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -140,8 +140,9 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
140 pos++; 140 pos++;
141 141
142 /* IEEE80211_RADIOTAP_RATE */ 142 /* IEEE80211_RADIOTAP_RATE */
143 if (status->flag & RX_FLAG_HT) { 143 if (!rate || status->flag & RX_FLAG_HT) {
144 /* 144 /*
145 * Without rate information don't add it. If we have,
145 * MCS information is a separate field in radiotap, 146 * MCS information is a separate field in radiotap,
146 * added below. The byte here is needed as padding 147 * added below. The byte here is needed as padding
147 * for the channel though, so initialise it to 0. 148 * for the channel though, so initialise it to 0.
@@ -162,12 +163,14 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
162 else if (status->flag & RX_FLAG_HT) 163 else if (status->flag & RX_FLAG_HT)
163 put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ, 164 put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
164 pos); 165 pos);
165 else if (rate->flags & IEEE80211_RATE_ERP_G) 166 else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
166 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ, 167 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
167 pos); 168 pos);
168 else 169 else if (rate)
169 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ, 170 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
170 pos); 171 pos);
172 else
173 put_unaligned_le16(IEEE80211_CHAN_2GHZ, pos);
171 pos += 2; 174 pos += 2;
172 175
173 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */ 176 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
@@ -607,7 +610,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
607 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % 610 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
608 tid_agg_rx->buf_size; 611 tid_agg_rx->buf_size;
609 if (!tid_agg_rx->reorder_buf[index] && 612 if (!tid_agg_rx->reorder_buf[index] &&
610 tid_agg_rx->stored_mpdu_num > 1) { 613 tid_agg_rx->stored_mpdu_num) {
611 /* 614 /*
612 * No buffers ready to be released, but check whether any 615 * No buffers ready to be released, but check whether any
613 * frames in the reorder buffer have timed out. 616 * frames in the reorder buffer have timed out.
@@ -2288,7 +2291,7 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
2288 * frames that we didn't handle, including returning unknown 2291 * frames that we didn't handle, including returning unknown
2289 * ones. For all other modes we will return them to the sender, 2292 * ones. For all other modes we will return them to the sender,
2290 * setting the 0x80 bit in the action category, as required by 2293 * setting the 0x80 bit in the action category, as required by
2291 * 802.11-2007 7.3.1.11. 2294 * 802.11-2012 9.24.4.
2292 * Newer versions of hostapd shall also use the management frame 2295 * Newer versions of hostapd shall also use the management frame
2293 * registration mechanisms, but older ones still use cooked 2296 * registration mechanisms, but older ones still use cooked
2294 * monitor interfaces so push all frames there. 2297 * monitor interfaces so push all frames there.
@@ -2298,6 +2301,9 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
2298 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) 2301 sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
2299 return RX_DROP_MONITOR; 2302 return RX_DROP_MONITOR;
2300 2303
2304 if (is_multicast_ether_addr(mgmt->da))
2305 return RX_DROP_MONITOR;
2306
2301 /* do not return rejected action frames */ 2307 /* do not return rejected action frames */
2302 if (mgmt->u.action.category & 0x80) 2308 if (mgmt->u.action.category & 0x80)
2303 return RX_DROP_UNUSABLE; 2309 return RX_DROP_UNUSABLE;
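
The two checks above compose a simple predicate: an unhandled action frame is bounced back to its sender only when it was unicast to us and is not itself a rejection (category bit 0x80 already set). A self-contained sketch of that rule; the multicast test is the low bit of the first DA octet, which is what is_multicast_ether_addr() checks:

    #include <stdbool.h>
    #include <stdint.h>

    /* Bounce rule from the handler above: drop group-addressed frames,
     * never re-bounce a rejection, otherwise return the frame with
     * 0x80 ORed into the action category. */
    static bool should_bounce_action(uint8_t category, const uint8_t da[6])
    {
        if (da[0] & 0x01)    /* multicast/broadcast destination */
            return false;
        if (category & 0x80) /* already a rejected action frame */
            return false;
        return true;         /* send back with category | 0x80 */
    }
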
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index b83870bf60f..3ff633e81b6 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -334,6 +334,7 @@ static int sta_info_finish_insert(struct sta_info *sta, bool async)
334 ieee80211_sta_debugfs_add(sta); 334 ieee80211_sta_debugfs_add(sta);
335 rate_control_add_sta_debugfs(sta); 335 rate_control_add_sta_debugfs(sta);
336 336
337 memset(&sinfo, 0, sizeof(sinfo));
337 sinfo.filled = 0; 338 sinfo.filled = 0;
338 sinfo.generation = local->sta_generation; 339 sinfo.generation = local->sta_generation;
339 cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL); 340 cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
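
The added memset matters because sinfo lives on the stack: setting only .filled leaves every other member as stack garbage, which cfg80211_new_sta() would then hand to userspace. A tiny illustration of the idiom, with a stand-in struct rather than the real cfg80211 one:

    #include <string.h>

    struct sinfo_model { unsigned int filled; int signal; unsigned int generation; };

    static void report_new_sta(void)
    {
        struct sinfo_model sinfo;

        memset(&sinfo, 0, sizeof(sinfo)); /* as the patch adds */
        sinfo.filled = 0;                 /* now redundant but harmless */
        sinfo.generation = 42;
        /* every untouched member is deterministically zero here */
    }
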
@@ -669,7 +670,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
669 BUG_ON(!sdata->bss); 670 BUG_ON(!sdata->bss);
670 671
671 atomic_dec(&sdata->bss->num_sta_ps); 672 atomic_dec(&sdata->bss->num_sta_ps);
672 __sta_info_clear_tim_bit(sdata->bss, sta); 673 sta_info_clear_tim_bit(sta);
673 } 674 }
674 675
675 local->num_sta--; 676 local->num_sta--;
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 1658efaa2e8..04cdbaf160b 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -336,7 +336,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
336 continue; 336 continue;
337 if (wk->offchan_tx.frame != skb) 337 if (wk->offchan_tx.frame != skb)
338 continue; 338 continue;
339 wk->offchan_tx.frame = NULL; 339 wk->offchan_tx.status = true;
340 break; 340 break;
341 } 341 }
342 rcu_read_unlock(); 342 rcu_read_unlock();
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 3104c844b54..da878c14182 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1222,7 +1222,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1222 tx->sta = rcu_dereference(sdata->u.vlan.sta); 1222 tx->sta = rcu_dereference(sdata->u.vlan.sta);
1223 if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr) 1223 if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
1224 return TX_DROP; 1224 return TX_DROP;
1225 } else if (info->flags & IEEE80211_TX_CTL_INJECTED) { 1225 } else if (info->flags & IEEE80211_TX_CTL_INJECTED ||
1226 tx->sdata->control_port_protocol == tx->skb->protocol) {
1226 tx->sta = sta_info_get_bss(sdata, hdr->addr1); 1227 tx->sta = sta_info_get_bss(sdata, hdr->addr1);
1227 } 1228 }
1228 if (!tx->sta) 1229 if (!tx->sta)
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index d3fe2d23748..11d9d49f22d 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1047,6 +1047,8 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1047 skb = ieee80211_probereq_get(&local->hw, &sdata->vif, 1047 skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
1048 ssid, ssid_len, 1048 ssid, ssid_len,
1049 buf, buf_len); 1049 buf, buf_len);
1050 if (!skb)
1051 goto out;
1050 1052
1051 if (dst) { 1053 if (dst) {
1052 mgmt = (struct ieee80211_mgmt *) skb->data; 1054 mgmt = (struct ieee80211_mgmt *) skb->data;
@@ -1055,6 +1057,8 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1055 } 1057 }
1056 1058
1057 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 1059 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
1060
1061 out:
1058 kfree(buf); 1062 kfree(buf);
1059 1063
1060 return skb; 1064 return skb;
@@ -1250,6 +1254,12 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1250 } 1254 }
1251 } 1255 }
1252 1256
1257 /* add back keys */
1258 list_for_each_entry(sdata, &local->interfaces, list)
1259 if (ieee80211_sdata_running(sdata))
1260 ieee80211_enable_keys(sdata);
1261
1262 wake_up:
1253 /* 1263 /*
1254 * Clear the WLAN_STA_BLOCK_BA flag so new aggregation 1264 * Clear the WLAN_STA_BLOCK_BA flag so new aggregation
1255 * sessions can be established after a resume. 1265 * sessions can be established after a resume.
@@ -1271,12 +1281,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1271 mutex_unlock(&local->sta_mtx); 1281 mutex_unlock(&local->sta_mtx);
1272 } 1282 }
1273 1283
1274 /* add back keys */
1275 list_for_each_entry(sdata, &local->interfaces, list)
1276 if (ieee80211_sdata_running(sdata))
1277 ieee80211_enable_keys(sdata);
1278
1279 wake_up:
1280 ieee80211_wake_queues_by_reason(hw, 1284 ieee80211_wake_queues_by_reason(hw,
1281 IEEE80211_QUEUE_STOP_REASON_SUSPEND); 1285 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
1282 1286
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index d2e7f0e8667..52b758dbff5 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -553,7 +553,7 @@ ieee80211_offchannel_tx(struct ieee80211_work *wk)
553 /* 553 /*
554 * After this, offchan_tx.frame remains but now is no 554 * After this, offchan_tx.frame remains but now is no
555 * longer a valid pointer -- we still need it as the 555 * longer a valid pointer -- we still need it as the
556 * cookie for canceling this work. 556 * cookie for canceling this work/status matching.
557 */ 557 */
558 ieee80211_tx_skb(wk->sdata, wk->offchan_tx.frame); 558 ieee80211_tx_skb(wk->sdata, wk->offchan_tx.frame);
559 559
@@ -1060,14 +1060,13 @@ static void ieee80211_work_work(struct work_struct *work)
1060 continue; 1060 continue;
1061 if (wk->chan != local->tmp_channel) 1061 if (wk->chan != local->tmp_channel)
1062 continue; 1062 continue;
1063 if (ieee80211_work_ct_coexists(wk->chan_type, 1063 if (!ieee80211_work_ct_coexists(wk->chan_type,
1064 local->tmp_channel_type)) 1064 local->tmp_channel_type))
1065 continue; 1065 continue;
1066 remain_off_channel = true; 1066 remain_off_channel = true;
1067 } 1067 }
1068 1068
1069 if (!remain_off_channel && local->tmp_channel) { 1069 if (!remain_off_channel && local->tmp_channel) {
1070 bool on_oper_chan = ieee80211_cfg_on_oper_channel(local);
1071 local->tmp_channel = NULL; 1070 local->tmp_channel = NULL;
1072 /* If tmp_channel wasn't operating channel, then 1071 /* If tmp_channel wasn't operating channel, then
1073 * we need to go back on-channel. 1072 * we need to go back on-channel.
@@ -1077,7 +1076,7 @@ static void ieee80211_work_work(struct work_struct *work)
1077 * we still need to do a hardware config. Currently, 1076 * we still need to do a hardware config. Currently,
1078 * we cannot be here while scanning, however. 1077 * we cannot be here while scanning, however.
1079 */ 1078 */
1080 if (ieee80211_cfg_on_oper_channel(local) && !on_oper_chan) 1079 if (!ieee80211_cfg_on_oper_channel(local))
1081 ieee80211_hw_config(local, 0); 1080 ieee80211_hw_config(local, 0);
1082 1081
1083 /* At the least, we need to disable offchannel_ps, 1082 /* At the least, we need to disable offchannel_ps,
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 8f6a302d2ac..aa1c40ab6a7 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -109,7 +109,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
109 if (status->flag & RX_FLAG_MMIC_ERROR) 109 if (status->flag & RX_FLAG_MMIC_ERROR)
110 goto mic_fail; 110 goto mic_fail;
111 111
112 if (!(status->flag & RX_FLAG_IV_STRIPPED)) 112 if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key)
113 goto update_iv; 113 goto update_iv;
114 114
115 return RX_CONTINUE; 115 return RX_CONTINUE;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 32bff6d86cb..5bd5c612a9b 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -902,6 +902,8 @@ config NETFILTER_XT_MATCH_OWNER
902 based on who created the socket: the user or group. It is also 902 based on who created the socket: the user or group. It is also
903 possible to check whether a socket actually exists. 903 possible to check whether a socket actually exists.
904 904
 905 Conflicts with the '"quota, tag, owner" match'
906
905config NETFILTER_XT_MATCH_POLICY 907config NETFILTER_XT_MATCH_POLICY
906 tristate 'IPsec "policy" match support' 908 tristate 'IPsec "policy" match support'
907 depends on XFRM 909 depends on XFRM
@@ -935,6 +937,22 @@ config NETFILTER_XT_MATCH_PKTTYPE
935 937
936 To compile it as a module, choose M here. If unsure, say N. 938 To compile it as a module, choose M here. If unsure, say N.
937 939
940config NETFILTER_XT_MATCH_QTAGUID
941 bool '"quota, tag, owner" match and stats support'
942 depends on NETFILTER_XT_MATCH_SOCKET
943 depends on NETFILTER_XT_MATCH_OWNER=n
944 help
945 This option replaces the `owner' match. In addition to matching
946 on uid, it keeps stats based on a tag assigned to a socket.
947 The full tag is comprised of a UID and an accounting tag.
948 The tags are assignable to sockets from user space (e.g. a download
949 manager can assign the socket to another UID for accounting).
950 Stats and control are done via /proc/net/xt_qtaguid/.
 951 It replaces owner since it takes the same arguments, but it
 952 must be recognized by the iptables tool.
953
954 If unsure, say `N'.
955
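
The help text describes the full tag as a UID plus an accounting tag, and the helpers used later in xt_qtaguid.c (make_tag_from_uid(), get_uid_from_tag(), get_atag_from_tag()) suggest a 64-bit value split in two. A sketch of that packing, assuming the accounting tag occupies the upper 32 bits and the UID the lower 32; the authoritative layout is in xt_qtaguid_internal.h:

    #include <stdint.h>

    typedef uint64_t tag_t;

    static tag_t make_tag(uint32_t acct_tag, uint32_t uid)
    {
        return ((tag_t)acct_tag << 32) | uid;
    }

    static uint32_t tag_uid(tag_t tag)  { return (uint32_t)tag; }        /* ~get_uid_from_tag */
    static tag_t    tag_acct(tag_t tag) { return tag & ~0xffffffffULL; } /* ~get_atag_from_tag */
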
938config NETFILTER_XT_MATCH_QUOTA 956config NETFILTER_XT_MATCH_QUOTA
939 tristate '"quota" match support' 957 tristate '"quota" match support'
940 depends on NETFILTER_ADVANCED 958 depends on NETFILTER_ADVANCED
@@ -945,6 +963,30 @@ config NETFILTER_XT_MATCH_QUOTA
945 If you want to compile it as a module, say M here and read 963 If you want to compile it as a module, say M here and read
946 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'. 964 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
947 965
966config NETFILTER_XT_MATCH_QUOTA2
967 tristate '"quota2" match support'
968 depends on NETFILTER_ADVANCED
969 help
 970 This option adds a `quota2' match, which allows matching on a
 971 byte counter correctly, rather than per CPU.
972 It allows naming the quotas.
973 This is based on http://xtables-addons.git.sourceforge.net
974
975 If you want to compile it as a module, say M here and read
976 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
977
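
The "correctly and not per CPU" point is the motivation: if each CPU keeps its own copy of the remaining quota, the effective limit can grow toward quota x NCPU. A userspace sketch of the single shared counter quota2 aims for, using a CAS loop (the module itself would use a lock; this only shows the accounting property):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    static _Atomic uint64_t quota_remaining = 1000000; /* bytes */

    /* Charge pkt_len against one shared counter; all CPUs see the
     * same remaining budget, so the limit is enforced exactly. */
    static bool quota_allows(uint64_t pkt_len)
    {
        uint64_t old = atomic_load(&quota_remaining);
        do {
            if (old < pkt_len)
                return false;
        } while (!atomic_compare_exchange_weak(&quota_remaining,
                                               &old, old - pkt_len));
        return true;
    }
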
978config NETFILTER_XT_MATCH_QUOTA2_LOG
979 bool '"quota2" Netfilter LOG support'
980 depends on NETFILTER_XT_MATCH_QUOTA2
981 depends on IP_NF_TARGET_ULOG=n # not yes, not module, just no
982 default n
983 help
984 This option allows `quota2' to log ONCE when a quota limit
985 is passed. It logs via NETLINK using the NETLINK_NFLOG family.
 986 It logs similarly to ipt_ULOG, but without the packet data.
987
988 If unsure, say `N'.
989
948config NETFILTER_XT_MATCH_RATEEST 990config NETFILTER_XT_MATCH_RATEEST
949 tristate '"rateest" match support' 991 tristate '"rateest" match support'
950 depends on NETFILTER_ADVANCED 992 depends on NETFILTER_ADVANCED
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 1a02853df86..6d917176c3b 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -95,7 +95,9 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_OWNER) += xt_owner.o
95obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o 95obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
96obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o 96obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o
97obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o 97obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o
98obj-$(CONFIG_NETFILTER_XT_MATCH_QTAGUID) += xt_qtaguid_print.o xt_qtaguid.o
98obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o 99obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o
100obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA2) += xt_quota2.o
99obj-$(CONFIG_NETFILTER_XT_MATCH_RATEEST) += xt_rateest.o 101obj-$(CONFIG_NETFILTER_XT_MATCH_RATEEST) += xt_rateest.o
100obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o 102obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o
101obj-$(CONFIG_NETFILTER_XT_MATCH_RECENT) += xt_recent.o 103obj-$(CONFIG_NETFILTER_XT_MATCH_RECENT) += xt_recent.o
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 24c28d238dc..0787bed0418 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -233,6 +233,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
233 __be16 dport = 0; /* destination port to forward */ 233 __be16 dport = 0; /* destination port to forward */
234 unsigned int flags; 234 unsigned int flags;
235 struct ip_vs_conn_param param; 235 struct ip_vs_conn_param param;
236 const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
236 union nf_inet_addr snet; /* source network of the client, 237 union nf_inet_addr snet; /* source network of the client,
237 after masking */ 238 after masking */
238 239
@@ -268,7 +269,6 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
268 { 269 {
269 int protocol = iph.protocol; 270 int protocol = iph.protocol;
270 const union nf_inet_addr *vaddr = &iph.daddr; 271 const union nf_inet_addr *vaddr = &iph.daddr;
271 const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
272 __be16 vport = 0; 272 __be16 vport = 0;
273 273
274 if (dst_port == svc->port) { 274 if (dst_port == svc->port) {
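
The ip_vs_core.c change hoists fwmark from an inner block to function scope. The reason is lifetime: the address of fwmark ends up stored in the connection parameter, and a pointer into a block that has exited is dangling. A compact illustration of the bug class, with hypothetical names:

    struct param_model { const int *addr; };

    static int broken(struct param_model *p)
    {
        {
            int fwmark = 42;
            p->addr = &fwmark;  /* dangling as soon as the block ends */
        }
        return *p->addr;        /* undefined behaviour */
    }

    static int fixed(void)
    {
        int fwmark = 42;        /* function scope, as in the patch */
        struct param_model p = { .addr = &fwmark };
        return *p.addr;         /* fwmark is still alive here */
    }
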
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 699c79a5565..a178cb34584 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3771,6 +3771,7 @@ err_sock:
3771void ip_vs_control_cleanup(void) 3771void ip_vs_control_cleanup(void)
3772{ 3772{
3773 EnterFunction(2); 3773 EnterFunction(2);
3774 unregister_netdevice_notifier(&ip_vs_dst_notifier);
3774 ip_vs_genl_unregister(); 3775 ip_vs_genl_unregister();
3775 nf_unregister_sockopt(&ip_vs_sockopts); 3776 nf_unregister_sockopt(&ip_vs_sockopts);
3776 LeaveFunction(2); 3777 LeaveFunction(2);
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
new file mode 100644
index 00000000000..08086d680c2
--- /dev/null
+++ b/net/netfilter/xt_qtaguid.c
@@ -0,0 +1,2785 @@
1/*
2 * Kernel iptables module to track stats for packets based on user tags.
3 *
4 * (C) 2011 Google, Inc
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11/*
12 * There are run-time debug flags enabled via the debug_mask module param, or
13 * via the DEFAULT_DEBUG_MASK. See xt_qtaguid_internal.h.
14 */
15#define DEBUG
16
17#include <linux/file.h>
18#include <linux/inetdevice.h>
19#include <linux/module.h>
20#include <linux/netfilter/x_tables.h>
21#include <linux/netfilter/xt_qtaguid.h>
22#include <linux/skbuff.h>
23#include <linux/workqueue.h>
24#include <net/addrconf.h>
25#include <net/sock.h>
26#include <net/tcp.h>
27#include <net/udp.h>
28
29#include <linux/netfilter/xt_socket.h>
30#include "xt_qtaguid_internal.h"
31#include "xt_qtaguid_print.h"
32
33/*
 34 * We only use the xt_socket funcs within the hook contexts that xt_socket
 35 * itself supports, to avoid unexpected return values.
36 */
37#define XT_SOCKET_SUPPORTED_HOOKS \
38 ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN))
39
40
41static const char *module_procdirname = "xt_qtaguid";
42static struct proc_dir_entry *xt_qtaguid_procdir;
43
44static unsigned int proc_iface_perms = S_IRUGO;
45module_param_named(iface_perms, proc_iface_perms, uint, S_IRUGO | S_IWUSR);
46
47static struct proc_dir_entry *xt_qtaguid_stats_file;
48static unsigned int proc_stats_perms = S_IRUGO;
49module_param_named(stats_perms, proc_stats_perms, uint, S_IRUGO | S_IWUSR);
50
51static struct proc_dir_entry *xt_qtaguid_ctrl_file;
52#ifdef CONFIG_ANDROID_PARANOID_NETWORK
53static unsigned int proc_ctrl_perms = S_IRUGO | S_IWUGO;
54#else
55static unsigned int proc_ctrl_perms = S_IRUGO | S_IWUSR;
56#endif
57module_param_named(ctrl_perms, proc_ctrl_perms, uint, S_IRUGO | S_IWUSR);
58
59#ifdef CONFIG_ANDROID_PARANOID_NETWORK
60#include <linux/android_aid.h>
61static gid_t proc_stats_readall_gid = AID_NET_BW_STATS;
62static gid_t proc_ctrl_write_gid = AID_NET_BW_ACCT;
63#else
64/* 0 means, don't limit anybody */
65static gid_t proc_stats_readall_gid;
66static gid_t proc_ctrl_write_gid;
67#endif
68module_param_named(stats_readall_gid, proc_stats_readall_gid, uint,
69 S_IRUGO | S_IWUSR);
70module_param_named(ctrl_write_gid, proc_ctrl_write_gid, uint,
71 S_IRUGO | S_IWUSR);
72
73/*
74 * Limit the number of active tags (via socket tags) for a given UID.
75 * Multiple processes could share the UID.
76 */
77static int max_sock_tags = DEFAULT_MAX_SOCK_TAGS;
78module_param(max_sock_tags, int, S_IRUGO | S_IWUSR);
79
80/*
 81 * After the kernel has initialized this module, it is still possible
82 * to make it passive.
83 * Setting passive to Y:
84 * - the iface stats handling will not act on notifications.
85 * - iptables matches will never match.
86 * - ctrl commands silently succeed.
87 * - stats are always empty.
 88 * This is mostly useful when a bug is suspected.
89 */
90static bool module_passive;
91module_param_named(passive, module_passive, bool, S_IRUGO | S_IWUSR);
92
93/*
94 * Control how qtaguid data is tracked per proc/uid.
95 * Setting tag_tracking_passive to Y:
96 * - don't create proc specific structs to track tags
97 * - don't check that active tag stats exceed some limits.
98 * - don't clean up socket tags on process exits.
 100 * This is mostly useful when a bug is suspected.
100 */
101static bool qtu_proc_handling_passive;
102module_param_named(tag_tracking_passive, qtu_proc_handling_passive, bool,
103 S_IRUGO | S_IWUSR);
104
105#define QTU_DEV_NAME "xt_qtaguid"
106
107uint qtaguid_debug_mask = DEFAULT_DEBUG_MASK;
108module_param_named(debug_mask, qtaguid_debug_mask, uint, S_IRUGO | S_IWUSR);
109
110/*---------------------------------------------------------------------------*/
111static const char *iface_stat_procdirname = "iface_stat";
112static struct proc_dir_entry *iface_stat_procdir;
113static const char *iface_stat_all_procfilename = "iface_stat_all";
114static struct proc_dir_entry *iface_stat_all_procfile;
115
116/*
117 * Ordering of locks:
118 * outer locks:
119 * iface_stat_list_lock
120 * sock_tag_list_lock
121 * inner locks:
122 * uid_tag_data_tree_lock
123 * tag_counter_set_list_lock
124 * Notice how sock_tag_list_lock is held sometimes when uid_tag_data_tree_lock
125 * is acquired.
126 *
127 * Call tree with all lock holders as of 2011-09-25:
128 *
129 * iface_stat_all_proc_read()
130 * iface_stat_list_lock
131 * (struct iface_stat)
132 *
133 * qtaguid_ctrl_proc_read()
134 * sock_tag_list_lock
135 * (sock_tag_tree)
136 * (struct proc_qtu_data->sock_tag_list)
137 * prdebug_full_state()
138 * sock_tag_list_lock
139 * (sock_tag_tree)
140 * uid_tag_data_tree_lock
141 * (uid_tag_data_tree)
142 * (proc_qtu_data_tree)
143 * iface_stat_list_lock
144 *
145 * qtaguid_stats_proc_read()
146 * iface_stat_list_lock
147 * struct iface_stat->tag_stat_list_lock
148 *
149 * qtudev_open()
150 * uid_tag_data_tree_lock
151 *
152 * qtudev_release()
153 * sock_tag_data_list_lock
154 * uid_tag_data_tree_lock
155 * prdebug_full_state()
156 * sock_tag_list_lock
157 * uid_tag_data_tree_lock
158 * iface_stat_list_lock
159 *
160 * iface_netdev_event_handler()
161 * iface_stat_create()
162 * iface_stat_list_lock
163 * iface_stat_update()
164 * iface_stat_list_lock
165 *
166 * iface_inetaddr_event_handler()
167 * iface_stat_create()
168 * iface_stat_list_lock
169 * iface_stat_update()
170 * iface_stat_list_lock
171 *
172 * iface_inet6addr_event_handler()
173 * iface_stat_create_ipv6()
174 * iface_stat_list_lock
175 * iface_stat_update()
176 * iface_stat_list_lock
177 *
178 * qtaguid_mt()
179 * account_for_uid()
180 * if_tag_stat_update()
181 * get_sock_stat()
182 * sock_tag_list_lock
183 * struct iface_stat->tag_stat_list_lock
184 * tag_stat_update()
185 * get_active_counter_set()
186 * tag_counter_set_list_lock
187 * tag_stat_update()
188 * get_active_counter_set()
189 * tag_counter_set_list_lock
190 *
191 *
192 * qtaguid_ctrl_parse()
193 * ctrl_cmd_delete()
194 * sock_tag_list_lock
195 * tag_counter_set_list_lock
196 * iface_stat_list_lock
197 * struct iface_stat->tag_stat_list_lock
198 * uid_tag_data_tree_lock
199 * ctrl_cmd_counter_set()
200 * tag_counter_set_list_lock
201 * ctrl_cmd_tag()
202 * sock_tag_list_lock
203 * (sock_tag_tree)
204 * get_tag_ref()
205 * uid_tag_data_tree_lock
206 * (uid_tag_data_tree)
207 * uid_tag_data_tree_lock
208 * (proc_qtu_data_tree)
209 * ctrl_cmd_untag()
210 * sock_tag_list_lock
211 * uid_tag_data_tree_lock
212 *
213 */
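
A userspace model of the ordering rules documented above: an outer lock (sock_tag_list_lock) may be held while an inner lock (uid_tag_data_tree_lock) is taken, but never the reverse, so every path nests the pair the same way and cannot deadlock. Sketch only; the module uses bh-disabling spinlocks, not pthreads:

    #include <pthread.h>

    static pthread_mutex_t sock_tag_list_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t uid_tag_data_tree_lock = PTHREAD_MUTEX_INITIALIZER;

    static void tag_one_socket(void)
    {
        pthread_mutex_lock(&sock_tag_list_lock);      /* outer first */
        pthread_mutex_lock(&uid_tag_data_tree_lock);  /* then inner */
        /* ... touch sock_tag_tree and uid_tag_data_tree ... */
        pthread_mutex_unlock(&uid_tag_data_tree_lock);
        pthread_mutex_unlock(&sock_tag_list_lock);
    }
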
214static LIST_HEAD(iface_stat_list);
215static DEFINE_SPINLOCK(iface_stat_list_lock);
216
217static struct rb_root sock_tag_tree = RB_ROOT;
218static DEFINE_SPINLOCK(sock_tag_list_lock);
219
220static struct rb_root tag_counter_set_tree = RB_ROOT;
221static DEFINE_SPINLOCK(tag_counter_set_list_lock);
222
223static struct rb_root uid_tag_data_tree = RB_ROOT;
224static DEFINE_SPINLOCK(uid_tag_data_tree_lock);
225
226static struct rb_root proc_qtu_data_tree = RB_ROOT;
227/* No proc_qtu_data_tree_lock; use uid_tag_data_tree_lock */
228
229static struct qtaguid_event_counts qtu_events;
230/*----------------------------------------------*/
231static bool can_manipulate_uids(void)
232{
233 /* root pwnd */
234 return unlikely(!current_fsuid()) || unlikely(!proc_ctrl_write_gid)
235 || in_egroup_p(proc_ctrl_write_gid);
236}
237
238static bool can_impersonate_uid(uid_t uid)
239{
240 return uid == current_fsuid() || can_manipulate_uids();
241}
242
243static bool can_read_other_uid_stats(uid_t uid)
244{
245 /* root pwnd */
246 return unlikely(!current_fsuid()) || uid == current_fsuid()
247 || unlikely(!proc_stats_readall_gid)
248 || in_egroup_p(proc_stats_readall_gid);
249}
250
251static inline void dc_add_byte_packets(struct data_counters *counters, int set,
252 enum ifs_tx_rx direction,
253 enum ifs_proto ifs_proto,
254 int bytes,
255 int packets)
256{
257 counters->bpc[set][direction][ifs_proto].bytes += bytes;
258 counters->bpc[set][direction][ifs_proto].packets += packets;
259}
260
261static inline uint64_t dc_sum_bytes(struct data_counters *counters,
262 int set,
263 enum ifs_tx_rx direction)
264{
265 return counters->bpc[set][direction][IFS_TCP].bytes
266 + counters->bpc[set][direction][IFS_UDP].bytes
267 + counters->bpc[set][direction][IFS_PROTO_OTHER].bytes;
268}
269
270static inline uint64_t dc_sum_packets(struct data_counters *counters,
271 int set,
272 enum ifs_tx_rx direction)
273{
274 return counters->bpc[set][direction][IFS_TCP].packets
275 + counters->bpc[set][direction][IFS_UDP].packets
276 + counters->bpc[set][direction][IFS_PROTO_OTHER].packets;
277}
278
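
The dc_* helpers above index a small three-dimensional counter array: by counter set, by direction, and by protocol bucket. The real struct data_counters lives in xt_qtaguid_internal.h; an assumed reconstruction that matches the indexing used here (the number of counter sets is a guess):

    #include <stdint.h>

    enum ifs_tx_rx { IFS_TX, IFS_RX, IFS_MAX_DIRECTIONS };
    enum ifs_proto { IFS_TCP, IFS_UDP, IFS_PROTO_OTHER, IFS_MAX_PROTOS };
    #define IFS_MAX_COUNTER_SETS 2 /* assumption: e.g. active/background */

    struct byte_packet_counters { uint64_t bytes, packets; };

    struct data_counters_model {
        struct byte_packet_counters
            bpc[IFS_MAX_COUNTER_SETS][IFS_MAX_DIRECTIONS][IFS_MAX_PROTOS];
    };
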
279static struct tag_node *tag_node_tree_search(struct rb_root *root, tag_t tag)
280{
281 struct rb_node *node = root->rb_node;
282
283 while (node) {
284 struct tag_node *data = rb_entry(node, struct tag_node, node);
285 int result;
286 RB_DEBUG("qtaguid: tag_node_tree_search(0x%llx): "
287 " node=%p data=%p\n", tag, node, data);
288 result = tag_compare(tag, data->tag);
289 RB_DEBUG("qtaguid: tag_node_tree_search(0x%llx): "
290 " data.tag=0x%llx (uid=%u) res=%d\n",
291 tag, data->tag, get_uid_from_tag(data->tag), result);
292 if (result < 0)
293 node = node->rb_left;
294 else if (result > 0)
295 node = node->rb_right;
296 else
297 return data;
298 }
299 return NULL;
300}
301
302static void tag_node_tree_insert(struct tag_node *data, struct rb_root *root)
303{
304 struct rb_node **new = &(root->rb_node), *parent = NULL;
305
306 /* Figure out where to put new node */
307 while (*new) {
308 struct tag_node *this = rb_entry(*new, struct tag_node,
309 node);
310 int result = tag_compare(data->tag, this->tag);
311 RB_DEBUG("qtaguid: %s(): tag=0x%llx"
312 " (uid=%u)\n", __func__,
313 this->tag,
314 get_uid_from_tag(this->tag));
315 parent = *new;
316 if (result < 0)
317 new = &((*new)->rb_left);
318 else if (result > 0)
319 new = &((*new)->rb_right);
320 else
321 BUG();
322 }
323
324 /* Add new node and rebalance tree. */
325 rb_link_node(&data->node, parent, new);
326 rb_insert_color(&data->node, root);
327}
328
329static void tag_stat_tree_insert(struct tag_stat *data, struct rb_root *root)
330{
331 tag_node_tree_insert(&data->tn, root);
332}
333
334static struct tag_stat *tag_stat_tree_search(struct rb_root *root, tag_t tag)
335{
336 struct tag_node *node = tag_node_tree_search(root, tag);
337 if (!node)
338 return NULL;
339 return rb_entry(&node->node, struct tag_stat, tn.node);
340}
341
342static void tag_counter_set_tree_insert(struct tag_counter_set *data,
343 struct rb_root *root)
344{
345 tag_node_tree_insert(&data->tn, root);
346}
347
348static struct tag_counter_set *tag_counter_set_tree_search(struct rb_root *root,
349 tag_t tag)
350{
351 struct tag_node *node = tag_node_tree_search(root, tag);
352 if (!node)
353 return NULL;
354 return rb_entry(&node->node, struct tag_counter_set, tn.node);
355
356}
357
358static void tag_ref_tree_insert(struct tag_ref *data, struct rb_root *root)
359{
360 tag_node_tree_insert(&data->tn, root);
361}
362
363static struct tag_ref *tag_ref_tree_search(struct rb_root *root, tag_t tag)
364{
365 struct tag_node *node = tag_node_tree_search(root, tag);
366 if (!node)
367 return NULL;
368 return rb_entry(&node->node, struct tag_ref, tn.node);
369}
370
371static struct sock_tag *sock_tag_tree_search(struct rb_root *root,
372 const struct sock *sk)
373{
374 struct rb_node *node = root->rb_node;
375
376 while (node) {
377 struct sock_tag *data = rb_entry(node, struct sock_tag,
378 sock_node);
379 if (sk < data->sk)
380 node = node->rb_left;
381 else if (sk > data->sk)
382 node = node->rb_right;
383 else
384 return data;
385 }
386 return NULL;
387}
388
389static void sock_tag_tree_insert(struct sock_tag *data, struct rb_root *root)
390{
391 struct rb_node **new = &(root->rb_node), *parent = NULL;
392
393 /* Figure out where to put new node */
394 while (*new) {
395 struct sock_tag *this = rb_entry(*new, struct sock_tag,
396 sock_node);
397 parent = *new;
398 if (data->sk < this->sk)
399 new = &((*new)->rb_left);
400 else if (data->sk > this->sk)
401 new = &((*new)->rb_right);
402 else
403 BUG();
404 }
405
406 /* Add new node and rebalance tree. */
407 rb_link_node(&data->sock_node, parent, new);
408 rb_insert_color(&data->sock_node, root);
409}
410
411static void sock_tag_tree_erase(struct rb_root *st_to_free_tree)
412{
413 struct rb_node *node;
414 struct sock_tag *st_entry;
415
416 node = rb_first(st_to_free_tree);
417 while (node) {
418 st_entry = rb_entry(node, struct sock_tag, sock_node);
419 node = rb_next(node);
420 CT_DEBUG("qtaguid: %s(): "
421 "erase st: sk=%p tag=0x%llx (uid=%u)\n", __func__,
422 st_entry->sk,
423 st_entry->tag,
424 get_uid_from_tag(st_entry->tag));
425 rb_erase(&st_entry->sock_node, st_to_free_tree);
426 sockfd_put(st_entry->socket);
427 kfree(st_entry);
428 }
429}
430
431static struct proc_qtu_data *proc_qtu_data_tree_search(struct rb_root *root,
432 const pid_t pid)
433{
434 struct rb_node *node = root->rb_node;
435
436 while (node) {
437 struct proc_qtu_data *data = rb_entry(node,
438 struct proc_qtu_data,
439 node);
440 if (pid < data->pid)
441 node = node->rb_left;
442 else if (pid > data->pid)
443 node = node->rb_right;
444 else
445 return data;
446 }
447 return NULL;
448}
449
450static void proc_qtu_data_tree_insert(struct proc_qtu_data *data,
451 struct rb_root *root)
452{
453 struct rb_node **new = &(root->rb_node), *parent = NULL;
454
455 /* Figure out where to put new node */
456 while (*new) {
457 struct proc_qtu_data *this = rb_entry(*new,
458 struct proc_qtu_data,
459 node);
460 parent = *new;
461 if (data->pid < this->pid)
462 new = &((*new)->rb_left);
463 else if (data->pid > this->pid)
464 new = &((*new)->rb_right);
465 else
466 BUG();
467 }
468
469 /* Add new node and rebalance tree. */
470 rb_link_node(&data->node, parent, new);
471 rb_insert_color(&data->node, root);
472}
473
474static void uid_tag_data_tree_insert(struct uid_tag_data *data,
475 struct rb_root *root)
476{
477 struct rb_node **new = &(root->rb_node), *parent = NULL;
478
479 /* Figure out where to put new node */
480 while (*new) {
481 struct uid_tag_data *this = rb_entry(*new,
482 struct uid_tag_data,
483 node);
484 parent = *new;
485 if (data->uid < this->uid)
486 new = &((*new)->rb_left);
487 else if (data->uid > this->uid)
488 new = &((*new)->rb_right);
489 else
490 BUG();
491 }
492
493 /* Add new node and rebalance tree. */
494 rb_link_node(&data->node, parent, new);
495 rb_insert_color(&data->node, root);
496}
497
498static struct uid_tag_data *uid_tag_data_tree_search(struct rb_root *root,
499 uid_t uid)
500{
501 struct rb_node *node = root->rb_node;
502
503 while (node) {
504 struct uid_tag_data *data = rb_entry(node,
505 struct uid_tag_data,
506 node);
507 if (uid < data->uid)
508 node = node->rb_left;
509 else if (uid > data->uid)
510 node = node->rb_right;
511 else
512 return data;
513 }
514 return NULL;
515}
516
517/*
518 * Allocates a new uid_tag_data struct if needed.
519 * Returns a pointer to the found or allocated uid_tag_data.
 520 * Returns a PTR_ERR on failure; no lock is held in that case.
 521 * If found_res is not NULL:
 522 * sets *found_res to true if the entry already existed,
 523 * false if it had to be allocated.
524 */
525struct uid_tag_data *get_uid_data(uid_t uid, bool *found_res)
526{
527 struct uid_tag_data *utd_entry;
528
529 /* Look for top level uid_tag_data for the UID */
530 utd_entry = uid_tag_data_tree_search(&uid_tag_data_tree, uid);
531 DR_DEBUG("qtaguid: get_uid_data(%u) utd=%p\n", uid, utd_entry);
532
533 if (found_res)
534 *found_res = utd_entry;
535 if (utd_entry)
536 return utd_entry;
537
538 utd_entry = kzalloc(sizeof(*utd_entry), GFP_ATOMIC);
539 if (!utd_entry) {
540 pr_err("qtaguid: get_uid_data(%u): "
541 "tag data alloc failed\n", uid);
542 return ERR_PTR(-ENOMEM);
543 }
544
545 utd_entry->uid = uid;
546 utd_entry->tag_ref_tree = RB_ROOT;
547 uid_tag_data_tree_insert(utd_entry, &uid_tag_data_tree);
548 DR_DEBUG("qtaguid: get_uid_data(%u) new utd=%p\n", uid, utd_entry);
549 return utd_entry;
550}
551
552/* Never returns NULL. Either PTR_ERR or a valid ptr. */
553static struct tag_ref *new_tag_ref(tag_t new_tag,
554 struct uid_tag_data *utd_entry)
555{
556 struct tag_ref *tr_entry;
557 int res;
558
559 if (utd_entry->num_active_tags + 1 > max_sock_tags) {
560 pr_info("qtaguid: new_tag_ref(0x%llx): "
561 "tag ref alloc quota exceeded. max=%d\n",
562 new_tag, max_sock_tags);
563 res = -EMFILE;
564 goto err_res;
565
566 }
567
568 tr_entry = kzalloc(sizeof(*tr_entry), GFP_ATOMIC);
569 if (!tr_entry) {
570 pr_err("qtaguid: new_tag_ref(0x%llx): "
571 "tag ref alloc failed\n",
572 new_tag);
573 res = -ENOMEM;
574 goto err_res;
575 }
576 tr_entry->tn.tag = new_tag;
577 /* tr_entry->num_sock_tags handled by caller */
578 utd_entry->num_active_tags++;
579 tag_ref_tree_insert(tr_entry, &utd_entry->tag_ref_tree);
580 DR_DEBUG("qtaguid: new_tag_ref(0x%llx): "
581 " inserted new tag ref %p\n",
582 new_tag, tr_entry);
583 return tr_entry;
584
585err_res:
586 return ERR_PTR(res);
587}
588
589static struct tag_ref *lookup_tag_ref(tag_t full_tag,
590 struct uid_tag_data **utd_res)
591{
592 struct uid_tag_data *utd_entry;
593 struct tag_ref *tr_entry;
594 bool found_utd;
595 uid_t uid = get_uid_from_tag(full_tag);
596
597 DR_DEBUG("qtaguid: lookup_tag_ref(tag=0x%llx (uid=%u))\n",
598 full_tag, uid);
599
600 utd_entry = get_uid_data(uid, &found_utd);
601 if (IS_ERR_OR_NULL(utd_entry)) {
602 if (utd_res)
603 *utd_res = utd_entry;
604 return NULL;
605 }
606
607 tr_entry = tag_ref_tree_search(&utd_entry->tag_ref_tree, full_tag);
608 if (utd_res)
609 *utd_res = utd_entry;
610 DR_DEBUG("qtaguid: lookup_tag_ref(0x%llx) utd_entry=%p tr_entry=%p\n",
611 full_tag, utd_entry, tr_entry);
612 return tr_entry;
613}
614
615/* Never returns NULL. Either PTR_ERR or a valid ptr. */
616static struct tag_ref *get_tag_ref(tag_t full_tag,
617 struct uid_tag_data **utd_res)
618{
619 struct uid_tag_data *utd_entry;
620 struct tag_ref *tr_entry;
621
622 DR_DEBUG("qtaguid: get_tag_ref(0x%llx)\n",
623 full_tag);
624 spin_lock_bh(&uid_tag_data_tree_lock);
625 tr_entry = lookup_tag_ref(full_tag, &utd_entry);
626 BUG_ON(IS_ERR_OR_NULL(utd_entry));
627 if (!tr_entry)
628 tr_entry = new_tag_ref(full_tag, utd_entry);
629
630 spin_unlock_bh(&uid_tag_data_tree_lock);
631 if (utd_res)
632 *utd_res = utd_entry;
633 DR_DEBUG("qtaguid: get_tag_ref(0x%llx) utd=%p tr=%p\n",
634 full_tag, utd_entry, tr_entry);
635 return tr_entry;
636}
637
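
Per the comment above, get_tag_ref() never returns NULL: callers test with IS_ERR() and otherwise own the reference via num_sock_tags. A hypothetical caller, in the file's own idiom (tag_socket_example is not a real function in this module):

    static int tag_socket_example(tag_t full_tag)
    {
            struct tag_ref *tr_entry = get_tag_ref(full_tag, NULL);

            if (IS_ERR(tr_entry))
                    return PTR_ERR(tr_entry);
            tr_entry->num_sock_tags++; /* handled by caller, per new_tag_ref() */
            return 0;
    }
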
638/* Checks and maybe frees the UID Tag Data entry */
639static void put_utd_entry(struct uid_tag_data *utd_entry)
640{
641 /* Are we done with the UID tag data entry? */
642 if (RB_EMPTY_ROOT(&utd_entry->tag_ref_tree) &&
643 !utd_entry->num_pqd) {
644 DR_DEBUG("qtaguid: %s(): "
645 "erase utd_entry=%p uid=%u "
646 "by pid=%u tgid=%u uid=%u\n", __func__,
647 utd_entry, utd_entry->uid,
648 current->pid, current->tgid, current_fsuid());
649 BUG_ON(utd_entry->num_active_tags);
650 rb_erase(&utd_entry->node, &uid_tag_data_tree);
651 kfree(utd_entry);
652 } else {
653 DR_DEBUG("qtaguid: %s(): "
654 "utd_entry=%p still has %d tags %d proc_qtu_data\n",
655 __func__, utd_entry, utd_entry->num_active_tags,
656 utd_entry->num_pqd);
657 BUG_ON(!(utd_entry->num_active_tags ||
658 utd_entry->num_pqd));
659 }
660}
661
662/*
663 * If no sock_tags are using this tag_ref,
664 * decrements refcount of utd_entry, removes tr_entry
 665 * from utd_entry->tag_ref_tree and frees it.
666 */
667static void free_tag_ref_from_utd_entry(struct tag_ref *tr_entry,
668 struct uid_tag_data *utd_entry)
669{
670 DR_DEBUG("qtaguid: %s(): %p tag=0x%llx (uid=%u)\n", __func__,
671 tr_entry, tr_entry->tn.tag,
672 get_uid_from_tag(tr_entry->tn.tag));
673 if (!tr_entry->num_sock_tags) {
674 BUG_ON(!utd_entry->num_active_tags);
675 utd_entry->num_active_tags--;
676 rb_erase(&tr_entry->tn.node, &utd_entry->tag_ref_tree);
677 DR_DEBUG("qtaguid: %s(): erased %p\n", __func__, tr_entry);
678 kfree(tr_entry);
679 }
680}
681
682static void put_tag_ref_tree(tag_t full_tag, struct uid_tag_data *utd_entry)
683{
684 struct rb_node *node;
685 struct tag_ref *tr_entry;
686 tag_t acct_tag;
687
688 DR_DEBUG("qtaguid: %s(tag=0x%llx (uid=%u))\n", __func__,
689 full_tag, get_uid_from_tag(full_tag));
690 acct_tag = get_atag_from_tag(full_tag);
691 node = rb_first(&utd_entry->tag_ref_tree);
692 while (node) {
693 tr_entry = rb_entry(node, struct tag_ref, tn.node);
694 node = rb_next(node);
695 if (!acct_tag || tr_entry->tn.tag == full_tag)
696 free_tag_ref_from_utd_entry(tr_entry, utd_entry);
697 }
698}
699
700static int read_proc_u64(char *page, char **start, off_t off,
701 int count, int *eof, void *data)
702{
703 int len;
704 uint64_t value;
705 char *p = page;
706 uint64_t *iface_entry = data;
707
708 if (!data)
709 return 0;
710
711 value = *iface_entry;
712 p += sprintf(p, "%llu\n", value);
713 len = (p - page) - off;
714 *eof = (len <= count) ? 1 : 0;
715 *start = page + off;
716 return len;
717}
718
719static int read_proc_bool(char *page, char **start, off_t off,
720 int count, int *eof, void *data)
721{
722 int len;
723 bool value;
724 char *p = page;
725 bool *bool_entry = data;
726
727 if (!data)
728 return 0;
729
730 value = *bool_entry;
731 p += sprintf(p, "%u\n", value);
732 len = (p - page) - off;
733 *eof = (len <= count) ? 1 : 0;
734 *start = page + off;
735 return len;
736}
737
738static int get_active_counter_set(tag_t tag)
739{
740 int active_set = 0;
741 struct tag_counter_set *tcs;
742
743 MT_DEBUG("qtaguid: get_active_counter_set(tag=0x%llx)"
744 " (uid=%u)\n",
745 tag, get_uid_from_tag(tag));
746 /* For now we only handle UID tags for active sets */
747 tag = get_utag_from_tag(tag);
748 spin_lock_bh(&tag_counter_set_list_lock);
749 tcs = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
750 if (tcs)
751 active_set = tcs->active_set;
752 spin_unlock_bh(&tag_counter_set_list_lock);
753 return active_set;
754}
755
756/*
757 * Find the entry for tracking the specified interface.
758 * Caller must hold iface_stat_list_lock
759 */
760static struct iface_stat *get_iface_entry(const char *ifname)
761{
762 struct iface_stat *iface_entry;
763
 764 /* Find the entry tracking the specified interface */
765 if (ifname == NULL) {
766 pr_info("qtaguid: iface_stat: get() NULL device name\n");
767 return NULL;
768 }
769
770 /* Iterate over interfaces */
771 list_for_each_entry(iface_entry, &iface_stat_list, list) {
772 if (!strcmp(ifname, iface_entry->ifname))
773 goto done;
774 }
775 iface_entry = NULL;
776done:
777 return iface_entry;
778}
779
780static int iface_stat_all_proc_read(char *page, char **num_items_returned,
781 off_t items_to_skip, int char_count,
782 int *eof, void *data)
783{
784 char *outp = page;
785 int item_index = 0;
786 int len;
787 struct iface_stat *iface_entry;
788 struct rtnl_link_stats64 dev_stats, *stats;
789 struct rtnl_link_stats64 no_dev_stats = {0};
790
791 if (unlikely(module_passive)) {
792 *eof = 1;
793 return 0;
794 }
795
796 CT_DEBUG("qtaguid:proc iface_stat_all "
797 "page=%p *num_items_returned=%p off=%ld "
798 "char_count=%d *eof=%d\n", page, *num_items_returned,
799 items_to_skip, char_count, *eof);
800
801 if (*eof)
802 return 0;
803
804 /*
805 * This lock will prevent iface_stat_update() from changing active,
806 * and in turn prevent an interface from unregistering itself.
807 */
808 spin_lock_bh(&iface_stat_list_lock);
809 list_for_each_entry(iface_entry, &iface_stat_list, list) {
810 if (item_index++ < items_to_skip)
811 continue;
812
813 if (iface_entry->active) {
814 stats = dev_get_stats(iface_entry->net_dev,
815 &dev_stats);
816 } else {
817 stats = &no_dev_stats;
818 }
819 len = snprintf(outp, char_count,
820 "%s %d "
821 "%llu %llu %llu %llu "
822 "%llu %llu %llu %llu\n",
823 iface_entry->ifname,
824 iface_entry->active,
825 iface_entry->totals[IFS_RX].bytes,
826 iface_entry->totals[IFS_RX].packets,
827 iface_entry->totals[IFS_TX].bytes,
828 iface_entry->totals[IFS_TX].packets,
829 stats->rx_bytes, stats->rx_packets,
830 stats->tx_bytes, stats->tx_packets);
831 if (len >= char_count) {
832 spin_unlock_bh(&iface_stat_list_lock);
833 *outp = '\0';
834 return outp - page;
835 }
836 outp += len;
837 char_count -= len;
838 (*num_items_returned)++;
839 }
840 spin_unlock_bh(&iface_stat_list_lock);
841
842 *eof = 1;
843 return outp - page;
844}
845
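
Given the snprintf format above, each line of /proc/net/xt_qtaguid/iface_stat_all carries the interface name, the active flag, the stashed totals, and then the live dev_get_stats() numbers. A hypothetical line (all figures invented):

    wlan0 1 1048576 900 524288 450 2097152 1800 1048576 900

i.e. ifname active rx_bytes rx_packets tx_bytes tx_packets (totals) followed by rx_bytes rx_packets tx_bytes tx_packets (current device stats).
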
846static void iface_create_proc_worker(struct work_struct *work)
847{
848 struct proc_dir_entry *proc_entry;
849 struct iface_stat_work *isw = container_of(work, struct iface_stat_work,
850 iface_work);
851 struct iface_stat *new_iface = isw->iface_entry;
852
853 /* iface_entries are not deleted, so safe to manipulate. */
854 proc_entry = proc_mkdir(new_iface->ifname, iface_stat_procdir);
855 if (IS_ERR_OR_NULL(proc_entry)) {
856 pr_err("qtaguid: iface_stat: create_proc(): alloc failed.\n");
857 kfree(isw);
858 return;
859 }
860
861 new_iface->proc_ptr = proc_entry;
862
863 create_proc_read_entry("tx_bytes", proc_iface_perms, proc_entry,
864 read_proc_u64, &new_iface->totals[IFS_TX].bytes);
865 create_proc_read_entry("rx_bytes", proc_iface_perms, proc_entry,
866 read_proc_u64, &new_iface->totals[IFS_RX].bytes);
867 create_proc_read_entry("tx_packets", proc_iface_perms, proc_entry,
868 read_proc_u64, &new_iface->totals[IFS_TX].packets);
869 create_proc_read_entry("rx_packets", proc_iface_perms, proc_entry,
870 read_proc_u64, &new_iface->totals[IFS_RX].packets);
871 create_proc_read_entry("active", proc_iface_perms, proc_entry,
872 read_proc_bool, &new_iface->active);
873
874 IF_DEBUG("qtaguid: iface_stat: create_proc(): done "
875 "entry=%p dev=%s\n", new_iface, new_iface->ifname);
876 kfree(isw);
877}
878
879/*
880 * Will set the entry's active state, and
881 * update the net_dev accordingly also.
882 */
883static void _iface_stat_set_active(struct iface_stat *entry,
884 struct net_device *net_dev,
885 bool activate)
886{
887 if (activate) {
888 entry->net_dev = net_dev;
889 entry->active = true;
890 IF_DEBUG("qtaguid: %s(%s): "
891 "enable tracking. rfcnt=%d\n", __func__,
892 entry->ifname,
893 percpu_read(*net_dev->pcpu_refcnt));
894 } else {
895 entry->active = false;
896 entry->net_dev = NULL;
897 IF_DEBUG("qtaguid: %s(%s): "
898 "disable tracking. rfcnt=%d\n", __func__,
899 entry->ifname,
900 percpu_read(*net_dev->pcpu_refcnt));
901
902 }
903}
904
905/* Caller must hold iface_stat_list_lock */
906static struct iface_stat *iface_alloc(struct net_device *net_dev)
907{
908 struct iface_stat *new_iface;
909 struct iface_stat_work *isw;
910
911 new_iface = kzalloc(sizeof(*new_iface), GFP_ATOMIC);
912 if (new_iface == NULL) {
913 pr_err("qtaguid: iface_stat: create(%s): "
914 "iface_stat alloc failed\n", net_dev->name);
915 return NULL;
916 }
917 new_iface->ifname = kstrdup(net_dev->name, GFP_ATOMIC);
918 if (new_iface->ifname == NULL) {
919 pr_err("qtaguid: iface_stat: create(%s): "
920 "ifname alloc failed\n", net_dev->name);
921 kfree(new_iface);
922 return NULL;
923 }
924 spin_lock_init(&new_iface->tag_stat_list_lock);
925 new_iface->tag_stat_tree = RB_ROOT;
926 _iface_stat_set_active(new_iface, net_dev, true);
927
928 /*
929 * ipv6 notifier chains are atomic :(
930 * No create_proc_read_entry() for you!
931 */
932 isw = kmalloc(sizeof(*isw), GFP_ATOMIC);
933 if (!isw) {
934 pr_err("qtaguid: iface_stat: create(%s): "
935 "work alloc failed\n", new_iface->ifname);
936 _iface_stat_set_active(new_iface, net_dev, false);
937 kfree(new_iface->ifname);
938 kfree(new_iface);
939 return NULL;
940 }
941 isw->iface_entry = new_iface;
942 INIT_WORK(&isw->iface_work, iface_create_proc_worker);
943 schedule_work(&isw->iface_work);
944 list_add(&new_iface->list, &iface_stat_list);
945 return new_iface;
946}
947
948static void iface_check_stats_reset_and_adjust(struct net_device *net_dev,
949 struct iface_stat *iface)
950{
951 struct rtnl_link_stats64 dev_stats, *stats;
952 bool stats_rewound;
953
954 stats = dev_get_stats(net_dev, &dev_stats);
955 /* No empty packets */
956 stats_rewound =
957 (stats->rx_bytes < iface->last_known[IFS_RX].bytes)
958 || (stats->tx_bytes < iface->last_known[IFS_TX].bytes);
959
960 IF_DEBUG("qtaguid: %s(%s): iface=%p netdev=%p "
961 "bytes rx/tx=%llu/%llu "
962 "active=%d last_known=%d "
963 "stats_rewound=%d\n", __func__,
964 net_dev ? net_dev->name : "?",
965 iface, net_dev,
966 stats->rx_bytes, stats->tx_bytes,
967 iface->active, iface->last_known_valid, stats_rewound);
968
969 if (iface->active && iface->last_known_valid && stats_rewound) {
970 pr_warn_once("qtaguid: iface_stat: %s(%s): "
971 "iface reset its stats unexpectedly\n", __func__,
972 net_dev->name);
973
974 iface->totals[IFS_TX].bytes += iface->last_known[IFS_TX].bytes;
975 iface->totals[IFS_TX].packets +=
976 iface->last_known[IFS_TX].packets;
977 iface->totals[IFS_RX].bytes += iface->last_known[IFS_RX].bytes;
978 iface->totals[IFS_RX].packets +=
979 iface->last_known[IFS_RX].packets;
980 iface->last_known_valid = false;
981 IF_DEBUG("qtaguid: %s(%s): iface=%p "
982 "used last known bytes rx/tx=%llu/%llu\n", __func__,
983 iface->ifname, iface, iface->last_known[IFS_RX].bytes,
984 iface->last_known[IFS_TX].bytes);
985 }
986}
987
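
A worked example of the adjustment above: suppose last_known stashed tx = 1,000,000 bytes, then the device reset its counters and now reports tx = 4,000 bytes. Since 4,000 < 1,000,000, stats_rewound is true, so the 1,000,000 stashed bytes are folded into totals (they would otherwise be lost) and last_known_valid is cleared; the fresh 4,000 bytes are picked up later by iface_stat_update().
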
988/*
989 * Create a new entry for tracking the specified interface.
990 * Do nothing if the entry already exists.
991 * Called when an interface is configured with a valid IP address.
992 */
993static void iface_stat_create(struct net_device *net_dev,
994 struct in_ifaddr *ifa)
995{
996 struct in_device *in_dev = NULL;
997 const char *ifname;
998 struct iface_stat *entry;
999 __be32 ipaddr = 0;
1000 struct iface_stat *new_iface;
1001
1002 IF_DEBUG("qtaguid: iface_stat: create(%s): ifa=%p netdev=%p\n",
1003 net_dev ? net_dev->name : "?",
1004 ifa, net_dev);
1005 if (!net_dev) {
1006 pr_err("qtaguid: iface_stat: create(): no net dev\n");
1007 return;
1008 }
1009
1010 ifname = net_dev->name;
1011 if (!ifa) {
1012 in_dev = in_dev_get(net_dev);
1013 if (!in_dev) {
1014 pr_err("qtaguid: iface_stat: create(%s): no inet dev\n",
1015 ifname);
1016 return;
1017 }
1018 IF_DEBUG("qtaguid: iface_stat: create(%s): in_dev=%p\n",
1019 ifname, in_dev);
1020 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
1021 IF_DEBUG("qtaguid: iface_stat: create(%s): "
1022 "ifa=%p ifa_label=%s\n",
1023 ifname, ifa,
1024 ifa->ifa_label ? ifa->ifa_label : "(null)");
1025 if (ifa->ifa_label && !strcmp(ifname, ifa->ifa_label))
1026 break;
1027 }
1028 }
1029
1030 if (!ifa) {
1031 IF_DEBUG("qtaguid: iface_stat: create(%s): no matching IP\n",
1032 ifname);
1033 goto done_put;
1034 }
1035 ipaddr = ifa->ifa_local;
1036
1037 spin_lock_bh(&iface_stat_list_lock);
1038 entry = get_iface_entry(ifname);
1039 if (entry != NULL) {
1040 bool activate = !ipv4_is_loopback(ipaddr);
1041 IF_DEBUG("qtaguid: iface_stat: create(%s): entry=%p\n",
1042 ifname, entry);
1043 iface_check_stats_reset_and_adjust(net_dev, entry);
1044 _iface_stat_set_active(entry, net_dev, activate);
1045 IF_DEBUG("qtaguid: %s(%s): "
1046 "tracking now %d on ip=%pI4\n", __func__,
1047 entry->ifname, activate, &ipaddr);
1048 goto done_unlock_put;
1049 } else if (ipv4_is_loopback(ipaddr)) {
1050 IF_DEBUG("qtaguid: iface_stat: create(%s): "
1051 "ignore loopback dev. ip=%pI4\n", ifname, &ipaddr);
1052 goto done_unlock_put;
1053 }
1054
1055 new_iface = iface_alloc(net_dev);
1056 IF_DEBUG("qtaguid: iface_stat: create(%s): done "
1057 "entry=%p ip=%pI4\n", ifname, new_iface, &ipaddr);
1058done_unlock_put:
1059 spin_unlock_bh(&iface_stat_list_lock);
1060done_put:
1061 if (in_dev)
1062 in_dev_put(in_dev);
1063}
1064
1065static void iface_stat_create_ipv6(struct net_device *net_dev,
1066 struct inet6_ifaddr *ifa)
1067{
1068 struct in_device *in_dev;
1069 const char *ifname;
1070 struct iface_stat *entry;
1071 struct iface_stat *new_iface;
1072 int addr_type;
1073
1074 IF_DEBUG("qtaguid: iface_stat: create6(): ifa=%p netdev=%p->name=%s\n",
1075 ifa, net_dev, net_dev ? net_dev->name : "");
1076 if (!net_dev) {
1077 pr_err("qtaguid: iface_stat: create6(): no net dev!\n");
1078 return;
1079 }
1080 ifname = net_dev->name;
1081
1082 in_dev = in_dev_get(net_dev);
1083 if (!in_dev) {
1084 pr_err("qtaguid: iface_stat: create6(%s): no inet dev\n",
1085 ifname);
1086 return;
1087 }
1088
1089 IF_DEBUG("qtaguid: iface_stat: create6(%s): in_dev=%p\n",
1090 ifname, in_dev);
1091
1092 if (!ifa) {
1093 IF_DEBUG("qtaguid: iface_stat: create6(%s): no matching IP\n",
1094 ifname);
1095 goto done_put;
1096 }
1097 addr_type = ipv6_addr_type(&ifa->addr);
1098
1099 spin_lock_bh(&iface_stat_list_lock);
1100 entry = get_iface_entry(ifname);
1101 if (entry != NULL) {
1102 bool activate = !(addr_type & IPV6_ADDR_LOOPBACK);
1103 IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
1104 ifname, entry);
1105 iface_check_stats_reset_and_adjust(net_dev, entry);
1106 _iface_stat_set_active(entry, net_dev, activate);
1107 IF_DEBUG("qtaguid: %s(%s): "
1108 "tracking now %d on ip=%pI6c\n", __func__,
1109 entry->ifname, activate, &ifa->addr);
1110 goto done_unlock_put;
1111 } else if (addr_type & IPV6_ADDR_LOOPBACK) {
1112 IF_DEBUG("qtaguid: %s(%s): "
1113 "ignore loopback dev. ip=%pI6c\n", __func__,
1114 ifname, &ifa->addr);
1115 goto done_unlock_put;
1116 }
1117
1118 new_iface = iface_alloc(net_dev);
1119 IF_DEBUG("qtaguid: iface_stat: create6(%s): done "
1120 "entry=%p ip=%pI6c\n", ifname, new_iface, &ifa->addr);
1121
1122done_unlock_put:
1123 spin_unlock_bh(&iface_stat_list_lock);
1124done_put:
1125 in_dev_put(in_dev);
1126}
1127
1128static struct sock_tag *get_sock_stat_nl(const struct sock *sk)
1129{
1130 MT_DEBUG("qtaguid: get_sock_stat_nl(sk=%p)\n", sk);
1131 return sock_tag_tree_search(&sock_tag_tree, sk);
1132}
1133
1134static struct sock_tag *get_sock_stat(const struct sock *sk)
1135{
1136 struct sock_tag *sock_tag_entry;
1137 MT_DEBUG("qtaguid: get_sock_stat(sk=%p)\n", sk);
1138 if (!sk)
1139 return NULL;
1140 spin_lock_bh(&sock_tag_list_lock);
1141 sock_tag_entry = get_sock_stat_nl(sk);
1142 spin_unlock_bh(&sock_tag_list_lock);
1143 return sock_tag_entry;
1144}
1145
1146static void
1147data_counters_update(struct data_counters *dc, int set,
1148 enum ifs_tx_rx direction, int proto, int bytes)
1149{
1150 switch (proto) {
1151 case IPPROTO_TCP:
1152 dc_add_byte_packets(dc, set, direction, IFS_TCP, bytes, 1);
1153 break;
1154 case IPPROTO_UDP:
1155 dc_add_byte_packets(dc, set, direction, IFS_UDP, bytes, 1);
1156 break;
1157 case IPPROTO_IP:
1158 default:
1159 dc_add_byte_packets(dc, set, direction, IFS_PROTO_OTHER, bytes,
1160 1);
1161 break;
1162 }
1163}
1164
1165/*
1166 * Update stats for the specified interface. Do nothing if the entry
1167 * does not exist (when a device was never configured with an IP address).
 1168 * Called when a device is being unregistered.
1169 */
1170static void iface_stat_update(struct net_device *net_dev, bool stash_only)
1171{
1172 struct rtnl_link_stats64 dev_stats, *stats;
1173 struct iface_stat *entry;
1174
1175 stats = dev_get_stats(net_dev, &dev_stats);
1176 spin_lock_bh(&iface_stat_list_lock);
1177 entry = get_iface_entry(net_dev->name);
1178 if (entry == NULL) {
1179 IF_DEBUG("qtaguid: iface_stat: update(%s): not tracked\n",
1180 net_dev->name);
1181 spin_unlock_bh(&iface_stat_list_lock);
1182 return;
1183 }
1184
1185 IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
1186 net_dev->name, entry);
1187 if (!entry->active) {
1188 IF_DEBUG("qtaguid: %s(%s): already disabled\n", __func__,
1189 net_dev->name);
1190 spin_unlock_bh(&iface_stat_list_lock);
1191 return;
1192 }
1193
1194 if (stash_only) {
1195 entry->last_known[IFS_TX].bytes = stats->tx_bytes;
1196 entry->last_known[IFS_TX].packets = stats->tx_packets;
1197 entry->last_known[IFS_RX].bytes = stats->rx_bytes;
1198 entry->last_known[IFS_RX].packets = stats->rx_packets;
1199 entry->last_known_valid = true;
1200 IF_DEBUG("qtaguid: %s(%s): "
1201 "dev stats stashed rx/tx=%llu/%llu\n", __func__,
1202 net_dev->name, stats->rx_bytes, stats->tx_bytes);
1203 spin_unlock_bh(&iface_stat_list_lock);
1204 return;
1205 }
1206 entry->totals[IFS_TX].bytes += stats->tx_bytes;
1207 entry->totals[IFS_TX].packets += stats->tx_packets;
1208 entry->totals[IFS_RX].bytes += stats->rx_bytes;
1209 entry->totals[IFS_RX].packets += stats->rx_packets;
1210 /* We don't need the last_known[] anymore */
1211 entry->last_known_valid = false;
1212 _iface_stat_set_active(entry, net_dev, false);
1213 IF_DEBUG("qtaguid: %s(%s): "
1214 "disable tracking. rx/tx=%llu/%llu\n", __func__,
1215 net_dev->name, stats->rx_bytes, stats->tx_bytes);
1216 spin_unlock_bh(&iface_stat_list_lock);
1217}
1218
1219static void tag_stat_update(struct tag_stat *tag_entry,
1220 enum ifs_tx_rx direction, int proto, int bytes)
1221{
1222 int active_set;
1223 active_set = get_active_counter_set(tag_entry->tn.tag);
1224 MT_DEBUG("qtaguid: tag_stat_update(tag=0x%llx (uid=%u) set=%d "
1225 "dir=%d proto=%d bytes=%d)\n",
1226 tag_entry->tn.tag, get_uid_from_tag(tag_entry->tn.tag),
1227 active_set, direction, proto, bytes);
1228 data_counters_update(&tag_entry->counters, active_set, direction,
1229 proto, bytes);
1230 if (tag_entry->parent_counters)
1231 data_counters_update(tag_entry->parent_counters, active_set,
1232 direction, proto, bytes);
1233}
1234
1235/*
1236 * Create a new entry for tracking the specified {acct_tag,uid_tag} within
1237 * the interface.
1238 * iface_entry->tag_stat_list_lock should be held.
1239 */
1240static struct tag_stat *create_if_tag_stat(struct iface_stat *iface_entry,
1241 tag_t tag)
1242{
1243 struct tag_stat *new_tag_stat_entry = NULL;
1244 IF_DEBUG("qtaguid: iface_stat: %s(): ife=%p tag=0x%llx"
1245 " (uid=%u)\n", __func__,
1246 iface_entry, tag, get_uid_from_tag(tag));
1247 new_tag_stat_entry = kzalloc(sizeof(*new_tag_stat_entry), GFP_ATOMIC);
1248 if (!new_tag_stat_entry) {
1249 pr_err("qtaguid: iface_stat: tag stat alloc failed\n");
1250 goto done;
1251 }
1252 new_tag_stat_entry->tn.tag = tag;
1253 tag_stat_tree_insert(new_tag_stat_entry, &iface_entry->tag_stat_tree);
1254done:
1255 return new_tag_stat_entry;
1256}
1257
1258static void if_tag_stat_update(const char *ifname, uid_t uid,
1259 const struct sock *sk, enum ifs_tx_rx direction,
1260 int proto, int bytes)
1261{
1262 struct tag_stat *tag_stat_entry;
1263 tag_t tag, acct_tag;
1264 tag_t uid_tag;
1265 struct data_counters *uid_tag_counters;
1266 struct sock_tag *sock_tag_entry;
1267 struct iface_stat *iface_entry;
1268 struct tag_stat *new_tag_stat;
1269 MT_DEBUG("qtaguid: if_tag_stat_update(ifname=%s "
1270 "uid=%u sk=%p dir=%d proto=%d bytes=%d)\n",
1271 ifname, uid, sk, direction, proto, bytes);
1272
1273
1274 iface_entry = get_iface_entry(ifname);
1275 if (!iface_entry) {
1276 pr_err("qtaguid: iface_stat: stat_update() %s not found\n",
1277 ifname);
1278 return;
1279 }
1280 /* It is ok to process data when an iface_entry is inactive */
1281
1282 MT_DEBUG("qtaguid: iface_stat: stat_update() dev=%s entry=%p\n",
1283 ifname, iface_entry);
1284
1285 /*
1286 * Look for a tagged sock.
1287 * It will have an acct_uid.
1288 */
1289 sock_tag_entry = get_sock_stat(sk);
1290 if (sock_tag_entry) {
1291 tag = sock_tag_entry->tag;
1292 acct_tag = get_atag_from_tag(tag);
1293 uid_tag = get_utag_from_tag(tag);
1294 } else {
1295 acct_tag = make_atag_from_value(0);
1296 tag = combine_atag_with_uid(acct_tag, uid);
1297 uid_tag = make_tag_from_uid(uid);
1298 }
1299 MT_DEBUG("qtaguid: iface_stat: stat_update(): "
1300 " looking for tag=0x%llx (uid=%u) in ife=%p\n",
1301 tag, get_uid_from_tag(tag), iface_entry);
1302 /* Loop over tag list under this interface for {acct_tag,uid_tag} */
1303 spin_lock_bh(&iface_entry->tag_stat_list_lock);
1304
1305 tag_stat_entry = tag_stat_tree_search(&iface_entry->tag_stat_tree,
1306 tag);
1307 if (tag_stat_entry) {
1308 /*
1309 * Updating the {acct_tag, uid_tag} entry handles both stats:
1310 * {0, uid_tag} will also get updated.
1311 */
1312 tag_stat_update(tag_stat_entry, direction, proto, bytes);
1313 spin_unlock_bh(&iface_entry->tag_stat_list_lock);
1314 return;
1315 }
1316
1317 /* Loop over tag list under this interface for {0,uid_tag} */
1318 tag_stat_entry = tag_stat_tree_search(&iface_entry->tag_stat_tree,
1319 uid_tag);
1320 if (!tag_stat_entry) {
1321 /* Here: the base uid_tag did not exist */
1322 /*
1323 * No parent counters. So
1324 * - No {0, uid_tag} stats and no {acc_tag, uid_tag} stats.
1325 */
1326 new_tag_stat = create_if_tag_stat(iface_entry, uid_tag);
1327 uid_tag_counters = &new_tag_stat->counters;
1328 } else {
1329 uid_tag_counters = &tag_stat_entry->counters;
1330 }
1331
1332 if (acct_tag) {
1333 new_tag_stat = create_if_tag_stat(iface_entry, tag);
1334 new_tag_stat->parent_counters = uid_tag_counters;
1335 }
1336 tag_stat_update(new_tag_stat, direction, proto, bytes);
1337 spin_unlock_bh(&iface_entry->tag_stat_list_lock);
1338}
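
A userspace model of the parent-counter chaining set up above: the {acct_tag, uid} entry points at the counters of its {0, uid} base entry, so a single tag_stat_update() feeds both the per-tag stats and the per-UID totals:

    #include <stdint.h>

    struct counters_model { uint64_t bytes, packets; };

    struct tag_stat_model {
        struct counters_model counters;
        struct counters_model *parent_counters; /* NULL for the {0, uid} entry */
    };

    static void stat_update_model(struct tag_stat_model *ts, uint64_t bytes)
    {
        ts->counters.bytes += bytes;
        ts->counters.packets++;
        if (ts->parent_counters) {          /* also feed the per-UID totals */
            ts->parent_counters->bytes += bytes;
            ts->parent_counters->packets++;
        }
    }
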
1339
1340static int iface_netdev_event_handler(struct notifier_block *nb,
1341 unsigned long event, void *ptr) {
1342 struct net_device *dev = ptr;
1343
1344 if (unlikely(module_passive))
1345 return NOTIFY_DONE;
1346
1347 IF_DEBUG("qtaguid: iface_stat: netdev_event(): "
1348 "ev=0x%lx/%s netdev=%p->name=%s\n",
1349 event, netdev_evt_str(event), dev, dev ? dev->name : "");
1350
1351 switch (event) {
1352 case NETDEV_UP:
1353 iface_stat_create(dev, NULL);
1354 atomic64_inc(&qtu_events.iface_events);
1355 break;
1356 case NETDEV_DOWN:
1357 case NETDEV_UNREGISTER:
1358 iface_stat_update(dev, event == NETDEV_DOWN);
1359 atomic64_inc(&qtu_events.iface_events);
1360 break;
1361 }
1362 return NOTIFY_DONE;
1363}
1364
1365static int iface_inet6addr_event_handler(struct notifier_block *nb,
1366 unsigned long event, void *ptr)
1367{
1368 struct inet6_ifaddr *ifa = ptr;
1369 struct net_device *dev;
1370
1371 if (unlikely(module_passive))
1372 return NOTIFY_DONE;
1373
1374 IF_DEBUG("qtaguid: iface_stat: inet6addr_event(): "
1375 "ev=0x%lx/%s ifa=%p\n",
1376 event, netdev_evt_str(event), ifa);
1377
1378 switch (event) {
1379 case NETDEV_UP:
1380 BUG_ON(!ifa || !ifa->idev);
1381 dev = (struct net_device *)ifa->idev->dev;
1382 iface_stat_create_ipv6(dev, ifa);
1383 atomic64_inc(&qtu_events.iface_events);
1384 break;
1385 case NETDEV_DOWN:
1386 case NETDEV_UNREGISTER:
1387 BUG_ON(!ifa || !ifa->idev);
1388 dev = (struct net_device *)ifa->idev->dev;
1389 iface_stat_update(dev, event == NETDEV_DOWN);
1390 atomic64_inc(&qtu_events.iface_events);
1391 break;
1392 }
1393 return NOTIFY_DONE;
1394}
1395
1396static int iface_inetaddr_event_handler(struct notifier_block *nb,
1397 unsigned long event, void *ptr)
1398{
1399 struct in_ifaddr *ifa = ptr;
1400 struct net_device *dev;
1401
1402 if (unlikely(module_passive))
1403 return NOTIFY_DONE;
1404
1405 IF_DEBUG("qtaguid: iface_stat: inetaddr_event(): "
1406 "ev=0x%lx/%s ifa=%p\n",
1407 event, netdev_evt_str(event), ifa);
1408
1409 switch (event) {
1410 case NETDEV_UP:
1411 BUG_ON(!ifa || !ifa->ifa_dev);
1412 dev = ifa->ifa_dev->dev;
1413 iface_stat_create(dev, ifa);
1414 atomic64_inc(&qtu_events.iface_events);
1415 break;
1416 case NETDEV_DOWN:
1417 case NETDEV_UNREGISTER:
1418 BUG_ON(!ifa || !ifa->ifa_dev);
1419 dev = ifa->ifa_dev->dev;
1420 iface_stat_update(dev, event == NETDEV_DOWN);
1421 atomic64_inc(&qtu_events.iface_events);
1422 break;
1423 }
1424 return NOTIFY_DONE;
1425}
1426
1427static struct notifier_block iface_netdev_notifier_blk = {
1428 .notifier_call = iface_netdev_event_handler,
1429};
1430
1431static struct notifier_block iface_inetaddr_notifier_blk = {
1432 .notifier_call = iface_inetaddr_event_handler,
1433};
1434
1435static struct notifier_block iface_inet6addr_notifier_blk = {
1436 .notifier_call = iface_inet6addr_event_handler,
1437};
1438
1439static int __init iface_stat_init(struct proc_dir_entry *parent_procdir)
1440{
1441 int err;
1442
1443 iface_stat_procdir = proc_mkdir(iface_stat_procdirname, parent_procdir);
1444 if (!iface_stat_procdir) {
1445 pr_err("qtaguid: iface_stat: init failed to create proc entry\n");
 1446		err = -ENOMEM;
1447 goto err;
1448 }
1449
1450 iface_stat_all_procfile = create_proc_entry(iface_stat_all_procfilename,
1451 proc_iface_perms,
1452 parent_procdir);
1453 if (!iface_stat_all_procfile) {
 1454		pr_err("qtaguid: iface_stat: init "
 1455		       "failed to create stat_all proc entry\n");
 1456		err = -ENOMEM;
1457 goto err_zap_entry;
1458 }
1459 iface_stat_all_procfile->read_proc = iface_stat_all_proc_read;
1460
1461
1462 err = register_netdevice_notifier(&iface_netdev_notifier_blk);
1463 if (err) {
1464 pr_err("qtaguid: iface_stat: init "
1465 "failed to register dev event handler\n");
1466 goto err_zap_all_stats_entry;
1467 }
1468 err = register_inetaddr_notifier(&iface_inetaddr_notifier_blk);
1469 if (err) {
1470 pr_err("qtaguid: iface_stat: init "
1471 "failed to register ipv4 dev event handler\n");
1472 goto err_unreg_nd;
1473 }
1474
1475 err = register_inet6addr_notifier(&iface_inet6addr_notifier_blk);
1476 if (err) {
1477 pr_err("qtaguid: iface_stat: init "
1478 "failed to register ipv6 dev event handler\n");
1479 goto err_unreg_ip4_addr;
1480 }
1481 return 0;
1482
1483err_unreg_ip4_addr:
1484 unregister_inetaddr_notifier(&iface_inetaddr_notifier_blk);
1485err_unreg_nd:
1486 unregister_netdevice_notifier(&iface_netdev_notifier_blk);
1487err_zap_all_stats_entry:
1488 remove_proc_entry(iface_stat_all_procfilename, parent_procdir);
1489err_zap_entry:
1490 remove_proc_entry(iface_stat_procdirname, parent_procdir);
1491err:
1492 return err;
1493}
1494
1495static struct sock *qtaguid_find_sk(const struct sk_buff *skb,
1496 struct xt_action_param *par)
1497{
1498 struct sock *sk;
1499 unsigned int hook_mask = (1 << par->hooknum);
1500
1501 MT_DEBUG("qtaguid: find_sk(skb=%p) hooknum=%d family=%d\n", skb,
1502 par->hooknum, par->family);
1503
1504 /*
 1505	 * Let's not abuse xt_socket_get*_sk(), or else it will
1506 * return garbage SKs.
1507 */
1508 if (!(hook_mask & XT_SOCKET_SUPPORTED_HOOKS))
1509 return NULL;
1510
1511 switch (par->family) {
1512 case NFPROTO_IPV6:
1513 sk = xt_socket_get6_sk(skb, par);
1514 break;
1515 case NFPROTO_IPV4:
1516 sk = xt_socket_get4_sk(skb, par);
1517 break;
1518 default:
1519 return NULL;
1520 }
1521
1522 /*
 1523	 * There seem to be issues with the file ptr for TCP_TIME_WAIT SKs.
1524 * http://kerneltrap.org/mailarchive/linux-netdev/2010/10/21/6287959
1525 * Not fixed in 3.0-r3 :(
1526 */
1527 if (sk) {
1528 MT_DEBUG("qtaguid: %p->sk_proto=%u "
1529 "->sk_state=%d\n", sk, sk->sk_protocol, sk->sk_state);
1530 if (sk->sk_state == TCP_TIME_WAIT) {
1531 xt_socket_put_sk(sk);
1532 sk = NULL;
1533 }
1534 }
1535 return sk;
1536}
1537
1538static void account_for_uid(const struct sk_buff *skb,
1539 const struct sock *alternate_sk, uid_t uid,
1540 struct xt_action_param *par)
1541{
1542 const struct net_device *el_dev;
1543
1544 if (!skb->dev) {
1545 MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum);
1546 el_dev = par->in ? : par->out;
1547 } else {
1548 const struct net_device *other_dev;
1549 el_dev = skb->dev;
1550 other_dev = par->in ? : par->out;
1551 if (el_dev != other_dev) {
1552 MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs "
1553 "par->(in/out)=%p %s\n",
1554 par->hooknum, el_dev, el_dev->name, other_dev,
1555 other_dev->name);
1556 }
1557 }
1558
1559 if (unlikely(!el_dev)) {
1560 pr_info("qtaguid[%d]: no par->in/out?!!\n", par->hooknum);
1561 } else if (unlikely(!el_dev->name)) {
1562 pr_info("qtaguid[%d]: no dev->name?!!\n", par->hooknum);
1563 } else {
1564 MT_DEBUG("qtaguid[%d]: dev name=%s type=%d\n",
1565 par->hooknum,
1566 el_dev->name,
1567 el_dev->type);
1568
1569 if_tag_stat_update(el_dev->name, uid,
1570 skb->sk ? skb->sk : alternate_sk,
1571 par->in ? IFS_RX : IFS_TX,
1572 ip_hdr(skb)->protocol, skb->len);
1573 }
1574}
1575
1576static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
1577{
1578 const struct xt_qtaguid_match_info *info = par->matchinfo;
1579 const struct file *filp;
1580 bool got_sock = false;
1581 struct sock *sk;
1582 uid_t sock_uid;
1583 bool res;
1584
1585 if (unlikely(module_passive))
1586 return (info->match ^ info->invert) == 0;
1587
1588 MT_DEBUG("qtaguid[%d]: entered skb=%p par->in=%p/out=%p fam=%d\n",
1589 par->hooknum, skb, par->in, par->out, par->family);
1590
1591 atomic64_inc(&qtu_events.match_calls);
1592 if (skb == NULL) {
1593 res = (info->match ^ info->invert) == 0;
1594 goto ret_res;
1595 }
1596
1597 sk = skb->sk;
1598
1599 if (sk == NULL) {
1600 /*
1601 * A missing sk->sk_socket happens when packets are in-flight
1602 * and the matching socket is already closed and gone.
1603 */
1604 sk = qtaguid_find_sk(skb, par);
1605 /*
1606 * If we got the socket from the find_sk(), we will need to put
1607 * it back, as nf_tproxy_get_sock_v4() got it.
1608 */
1609 got_sock = sk;
1610 if (sk)
1611 atomic64_inc(&qtu_events.match_found_sk_in_ct);
1612 else
1613 atomic64_inc(&qtu_events.match_found_no_sk_in_ct);
1614 } else {
1615 atomic64_inc(&qtu_events.match_found_sk);
1616 }
1617 MT_DEBUG("qtaguid[%d]: sk=%p got_sock=%d proto=%d\n",
1618 par->hooknum, sk, got_sock, ip_hdr(skb)->protocol);
1619 if (sk != NULL) {
1620 MT_DEBUG("qtaguid[%d]: sk=%p->sk_socket=%p->file=%p\n",
1621 par->hooknum, sk, sk->sk_socket,
1622 sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
1623 filp = sk->sk_socket ? sk->sk_socket->file : NULL;
1624 MT_DEBUG("qtaguid[%d]: filp...uid=%u\n",
1625 par->hooknum, filp ? filp->f_cred->fsuid : -1);
1626 }
1627
1628 if (sk == NULL || sk->sk_socket == NULL) {
1629 /*
1630 * Here, the qtaguid_find_sk() using connection tracking
1631 * couldn't find the owner, so for now we just count them
1632 * against the system.
1633 */
1634 /*
1635 * TODO: unhack how to force just accounting.
1636 * For now we only do iface stats when the uid-owner is not
1637 * requested.
1638 */
1639 if (!(info->match & XT_QTAGUID_UID))
1640 account_for_uid(skb, sk, 0, par);
1641 MT_DEBUG("qtaguid[%d]: leaving (sk?sk->sk_socket)=%p\n",
1642 par->hooknum,
1643 sk ? sk->sk_socket : NULL);
1644 res = (info->match ^ info->invert) == 0;
1645 atomic64_inc(&qtu_events.match_no_sk);
1646 goto put_sock_ret_res;
1647 } else if (info->match & info->invert & XT_QTAGUID_SOCKET) {
1648 res = false;
1649 goto put_sock_ret_res;
1650 }
1651 filp = sk->sk_socket->file;
1652 if (filp == NULL) {
1653 MT_DEBUG("qtaguid[%d]: leaving filp=NULL\n", par->hooknum);
1654 account_for_uid(skb, sk, 0, par);
1655 res = ((info->match ^ info->invert) &
1656 (XT_QTAGUID_UID | XT_QTAGUID_GID)) == 0;
1657 atomic64_inc(&qtu_events.match_no_sk_file);
1658 goto put_sock_ret_res;
1659 }
1660 sock_uid = filp->f_cred->fsuid;
1661 /*
1662 * TODO: unhack how to force just accounting.
1663 * For now we only do iface stats when the uid-owner is not requested
1664 */
1665 if (!(info->match & XT_QTAGUID_UID))
1666 account_for_uid(skb, sk, sock_uid, par);
1667
1668 /*
1669 * The following two tests fail the match when:
1670 * id not in range AND no inverted condition requested
1671 * or id in range AND inverted condition requested
1672 * Thus (!a && b) || (a && !b) == a ^ b
1673 */
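	/*
	 * A worked truth table for the checks below, with in_range being
	 * the id comparison and invert the requested inversion bit:
	 *   in_range=1, invert=0: 1 ^ !0 = 0 -> keep matching
	 *   in_range=0, invert=0: 0 ^ !0 = 1 -> fail the match
	 *   in_range=1, invert=1: 1 ^ !1 = 1 -> fail the match
	 *   in_range=0, invert=1: 0 ^ !1 = 0 -> keep matching
	 */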
1674 if (info->match & XT_QTAGUID_UID)
1675 if ((filp->f_cred->fsuid >= info->uid_min &&
1676 filp->f_cred->fsuid <= info->uid_max) ^
1677 !(info->invert & XT_QTAGUID_UID)) {
1678 MT_DEBUG("qtaguid[%d]: leaving uid not matching\n",
1679 par->hooknum);
1680 res = false;
1681 goto put_sock_ret_res;
1682 }
1683 if (info->match & XT_QTAGUID_GID)
1684 if ((filp->f_cred->fsgid >= info->gid_min &&
1685 filp->f_cred->fsgid <= info->gid_max) ^
1686 !(info->invert & XT_QTAGUID_GID)) {
1687 MT_DEBUG("qtaguid[%d]: leaving gid not matching\n",
1688 par->hooknum);
1689 res = false;
1690 goto put_sock_ret_res;
1691 }
1692
1693 MT_DEBUG("qtaguid[%d]: leaving matched\n", par->hooknum);
1694 res = true;
1695
1696put_sock_ret_res:
1697 if (got_sock)
1698 xt_socket_put_sk(sk);
1699ret_res:
1700 MT_DEBUG("qtaguid[%d]: left %d\n", par->hooknum, res);
1701 return res;
1702}
1703
1704#ifdef DDEBUG
1705/* This function is not in xt_qtaguid_print.c because of locks visibility */
1706static void prdebug_full_state(int indent_level, const char *fmt, ...)
1707{
1708 va_list args;
1709 char *fmt_buff;
1710 char *buff;
1711
 1712	if (likely(!(qtaguid_debug_mask & DDEBUG_MASK)))
1713 return;
1714
1715 fmt_buff = kasprintf(GFP_ATOMIC,
1716 "qtaguid: %s(): %s {\n", __func__, fmt);
1717 BUG_ON(!fmt_buff);
1718 va_start(args, fmt);
1719 buff = kvasprintf(GFP_ATOMIC,
1720 fmt_buff, args);
1721 BUG_ON(!buff);
1722 pr_debug("%s", buff);
1723 kfree(fmt_buff);
1724 kfree(buff);
1725 va_end(args);
1726
1727 spin_lock_bh(&sock_tag_list_lock);
1728 prdebug_sock_tag_tree(indent_level, &sock_tag_tree);
1729 spin_unlock_bh(&sock_tag_list_lock);
1730
1731 spin_lock_bh(&sock_tag_list_lock);
1732 spin_lock_bh(&uid_tag_data_tree_lock);
1733 prdebug_uid_tag_data_tree(indent_level, &uid_tag_data_tree);
1734 prdebug_proc_qtu_data_tree(indent_level, &proc_qtu_data_tree);
1735 spin_unlock_bh(&uid_tag_data_tree_lock);
1736 spin_unlock_bh(&sock_tag_list_lock);
1737
1738 spin_lock_bh(&iface_stat_list_lock);
1739 prdebug_iface_stat_list(indent_level, &iface_stat_list);
1740 spin_unlock_bh(&iface_stat_list_lock);
1741
1742 pr_debug("qtaguid: %s(): }\n", __func__);
1743}
1744#else
1745static void prdebug_full_state(int indent_level, const char *fmt, ...) {}
1746#endif
1747
1748/*
1749 * Procfs reader to get all active socket tags using style "1)" as described in
1750 * fs/proc/generic.c
1751 */
1752static int qtaguid_ctrl_proc_read(char *page, char **num_items_returned,
1753 off_t items_to_skip, int char_count, int *eof,
1754 void *data)
1755{
1756 char *outp = page;
1757 int len;
1758 uid_t uid;
1759 struct rb_node *node;
1760 struct sock_tag *sock_tag_entry;
1761 int item_index = 0;
1762 int indent_level = 0;
1763 long f_count;
1764
1765 if (unlikely(module_passive)) {
1766 *eof = 1;
1767 return 0;
1768 }
1769
1770 if (*eof)
1771 return 0;
1772
1773 CT_DEBUG("qtaguid: proc ctrl page=%p off=%ld char_count=%d *eof=%d\n",
1774 page, items_to_skip, char_count, *eof);
1775
1776 spin_lock_bh(&sock_tag_list_lock);
1777 for (node = rb_first(&sock_tag_tree);
1778 node;
1779 node = rb_next(node)) {
1780 if (item_index++ < items_to_skip)
1781 continue;
1782 sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
1783 uid = get_uid_from_tag(sock_tag_entry->tag);
1784 CT_DEBUG("qtaguid: proc_read(): sk=%p tag=0x%llx (uid=%u) "
1785 "pid=%u\n",
1786 sock_tag_entry->sk,
1787 sock_tag_entry->tag,
1788 uid,
1789 sock_tag_entry->pid
1790 );
1791 f_count = atomic_long_read(
1792 &sock_tag_entry->socket->file->f_count);
1793 len = snprintf(outp, char_count,
1794 "sock=%p tag=0x%llx (uid=%u) pid=%u "
1795 "f_count=%lu\n",
1796 sock_tag_entry->sk,
1797 sock_tag_entry->tag, uid,
1798 sock_tag_entry->pid, f_count);
1799 if (len >= char_count) {
1800 spin_unlock_bh(&sock_tag_list_lock);
1801 *outp = '\0';
1802 return outp - page;
1803 }
1804 outp += len;
1805 char_count -= len;
1806 (*num_items_returned)++;
1807 }
1808 spin_unlock_bh(&sock_tag_list_lock);
1809
1810 if (item_index++ >= items_to_skip) {
1811 len = snprintf(outp, char_count,
1812 "events: sockets_tagged=%llu "
1813 "sockets_untagged=%llu "
1814 "counter_set_changes=%llu "
1815 "delete_cmds=%llu "
1816 "iface_events=%llu "
1817 "match_calls=%llu "
1818 "match_found_sk=%llu "
1819 "match_found_sk_in_ct=%llu "
1820 "match_found_no_sk_in_ct=%llu "
1821 "match_no_sk=%llu "
1822 "match_no_sk_file=%llu\n",
1823 atomic64_read(&qtu_events.sockets_tagged),
1824 atomic64_read(&qtu_events.sockets_untagged),
1825 atomic64_read(&qtu_events.counter_set_changes),
1826 atomic64_read(&qtu_events.delete_cmds),
1827 atomic64_read(&qtu_events.iface_events),
1828 atomic64_read(&qtu_events.match_calls),
1829 atomic64_read(&qtu_events.match_found_sk),
1830 atomic64_read(&qtu_events.match_found_sk_in_ct),
1831 atomic64_read(
1832 &qtu_events.match_found_no_sk_in_ct),
1833 atomic64_read(&qtu_events.match_no_sk),
1834 atomic64_read(&qtu_events.match_no_sk_file));
1835 if (len >= char_count) {
1836 *outp = '\0';
1837 return outp - page;
1838 }
1839 outp += len;
1840 char_count -= len;
1841 (*num_items_returned)++;
1842 }
1843
1844 /* Count the following as part of the last item_index */
1845 if (item_index > items_to_skip) {
1846 prdebug_full_state(indent_level, "proc ctrl");
1847 }
1848
1849 *eof = 1;
1850 return outp - page;
1851}
1852
1853/*
 1854 * Delete socket tags and stat tags associated with a given
 1855 * accounting tag and uid.
1856 */
1857static int ctrl_cmd_delete(const char *input)
1858{
1859 char cmd;
1860 uid_t uid;
1861 uid_t entry_uid;
1862 tag_t acct_tag;
1863 tag_t tag;
1864 int res, argc;
1865 struct iface_stat *iface_entry;
1866 struct rb_node *node;
1867 struct sock_tag *st_entry;
1868 struct rb_root st_to_free_tree = RB_ROOT;
1869 struct tag_stat *ts_entry;
1870 struct tag_counter_set *tcs_entry;
1871 struct tag_ref *tr_entry;
1872 struct uid_tag_data *utd_entry;
1873
1874 argc = sscanf(input, "%c %llu %u", &cmd, &acct_tag, &uid);
1875 CT_DEBUG("qtaguid: ctrl_delete(%s): argc=%d cmd=%c "
1876 "user_tag=0x%llx uid=%u\n", input, argc, cmd,
1877 acct_tag, uid);
1878 if (argc < 2) {
1879 res = -EINVAL;
1880 goto err;
1881 }
1882 if (!valid_atag(acct_tag)) {
1883 pr_info("qtaguid: ctrl_delete(%s): invalid tag\n", input);
1884 res = -EINVAL;
1885 goto err;
1886 }
1887 if (argc < 3) {
1888 uid = current_fsuid();
1889 } else if (!can_impersonate_uid(uid)) {
1890 pr_info("qtaguid: ctrl_delete(%s): "
1891 "insufficient priv from pid=%u tgid=%u uid=%u\n",
1892 input, current->pid, current->tgid, current_fsuid());
1893 res = -EPERM;
1894 goto err;
1895 }
1896
1897 tag = combine_atag_with_uid(acct_tag, uid);
1898 CT_DEBUG("qtaguid: ctrl_delete(%s): "
1899 "looking for tag=0x%llx (uid=%u)\n",
1900 input, tag, uid);
1901
1902 /* Delete socket tags */
1903 spin_lock_bh(&sock_tag_list_lock);
1904 node = rb_first(&sock_tag_tree);
1905 while (node) {
1906 st_entry = rb_entry(node, struct sock_tag, sock_node);
1907 entry_uid = get_uid_from_tag(st_entry->tag);
1908 node = rb_next(node);
1909 if (entry_uid != uid)
1910 continue;
1911
1912 CT_DEBUG("qtaguid: ctrl_delete(%s): st tag=0x%llx (uid=%u)\n",
1913 input, st_entry->tag, entry_uid);
1914
1915 if (!acct_tag || st_entry->tag == tag) {
1916 rb_erase(&st_entry->sock_node, &sock_tag_tree);
1917 /* Can't sockfd_put() within spinlock, do it later. */
1918 sock_tag_tree_insert(st_entry, &st_to_free_tree);
1919 tr_entry = lookup_tag_ref(st_entry->tag, NULL);
1920 BUG_ON(tr_entry->num_sock_tags <= 0);
1921 tr_entry->num_sock_tags--;
 1922			/*
 1923			 * TODO: remove this check and start failing.
 1924			 * It is a hack to work around apps that didn't open
 1925			 * /dev/xt_qtaguid: in some places we have
 1926			 * "if (IS_ERR_OR_NULL(pqd_entry))" for the same
 1927			 * reason.
 1928			 */
1929 if (st_entry->list.next && st_entry->list.prev)
1930 list_del(&st_entry->list);
1931 }
1932 }
1933 spin_unlock_bh(&sock_tag_list_lock);
1934
1935 sock_tag_tree_erase(&st_to_free_tree);
1936
1937 /* Delete tag counter-sets */
1938 spin_lock_bh(&tag_counter_set_list_lock);
1939 /* Counter sets are only on the uid tag, not full tag */
1940 tcs_entry = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
1941 if (tcs_entry) {
1942 CT_DEBUG("qtaguid: ctrl_delete(%s): "
1943 "erase tcs: tag=0x%llx (uid=%u) set=%d\n",
1944 input,
1945 tcs_entry->tn.tag,
1946 get_uid_from_tag(tcs_entry->tn.tag),
1947 tcs_entry->active_set);
1948 rb_erase(&tcs_entry->tn.node, &tag_counter_set_tree);
1949 kfree(tcs_entry);
1950 }
1951 spin_unlock_bh(&tag_counter_set_list_lock);
1952
1953 /*
1954 * If acct_tag is 0, then all entries belonging to uid are
1955 * erased.
1956 */
1957 spin_lock_bh(&iface_stat_list_lock);
1958 list_for_each_entry(iface_entry, &iface_stat_list, list) {
1959 spin_lock_bh(&iface_entry->tag_stat_list_lock);
1960 node = rb_first(&iface_entry->tag_stat_tree);
1961 while (node) {
1962 ts_entry = rb_entry(node, struct tag_stat, tn.node);
1963 entry_uid = get_uid_from_tag(ts_entry->tn.tag);
1964 node = rb_next(node);
1965
1966 CT_DEBUG("qtaguid: ctrl_delete(%s): "
1967 "ts tag=0x%llx (uid=%u)\n",
1968 input, ts_entry->tn.tag, entry_uid);
1969
1970 if (entry_uid != uid)
1971 continue;
1972 if (!acct_tag || ts_entry->tn.tag == tag) {
1973 CT_DEBUG("qtaguid: ctrl_delete(%s): "
1974 "erase ts: %s 0x%llx %u\n",
1975 input, iface_entry->ifname,
1976 get_atag_from_tag(ts_entry->tn.tag),
1977 entry_uid);
1978 rb_erase(&ts_entry->tn.node,
1979 &iface_entry->tag_stat_tree);
1980 kfree(ts_entry);
1981 }
1982 }
1983 spin_unlock_bh(&iface_entry->tag_stat_list_lock);
1984 }
1985 spin_unlock_bh(&iface_stat_list_lock);
1986
1987 /* Cleanup the uid_tag_data */
1988 spin_lock_bh(&uid_tag_data_tree_lock);
1989 node = rb_first(&uid_tag_data_tree);
1990 while (node) {
1991 utd_entry = rb_entry(node, struct uid_tag_data, node);
1992 entry_uid = utd_entry->uid;
1993 node = rb_next(node);
1994
1995 CT_DEBUG("qtaguid: ctrl_delete(%s): "
1996 "utd uid=%u\n",
1997 input, entry_uid);
1998
1999 if (entry_uid != uid)
2000 continue;
2001 /*
2002 * Go over the tag_refs, and those that don't have
2003 * sock_tags using them are freed.
2004 */
2005 put_tag_ref_tree(tag, utd_entry);
2006 put_utd_entry(utd_entry);
2007 }
2008 spin_unlock_bh(&uid_tag_data_tree_lock);
2009
2010 atomic64_inc(&qtu_events.delete_cmds);
2011 res = 0;
2012
2013err:
2014 return res;
2015}
2016
2017static int ctrl_cmd_counter_set(const char *input)
2018{
2019 char cmd;
2020 uid_t uid = 0;
2021 tag_t tag;
2022 int res, argc;
2023 struct tag_counter_set *tcs;
2024 int counter_set;
2025
2026 argc = sscanf(input, "%c %d %u", &cmd, &counter_set, &uid);
2027 CT_DEBUG("qtaguid: ctrl_counterset(%s): argc=%d cmd=%c "
2028 "set=%d uid=%u\n", input, argc, cmd,
2029 counter_set, uid);
2030 if (argc != 3) {
2031 res = -EINVAL;
2032 goto err;
2033 }
2034 if (counter_set < 0 || counter_set >= IFS_MAX_COUNTER_SETS) {
2035 pr_info("qtaguid: ctrl_counterset(%s): invalid counter_set range\n",
2036 input);
2037 res = -EINVAL;
2038 goto err;
2039 }
2040 if (!can_manipulate_uids()) {
2041 pr_info("qtaguid: ctrl_counterset(%s): "
2042 "insufficient priv from pid=%u tgid=%u uid=%u\n",
2043 input, current->pid, current->tgid, current_fsuid());
2044 res = -EPERM;
2045 goto err;
2046 }
2047
2048 tag = make_tag_from_uid(uid);
2049 spin_lock_bh(&tag_counter_set_list_lock);
2050 tcs = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
2051 if (!tcs) {
2052 tcs = kzalloc(sizeof(*tcs), GFP_ATOMIC);
2053 if (!tcs) {
2054 spin_unlock_bh(&tag_counter_set_list_lock);
2055 pr_err("qtaguid: ctrl_counterset(%s): "
2056 "failed to alloc counter set\n",
2057 input);
2058 res = -ENOMEM;
2059 goto err;
2060 }
2061 tcs->tn.tag = tag;
2062 tag_counter_set_tree_insert(tcs, &tag_counter_set_tree);
2063 CT_DEBUG("qtaguid: ctrl_counterset(%s): added tcs tag=0x%llx "
2064 "(uid=%u) set=%d\n",
2065 input, tag, get_uid_from_tag(tag), counter_set);
2066 }
2067 tcs->active_set = counter_set;
2068 spin_unlock_bh(&tag_counter_set_list_lock);
2069 atomic64_inc(&qtu_events.counter_set_changes);
2070 res = 0;
2071
2072err:
2073 return res;
2074}
2075
2076static int ctrl_cmd_tag(const char *input)
2077{
2078 char cmd;
2079 int sock_fd = 0;
2080 uid_t uid = 0;
2081 tag_t acct_tag = make_atag_from_value(0);
2082 tag_t full_tag;
2083 struct socket *el_socket;
2084 int res, argc;
2085 struct sock_tag *sock_tag_entry;
2086 struct tag_ref *tag_ref_entry;
2087 struct uid_tag_data *uid_tag_data_entry;
2088 struct proc_qtu_data *pqd_entry;
2089
2090 /* Unassigned args will get defaulted later. */
2091 argc = sscanf(input, "%c %d %llu %u", &cmd, &sock_fd, &acct_tag, &uid);
2092 CT_DEBUG("qtaguid: ctrl_tag(%s): argc=%d cmd=%c sock_fd=%d "
2093 "acct_tag=0x%llx uid=%u\n", input, argc, cmd, sock_fd,
2094 acct_tag, uid);
2095 if (argc < 2) {
2096 res = -EINVAL;
2097 goto err;
2098 }
2099 el_socket = sockfd_lookup(sock_fd, &res); /* This locks the file */
2100 if (!el_socket) {
2101 pr_info("qtaguid: ctrl_tag(%s): failed to lookup"
2102 " sock_fd=%d err=%d\n", input, sock_fd, res);
2103 goto err;
2104 }
2105 CT_DEBUG("qtaguid: ctrl_tag(%s): socket->...->f_count=%ld ->sk=%p\n",
2106 input, atomic_long_read(&el_socket->file->f_count),
2107 el_socket->sk);
2108 if (argc < 3) {
2109 acct_tag = make_atag_from_value(0);
2110 } else if (!valid_atag(acct_tag)) {
2111 pr_info("qtaguid: ctrl_tag(%s): invalid tag\n", input);
2112 res = -EINVAL;
2113 goto err_put;
2114 }
2115 CT_DEBUG("qtaguid: ctrl_tag(%s): "
2116 "pid=%u tgid=%u uid=%u euid=%u fsuid=%u "
2117 "in_group=%d in_egroup=%d\n",
2118 input, current->pid, current->tgid, current_uid(),
2119 current_euid(), current_fsuid(),
2120 in_group_p(proc_ctrl_write_gid),
2121 in_egroup_p(proc_ctrl_write_gid));
2122 if (argc < 4) {
2123 uid = current_fsuid();
2124 } else if (!can_impersonate_uid(uid)) {
2125 pr_info("qtaguid: ctrl_tag(%s): "
2126 "insufficient priv from pid=%u tgid=%u uid=%u\n",
2127 input, current->pid, current->tgid, current_fsuid());
2128 res = -EPERM;
2129 goto err_put;
2130 }
2131 full_tag = combine_atag_with_uid(acct_tag, uid);
2132
2133 spin_lock_bh(&sock_tag_list_lock);
2134 sock_tag_entry = get_sock_stat_nl(el_socket->sk);
2135 tag_ref_entry = get_tag_ref(full_tag, &uid_tag_data_entry);
2136 if (IS_ERR(tag_ref_entry)) {
2137 res = PTR_ERR(tag_ref_entry);
2138 spin_unlock_bh(&sock_tag_list_lock);
2139 goto err_put;
2140 }
2141 tag_ref_entry->num_sock_tags++;
2142 if (sock_tag_entry) {
2143 struct tag_ref *prev_tag_ref_entry;
2144
2145 CT_DEBUG("qtaguid: ctrl_tag(%s): retag for sk=%p "
2146 "st@%p ...->f_count=%ld\n",
2147 input, el_socket->sk, sock_tag_entry,
2148 atomic_long_read(&el_socket->file->f_count));
2149 /*
2150 * This is a re-tagging, so release the sock_fd that was
2151 * locked at the time of the 1st tagging.
2152 * There is still the ref from this call's sockfd_lookup() so
2153 * it can be done within the spinlock.
2154 */
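		/*
		 * Sketch of the file refcount flow across the ctrl commands
		 * (as implemented by ctrl_cmd_tag()/ctrl_cmd_untag()):
		 *   tag:    sockfd_lookup() +1, the ref is kept until untag
		 *   re-tag: sockfd_lookup() +1, the old tag-time ref -1 here
		 *   untag:  tag-time ref -1, plus -1 for its own lookup
		 * So a tagged socket always holds exactly one extra file ref.
		 */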
2155 sockfd_put(sock_tag_entry->socket);
2156 prev_tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag,
2157 &uid_tag_data_entry);
2158 BUG_ON(IS_ERR_OR_NULL(prev_tag_ref_entry));
2159 BUG_ON(prev_tag_ref_entry->num_sock_tags <= 0);
2160 prev_tag_ref_entry->num_sock_tags--;
2161 sock_tag_entry->tag = full_tag;
2162 } else {
2163 CT_DEBUG("qtaguid: ctrl_tag(%s): newtag for sk=%p\n",
2164 input, el_socket->sk);
2165 sock_tag_entry = kzalloc(sizeof(*sock_tag_entry),
2166 GFP_ATOMIC);
2167 if (!sock_tag_entry) {
2168 pr_err("qtaguid: ctrl_tag(%s): "
2169 "socket tag alloc failed\n",
2170 input);
2171 spin_unlock_bh(&sock_tag_list_lock);
2172 res = -ENOMEM;
2173 goto err_tag_unref_put;
2174 }
2175 sock_tag_entry->sk = el_socket->sk;
2176 sock_tag_entry->socket = el_socket;
2177 sock_tag_entry->pid = current->tgid;
2178 sock_tag_entry->tag = combine_atag_with_uid(acct_tag,
2179 uid);
2180 spin_lock_bh(&uid_tag_data_tree_lock);
2181 pqd_entry = proc_qtu_data_tree_search(
2182 &proc_qtu_data_tree, current->tgid);
2183 /*
2184 * TODO: remove if, and start failing.
2185 * At first, we want to catch user-space code that is not
 2186			 * opening /dev/xt_qtaguid.
2187 */
2188 if (IS_ERR_OR_NULL(pqd_entry))
2189 pr_warn_once(
2190 "qtaguid: %s(): "
2191 "User space forgot to open /dev/xt_qtaguid? "
2192 "pid=%u tgid=%u uid=%u\n", __func__,
2193 current->pid, current->tgid,
2194 current_fsuid());
2195 else
2196 list_add(&sock_tag_entry->list,
2197 &pqd_entry->sock_tag_list);
2198 spin_unlock_bh(&uid_tag_data_tree_lock);
2199
2200 sock_tag_tree_insert(sock_tag_entry, &sock_tag_tree);
2201 atomic64_inc(&qtu_events.sockets_tagged);
2202 }
2203 spin_unlock_bh(&sock_tag_list_lock);
2204 /* We keep the ref to the socket (file) until it is untagged */
2205 CT_DEBUG("qtaguid: ctrl_tag(%s): done st@%p ...->f_count=%ld\n",
2206 input, sock_tag_entry,
2207 atomic_long_read(&el_socket->file->f_count));
2208 return 0;
2209
2210err_tag_unref_put:
2211 BUG_ON(tag_ref_entry->num_sock_tags <= 0);
2212 tag_ref_entry->num_sock_tags--;
2213 free_tag_ref_from_utd_entry(tag_ref_entry, uid_tag_data_entry);
2214err_put:
2215 CT_DEBUG("qtaguid: ctrl_tag(%s): done. ...->f_count=%ld\n",
2216 input, atomic_long_read(&el_socket->file->f_count) - 1);
2217 /* Release the sock_fd that was grabbed by sockfd_lookup(). */
2218 sockfd_put(el_socket);
2219 return res;
2220
2221err:
2222 CT_DEBUG("qtaguid: ctrl_tag(%s): done.\n", input);
2223 return res;
2224}
2225
2226static int ctrl_cmd_untag(const char *input)
2227{
2228 char cmd;
2229 int sock_fd = 0;
2230 struct socket *el_socket;
2231 int res, argc;
2232 struct sock_tag *sock_tag_entry;
2233 struct tag_ref *tag_ref_entry;
2234 struct uid_tag_data *utd_entry;
2235 struct proc_qtu_data *pqd_entry;
2236
2237 argc = sscanf(input, "%c %d", &cmd, &sock_fd);
2238 CT_DEBUG("qtaguid: ctrl_untag(%s): argc=%d cmd=%c sock_fd=%d\n",
2239 input, argc, cmd, sock_fd);
2240 if (argc < 2) {
2241 res = -EINVAL;
2242 goto err;
2243 }
2244 el_socket = sockfd_lookup(sock_fd, &res); /* This locks the file */
2245 if (!el_socket) {
2246 pr_info("qtaguid: ctrl_untag(%s): failed to lookup"
2247 " sock_fd=%d err=%d\n", input, sock_fd, res);
2248 goto err;
2249 }
2250 CT_DEBUG("qtaguid: ctrl_untag(%s): socket->...->f_count=%ld ->sk=%p\n",
2251 input, atomic_long_read(&el_socket->file->f_count),
2252 el_socket->sk);
2253 spin_lock_bh(&sock_tag_list_lock);
2254 sock_tag_entry = get_sock_stat_nl(el_socket->sk);
2255 if (!sock_tag_entry) {
2256 spin_unlock_bh(&sock_tag_list_lock);
2257 res = -EINVAL;
2258 goto err_put;
2259 }
2260 /*
2261 * The socket already belongs to the current process
2262 * so it can do whatever it wants to it.
2263 */
2264 rb_erase(&sock_tag_entry->sock_node, &sock_tag_tree);
2265
2266 tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag, &utd_entry);
2267 BUG_ON(!tag_ref_entry);
2268 BUG_ON(tag_ref_entry->num_sock_tags <= 0);
2269 spin_lock_bh(&uid_tag_data_tree_lock);
2270 pqd_entry = proc_qtu_data_tree_search(
2271 &proc_qtu_data_tree, current->tgid);
2272 /*
2273 * TODO: remove if, and start failing.
2274 * At first, we want to catch user-space code that is not
 2275	 * opening /dev/xt_qtaguid.
2276 */
2277 if (IS_ERR_OR_NULL(pqd_entry))
2278 pr_warn_once("qtaguid: %s(): "
2279 "User space forgot to open /dev/xt_qtaguid? "
2280 "pid=%u tgid=%u uid=%u\n", __func__,
2281 current->pid, current->tgid, current_fsuid());
2282 else
2283 list_del(&sock_tag_entry->list);
2284 spin_unlock_bh(&uid_tag_data_tree_lock);
2285 /*
2286 * We don't free tag_ref from the utd_entry here,
2287 * only during a cmd_delete().
2288 */
2289 tag_ref_entry->num_sock_tags--;
2290 spin_unlock_bh(&sock_tag_list_lock);
2291 /*
2292 * Release the sock_fd that was grabbed at tag time,
2293 * and once more for the sockfd_lookup() here.
2294 */
2295 sockfd_put(sock_tag_entry->socket);
2296 CT_DEBUG("qtaguid: ctrl_untag(%s): done. st@%p ...->f_count=%ld\n",
2297 input, sock_tag_entry,
2298 atomic_long_read(&el_socket->file->f_count) - 1);
2299 sockfd_put(el_socket);
2300
2301 kfree(sock_tag_entry);
2302 atomic64_inc(&qtu_events.sockets_untagged);
2303
2304 return 0;
2305
2306err_put:
2307 CT_DEBUG("qtaguid: ctrl_untag(%s): done. socket->...->f_count=%ld\n",
2308 input, atomic_long_read(&el_socket->file->f_count) - 1);
2309 /* Release the sock_fd that was grabbed by sockfd_lookup(). */
2310 sockfd_put(el_socket);
2311 return res;
2312
2313err:
2314 CT_DEBUG("qtaguid: ctrl_untag(%s): done.\n", input);
2315 return res;
2316}
2317
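/*
 * Command grammar accepted by the parser below, derived from the sscanf
 * formats in the ctrl_cmd_*() handlers (bracketed args are optional and
 * default to the caller's fsuid / a zero acct_tag):
 *   "d <acct_tag> [<uid>]"              delete tags/stats
 *   "s <counter_set> <uid>"             switch the active counter set
 *   "t <sock_fd> [<acct_tag> [<uid>]]"  tag the socket
 *   "u <sock_fd>"                       untag the socket
 */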
2318static int qtaguid_ctrl_parse(const char *input, int count)
2319{
2320 char cmd;
2321 int res;
2322
2323 cmd = input[0];
2324 /* Collect params for commands */
2325 switch (cmd) {
2326 case 'd':
2327 res = ctrl_cmd_delete(input);
2328 break;
2329
2330 case 's':
2331 res = ctrl_cmd_counter_set(input);
2332 break;
2333
2334 case 't':
2335 res = ctrl_cmd_tag(input);
2336 break;
2337
2338 case 'u':
2339 res = ctrl_cmd_untag(input);
2340 break;
2341
2342 default:
2343 res = -EINVAL;
2344 goto err;
2345 }
2346 if (!res)
2347 res = count;
2348err:
2349 CT_DEBUG("qtaguid: ctrl(%s): res=%d\n", input, res);
2350 return res;
2351}
2352
2353#define MAX_QTAGUID_CTRL_INPUT_LEN 255
2354static int qtaguid_ctrl_proc_write(struct file *file, const char __user *buffer,
2355 unsigned long count, void *data)
2356{
2357 char input_buf[MAX_QTAGUID_CTRL_INPUT_LEN];
2358
2359 if (unlikely(module_passive))
2360 return count;
2361
2362 if (count >= MAX_QTAGUID_CTRL_INPUT_LEN)
2363 return -EINVAL;
2364
2365 if (copy_from_user(input_buf, buffer, count))
2366 return -EFAULT;
2367
2368 input_buf[count] = '\0';
2369 return qtaguid_ctrl_parse(input_buf, count);
2370}
2371
2372struct proc_print_info {
2373 char *outp;
2374 char **num_items_returned;
2375 struct iface_stat *iface_entry;
2376 struct tag_stat *ts_entry;
2377 int item_index;
2378 int items_to_skip;
2379 int char_count;
2380};
2381
2382static int pp_stats_line(struct proc_print_info *ppi, int cnt_set)
2383{
2384 int len;
2385 struct data_counters *cnts;
2386
2387 if (!ppi->item_index) {
2388 if (ppi->item_index++ < ppi->items_to_skip)
2389 return 0;
2390 len = snprintf(ppi->outp, ppi->char_count,
2391 "idx iface acct_tag_hex uid_tag_int cnt_set "
2392 "rx_bytes rx_packets "
2393 "tx_bytes tx_packets "
2394 "rx_tcp_bytes rx_tcp_packets "
2395 "rx_udp_bytes rx_udp_packets "
2396 "rx_other_bytes rx_other_packets "
2397 "tx_tcp_bytes tx_tcp_packets "
2398 "tx_udp_bytes tx_udp_packets "
2399 "tx_other_bytes tx_other_packets\n");
2400 } else {
2401 tag_t tag = ppi->ts_entry->tn.tag;
2402 uid_t stat_uid = get_uid_from_tag(tag);
2403
2404 if (!can_read_other_uid_stats(stat_uid)) {
2405 CT_DEBUG("qtaguid: stats line: "
2406 "%s 0x%llx %u: insufficient priv "
2407 "from pid=%u tgid=%u uid=%u\n",
2408 ppi->iface_entry->ifname,
2409 get_atag_from_tag(tag), stat_uid,
2410 current->pid, current->tgid, current_fsuid());
2411 return 0;
2412 }
2413 if (ppi->item_index++ < ppi->items_to_skip)
2414 return 0;
2415 cnts = &ppi->ts_entry->counters;
2416 len = snprintf(
2417 ppi->outp, ppi->char_count,
2418 "%d %s 0x%llx %u %u "
2419 "%llu %llu "
2420 "%llu %llu "
2421 "%llu %llu "
2422 "%llu %llu "
2423 "%llu %llu "
2424 "%llu %llu "
2425 "%llu %llu "
2426 "%llu %llu\n",
2427 ppi->item_index,
2428 ppi->iface_entry->ifname,
2429 get_atag_from_tag(tag),
2430 stat_uid,
2431 cnt_set,
2432 dc_sum_bytes(cnts, cnt_set, IFS_RX),
2433 dc_sum_packets(cnts, cnt_set, IFS_RX),
2434 dc_sum_bytes(cnts, cnt_set, IFS_TX),
2435 dc_sum_packets(cnts, cnt_set, IFS_TX),
2436 cnts->bpc[cnt_set][IFS_RX][IFS_TCP].bytes,
2437 cnts->bpc[cnt_set][IFS_RX][IFS_TCP].packets,
2438 cnts->bpc[cnt_set][IFS_RX][IFS_UDP].bytes,
2439 cnts->bpc[cnt_set][IFS_RX][IFS_UDP].packets,
2440 cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].bytes,
2441 cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].packets,
2442 cnts->bpc[cnt_set][IFS_TX][IFS_TCP].bytes,
2443 cnts->bpc[cnt_set][IFS_TX][IFS_TCP].packets,
2444 cnts->bpc[cnt_set][IFS_TX][IFS_UDP].bytes,
2445 cnts->bpc[cnt_set][IFS_TX][IFS_UDP].packets,
2446 cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].bytes,
2447 cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].packets);
2448 }
2449 return len;
2450}
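/*
 * Illustration with made-up numbers: for acct_tag 0x100000000 and
 * uid 10003 on wlan0, counter set 0, a line following the header above
 * would look like:
 *   2 wlan0 0x100000000 10003 0 1024 8 512 4 ...
 */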
2451
2452static bool pp_sets(struct proc_print_info *ppi)
2453{
2454 int len;
2455 int counter_set;
2456 for (counter_set = 0; counter_set < IFS_MAX_COUNTER_SETS;
2457 counter_set++) {
2458 len = pp_stats_line(ppi, counter_set);
2459 if (len >= ppi->char_count) {
2460 *ppi->outp = '\0';
2461 return false;
2462 }
2463 if (len) {
2464 ppi->outp += len;
2465 ppi->char_count -= len;
2466 (*ppi->num_items_returned)++;
2467 }
2468 }
2469 return true;
2470}
2471
2472/*
2473 * Procfs reader to get all tag stats using style "1)" as described in
2474 * fs/proc/generic.c
2475 * Groups all protocols tx/rx bytes.
2476 */
2477static int qtaguid_stats_proc_read(char *page, char **num_items_returned,
2478 off_t items_to_skip, int char_count, int *eof,
2479 void *data)
2480{
2481 struct proc_print_info ppi;
2482 int len;
2483
2484 ppi.outp = page;
2485 ppi.item_index = 0;
2486 ppi.char_count = char_count;
2487 ppi.num_items_returned = num_items_returned;
2488 ppi.items_to_skip = items_to_skip;
2489
2490 if (unlikely(module_passive)) {
2491 len = pp_stats_line(&ppi, 0);
2492 /* The header should always be shorter than the buffer. */
2493 BUG_ON(len >= ppi.char_count);
2494 (*num_items_returned)++;
2495 *eof = 1;
2496 return len;
2497 }
2498
2499 CT_DEBUG("qtaguid:proc stats page=%p *num_items_returned=%p off=%ld "
2500 "char_count=%d *eof=%d\n", page, *num_items_returned,
2501 items_to_skip, char_count, *eof);
2502
2503 if (*eof)
2504 return 0;
2505
2506 /* The idx is there to help debug when things go belly up. */
2507 len = pp_stats_line(&ppi, 0);
2508 /* Don't advance the outp unless the whole line was printed */
2509 if (len >= ppi.char_count) {
2510 *ppi.outp = '\0';
2511 return ppi.outp - page;
2512 }
2513 if (len) {
2514 ppi.outp += len;
2515 ppi.char_count -= len;
2516 (*num_items_returned)++;
2517 }
2518
2519 spin_lock_bh(&iface_stat_list_lock);
2520 list_for_each_entry(ppi.iface_entry, &iface_stat_list, list) {
2521 struct rb_node *node;
2522 spin_lock_bh(&ppi.iface_entry->tag_stat_list_lock);
2523 for (node = rb_first(&ppi.iface_entry->tag_stat_tree);
2524 node;
2525 node = rb_next(node)) {
2526 ppi.ts_entry = rb_entry(node, struct tag_stat, tn.node);
2527 if (!pp_sets(&ppi)) {
2528 spin_unlock_bh(
2529 &ppi.iface_entry->tag_stat_list_lock);
2530 spin_unlock_bh(&iface_stat_list_lock);
2531 return ppi.outp - page;
2532 }
2533 }
2534 spin_unlock_bh(&ppi.iface_entry->tag_stat_list_lock);
2535 }
2536 spin_unlock_bh(&iface_stat_list_lock);
2537
2538 *eof = 1;
2539 return ppi.outp - page;
2540}
2541
2542/*------------------------------------------*/
2543static int qtudev_open(struct inode *inode, struct file *file)
2544{
2545 struct uid_tag_data *utd_entry;
2546 struct proc_qtu_data *pqd_entry;
2547 struct proc_qtu_data *new_pqd_entry;
2548 int res;
2549 bool utd_entry_found;
2550
2551 if (unlikely(qtu_proc_handling_passive))
2552 return 0;
2553
2554 DR_DEBUG("qtaguid: qtudev_open(): pid=%u tgid=%u uid=%u\n",
2555 current->pid, current->tgid, current_fsuid());
2556
2557 spin_lock_bh(&uid_tag_data_tree_lock);
2558
2559 /* Look for existing uid data, or alloc one. */
2560 utd_entry = get_uid_data(current_fsuid(), &utd_entry_found);
2561 if (IS_ERR_OR_NULL(utd_entry)) {
2562 res = PTR_ERR(utd_entry);
2563 goto err;
2564 }
2565
2566 /* Look for existing PID based proc_data */
2567 pqd_entry = proc_qtu_data_tree_search(&proc_qtu_data_tree,
2568 current->tgid);
2569 if (pqd_entry) {
2570 pr_err("qtaguid: qtudev_open(): %u/%u %u "
2571 "%s already opened\n",
2572 current->pid, current->tgid, current_fsuid(),
2573 QTU_DEV_NAME);
2574 res = -EBUSY;
2575 goto err_unlock_free_utd;
2576 }
2577
2578 new_pqd_entry = kzalloc(sizeof(*new_pqd_entry), GFP_ATOMIC);
2579 if (!new_pqd_entry) {
2580 pr_err("qtaguid: qtudev_open(): %u/%u %u: "
2581 "proc data alloc failed\n",
2582 current->pid, current->tgid, current_fsuid());
2583 res = -ENOMEM;
2584 goto err_unlock_free_utd;
2585 }
2586 new_pqd_entry->pid = current->tgid;
2587 INIT_LIST_HEAD(&new_pqd_entry->sock_tag_list);
2588 new_pqd_entry->parent_tag_data = utd_entry;
2589 utd_entry->num_pqd++;
2590
2591 proc_qtu_data_tree_insert(new_pqd_entry,
2592 &proc_qtu_data_tree);
2593
2594 spin_unlock_bh(&uid_tag_data_tree_lock);
2595 DR_DEBUG("qtaguid: tracking data for uid=%u in pqd=%p\n",
2596 current_fsuid(), new_pqd_entry);
2597 file->private_data = new_pqd_entry;
2598 return 0;
2599
2600err_unlock_free_utd:
2601 if (!utd_entry_found) {
2602 rb_erase(&utd_entry->node, &uid_tag_data_tree);
2603 kfree(utd_entry);
2604 }
2605 spin_unlock_bh(&uid_tag_data_tree_lock);
2606err:
2607 return res;
2608}
2609
2610static int qtudev_release(struct inode *inode, struct file *file)
2611{
2612 struct proc_qtu_data *pqd_entry = file->private_data;
2613 struct uid_tag_data *utd_entry = pqd_entry->parent_tag_data;
2614 struct sock_tag *st_entry;
2615 struct rb_root st_to_free_tree = RB_ROOT;
2616 struct list_head *entry, *next;
2617 struct tag_ref *tr;
2618
2619 if (unlikely(qtu_proc_handling_passive))
2620 return 0;
2621
2622 /*
2623 * Do not trust the current->pid, it might just be a kworker cleaning
2624 * up after a dead proc.
2625 */
2626 DR_DEBUG("qtaguid: qtudev_release(): "
2627 "pid=%u tgid=%u uid=%u "
2628 "pqd_entry=%p->pid=%u utd_entry=%p->active_tags=%d\n",
2629 current->pid, current->tgid, pqd_entry->parent_tag_data->uid,
2630 pqd_entry, pqd_entry->pid, utd_entry,
2631 utd_entry->num_active_tags);
2632
2633 spin_lock_bh(&sock_tag_list_lock);
2634 spin_lock_bh(&uid_tag_data_tree_lock);
2635
2636 list_for_each_safe(entry, next, &pqd_entry->sock_tag_list) {
2637 st_entry = list_entry(entry, struct sock_tag, list);
2638 DR_DEBUG("qtaguid: %s(): "
2639 "erase sock_tag=%p->sk=%p pid=%u tgid=%u uid=%u\n",
2640 __func__,
2641 st_entry, st_entry->sk,
2642 current->pid, current->tgid,
2643 pqd_entry->parent_tag_data->uid);
2644
2645 utd_entry = uid_tag_data_tree_search(
2646 &uid_tag_data_tree,
2647 get_uid_from_tag(st_entry->tag));
2648 BUG_ON(IS_ERR_OR_NULL(utd_entry));
2649 DR_DEBUG("qtaguid: %s(): "
2650 "looking for tag=0x%llx in utd_entry=%p\n", __func__,
2651 st_entry->tag, utd_entry);
2652 tr = tag_ref_tree_search(&utd_entry->tag_ref_tree,
2653 st_entry->tag);
2654 BUG_ON(!tr);
2655 BUG_ON(tr->num_sock_tags <= 0);
2656 tr->num_sock_tags--;
2657 free_tag_ref_from_utd_entry(tr, utd_entry);
2658
2659 rb_erase(&st_entry->sock_node, &sock_tag_tree);
2660 list_del(&st_entry->list);
2661 /* Can't sockfd_put() within spinlock, do it later. */
2662 sock_tag_tree_insert(st_entry, &st_to_free_tree);
2663
2664 /*
2665 * Try to free the utd_entry if no other proc_qtu_data is
2666 * using it (num_pqd is 0) and it doesn't have active tags
2667 * (num_active_tags is 0).
2668 */
2669 put_utd_entry(utd_entry);
2670 }
2671
2672 rb_erase(&pqd_entry->node, &proc_qtu_data_tree);
2673 BUG_ON(pqd_entry->parent_tag_data->num_pqd < 1);
2674 pqd_entry->parent_tag_data->num_pqd--;
2675 put_utd_entry(pqd_entry->parent_tag_data);
2676 kfree(pqd_entry);
2677 file->private_data = NULL;
2678
2679 spin_unlock_bh(&uid_tag_data_tree_lock);
2680 spin_unlock_bh(&sock_tag_list_lock);
2681
2682
2683 sock_tag_tree_erase(&st_to_free_tree);
2684
2685 prdebug_full_state(0, "%s(): pid=%u tgid=%u", __func__,
2686 current->pid, current->tgid);
2687 return 0;
2688}
2689
2690/*------------------------------------------*/
2691static const struct file_operations qtudev_fops = {
2692 .owner = THIS_MODULE,
2693 .open = qtudev_open,
2694 .release = qtudev_release,
2695};
2696
2697static struct miscdevice qtu_device = {
2698 .minor = MISC_DYNAMIC_MINOR,
2699 .name = QTU_DEV_NAME,
2700 .fops = &qtudev_fops,
2701 /* How sad it doesn't allow for defaults: .mode = S_IRUGO | S_IWUSR */
2702};
2703
2704/*------------------------------------------*/
2705static int __init qtaguid_proc_register(struct proc_dir_entry **res_procdir)
2706{
2707 int ret;
2708 *res_procdir = proc_mkdir(module_procdirname, init_net.proc_net);
2709 if (!*res_procdir) {
2710 pr_err("qtaguid: failed to create proc/.../xt_qtaguid\n");
2711 ret = -ENOMEM;
2712 goto no_dir;
2713 }
2714
2715 xt_qtaguid_ctrl_file = create_proc_entry("ctrl", proc_ctrl_perms,
2716 *res_procdir);
2717 if (!xt_qtaguid_ctrl_file) {
2718 pr_err("qtaguid: failed to create xt_qtaguid/ctrl "
2719 " file\n");
2720 ret = -ENOMEM;
2721 goto no_ctrl_entry;
2722 }
2723 xt_qtaguid_ctrl_file->read_proc = qtaguid_ctrl_proc_read;
2724 xt_qtaguid_ctrl_file->write_proc = qtaguid_ctrl_proc_write;
2725
2726 xt_qtaguid_stats_file = create_proc_entry("stats", proc_stats_perms,
2727 *res_procdir);
2728 if (!xt_qtaguid_stats_file) {
2729 pr_err("qtaguid: failed to create xt_qtaguid/stats "
2730 "file\n");
2731 ret = -ENOMEM;
2732 goto no_stats_entry;
2733 }
2734 xt_qtaguid_stats_file->read_proc = qtaguid_stats_proc_read;
2735 /*
2736 * TODO: add support counter hacking
2737 * xt_qtaguid_stats_file->write_proc = qtaguid_stats_proc_write;
2738 */
2739 return 0;
2740
2741no_stats_entry:
2742 remove_proc_entry("ctrl", *res_procdir);
2743no_ctrl_entry:
2744 remove_proc_entry("xt_qtaguid", NULL);
2745no_dir:
2746 return ret;
2747}
2748
2749static struct xt_match qtaguid_mt_reg __read_mostly = {
2750 /*
2751 * This module masquerades as the "owner" module so that iptables
2752 * tools can deal with it.
2753 */
2754 .name = "owner",
2755 .revision = 1,
2756 .family = NFPROTO_UNSPEC,
2757 .match = qtaguid_mt,
2758 .matchsize = sizeof(struct xt_qtaguid_match_info),
2759 .me = THIS_MODULE,
2760};
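/*
 * For reference, a plain iptables rule using the masqueraded "owner"
 * match registered above (the uid is a hypothetical example):
 *   iptables -A OUTPUT -m owner --uid-owner 10003 -j DROP
 * The tagging/accounting extras are driven through the xt_qtaguid
 * ctrl/stats proc files, not through extra iptables options.
 */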
2761
2762static int __init qtaguid_mt_init(void)
2763{
2764 if (qtaguid_proc_register(&xt_qtaguid_procdir)
2765 || iface_stat_init(xt_qtaguid_procdir)
2766 || xt_register_match(&qtaguid_mt_reg)
2767 || misc_register(&qtu_device))
2768 return -1;
2769 return 0;
2770}
2771
2772/*
2773 * TODO: allow unloading of the module.
2774 * For now stats are permanent.
 2775 * Kconfig forces 'y/n' and never an 'm'.
2776 */
2777
2778module_init(qtaguid_mt_init);
2779MODULE_AUTHOR("jpa <jpa@google.com>");
2780MODULE_DESCRIPTION("Xtables: socket owner+tag matching and associated stats");
2781MODULE_LICENSE("GPL");
2782MODULE_ALIAS("ipt_owner");
2783MODULE_ALIAS("ip6t_owner");
2784MODULE_ALIAS("ipt_qtaguid");
2785MODULE_ALIAS("ip6t_qtaguid");
diff --git a/net/netfilter/xt_qtaguid_internal.h b/net/netfilter/xt_qtaguid_internal.h
new file mode 100644
index 00000000000..02479d6d317
--- /dev/null
+++ b/net/netfilter/xt_qtaguid_internal.h
@@ -0,0 +1,330 @@
1/*
2 * Kernel iptables module to track stats for packets based on user tags.
3 *
4 * (C) 2011 Google, Inc
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef __XT_QTAGUID_INTERNAL_H__
11#define __XT_QTAGUID_INTERNAL_H__
12
13#include <linux/types.h>
14#include <linux/rbtree.h>
15#include <linux/spinlock_types.h>
16#include <linux/workqueue.h>
17
18/* Iface handling */
19#define IDEBUG_MASK (1<<0)
20/* Iptable Matching. Per packet. */
21#define MDEBUG_MASK (1<<1)
22/* Red-black tree handling. Per packet. */
23#define RDEBUG_MASK (1<<2)
24/* procfs ctrl/stats handling */
25#define CDEBUG_MASK (1<<3)
26/* dev and resource tracking */
27#define DDEBUG_MASK (1<<4)
28
 29/* E.g. (IDEBUG_MASK | CDEBUG_MASK | DDEBUG_MASK) */
30#define DEFAULT_DEBUG_MASK 0
31
32/*
33 * (Un)Define these *DEBUG to compile out/in the pr_debug calls.
34 * All undef: text size ~ 0x3030; all def: ~ 0x4404.
35 */
36#define IDEBUG
37#define MDEBUG
38#define RDEBUG
39#define CDEBUG
40#define DDEBUG
41
42#define MSK_DEBUG(mask, ...) do { \
43 if (unlikely(qtaguid_debug_mask & (mask))) \
44 pr_debug(__VA_ARGS__); \
45 } while (0)
46#ifdef IDEBUG
47#define IF_DEBUG(...) MSK_DEBUG(IDEBUG_MASK, __VA_ARGS__)
48#else
49#define IF_DEBUG(...) no_printk(__VA_ARGS__)
50#endif
51#ifdef MDEBUG
52#define MT_DEBUG(...) MSK_DEBUG(MDEBUG_MASK, __VA_ARGS__)
53#else
54#define MT_DEBUG(...) no_printk(__VA_ARGS__)
55#endif
56#ifdef RDEBUG
57#define RB_DEBUG(...) MSK_DEBUG(RDEBUG_MASK, __VA_ARGS__)
58#else
59#define RB_DEBUG(...) no_printk(__VA_ARGS__)
60#endif
61#ifdef CDEBUG
62#define CT_DEBUG(...) MSK_DEBUG(CDEBUG_MASK, __VA_ARGS__)
63#else
64#define CT_DEBUG(...) no_printk(__VA_ARGS__)
65#endif
66#ifdef DDEBUG
67#define DR_DEBUG(...) MSK_DEBUG(DDEBUG_MASK, __VA_ARGS__)
68#else
69#define DR_DEBUG(...) no_printk(__VA_ARGS__)
70#endif
71
72extern uint qtaguid_debug_mask;
73
74/*---------------------------------------------------------------------------*/
75/*
76 * Tags:
77 *
78 * They represent what the data usage counters will be tracked against.
79 * By default a tag is just based on the UID.
 80 * The UID is used as the base for policing, and cannot be ignored.
81 * So a tag will always at least represent a UID (uid_tag).
82 *
83 * A tag can be augmented with an "accounting tag" which is associated
84 * with a UID.
85 * User space can set the acct_tag portion of the tag which is then used
86 * with sockets: all data belonging to that socket will be counted against the
87 * tag. The policing is then based on the tag's uid_tag portion,
88 * and stats are collected for the acct_tag portion separately.
89 *
90 * There could be
91 * a: {acct_tag=1, uid_tag=10003}
92 * b: {acct_tag=2, uid_tag=10003}
93 * c: {acct_tag=3, uid_tag=10003}
94 * d: {acct_tag=0, uid_tag=10003}
95 * a, b, and c represent tags associated with specific sockets.
96 * d is for the totals for that uid, including all untagged traffic.
97 * Typically d is used with policing/quota rules.
98 *
99 * We want tag_t big enough to distinguish uid_t and acct_tag.
100 * It might become a struct if needed.
101 * Nothing should be using it as an int.
102 */
103typedef uint64_t tag_t; /* Only used via accessors */
104
105#define TAG_UID_MASK 0xFFFFFFFFULL
106#define TAG_ACCT_MASK (~0xFFFFFFFFULL)
107
108static inline int tag_compare(tag_t t1, tag_t t2)
109{
110 return t1 < t2 ? -1 : t1 == t2 ? 0 : 1;
111}
112
113static inline tag_t combine_atag_with_uid(tag_t acct_tag, uid_t uid)
114{
115 return acct_tag | uid;
116}
117static inline tag_t make_tag_from_uid(uid_t uid)
118{
119 return uid;
120}
121static inline uid_t get_uid_from_tag(tag_t tag)
122{
123 return tag & TAG_UID_MASK;
124}
125static inline tag_t get_utag_from_tag(tag_t tag)
126{
127 return tag & TAG_UID_MASK;
128}
129static inline tag_t get_atag_from_tag(tag_t tag)
130{
131 return tag & TAG_ACCT_MASK;
132}
133
134static inline bool valid_atag(tag_t tag)
135{
136 return !(tag & TAG_UID_MASK);
137}
138static inline tag_t make_atag_from_value(uint32_t value)
139{
140 return (uint64_t)value << 32;
141}
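/*
 * Worked example of the accessors above, for uid 10003 (0x2713) and an
 * acct_tag made from the value 1:
 *   make_atag_from_value(1)               == 0x0000000100000000
 *   combine_atag_with_uid(atag, 10003)    == 0x0000000100002713
 *   get_uid_from_tag(0x0000000100002713)  == 10003
 *   get_atag_from_tag(0x0000000100002713) == 0x0000000100000000
 *   valid_atag(0x0000000100000000)        == true (no uid bits set)
 */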
142/*---------------------------------------------------------------------------*/
143
144/*
145 * Maximum number of socket tags that a UID is allowed to have active.
146 * Multiple processes belonging to the same UID contribute towards this limit.
147 * Special UIDs that can impersonate a UID also contribute (e.g. download
148 * manager, ...)
149 */
150#define DEFAULT_MAX_SOCK_TAGS 1024
151
152/*
153 * For now we only track 2 sets of counters.
154 * The default set is 0.
155 * Userspace can activate another set for a given uid being tracked.
156 */
157#define IFS_MAX_COUNTER_SETS 2
158
159enum ifs_tx_rx {
160 IFS_TX,
161 IFS_RX,
162 IFS_MAX_DIRECTIONS
163};
164
165/* For now, TCP, UDP, the rest */
166enum ifs_proto {
167 IFS_TCP,
168 IFS_UDP,
169 IFS_PROTO_OTHER,
170 IFS_MAX_PROTOS
171};
172
173struct byte_packet_counters {
174 uint64_t bytes;
175 uint64_t packets;
176};
177
178struct data_counters {
179 struct byte_packet_counters bpc[IFS_MAX_COUNTER_SETS][IFS_MAX_DIRECTIONS][IFS_MAX_PROTOS];
180};
181
 182/* Generic tag-keyed node used as a base for rb_tree ops */
183struct tag_node {
184 struct rb_node node;
185 tag_t tag;
186};
187
188struct tag_stat {
189 struct tag_node tn;
190 struct data_counters counters;
191 /*
192 * If this tag is acct_tag based, we need to count against the
193 * matching parent uid_tag.
194 */
195 struct data_counters *parent_counters;
196};
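/*
 * E.g. a packet billed to {acct_tag=2, uid_tag=10003} bumps that entry's
 * counters and, through parent_counters, the {acct_tag=0, uid_tag=10003}
 * entry as well, so the per-uid totals always include tagged traffic.
 */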
197
198struct iface_stat {
199 struct list_head list; /* in iface_stat_list */
200 char *ifname;
201 bool active;
202 /* net_dev is only valid for active iface_stat */
203 struct net_device *net_dev;
204
205 struct byte_packet_counters totals[IFS_MAX_DIRECTIONS];
206 /*
207 * We keep the last_known, because some devices reset their counters
208 * just before NETDEV_UP, while some will reset just before
209 * NETDEV_REGISTER (which is more normal).
210 * So now, if the device didn't do a NETDEV_UNREGISTER and we see
 211	 * its current dev stats smaller than what was previously known, we
212 * assume an UNREGISTER and just use the last_known.
213 */
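	/*
	 * E.g. if 1000 rx bytes were known at the last NETDEV_DOWN and the
	 * device later reports only 10, a missed UNREGISTER is assumed and
	 * the saved last_known totals are used instead of the smaller value.
	 */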
214 struct byte_packet_counters last_known[IFS_MAX_DIRECTIONS];
215 /* last_known is usable when last_known_valid is true */
216 bool last_known_valid;
217
218 struct proc_dir_entry *proc_ptr;
219
220 struct rb_root tag_stat_tree;
221 spinlock_t tag_stat_list_lock;
222};
223
224/* This is needed to create proc_dir_entries from atomic context. */
225struct iface_stat_work {
226 struct work_struct iface_work;
227 struct iface_stat *iface_entry;
228};
229
230/*
 231 * Track the tag that this socket is transferring data for; it is not
 232 * necessarily the uid that owns the socket.
233 * This is the tag against which tag_stat.counters will be billed.
234 * These structs need to be looked up by sock and pid.
235 */
236struct sock_tag {
237 struct rb_node sock_node;
238 struct sock *sk; /* Only used as a number, never dereferenced */
239 /* The socket is needed for sockfd_put() */
240 struct socket *socket;
241 /* Used to associate with a given pid */
242 struct list_head list; /* in proc_qtu_data.sock_tag_list */
243 pid_t pid;
244
245 tag_t tag;
246};
247
248struct qtaguid_event_counts {
249 /* Various successful events */
250 atomic64_t sockets_tagged;
251 atomic64_t sockets_untagged;
252 atomic64_t counter_set_changes;
253 atomic64_t delete_cmds;
254 atomic64_t iface_events; /* Number of NETDEV_* events handled */
255
256 atomic64_t match_calls; /* Number of times iptables called mt */
257 /*
258 * match_found_sk_*: numbers related to the netfilter matching
259 * function finding a sock for the sk_buff.
260 * Total skbs processed is sum(match_found*).
261 */
262 atomic64_t match_found_sk; /* An sk was already in the sk_buff. */
263 /* The connection tracker had or didn't have the sk. */
264 atomic64_t match_found_sk_in_ct;
265 atomic64_t match_found_no_sk_in_ct;
266 /*
267 * No sk could be found. No apparent owner. Could happen with
268 * unsolicited traffic.
269 */
270 atomic64_t match_no_sk;
271 /*
272 * The file ptr in the sk_socket wasn't there.
273 * This might happen for traffic while the socket is being closed.
274 */
275 atomic64_t match_no_sk_file;
276};
277
278/* Track the set active_set for the given tag. */
279struct tag_counter_set {
280 struct tag_node tn;
281 int active_set;
282};
283
284/*----------------------------------------------*/
285/*
286 * The qtu uid data is used to track resources that are created directly or
287 * indirectly by processes (uid tracked).
288 * It is shared by the processes with the same uid.
 289 * Some of the resources are counted to prevent further rogue allocations;
 290 * some will need freeing once the owner process (uid) exits.
291 */
292struct uid_tag_data {
293 struct rb_node node;
294 uid_t uid;
295
296 /*
297 * For the uid, how many accounting tags have been set.
298 */
299 int num_active_tags;
300 /* Track the number of proc_qtu_data that reference it */
301 int num_pqd;
302 struct rb_root tag_ref_tree;
303 /* No tag_node_tree_lock; use uid_tag_data_tree_lock */
304};
305
306struct tag_ref {
307 struct tag_node tn;
308
309 /*
310 * This tracks the number of active sockets that have a tag on them
311 * which matches this tag_ref.tn.tag.
312 * A tag ref can live on after the sockets are untagged.
313 * A tag ref can only be removed during a tag delete command.
314 */
315 int num_sock_tags;
316};
317
318struct proc_qtu_data {
319 struct rb_node node;
320 pid_t pid;
321
322 struct uid_tag_data *parent_tag_data;
323
324 /* Tracks the sock_tags that need freeing upon this proc's death */
325 struct list_head sock_tag_list;
326 /* No spinlock_t sock_tag_list_lock; use the global one. */
327};
328
329/*----------------------------------------------*/
330#endif /* ifndef __XT_QTAGUID_INTERNAL_H__ */
diff --git a/net/netfilter/xt_qtaguid_print.c b/net/netfilter/xt_qtaguid_print.c
new file mode 100644
index 00000000000..39176785c91
--- /dev/null
+++ b/net/netfilter/xt_qtaguid_print.c
@@ -0,0 +1,556 @@
1/*
2 * Pretty printing Support for iptables xt_qtaguid module.
3 *
4 * (C) 2011 Google, Inc
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11/*
12 * Most of the functions in this file just waste time if DEBUG is not defined.
13 * The matching xt_qtaguid_print.h will static inline empty funcs if the needed
 14 * debug flags are not defined.
15 * Those funcs that fail to allocate memory will panic as there is no need to
 16 * hobble along just pretending to do the requested work.
17 */
18
19#define DEBUG
20
21#include <linux/fs.h>
22#include <linux/gfp.h>
23#include <linux/net.h>
24#include <linux/rbtree.h>
25#include <linux/slab.h>
26#include <linux/spinlock_types.h>
27
28
29#include "xt_qtaguid_internal.h"
30#include "xt_qtaguid_print.h"
31
32#ifdef DDEBUG
33
34static void _bug_on_err_or_null(void *ptr)
35{
36 if (IS_ERR_OR_NULL(ptr)) {
37 pr_err("qtaguid: kmalloc failed\n");
38 BUG();
39 }
40}
41
42char *pp_tag_t(tag_t *tag)
43{
44 char *res;
45
46 if (!tag)
47 res = kasprintf(GFP_ATOMIC, "tag_t@null{}");
48 else
49 res = kasprintf(GFP_ATOMIC,
50 "tag_t@%p{tag=0x%llx, uid=%u}",
51 tag, *tag, get_uid_from_tag(*tag));
52 _bug_on_err_or_null(res);
53 return res;
54}
55
56char *pp_data_counters(struct data_counters *dc, bool showValues)
57{
58 char *res;
59
60 if (!dc)
61 res = kasprintf(GFP_ATOMIC, "data_counters@null{}");
62 else if (showValues)
63 res = kasprintf(
64 GFP_ATOMIC, "data_counters@%p{"
65 "set0{"
66 "rx{"
67 "tcp{b=%llu, p=%llu}, "
68 "udp{b=%llu, p=%llu},"
69 "other{b=%llu, p=%llu}}, "
70 "tx{"
71 "tcp{b=%llu, p=%llu}, "
72 "udp{b=%llu, p=%llu},"
73 "other{b=%llu, p=%llu}}}, "
74 "set1{"
75 "rx{"
76 "tcp{b=%llu, p=%llu}, "
77 "udp{b=%llu, p=%llu},"
78 "other{b=%llu, p=%llu}}, "
79 "tx{"
80 "tcp{b=%llu, p=%llu}, "
81 "udp{b=%llu, p=%llu},"
82 "other{b=%llu, p=%llu}}}}",
83 dc,
84 dc->bpc[0][IFS_RX][IFS_TCP].bytes,
85 dc->bpc[0][IFS_RX][IFS_TCP].packets,
86 dc->bpc[0][IFS_RX][IFS_UDP].bytes,
87 dc->bpc[0][IFS_RX][IFS_UDP].packets,
88 dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].bytes,
89 dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].packets,
90 dc->bpc[0][IFS_TX][IFS_TCP].bytes,
91 dc->bpc[0][IFS_TX][IFS_TCP].packets,
92 dc->bpc[0][IFS_TX][IFS_UDP].bytes,
93 dc->bpc[0][IFS_TX][IFS_UDP].packets,
94 dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].bytes,
95 dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].packets,
96 dc->bpc[1][IFS_RX][IFS_TCP].bytes,
97 dc->bpc[1][IFS_RX][IFS_TCP].packets,
98 dc->bpc[1][IFS_RX][IFS_UDP].bytes,
99 dc->bpc[1][IFS_RX][IFS_UDP].packets,
100 dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].bytes,
101 dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].packets,
102 dc->bpc[1][IFS_TX][IFS_TCP].bytes,
103 dc->bpc[1][IFS_TX][IFS_TCP].packets,
104 dc->bpc[1][IFS_TX][IFS_UDP].bytes,
105 dc->bpc[1][IFS_TX][IFS_UDP].packets,
106 dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].bytes,
107 dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].packets);
108 else
109 res = kasprintf(GFP_ATOMIC, "data_counters@%p{...}", dc);
110 _bug_on_err_or_null(res);
111 return res;
112}
113
114char *pp_tag_node(struct tag_node *tn)
115{
116 char *tag_str;
117 char *res;
118
119 if (!tn) {
120 res = kasprintf(GFP_ATOMIC, "tag_node@null{}");
121 _bug_on_err_or_null(res);
122 return res;
123 }
124 tag_str = pp_tag_t(&tn->tag);
125 res = kasprintf(GFP_ATOMIC,
126 "tag_node@%p{tag=%s}",
127 tn, tag_str);
128 _bug_on_err_or_null(res);
129 kfree(tag_str);
130 return res;
131}
132
133char *pp_tag_ref(struct tag_ref *tr)
134{
135 char *tn_str;
136 char *res;
137
138 if (!tr) {
139 res = kasprintf(GFP_ATOMIC, "tag_ref@null{}");
140 _bug_on_err_or_null(res);
141 return res;
142 }
143 tn_str = pp_tag_node(&tr->tn);
144 res = kasprintf(GFP_ATOMIC,
145 "tag_ref@%p{%s, num_sock_tags=%d}",
146 tr, tn_str, tr->num_sock_tags);
147 _bug_on_err_or_null(res);
148 kfree(tn_str);
149 return res;
150}
151
152char *pp_tag_stat(struct tag_stat *ts)
153{
154 char *tn_str;
155 char *counters_str;
156 char *parent_counters_str;
157 char *res;
158
159 if (!ts) {
160 res = kasprintf(GFP_ATOMIC, "tag_stat@null{}");
161 _bug_on_err_or_null(res);
162 return res;
163 }
164 tn_str = pp_tag_node(&ts->tn);
165 counters_str = pp_data_counters(&ts->counters, true);
166 parent_counters_str = pp_data_counters(ts->parent_counters, false);
167 res = kasprintf(GFP_ATOMIC,
168 "tag_stat@%p{%s, counters=%s, parent_counters=%s}",
169 ts, tn_str, counters_str, parent_counters_str);
170 _bug_on_err_or_null(res);
171 kfree(tn_str);
172 kfree(counters_str);
173 kfree(parent_counters_str);
174 return res;
175}
176
177char *pp_iface_stat(struct iface_stat *is)
178{
179 char *res;
180 if (!is)
181 res = kasprintf(GFP_ATOMIC, "iface_stat@null{}");
182 else
183 res = kasprintf(GFP_ATOMIC, "iface_stat@%p{"
184 "list=list_head{...}, "
185 "ifname=%s, "
186 "total={rx={bytes=%llu, "
187 "packets=%llu}, "
188 "tx={bytes=%llu, "
189 "packets=%llu}}, "
190 "last_known_valid=%d, "
191 "last_known={rx={bytes=%llu, "
192 "packets=%llu}, "
193 "tx={bytes=%llu, "
194 "packets=%llu}}, "
195 "active=%d, "
196 "net_dev=%p, "
197 "proc_ptr=%p, "
198 "tag_stat_tree=rb_root{...}}",
199 is,
200 is->ifname,
201 is->totals[IFS_RX].bytes,
202 is->totals[IFS_RX].packets,
203 is->totals[IFS_TX].bytes,
204 is->totals[IFS_TX].packets,
205 is->last_known_valid,
206 is->last_known[IFS_RX].bytes,
207 is->last_known[IFS_RX].packets,
208 is->last_known[IFS_TX].bytes,
209 is->last_known[IFS_TX].packets,
210 is->active,
211 is->net_dev,
212 is->proc_ptr);
213 _bug_on_err_or_null(res);
214 return res;
215}
216
217char *pp_sock_tag(struct sock_tag *st)
218{
219 char *tag_str;
220 char *res;
221
222 if (!st) {
223 res = kasprintf(GFP_ATOMIC, "sock_tag@null{}");
224 _bug_on_err_or_null(res);
225 return res;
226 }
227 tag_str = pp_tag_t(&st->tag);
228 res = kasprintf(GFP_ATOMIC, "sock_tag@%p{"
229 "sock_node=rb_node{...}, "
230 "sk=%p socket=%p (f_count=%lu), list=list_head{...}, "
231 "pid=%u, tag=%s}",
232 st, st->sk, st->socket, atomic_long_read(
233 &st->socket->file->f_count),
234 st->pid, tag_str);
235 _bug_on_err_or_null(res);
236 kfree(tag_str);
237 return res;
238}
239
240char *pp_uid_tag_data(struct uid_tag_data *utd)
241{
242 char *res;
243
244 if (!utd)
245 res = kasprintf(GFP_ATOMIC, "uid_tag_data@null{}");
246 else
247 res = kasprintf(GFP_ATOMIC, "uid_tag_data@%p{"
248 "uid=%u, num_active_acct_tags=%d, "
249 "num_pqd=%d, "
250 "tag_node_tree=rb_root{...}, "
251 "proc_qtu_data_tree=rb_root{...}}",
252 utd, utd->uid,
253 utd->num_active_tags, utd->num_pqd);
254 _bug_on_err_or_null(res);
255 return res;
256}
257
258char *pp_proc_qtu_data(struct proc_qtu_data *pqd)
259{
260 char *parent_tag_data_str;
261 char *res;
262
263 if (!pqd) {
264 res = kasprintf(GFP_ATOMIC, "proc_qtu_data@null{}");
265 _bug_on_err_or_null(res);
266 return res;
267 }
268 parent_tag_data_str = pp_uid_tag_data(pqd->parent_tag_data);
269 res = kasprintf(GFP_ATOMIC, "proc_qtu_data@%p{"
270 "node=rb_node{...}, pid=%u, "
271 "parent_tag_data=%s, "
272 "sock_tag_list=list_head{...}}",
273 pqd, pqd->pid, parent_tag_data_str
274 );
275 _bug_on_err_or_null(res);
276 kfree(parent_tag_data_str);
277 return res;
278}
279
280/*------------------------------------------*/
281void prdebug_sock_tag_tree(int indent_level,
282 struct rb_root *sock_tag_tree)
283{
284 struct rb_node *node;
285 struct sock_tag *sock_tag_entry;
286 char *str;
287
288 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
289 return;
290
291 if (RB_EMPTY_ROOT(sock_tag_tree)) {
292 str = "sock_tag_tree=rb_root{}";
293 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
294 return;
295 }
296
297 str = "sock_tag_tree=rb_root{";
298 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
299 indent_level++;
300 for (node = rb_first(sock_tag_tree);
301 node;
302 node = rb_next(node)) {
303 sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
304 str = pp_sock_tag(sock_tag_entry);
305 pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
306 kfree(str);
307 }
308 indent_level--;
309 str = "}";
310 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
311}
312
313void prdebug_sock_tag_list(int indent_level,
314 struct list_head *sock_tag_list)
315{
316 struct sock_tag *sock_tag_entry;
317 char *str;
318
319 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
320 return;
321
322 if (list_empty(sock_tag_list)) {
323 str = "sock_tag_list=list_head{}";
324 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
325 return;
326 }
327
328 str = "sock_tag_list=list_head{";
329 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
330 indent_level++;
331 list_for_each_entry(sock_tag_entry, sock_tag_list, list) {
332 str = pp_sock_tag(sock_tag_entry);
333 pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
334 kfree(str);
335 }
336 indent_level--;
337 str = "}";
338 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
339}
340
341void prdebug_proc_qtu_data_tree(int indent_level,
342 struct rb_root *proc_qtu_data_tree)
343{
344 char *str;
345 struct rb_node *node;
346 struct proc_qtu_data *proc_qtu_data_entry;
347
348 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
349 return;
350
351 if (RB_EMPTY_ROOT(proc_qtu_data_tree)) {
352 str = "proc_qtu_data_tree=rb_root{}";
353 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
354 return;
355 }
356
357 str = "proc_qtu_data_tree=rb_root{";
358 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
359 indent_level++;
360 for (node = rb_first(proc_qtu_data_tree);
361 node;
362 node = rb_next(node)) {
363 proc_qtu_data_entry = rb_entry(node,
364 struct proc_qtu_data,
365 node);
366 str = pp_proc_qtu_data(proc_qtu_data_entry);
367 pr_debug("%*d: %s,\n", indent_level*2, indent_level,
368 str);
369 kfree(str);
370 indent_level++;
371 prdebug_sock_tag_list(indent_level,
372 &proc_qtu_data_entry->sock_tag_list);
373 indent_level--;
374
375 }
376 indent_level--;
377 str = "}";
378 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
379}
380
381void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree)
382{
383 char *str;
384 struct rb_node *node;
385 struct tag_ref *tag_ref_entry;
386
387 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
388 return;
389
390 if (RB_EMPTY_ROOT(tag_ref_tree)) {
391 str = "tag_ref_tree{}";
392 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
393 return;
394 }
395
396 str = "tag_ref_tree{";
397 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
398 indent_level++;
399 for (node = rb_first(tag_ref_tree);
400 node;
401 node = rb_next(node)) {
402 tag_ref_entry = rb_entry(node,
403 struct tag_ref,
404 tn.node);
405 str = pp_tag_ref(tag_ref_entry);
406 pr_debug("%*d: %s,\n", indent_level*2, indent_level,
407 str);
408 kfree(str);
409 }
410 indent_level--;
411 str = "}";
412 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
413}
414
415void prdebug_uid_tag_data_tree(int indent_level,
416 struct rb_root *uid_tag_data_tree)
417{
418 char *str;
419 struct rb_node *node;
420 struct uid_tag_data *uid_tag_data_entry;
421
422 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
423 return;
424
425 if (RB_EMPTY_ROOT(uid_tag_data_tree)) {
426 str = "uid_tag_data_tree=rb_root{}";
427 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
428 return;
429 }
430
431 str = "uid_tag_data_tree=rb_root{";
432 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
433 indent_level++;
434 for (node = rb_first(uid_tag_data_tree);
435 node;
436 node = rb_next(node)) {
437 uid_tag_data_entry = rb_entry(node, struct uid_tag_data,
438 node);
439 str = pp_uid_tag_data(uid_tag_data_entry);
440 pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
441 kfree(str);
442 if (!RB_EMPTY_ROOT(&uid_tag_data_entry->tag_ref_tree)) {
443 indent_level++;
444 prdebug_tag_ref_tree(indent_level,
445 &uid_tag_data_entry->tag_ref_tree);
446 indent_level--;
447 }
448 }
449 indent_level--;
450 str = "}";
451 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
452}
453
454void prdebug_tag_stat_tree(int indent_level,
455 struct rb_root *tag_stat_tree)
456{
457 char *str;
458 struct rb_node *node;
459 struct tag_stat *ts_entry;
460
461 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
462 return;
463
464 if (RB_EMPTY_ROOT(tag_stat_tree)) {
465 str = "tag_stat_tree{}";
466 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
467 return;
468 }
469
470 str = "tag_stat_tree{";
471 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
472 indent_level++;
473 for (node = rb_first(tag_stat_tree);
474 node;
475 node = rb_next(node)) {
476 ts_entry = rb_entry(node, struct tag_stat, tn.node);
477 str = pp_tag_stat(ts_entry);
478 pr_debug("%*d: %s\n", indent_level*2, indent_level,
479 str);
480 kfree(str);
481 }
482 indent_level--;
483 str = "}";
484 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
485}
486
487void prdebug_iface_stat_list(int indent_level,
488 struct list_head *iface_stat_list)
489{
490 char *str;
491 struct iface_stat *iface_entry;
492
493 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
494 return;
495
496 if (list_empty(iface_stat_list)) {
497 str = "iface_stat_list=list_head{}";
498 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
499 return;
500 }
501
502 str = "iface_stat_list=list_head{";
503 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
504 indent_level++;
505 list_for_each_entry(iface_entry, iface_stat_list, list) {
506 str = pp_iface_stat(iface_entry);
507 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
508 kfree(str);
509
510 spin_lock_bh(&iface_entry->tag_stat_list_lock);
511 if (!RB_EMPTY_ROOT(&iface_entry->tag_stat_tree)) {
512 indent_level++;
513 prdebug_tag_stat_tree(indent_level,
514 &iface_entry->tag_stat_tree);
515 indent_level--;
516 }
517 spin_unlock_bh(&iface_entry->tag_stat_list_lock);
518 }
519 indent_level--;
520 str = "}";
521 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
522}
523
524#endif /* ifdef DDEBUG */
525/*------------------------------------------*/
526static const char * const netdev_event_strings[] = {
527 "netdev_unknown",
528 "NETDEV_UP",
529 "NETDEV_DOWN",
530 "NETDEV_REBOOT",
531 "NETDEV_CHANGE",
532 "NETDEV_REGISTER",
533 "NETDEV_UNREGISTER",
534 "NETDEV_CHANGEMTU",
535 "NETDEV_CHANGEADDR",
536 "NETDEV_GOING_DOWN",
537 "NETDEV_CHANGENAME",
538 "NETDEV_FEAT_CHANGE",
539 "NETDEV_BONDING_FAILOVER",
540 "NETDEV_PRE_UP",
541 "NETDEV_PRE_TYPE_CHANGE",
542 "NETDEV_POST_TYPE_CHANGE",
543 "NETDEV_POST_INIT",
544 "NETDEV_UNREGISTER_BATCH",
545 "NETDEV_RELEASE",
546 "NETDEV_NOTIFY_PEERS",
547 "NETDEV_JOIN",
548};
549
550const char *netdev_evt_str(int netdev_event)
551{
552 if (netdev_event < 0
553 || netdev_event >= ARRAY_SIZE(netdev_event_strings))
554 return "bad event num";
555 return netdev_event_strings[netdev_event];
556}
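
netdev_evt_str() is meant to be fed the event code of a netdevice notifier callback. A minimal sketch of such a call site (hypothetical handler; the actual consumer lives in xt_qtaguid.c and is not part of this hunk, signature assumed from the era's notifier API where ptr is the net_device itself):

/* Sketch only: a netdevice notifier that logs events by name. */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	pr_debug("qtaguid: netdev_event(): ev=0x%lx/%s netdev=%p->name=%s\n",
		 event, netdev_evt_str(event), dev, dev->name);
	return NOTIFY_DONE;
}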
diff --git a/net/netfilter/xt_qtaguid_print.h b/net/netfilter/xt_qtaguid_print.h
new file mode 100644
index 00000000000..b63871a0be5
--- /dev/null
+++ b/net/netfilter/xt_qtaguid_print.h
@@ -0,0 +1,120 @@
1/*
2 * Pretty-printing support for the iptables xt_qtaguid module.
3 *
4 * (C) 2011 Google, Inc
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef __XT_QTAGUID_PRINT_H__
11#define __XT_QTAGUID_PRINT_H__
12
13#include "xt_qtaguid_internal.h"
14
15#ifdef DDEBUG
16
17char *pp_tag_t(tag_t *tag);
18char *pp_data_counters(struct data_counters *dc, bool showValues);
19char *pp_tag_node(struct tag_node *tn);
20char *pp_tag_ref(struct tag_ref *tr);
21char *pp_tag_stat(struct tag_stat *ts);
22char *pp_iface_stat(struct iface_stat *is);
23char *pp_sock_tag(struct sock_tag *st);
24char *pp_uid_tag_data(struct uid_tag_data *qtd);
25char *pp_proc_qtu_data(struct proc_qtu_data *pqd);
26
27/*------------------------------------------*/
28void prdebug_sock_tag_list(int indent_level,
29 struct list_head *sock_tag_list);
30void prdebug_sock_tag_tree(int indent_level,
31 struct rb_root *sock_tag_tree);
32void prdebug_proc_qtu_data_tree(int indent_level,
33 struct rb_root *proc_qtu_data_tree);
34void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree);
35void prdebug_uid_tag_data_tree(int indent_level,
36 struct rb_root *uid_tag_data_tree);
37void prdebug_tag_stat_tree(int indent_level,
38 struct rb_root *tag_stat_tree);
39void prdebug_iface_stat_list(int indent_level,
40 struct list_head *iface_stat_list);
41
42#else
43
44/*------------------------------------------*/
45static inline char *pp_tag_t(tag_t *tag)
46{
47 return NULL;
48}
49static inline char *pp_data_counters(struct data_counters *dc, bool showValues)
50{
51 return NULL;
52}
53static inline char *pp_tag_node(struct tag_node *tn)
54{
55 return NULL;
56}
57static inline char *pp_tag_ref(struct tag_ref *tr)
58{
59 return NULL;
60}
61static inline char *pp_tag_stat(struct tag_stat *ts)
62{
63 return NULL;
64}
65static inline char *pp_iface_stat(struct iface_stat *is)
66{
67 return NULL;
68}
69static inline char *pp_sock_tag(struct sock_tag *st)
70{
71 return NULL;
72}
73static inline char *pp_uid_tag_data(struct uid_tag_data *qtd)
74{
75 return NULL;
76}
77static inline char *pp_proc_qtu_data(struct proc_qtu_data *pqd)
78{
79 return NULL;
80}
81
82/*------------------------------------------*/
83static inline
84void prdebug_sock_tag_list(int indent_level,
85 struct list_head *sock_tag_list)
86{
87}
88static inline
89void prdebug_sock_tag_tree(int indent_level,
90 struct rb_root *sock_tag_tree)
91{
92}
93static inline
94void prdebug_proc_qtu_data_tree(int indent_level,
95 struct rb_root *proc_qtu_data_tree)
96{
97}
98static inline
99void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree)
100{
101}
102static inline
103void prdebug_uid_tag_data_tree(int indent_level,
104 struct rb_root *uid_tag_data_tree)
105{
106}
107static inline
108void prdebug_tag_stat_tree(int indent_level,
109 struct rb_root *tag_stat_tree)
110{
111}
112static inline
113void prdebug_iface_stat_list(int indent_level,
114 struct list_head *iface_stat_list)
115{
116}
117#endif
118/*------------------------------------------*/
119const char *netdev_evt_str(int netdev_event);
120#endif /* ifndef __XT_QTAGUID_PRINT_H__ */
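
One consequence of the stub definitions above: a call site can be written once and behaves sensibly with either DDEBUG setting, because the pp_*() stubs return NULL and kfree(NULL) is a no-op. A sketch (hypothetical call site, not from this patch):

static void example_dump(struct sock_tag *st)
{
	char *str = pp_sock_tag(st);	/* NULL when DDEBUG is unset */

	pr_debug("qtaguid: st=%s\n", str ? str : "(debug off)");
	kfree(str);			/* kfree(NULL) is harmless */
}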
diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c
new file mode 100644
index 00000000000..3c72bea2dd6
--- /dev/null
+++ b/net/netfilter/xt_quota2.c
@@ -0,0 +1,381 @@
1/*
2 * xt_quota2 - enhanced xt_quota that can count upwards and in packets
3 * as a minimal accounting match.
4 * by Jan Engelhardt <jengelh@medozas.de>, 2008
5 *
6 * Originally based on xt_quota.c:
7 * netfilter module to enforce network quotas
8 * Sam Johnston <samj@samj.net>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License; either
12 * version 2 of the License, as published by the Free Software Foundation.
13 */
14#include <linux/list.h>
15#include <linux/proc_fs.h>
16#include <linux/skbuff.h>
17#include <linux/spinlock.h>
18#include <asm/atomic.h>
19
20#include <linux/netfilter/x_tables.h>
21#include <linux/netfilter/xt_quota2.h>
22#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
23#include <linux/netfilter_ipv4/ipt_ULOG.h>
24#endif
25
26/**
27 * @lock: lock to protect quota writers from each other
28 */
29struct xt_quota_counter {
30 u_int64_t quota;
31 spinlock_t lock;
32 struct list_head list;
33 atomic_t ref;
34 char name[sizeof(((struct xt_quota_mtinfo2 *)NULL)->name)];
35 struct proc_dir_entry *procfs_entry;
36};
37
38#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
39/* Harald's favorite number +1 :D From ipt_ULOG.c */
40static unsigned int qlog_nl_event = 112;
41module_param_named(event_num, qlog_nl_event, uint, S_IRUGO | S_IWUSR);
42MODULE_PARM_DESC(event_num,
43 "Event number for NETLINK_NFLOG message. 0 disables log."
44 "111 is what ipt_ULOG uses.");
45static struct sock *nflognl;
46#endif
47
48static LIST_HEAD(counter_list);
49static DEFINE_SPINLOCK(counter_list_lock);
50
51static struct proc_dir_entry *proc_xt_quota;
52static unsigned int quota_list_perms = S_IRUGO | S_IWUSR;
53static unsigned int quota_list_uid = 0;
54static unsigned int quota_list_gid = 0;
55module_param_named(perms, quota_list_perms, uint, S_IRUGO | S_IWUSR);
56module_param_named(uid, quota_list_uid, uint, S_IRUGO | S_IWUSR);
57module_param_named(gid, quota_list_gid, uint, S_IRUGO | S_IWUSR);
58
59
60#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
61static void quota2_log(unsigned int hooknum,
62 const struct sk_buff *skb,
63 const struct net_device *in,
64 const struct net_device *out,
65 const char *prefix)
66{
67 ulog_packet_msg_t *pm;
68 struct sk_buff *log_skb;
69 size_t size;
70 struct nlmsghdr *nlh;
71
72 if (!qlog_nl_event)
73 return;
74
75 size = NLMSG_SPACE(sizeof(*pm));
76 size = max(size, (size_t)NLMSG_GOODSIZE);
77 log_skb = alloc_skb(size, GFP_ATOMIC);
78 if (!log_skb) {
79 pr_err("xt_quota2: cannot alloc skb for logging\n");
80 return;
81 }
82
83 /* NLMSG_PUT() uses "goto nlmsg_failure" */
84 nlh = NLMSG_PUT(log_skb, /*pid*/0, /*seq*/0, qlog_nl_event,
85 sizeof(*pm));
86 pm = NLMSG_DATA(nlh);
87 if (skb->tstamp.tv64 == 0)
88 __net_timestamp((struct sk_buff *)skb);
89 pm->data_len = 0;
90 pm->hook = hooknum;
91 if (prefix != NULL)
92 strlcpy(pm->prefix, prefix, sizeof(pm->prefix));
93 else
94 *(pm->prefix) = '\0';
95 if (in)
96 strlcpy(pm->indev_name, in->name, sizeof(pm->indev_name));
97 else
98 pm->indev_name[0] = '\0';
99
100 if (out)
101 strlcpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
102 else
103 pm->outdev_name[0] = '\0';
104
105 NETLINK_CB(log_skb).dst_group = 1;
106 pr_debug("throwing 1 packets to netlink group 1\n");
107 netlink_broadcast(nflognl, log_skb, 0, 1, GFP_ATOMIC);
108
109nlmsg_failure: /* Used within NLMSG_PUT() */
110 pr_debug("xt_quota2: error during NLMSG_PUT\n");
111}
112#else
113static void quota2_log(unsigned int hooknum,
114 const struct sk_buff *skb,
115 const struct net_device *in,
116 const struct net_device *out,
117 const char *prefix)
118{
119}
120#endif /* if+else CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG */
121
122static int quota_proc_read(char *page, char **start, off_t offset,
123 int count, int *eof, void *data)
124{
125 struct xt_quota_counter *e = data;
126 int ret;
127
128 spin_lock_bh(&e->lock);
129 ret = snprintf(page, PAGE_SIZE, "%llu\n", e->quota);
130 spin_unlock_bh(&e->lock);
131 return ret;
132}
133
134static int quota_proc_write(struct file *file, const char __user *input,
135 unsigned long size, void *data)
136{
137 struct xt_quota_counter *e = data;
138 char buf[sizeof("18446744073709551616")];
139
140 if (size > sizeof(buf) - 1)
141 size = sizeof(buf) - 1;
142 if (copy_from_user(buf, input, size) != 0)
143 return -EFAULT;
144 buf[size] = '\0';
145
146 spin_lock_bh(&e->lock);
147 e->quota = simple_strtoull(buf, NULL, 0);
148 spin_unlock_bh(&e->lock);
149 return size;
150}
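
Each named counter gets a file under /proc/net/xt_quota/ backed by the two handlers above: reading returns the current quota, writing stores a new one. A user-space sketch (the counter name "example" is hypothetical and would have been created by a rule using this match):

/* Sketch: read and reset a quota2 counter from user space. */
#include <stdio.h>

int main(void)
{
	const char *path = "/proc/net/xt_quota/example";
	unsigned long long quota;
	FILE *f = fopen(path, "r");

	if (!f || fscanf(f, "%llu", &quota) != 1)
		return 1;
	fclose(f);
	printf("remaining quota: %llu\n", quota);

	f = fopen(path, "w");		/* writing stores a new value */
	if (!f)
		return 1;
	fprintf(f, "%llu\n", 1000000ULL);
	fclose(f);
	return 0;
}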
151
152static struct xt_quota_counter *
153q2_new_counter(const struct xt_quota_mtinfo2 *q, bool anon)
154{
155 struct xt_quota_counter *e;
156 unsigned int size;
157
158 /* Do not need all the procfs things for anonymous counters. */
159 size = anon ? offsetof(typeof(*e), list) : sizeof(*e);
160 e = kmalloc(size, GFP_KERNEL);
161 if (e == NULL)
162 return NULL;
163
164 e->quota = q->quota;
165 spin_lock_init(&e->lock);
166 if (!anon) {
167 INIT_LIST_HEAD(&e->list);
168 atomic_set(&e->ref, 1);
169 strlcpy(e->name, q->name, sizeof(e->name));
170 }
171 return e;
172}
173
174/**
175 * q2_get_counter - get ref to counter or create new
176 * @name: name of counter
177 */
178static struct xt_quota_counter *
179q2_get_counter(const struct xt_quota_mtinfo2 *q)
180{
181 struct proc_dir_entry *p;
182 struct xt_quota_counter *e = NULL;
183 struct xt_quota_counter *new_e;
184
185 if (*q->name == '\0')
186 return q2_new_counter(q, true);
187
188 /* No need to hold a lock while getting a new counter */
189 new_e = q2_new_counter(q, false);
190 if (new_e == NULL)
191 goto out;
192
193 spin_lock_bh(&counter_list_lock);
194 list_for_each_entry(e, &counter_list, list)
195 if (strcmp(e->name, q->name) == 0) {
196 atomic_inc(&e->ref);
197 spin_unlock_bh(&counter_list_lock);
198 kfree(new_e);
199 pr_debug("xt_quota2: old counter name=%s", e->name);
200 return e;
201 }
202 e = new_e;
203 pr_debug("xt_quota2: new_counter name=%s", e->name);
204 list_add_tail(&e->list, &counter_list);
205 /* An entry with a refcount of 1 is not directly destructible.
206 * This function has not yet returned the new entry, so iptables
207 * holds no reference with which to destroy it. For another rule
208 * to destroy it, this function would first have to be re-invoked
209 * and acquire a new ref to the same named quota.
210 * Nobody will access e->procfs_entry either.
211 * So it is safe to release the lock. */
212 spin_unlock_bh(&counter_list_lock);
213
214 /* create_proc_entry() is not spin_lock happy */
215 p = e->procfs_entry = create_proc_entry(e->name, quota_list_perms,
216 proc_xt_quota);
217
218 if (IS_ERR_OR_NULL(p)) {
219 spin_lock_bh(&counter_list_lock);
220 list_del(&e->list);
221 spin_unlock_bh(&counter_list_lock);
222 goto out;
223 }
224 p->data = e;
225 p->read_proc = quota_proc_read;
226 p->write_proc = quota_proc_write;
227 p->uid = quota_list_uid;
228 p->gid = quota_list_gid;
229 return e;
230
231 out:
232 kfree(e);
233 return NULL;
234}
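
The allocation happens before counter_list_lock is taken because kmalloc(GFP_KERNEL) may sleep; if another rule registered the same name in the meantime, the freshly built duplicate is simply discarded. The same get-or-create idiom, reduced to a stand-alone sketch (plain C, with a pthread mutex standing in for the kernel spinlock; illustrative only):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct counter {
	struct counter *next;
	int refs;
	char name[16];
};

static struct counter *counters;
static pthread_mutex_t counters_lock = PTHREAD_MUTEX_INITIALIZER;

static struct counter *get_counter(const char *name)
{
	struct counter *c, *new_c = calloc(1, sizeof(*new_c));

	if (!new_c)
		return NULL;		/* allocate before taking the lock */
	new_c->refs = 1;
	strncpy(new_c->name, name, sizeof(new_c->name) - 1);

	pthread_mutex_lock(&counters_lock);
	for (c = counters; c; c = c->next)
		if (!strcmp(c->name, name)) {
			c->refs++;	/* someone else created it first */
			pthread_mutex_unlock(&counters_lock);
			free(new_c);	/* our copy lost the race */
			return c;
		}
	new_c->next = counters;		/* publish the new entry */
	counters = new_c;
	pthread_mutex_unlock(&counters_lock);
	return new_c;
}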
235
236static int quota_mt2_check(const struct xt_mtchk_param *par)
237{
238 struct xt_quota_mtinfo2 *q = par->matchinfo;
239
240 pr_debug("xt_quota2: check() flags=0x%04x", q->flags);
241
242 if (q->flags & ~XT_QUOTA_MASK)
243 return -EINVAL;
244
245 q->name[sizeof(q->name)-1] = '\0';
246 if (*q->name == '.' || strchr(q->name, '/') != NULL) {
247 printk(KERN_ERR "xt_quota.3: illegal name\n");
248 return -EINVAL;
249 }
250
251 q->master = q2_get_counter(q);
252 if (q->master == NULL) {
253 printk(KERN_ERR "xt_quota.3: memory alloc failure\n");
254 return -ENOMEM;
255 }
256
257 return 0;
258}
259
260static void quota_mt2_destroy(const struct xt_mtdtor_param *par)
261{
262 struct xt_quota_mtinfo2 *q = par->matchinfo;
263 struct xt_quota_counter *e = q->master;
264
265 if (*q->name == '\0') {
266 kfree(e);
267 return;
268 }
269
270 spin_lock_bh(&counter_list_lock);
271 if (!atomic_dec_and_test(&e->ref)) {
272 spin_unlock_bh(&counter_list_lock);
273 return;
274 }
275
276 list_del(&e->list);
277 remove_proc_entry(e->name, proc_xt_quota);
278 spin_unlock_bh(&counter_list_lock);
279 kfree(e);
280}
281
282static bool
283quota_mt2(const struct sk_buff *skb, struct xt_action_param *par)
284{
285 struct xt_quota_mtinfo2 *q = (void *)par->matchinfo;
286 struct xt_quota_counter *e = q->master;
287 bool ret = q->flags & XT_QUOTA_INVERT;
288
289 spin_lock_bh(&e->lock);
290 if (q->flags & XT_QUOTA_GROW) {
291 /*
292 * While no_change is pointless in "grow" mode, we will
293 * implement it here simply to have a consistent behavior.
294 */
295 if (!(q->flags & XT_QUOTA_NO_CHANGE)) {
296 e->quota += (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
297 }
298 ret = true;
299 } else {
300 if (e->quota >= skb->len) {
301 if (!(q->flags & XT_QUOTA_NO_CHANGE))
302 e->quota -= (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
303 ret = !ret;
304 } else {
305 /* We are transitioning, log that fact. */
306 if (e->quota) {
307 quota2_log(par->hooknum,
308 skb,
309 par->in,
310 par->out,
311 q->name);
312 }
313 /* we do not allow even small packets from now on */
314 e->quota = 0;
315 }
316 }
317 spin_unlock_bh(&e->lock);
318 return ret;
319}
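
The return value above is easy to misread: ret is seeded from XT_QUOTA_INVERT and flipped only when the packet is charged against a remaining quota. The same decision restated as a pure function (a sketch; the flag values are an assumption meant to match linux/netfilter/xt_quota2.h):

#include <stdbool.h>
#include <stdint.h>

#define XT_QUOTA_INVERT 0x1
#define XT_QUOTA_GROW   0x2

static bool quota2_matches(uint32_t flags, bool quota_remaining)
{
	if (flags & XT_QUOTA_GROW)
		return true;			/* grow mode always matches */
	if (quota_remaining)
		return !(flags & XT_QUOTA_INVERT); /* normal: match while quota lasts */
	return flags & XT_QUOTA_INVERT;		/* inverted: match once exhausted */
}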
320
321static struct xt_match quota_mt2_reg[] __read_mostly = {
322 {
323 .name = "quota2",
324 .revision = 3,
325 .family = NFPROTO_IPV4,
326 .checkentry = quota_mt2_check,
327 .match = quota_mt2,
328 .destroy = quota_mt2_destroy,
329 .matchsize = sizeof(struct xt_quota_mtinfo2),
330 .me = THIS_MODULE,
331 },
332 {
333 .name = "quota2",
334 .revision = 3,
335 .family = NFPROTO_IPV6,
336 .checkentry = quota_mt2_check,
337 .match = quota_mt2,
338 .destroy = quota_mt2_destroy,
339 .matchsize = sizeof(struct xt_quota_mtinfo2),
340 .me = THIS_MODULE,
341 },
342};
343
344static int __init quota_mt2_init(void)
345{
346 int ret;
347 pr_debug("xt_quota2: init()");
348
349#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
350 nflognl = netlink_kernel_create(&init_net,
351 NETLINK_NFLOG, 1, NULL,
352 NULL, THIS_MODULE);
353 if (!nflognl)
354 return -ENOMEM;
355#endif
356
357 proc_xt_quota = proc_mkdir("xt_quota", init_net.proc_net);
358 if (proc_xt_quota == NULL)
359 return -EACCES;
360
361 ret = xt_register_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
362 if (ret < 0)
363 remove_proc_entry("xt_quota", init_net.proc_net);
364 pr_debug("xt_quota2: init() %d", ret);
365 return ret;
366}
367
368static void __exit quota_mt2_exit(void)
369{
370 xt_unregister_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
371 remove_proc_entry("xt_quota", init_net.proc_net);
372}
373
374module_init(quota_mt2_init);
375module_exit(quota_mt2_exit);
376MODULE_DESCRIPTION("Xtables: countdown quota match; up counter");
377MODULE_AUTHOR("Sam Johnston <samj@samj.net>");
378MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
379MODULE_LICENSE("GPL");
380MODULE_ALIAS("ipt_quota2");
381MODULE_ALIAS("ip6t_quota2");
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index fe39f7e913d..ddf5e0507f5 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -35,7 +35,7 @@
 #include <net/netfilter/nf_conntrack.h>
 #endif
 
-static void
+void
 xt_socket_put_sk(struct sock *sk)
 {
 	if (sk->sk_state == TCP_TIME_WAIT)
@@ -43,6 +43,7 @@ xt_socket_put_sk(struct sock *sk)
 	else
 		sock_put(sk);
 }
+EXPORT_SYMBOL(xt_socket_put_sk);
 
 static int
 extract_icmp4_fields(const struct sk_buff *skb,
@@ -101,9 +102,8 @@ extract_icmp4_fields(const struct sk_buff *skb,
 	return 0;
 }
 
-static bool
-socket_match(const struct sk_buff *skb, struct xt_action_param *par,
-	     const struct xt_socket_mtinfo1 *info)
+struct sock*
+xt_socket_get4_sk(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	const struct iphdr *iph = ip_hdr(skb);
 	struct udphdr _hdr, *hp = NULL;
@@ -120,7 +120,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
 		hp = skb_header_pointer(skb, ip_hdrlen(skb),
 					sizeof(_hdr), &_hdr);
 		if (hp == NULL)
-			return false;
+			return NULL;
 
 		protocol = iph->protocol;
 		saddr = iph->saddr;
@@ -131,9 +131,9 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
 	} else if (iph->protocol == IPPROTO_ICMP) {
 		if (extract_icmp4_fields(skb, &protocol, &saddr, &daddr,
 					&sport, &dport))
-			return false;
+			return NULL;
 	} else {
-		return false;
+		return NULL;
 	}
 
 #ifdef XT_SOCKET_HAVE_CONNTRACK
@@ -157,6 +157,23 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
 
 	sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol,
 				   saddr, daddr, sport, dport, par->in, NFT_LOOKUP_ANY);
+
+	pr_debug("proto %hhu %pI4:%hu -> %pI4:%hu (orig %pI4:%hu) sock %p\n",
+		 protocol, &saddr, ntohs(sport),
+		 &daddr, ntohs(dport),
+		 &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
+
+	return sk;
+}
+EXPORT_SYMBOL(xt_socket_get4_sk);
+
+static bool
+socket_match(const struct sk_buff *skb, struct xt_action_param *par,
+	     const struct xt_socket_mtinfo1 *info)
+{
+	struct sock *sk;
+
+	sk = xt_socket_get4_sk(skb, par);
 	if (sk != NULL) {
 		bool wildcard;
 		bool transparent = true;
@@ -179,11 +196,6 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
 		sk = NULL;
 	}
 
-	pr_debug("proto %hhu %pI4:%hu -> %pI4:%hu (orig %pI4:%hu) sock %p\n",
-		 protocol, &saddr, ntohs(sport),
-		 &daddr, ntohs(dport),
-		 &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
-
 	return (sk != NULL);
 }
 
@@ -253,8 +265,8 @@ extract_icmp6_fields(const struct sk_buff *skb,
 	return 0;
 }
 
-static bool
-socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
+struct sock*
+xt_socket_get6_sk(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	struct ipv6hdr *iph = ipv6_hdr(skb);
 	struct udphdr _hdr, *hp = NULL;
@@ -262,7 +274,6 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
 	struct in6_addr *daddr, *saddr;
 	__be16 dport, sport;
 	int thoff, tproto;
-	const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
 
 	tproto = ipv6_find_hdr(skb, &thoff, -1, NULL);
 	if (tproto < 0) {
@@ -274,7 +285,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
 		hp = skb_header_pointer(skb, thoff,
 					sizeof(_hdr), &_hdr);
 		if (hp == NULL)
-			return false;
+			return NULL;
 
 		saddr = &iph->saddr;
 		sport = hp->source;
@@ -284,13 +295,30 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
 	} else if (tproto == IPPROTO_ICMPV6) {
 		if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr,
 					 &sport, &dport))
-			return false;
+			return NULL;
 	} else {
-		return false;
+		return NULL;
 	}
 
 	sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto,
 				   saddr, daddr, sport, dport, par->in, NFT_LOOKUP_ANY);
+	pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu "
+		 "(orig %pI6:%hu) sock %p\n",
+		 tproto, saddr, ntohs(sport),
+		 daddr, ntohs(dport),
+		 &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
+	return sk;
+}
+EXPORT_SYMBOL(xt_socket_get6_sk);
+
+static bool
+socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	struct sock *sk;
+	const struct xt_socket_mtinfo1 *info;
+
+	info = (struct xt_socket_mtinfo1 *) par->matchinfo;
+	sk = xt_socket_get6_sk(skb, par);
 	if (sk != NULL) {
 		bool wildcard;
 		bool transparent = true;
@@ -313,12 +341,6 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
 		sk = NULL;
 	}
 
-	pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu "
-		 "(orig %pI6:%hu) sock %p\n",
-		 tproto, saddr, ntohs(sport),
-		 daddr, ntohs(dport),
-		 &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
-
 	return (sk != NULL);
 }
 #endif
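
The point of this refactor is to let other modules reuse the socket lookup: xt_socket_get4_sk()/xt_socket_get6_sk() return a referenced socket (or NULL) and xt_socket_put_sk() releases it. A sketch of an external caller (hypothetical; the intended consumer in this series is xt_qtaguid, whose call site is not shown here):

static void tag_skb_by_owner(const struct sk_buff *skb,
			     struct xt_action_param *par)
{
	struct sock *sk = xt_socket_get4_sk(skb, par);

	if (!sk)
		return;
	/* ... read ownership information off the socket here ... */
	xt_socket_put_sk(sk);	/* release the reference from the lookup */
}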
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 6ef64adf736..24bc620b539 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -830,12 +830,19 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
 	return 0;
 }
 
-int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
+static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
 {
 	int len = skb->len;
 
 	skb_queue_tail(&sk->sk_receive_queue, skb);
 	sk->sk_data_ready(sk, len);
+	return len;
+}
+
+int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
+{
+	int len = __netlink_sendskb(sk, skb);
+
 	sock_put(sk);
 	return len;
 }
@@ -960,8 +967,7 @@ static inline int netlink_broadcast_deliver(struct sock *sk,
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
 	    !test_bit(0, &nlk->state)) {
 		skb_set_owner_r(skb, sk);
-		skb_queue_tail(&sk->sk_receive_queue, skb);
-		sk->sk_data_ready(sk, skb->len);
+		__netlink_sendskb(sk, skb);
 		return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
 	}
 	return -1;
@@ -1682,10 +1688,8 @@ static int netlink_dump(struct sock *sk)
 
 		if (sk_filter(sk, skb))
 			kfree_skb(skb);
-		else {
-			skb_queue_tail(&sk->sk_receive_queue, skb);
-			sk->sk_data_ready(sk, skb->len);
-		}
+		else
+			__netlink_sendskb(sk, skb);
 		return 0;
 	}
 
@@ -1697,10 +1701,8 @@ static int netlink_dump(struct sock *sk)
 
 	if (sk_filter(sk, skb))
 		kfree_skb(skb);
-	else {
-		skb_queue_tail(&sk->sk_receive_queue, skb);
-		sk->sk_data_ready(sk, skb->len);
-	}
+	else
+		__netlink_sendskb(sk, skb);
 
 	if (cb->done)
 		cb->done(cb);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c0c3cda1971..fafb96830e7 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -654,7 +654,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 	return 0;
 
 drop_n_acct:
-	po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
+	spin_lock(&sk->sk_receive_queue.lock);
+	po->stats.tp_drops++;
+	atomic_inc(&sk->sk_drops);
+	spin_unlock(&sk->sk_receive_queue.lock);
 
 drop_n_restore:
 	if (skb_head != skb->data && skb_shared(skb)) {
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index f17fd841f94..d29a7fb3f61 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -1045,6 +1045,9 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
 	int flags = msg->msg_flags;
 	int err, done;
 
+	if (len > USHRT_MAX)
+		return -EMSGSIZE;
+
 	if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
 				MSG_CMSG_COMPAT)) ||
 	    !(msg->msg_flags & MSG_EOR))
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index bb6ad81b671..424ff622ab5 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -68,7 +68,6 @@ static int rds_release(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
 	struct rds_sock *rs;
-	unsigned long flags;
 
 	if (!sk)
 		goto out;
@@ -94,10 +93,10 @@ static int rds_release(struct socket *sock)
 	rds_rdma_drop_keys(rs);
 	rds_notify_queue_get(rs, NULL);
 
-	spin_lock_irqsave(&rds_sock_lock, flags);
+	spin_lock_bh(&rds_sock_lock);
 	list_del_init(&rs->rs_item);
 	rds_sock_count--;
-	spin_unlock_irqrestore(&rds_sock_lock, flags);
+	spin_unlock_bh(&rds_sock_lock);
 
 	rds_trans_put(rs->rs_transport);
 
@@ -409,7 +408,6 @@ static const struct proto_ops rds_proto_ops = {
 
 static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
 {
-	unsigned long flags;
 	struct rds_sock *rs;
 
 	sock_init_data(sock, sk);
@@ -426,10 +424,10 @@ static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
 	spin_lock_init(&rs->rs_rdma_lock);
 	rs->rs_rdma_keys = RB_ROOT;
 
-	spin_lock_irqsave(&rds_sock_lock, flags);
+	spin_lock_bh(&rds_sock_lock);
 	list_add_tail(&rs->rs_item, &rds_sock_list);
 	rds_sock_count++;
-	spin_unlock_irqrestore(&rds_sock_lock, flags);
+	spin_unlock_bh(&rds_sock_lock);
 
 	return 0;
 }
@@ -471,12 +469,11 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
 {
 	struct rds_sock *rs;
 	struct rds_incoming *inc;
-	unsigned long flags;
 	unsigned int total = 0;
 
 	len /= sizeof(struct rds_info_message);
 
-	spin_lock_irqsave(&rds_sock_lock, flags);
+	spin_lock_bh(&rds_sock_lock);
 
 	list_for_each_entry(rs, &rds_sock_list, rs_item) {
 		read_lock(&rs->rs_recv_lock);
@@ -492,7 +489,7 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
 		read_unlock(&rs->rs_recv_lock);
 	}
 
-	spin_unlock_irqrestore(&rds_sock_lock, flags);
+	spin_unlock_bh(&rds_sock_lock);
 
 	lens->nr = total;
 	lens->each = sizeof(struct rds_info_message);
@@ -504,11 +501,10 @@ static void rds_sock_info(struct socket *sock, unsigned int len,
 {
 	struct rds_info_socket sinfo;
 	struct rds_sock *rs;
-	unsigned long flags;
 
 	len /= sizeof(struct rds_info_socket);
 
-	spin_lock_irqsave(&rds_sock_lock, flags);
+	spin_lock_bh(&rds_sock_lock);
 
 	if (len < rds_sock_count)
 		goto out;
@@ -529,7 +525,7 @@ out:
 	lens->nr = rds_sock_count;
 	lens->each = sizeof(struct rds_info_socket);
 
-	spin_unlock_irqrestore(&rds_sock_lock, flags);
+	spin_unlock_bh(&rds_sock_lock);
 }
 
 static void rds_exit(void)
diff --git a/net/rds/send.c b/net/rds/send.c
index d58ae5f9339..c803341f284 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -932,7 +932,6 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 	/* Mirror Linux UDP mirror of BSD error message compatibility */
 	/* XXX: Perhaps MSG_MORE someday */
 	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
-		printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
 		ret = -EOPNOTSUPP;
 		goto out;
 	}
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index 78efe895b66..8e12c8a2b82 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -10,6 +10,11 @@ menuconfig RFKILL
 	  To compile this driver as a module, choose M here: the
 	  module will be called rfkill.
 
+config RFKILL_PM
+	bool "Power off on suspend"
+	depends on RFKILL && PM
+	default y
+
 # LED trigger support
 config RFKILL_LEDS
 	bool
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index be90640a277..df2dae6b272 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -769,6 +769,7 @@ void rfkill_pause_polling(struct rfkill *rfkill)
 }
 EXPORT_SYMBOL(rfkill_pause_polling);
 
+#ifdef CONFIG_RFKILL_PM
 void rfkill_resume_polling(struct rfkill *rfkill)
 {
 	BUG_ON(!rfkill);
@@ -803,14 +804,17 @@ static int rfkill_resume(struct device *dev)
 
 	return 0;
 }
+#endif
 
 static struct class rfkill_class = {
 	.name		= "rfkill",
 	.dev_release	= rfkill_release,
 	.dev_attrs	= rfkill_dev_attrs,
 	.dev_uevent	= rfkill_dev_uevent,
+#ifdef CONFIG_RFKILL_PM
 	.suspend	= rfkill_suspend,
 	.resume		= rfkill_resume,
+#endif
 };
 
 bool rfkill_blocked(struct rfkill *rfkill)
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c
index 178ff4f73c8..2679507ad33 100644
--- a/net/rose/rose_dev.c
+++ b/net/rose/rose_dev.c
@@ -96,11 +96,11 @@ static int rose_set_mac_address(struct net_device *dev, void *addr)
 	struct sockaddr *sa = addr;
 	int err;
 
-	if (!memcpy(dev->dev_addr, sa->sa_data, dev->addr_len))
+	if (!memcmp(dev->dev_addr, sa->sa_data, dev->addr_len))
 		return 0;
 
 	if (dev->flags & IFF_UP) {
-		err = rose_add_loopback_node((rose_address *)dev->dev_addr);
+		err = rose_add_loopback_node((rose_address *)sa->sa_data);
 		if (err)
 			return err;
 
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 06afbaeb4c8..178ee83175a 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -225,8 +225,7 @@ struct choke_skb_cb {
 
 static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
 {
-	BUILD_BUG_ON(sizeof(skb->cb) <
-		sizeof(struct qdisc_skb_cb) + sizeof(struct choke_skb_cb));
+	qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
 	return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
 }
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index b4c680900d7..06d8a8fec68 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -246,6 +246,7 @@ static void dev_watchdog(unsigned long arg)
 				    time_after(jiffies, (trans_start +
 							 dev->watchdog_timeo))) {
 					some_queue_timedout = 1;
+					txq->trans_timeout++;
 					break;
 				}
 			}
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index b9493a09a87..e1afe0c205f 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -385,7 +385,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
 	struct gred_sched_data *q;
 
 	if (table->tab[dp] == NULL) {
-		table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
+		table->tab[dp] = kzalloc(sizeof(*q), GFP_ATOMIC);
 		if (table->tab[dp] == NULL)
 			return -ENOMEM;
 	}
@@ -544,11 +544,8 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
 		opt.packets = q->packetsin;
 		opt.bytesin = q->bytesin;
 
-		if (gred_wred_mode(table)) {
-			q->parms.qidlestart =
-				table->tab[table->def]->parms.qidlestart;
-			q->parms.qavg = table->tab[table->def]->parms.qavg;
-		}
+		if (gred_wred_mode(table))
+			gred_load_wred_set(table, q);
 
 		opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);
 
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index ea17cbed29e..59b26b8ff4b 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -106,7 +106,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
 	if (!netif_is_multiqueue(dev))
 		return -EOPNOTSUPP;
 
-	if (nla_len(opt) < sizeof(*qopt))
+	if (!opt || nla_len(opt) < sizeof(*qopt))
 		return -EINVAL;
 
 	qopt = nla_data(opt);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 69c35f6cd13..945f3dd6c6f 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -117,8 +117,7 @@ struct netem_skb_cb {
 
 static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
 {
-	BUILD_BUG_ON(sizeof(skb->cb) <
-		sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
+	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
 	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
 }
 
@@ -351,10 +350,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
 		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
 		    (skb->ip_summed == CHECKSUM_PARTIAL &&
-		     skb_checksum_help(skb))) {
-			sch->qstats.drops++;
-			return NET_XMIT_DROP;
-		}
+		     skb_checksum_help(skb)))
+			return qdisc_drop(skb, sch);
 
 		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
 	}
@@ -382,8 +379,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		q->counter = 0;
 
 		__skb_queue_head(&q->qdisc->q, skb);
-		q->qdisc->qstats.backlog += qdisc_pkt_len(skb);
-		q->qdisc->qstats.requeues++;
+		sch->qstats.backlog += qdisc_pkt_len(skb);
+		sch->qstats.requeues++;
 		ret = NET_XMIT_SUCCESS;
 	}
 
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 2a318f2dc3e..b5d56a22b1d 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -112,7 +112,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch)
 
 	for (prio = 0; prio < q->bands; prio++) {
 		struct Qdisc *qdisc = q->queues[prio];
-		struct sk_buff *skb = qdisc->dequeue(qdisc);
+		struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
 		if (skb) {
 			qdisc_bstats_update(sch, skb);
 			sch->q.qlen--;
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 0a833d0c1f6..e85b248773e 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -93,8 +93,7 @@ struct sfb_skb_cb {
 
 static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
 {
-	BUILD_BUG_ON(sizeof(skb->cb) <
-		sizeof(struct qdisc_skb_cb) + sizeof(struct sfb_skb_cb));
+	qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
 	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
 }
 
@@ -557,6 +556,8 @@ static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
 
 	sch->qstats.backlog = q->qdisc->qstats.backlog;
 	opts = nla_nest_start(skb, TCA_OPTIONS);
+	if (opts == NULL)
+		goto nla_put_failure;
 	NLA_PUT(skb, TCA_SFB_PARMS, sizeof(opt), &opt);
 	return nla_nest_end(skb, opts);
 
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index b6ea6afa55b..69400e3c69d 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -410,7 +410,12 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	/* Return Congestion Notification only if we dropped a packet
 	 * from this flow.
 	 */
-	return (qlen != slot->qlen) ? NET_XMIT_CN : NET_XMIT_SUCCESS;
+	if (qlen != slot->qlen)
+		return NET_XMIT_CN;
+
+	/* As we dropped a packet, better let upper stack know this */
+	qdisc_tree_decrease_qlen(sch, 1);
+	return NET_XMIT_SUCCESS;
 }
 
 static struct sk_buff *
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 45cd30098e3..4f4c52c0eeb 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -225,11 +225,11 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
 
 
 static int
-__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
+__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
+	       struct net_device *dev, struct netdev_queue *txq,
+	       struct neighbour *mn)
 {
-	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
-	struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
-	struct neighbour *mn = skb_dst(skb)->neighbour;
+	struct teql_sched_data *q = qdisc_priv(txq->qdisc);
 	struct neighbour *n = q->ncache;
 
 	if (mn->tbl == NULL)
@@ -262,17 +262,26 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
 }
 
 static inline int teql_resolve(struct sk_buff *skb,
-			       struct sk_buff *skb_res, struct net_device *dev)
+			       struct sk_buff *skb_res,
+			       struct net_device *dev,
+			       struct netdev_queue *txq)
 {
-	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+	struct dst_entry *dst = skb_dst(skb);
+	struct neighbour *mn;
+	int res;
+
 	if (txq->qdisc == &noop_qdisc)
 		return -ENODEV;
 
-	if (dev->header_ops == NULL ||
-	    skb_dst(skb) == NULL ||
-	    skb_dst(skb)->neighbour == NULL)
+	if (!dev->header_ops || !dst)
 		return 0;
-	return __teql_resolve(skb, skb_res, dev);
+
+	rcu_read_lock();
+	mn = dst_get_neighbour(dst);
+	res = mn ? __teql_resolve(skb, skb_res, dev, txq, mn) : 0;
+	rcu_read_unlock();
+
+	return res;
 }
 
 static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -307,7 +316,7 @@ restart:
 			continue;
 		}
 
-		switch (teql_resolve(skb, skb_res, slave)) {
+		switch (teql_resolve(skb, skb_res, slave, slave_txq)) {
 		case 0:
 			if (__netif_tx_trylock(slave_txq)) {
 				unsigned int length = qdisc_pkt_len(skb);
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 4a62888f2e4..17a6e658a4c 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -173,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 	asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
 	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
 	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
-		(unsigned long)sp->autoclose * HZ;
+		min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
 
 	/* Initializes the timers */
 	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 741ed164883..cd9eded3bb0 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -737,15 +737,12 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
 
 	epb = &ep->base;
 
-	if (hlist_unhashed(&epb->node))
-		return;
-
 	epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
 
 	head = &sctp_ep_hashtable[epb->hashent];
 
 	sctp_write_lock(&head->lock);
-	__hlist_del(&epb->node);
+	hlist_del_init(&epb->node);
 	sctp_write_unlock(&head->lock);
 }
 
@@ -826,7 +823,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
 	head = &sctp_assoc_hashtable[epb->hashent];
 
 	sctp_write_lock(&head->lock);
-	__hlist_del(&epb->node);
+	hlist_del_init(&epb->node);
 	sctp_write_unlock(&head->lock);
 }
 
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 08b3cead650..8fc4dcd294a 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -377,9 +377,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 	 */
 	skb_set_owner_w(nskb, sk);
 
-	/* The 'obsolete' field of dst is set to 2 when a dst is freed. */
-	if (!dst || (dst->obsolete > 1)) {
-		dst_release(dst);
+	if (!sctp_transport_dst_check(tp)) {
 		sctp_transport_route(tp, NULL, sctp_sk(sk));
 		if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) {
 			sctp_assoc_sync_pmtu(asoc);
@@ -697,13 +695,7 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
 	/* Keep track of how many bytes are in flight to the receiver. */
 	asoc->outqueue.outstanding_bytes += datasize;
 
-	/* Update our view of the receiver's rwnd. Include sk_buff overhead
-	 * while updating peer.rwnd so that it reduces the chances of a
-	 * receiver running out of receive buffer space even when receive
-	 * window is still open. This can happen when a sender is sending
-	 * sending small messages.
-	 */
-	datasize += sizeof(struct sk_buff);
+	/* Update our view of the receiver's rwnd. */
 	if (datasize < rwnd)
 		rwnd -= datasize;
 	else
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index d03682109b7..1f2938fbf9b 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -411,8 +411,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 				chunk->transport->flight_size -=
 						sctp_data_size(chunk);
 				q->outstanding_bytes -= sctp_data_size(chunk);
-				q->asoc->peer.rwnd += (sctp_data_size(chunk) +
-							sizeof(struct sk_buff));
+				q->asoc->peer.rwnd += sctp_data_size(chunk);
 			}
 			continue;
 		}
@@ -432,8 +431,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 			 * (Section 7.2.4)), add the data size of those
 			 * chunks to the rwnd.
 			 */
-			q->asoc->peer.rwnd += (sctp_data_size(chunk) +
-						sizeof(struct sk_buff));
+			q->asoc->peer.rwnd += sctp_data_size(chunk);
 			q->outstanding_bytes -= sctp_data_size(chunk);
 			if (chunk->transport)
 				transport->flight_size -= sctp_data_size(chunk);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 207175b2f40..946afd6045c 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1144,6 +1144,9 @@ SCTP_STATIC __init int sctp_init(void)
1144 sctp_max_instreams = SCTP_DEFAULT_INSTREAMS; 1144 sctp_max_instreams = SCTP_DEFAULT_INSTREAMS;
1145 sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS; 1145 sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS;
1146 1146
1147 /* Initialize maximum autoclose timeout. */
1148 sctp_max_autoclose = INT_MAX / HZ;
1149
1147 /* Initialize handle used for association ids. */ 1150 /* Initialize handle used for association ids. */
1148 idr_init(&sctp_assocs_id); 1151 idr_init(&sctp_assocs_id);
1149 1152
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index d3ccf7973c5..b70a3ee6016 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1160,8 +1160,14 @@ out_free:
1160 SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p" 1160 SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
1161 " kaddrs: %p err: %d\n", 1161 " kaddrs: %p err: %d\n",
1162 asoc, kaddrs, err); 1162 asoc, kaddrs, err);
1163 if (asoc) 1163 if (asoc) {
1164 /* sctp_primitive_ASSOCIATE may have added this association
1165 * to the hash table; try to unhash it, just in case. It's a no-op
1166 * if it wasn't hashed, so we're safe.
1167 */
1168 sctp_unhash_established(asoc);
1164 sctp_association_free(asoc); 1169 sctp_association_free(asoc);
1170 }
1165 return err; 1171 return err;
1166} 1172}
1167 1173
@@ -1871,8 +1877,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1871 goto out_unlock; 1877 goto out_unlock;
1872 1878
1873out_free: 1879out_free:
1874 if (new_asoc) 1880 if (new_asoc) {
1881 sctp_unhash_established(asoc);
1875 sctp_association_free(asoc); 1882 sctp_association_free(asoc);
1883 }
1876out_unlock: 1884out_unlock:
1877 sctp_release_sock(sk); 1885 sctp_release_sock(sk);
1878 1886
@@ -2129,8 +2137,6 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
2129 return -EINVAL; 2137 return -EINVAL;
2130 if (copy_from_user(&sp->autoclose, optval, optlen)) 2138 if (copy_from_user(&sp->autoclose, optval, optlen))
2131 return -EFAULT; 2139 return -EFAULT;
2132 /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
2133 sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
2134 2140
2135 return 0; 2141 return 0;
2136} 2142}
@@ -4011,9 +4017,10 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
4011static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, 4017static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
4012 int __user *optlen) 4018 int __user *optlen)
4013{ 4019{
4014 if (len < sizeof(struct sctp_event_subscribe)) 4020 if (len <= 0)
4015 return -EINVAL; 4021 return -EINVAL;
4016 len = sizeof(struct sctp_event_subscribe); 4022 if (len > sizeof(struct sctp_event_subscribe))
4023 len = sizeof(struct sctp_event_subscribe);
4017 if (put_user(len, optlen)) 4024 if (put_user(len, optlen))
4018 return -EFAULT; 4025 return -EFAULT;
4019 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) 4026 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
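The sctp_getsockopt_events() change above relaxes the old all-or-nothing length check: a caller passing a shorter (for example, an older) struct sctp_event_subscribe now gets a truncated copy instead of -EINVAL, while len <= 0 is still rejected. A self-contained sketch of that contract, with memcpy() standing in for copy_to_user():

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct event_subscribe { unsigned char flags[10]; };	/* stand-in struct */

static int get_events(const struct event_subscribe *src, void *optval, int *optlen)
{
	int len = *optlen;

	if (len <= 0)
		return -EINVAL;
	if (len > (int)sizeof(*src))
		len = sizeof(*src);	/* truncate, don't fail */
	memcpy(optval, src, len);	/* stands in for copy_to_user() */
	*optlen = len;			/* report what was actually copied */
	return 0;
}

int main(void)
{
	struct event_subscribe s = { { 1 } };
	unsigned char small[4];
	int len = sizeof(small);

	printf("ret=%d len=%d\n", get_events(&s, small, &len), len);
	return 0;
}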
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 50cb57f0919..6752f489feb 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -53,6 +53,10 @@ static int sack_timer_min = 1;
53static int sack_timer_max = 500; 53static int sack_timer_max = 500;
54static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */ 54static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
55static int rwnd_scale_max = 16; 55static int rwnd_scale_max = 16;
56static unsigned long max_autoclose_min = 0;
57static unsigned long max_autoclose_max =
58 (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
59 ? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
56 60
57extern long sysctl_sctp_mem[3]; 61extern long sysctl_sctp_mem[3];
58extern int sysctl_sctp_rmem[3]; 62extern int sysctl_sctp_rmem[3];
@@ -251,6 +255,15 @@ static ctl_table sctp_table[] = {
251 .extra1 = &one, 255 .extra1 = &one,
252 .extra2 = &rwnd_scale_max, 256 .extra2 = &rwnd_scale_max,
253 }, 257 },
258 {
259 .procname = "max_autoclose",
260 .data = &sctp_max_autoclose,
261 .maxlen = sizeof(unsigned long),
262 .mode = 0644,
263 .proc_handler = &proc_doulongvec_minmax,
264 .extra1 = &max_autoclose_min,
265 .extra2 = &max_autoclose_max,
266 },
254 267
255 { /* sentinel */ } 268 { /* sentinel */ }
256}; 269};
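Together with the sctp_max_autoclose default of INT_MAX / HZ set in sctp_init() above, this sysctl replaces the clamp removed from sctp_setsockopt_autoclose(): the bound is presumably applied where the autoclose timer is armed, so that autoclose * HZ cannot overflow the jiffies arithmetic. A sketch under that assumption (HZ value assumed for illustration):

#include <limits.h>
#include <stdio.h>

#define HZ 250	/* assumed tick rate for the sketch */

static unsigned long sctp_max_autoclose = INT_MAX / HZ;

/* Convert a user-supplied autoclose value (seconds) to jiffies,
 * clamping first so the multiplication cannot overflow. */
static unsigned long autoclose_jiffies(unsigned long autoclose)
{
	if (autoclose > sctp_max_autoclose)
		autoclose = sctp_max_autoclose;
	return autoclose * HZ;
}

int main(void)
{
	printf("%lu\n", autoclose_jiffies(~0UL));	/* clamped, no overflow */
	return 0;
}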
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 394c57ca2f5..8da4481ed30 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -226,23 +226,6 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
226 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; 226 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
227} 227}
228 228
229/* this is a complete rip-off from __sk_dst_check
230 * the cookie is always 0 since this is how it's used in the
231 * pmtu code
232 */
233static struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
234{
235 struct dst_entry *dst = t->dst;
236
237 if (dst && dst->obsolete && dst->ops->check(dst, 0) == NULL) {
238 dst_release(t->dst);
239 t->dst = NULL;
240 return NULL;
241 }
242
243 return dst;
244}
245
246void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) 229void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
247{ 230{
248 struct dst_entry *dst; 231 struct dst_entry *dst;
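The helper deleted above is not gone: sctp_packet_transmit() in the output.c hunk earlier now calls sctp_transport_dst_check(), so the function has presumably moved to a shared header. The underlying idea, mirrored from __sk_dst_check(), is that a cached route is only trusted while the routing layer still vouches for it. A toy generation-counter model of that validation:

#include <stdio.h>

/* A cached entry carries the generation it was created under; a bumped
 * global generation (set on routing changes) invalidates it, much like
 * dst->obsolete plus dst->ops->check(). */
static unsigned int route_genid = 1;

struct cached_route { unsigned int genid; int id; };

static struct cached_route *route_check(struct cached_route **cache)
{
	struct cached_route *rt = *cache;

	if (rt && rt->genid != route_genid) {
		*cache = NULL;		/* stale: drop and force re-lookup */
		return NULL;
	}
	return rt;
}

int main(void)
{
	struct cached_route rt = { route_genid, 7 };
	struct cached_route *cache = &rt;

	route_genid++;			/* e.g. a routing table change */
	printf("%s\n", route_check(&cache) ? "reused" : "re-route");
	return 0;
}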
diff --git a/net/socket.c b/net/socket.c
index 02dc82db3d2..cf41afcc89b 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -791,9 +791,9 @@ static ssize_t sock_sendpage(struct file *file, struct page *page,
791 791
792 sock = file->private_data; 792 sock = file->private_data;
793 793
794 flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT; 794 flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
795 if (more) 795 /* more is a combination of MSG_MORE and MSG_SENDPAGE_NOTLAST */
796 flags |= MSG_MORE; 796 flags |= more;
797 797
798 return kernel_sendpage(sock, page, offset, size, flags); 798 return kernel_sendpage(sock, page, offset, size, flags);
799} 799}
@@ -1871,8 +1871,14 @@ SYSCALL_DEFINE2(shutdown, int, fd, int, how)
1871#define COMPAT_NAMELEN(msg) COMPAT_MSG(msg, msg_namelen) 1871#define COMPAT_NAMELEN(msg) COMPAT_MSG(msg, msg_namelen)
1872#define COMPAT_FLAGS(msg) COMPAT_MSG(msg, msg_flags) 1872#define COMPAT_FLAGS(msg) COMPAT_MSG(msg, msg_flags)
1873 1873
1874struct used_address {
1875 struct sockaddr_storage name;
1876 unsigned int name_len;
1877};
1878
1874static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, 1879static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
1875 struct msghdr *msg_sys, unsigned flags, int nosec) 1880 struct msghdr *msg_sys, unsigned flags,
1881 struct used_address *used_address)
1876{ 1882{
1877 struct compat_msghdr __user *msg_compat = 1883 struct compat_msghdr __user *msg_compat =
1878 (struct compat_msghdr __user *)msg; 1884 (struct compat_msghdr __user *)msg;
@@ -1953,8 +1959,30 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
1953 1959
1954 if (sock->file->f_flags & O_NONBLOCK) 1960 if (sock->file->f_flags & O_NONBLOCK)
1955 msg_sys->msg_flags |= MSG_DONTWAIT; 1961 msg_sys->msg_flags |= MSG_DONTWAIT;
1956 err = (nosec ? sock_sendmsg_nosec : sock_sendmsg)(sock, msg_sys, 1962 /*
1957 total_len); 1963 * If this is sendmmsg() and current destination address is same as
1964 * previously succeeded address, omit asking LSM's decision.
1965 * used_address->name_len is initialized to UINT_MAX so that the first
1966 * destination address never matches.
1967 */
1968 if (used_address && msg_sys->msg_name &&
1969 used_address->name_len == msg_sys->msg_namelen &&
1970 !memcmp(&used_address->name, msg_sys->msg_name,
1971 used_address->name_len)) {
1972 err = sock_sendmsg_nosec(sock, msg_sys, total_len);
1973 goto out_freectl;
1974 }
1975 err = sock_sendmsg(sock, msg_sys, total_len);
1976 /*
1977 * If this is sendmmsg() and sending to current destination address was
1978 * successful, remember it.
1979 */
1980 if (used_address && err >= 0) {
1981 used_address->name_len = msg_sys->msg_namelen;
1982 if (msg_sys->msg_name)
1983 memcpy(&used_address->name, msg_sys->msg_name,
1984 used_address->name_len);
1985 }
1958 1986
1959out_freectl: 1987out_freectl:
1960 if (ctl_buf != ctl) 1988 if (ctl_buf != ctl)
@@ -1979,7 +2007,7 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
1979 if (!sock) 2007 if (!sock)
1980 goto out; 2008 goto out;
1981 2009
1982 err = __sys_sendmsg(sock, msg, &msg_sys, flags, 0); 2010 err = __sys_sendmsg(sock, msg, &msg_sys, flags, NULL);
1983 2011
1984 fput_light(sock->file, fput_needed); 2012 fput_light(sock->file, fput_needed);
1985out: 2013out:
@@ -1998,6 +2026,10 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
1998 struct mmsghdr __user *entry; 2026 struct mmsghdr __user *entry;
1999 struct compat_mmsghdr __user *compat_entry; 2027 struct compat_mmsghdr __user *compat_entry;
2000 struct msghdr msg_sys; 2028 struct msghdr msg_sys;
2029 struct used_address used_address;
2030
2031 if (vlen > UIO_MAXIOV)
2032 vlen = UIO_MAXIOV;
2001 2033
2002 datagrams = 0; 2034 datagrams = 0;
2003 2035
@@ -2005,27 +2037,22 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
2005 if (!sock) 2037 if (!sock)
2006 return err; 2038 return err;
2007 2039
2008 err = sock_error(sock->sk); 2040 used_address.name_len = UINT_MAX;
2009 if (err)
2010 goto out_put;
2011
2012 entry = mmsg; 2041 entry = mmsg;
2013 compat_entry = (struct compat_mmsghdr __user *)mmsg; 2042 compat_entry = (struct compat_mmsghdr __user *)mmsg;
2043 err = 0;
2014 2044
2015 while (datagrams < vlen) { 2045 while (datagrams < vlen) {
2016 /*
2017 * No need to ask LSM for more than the first datagram.
2018 */
2019 if (MSG_CMSG_COMPAT & flags) { 2046 if (MSG_CMSG_COMPAT & flags) {
2020 err = __sys_sendmsg(sock, (struct msghdr __user *)compat_entry, 2047 err = __sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
2021 &msg_sys, flags, datagrams); 2048 &msg_sys, flags, &used_address);
2022 if (err < 0) 2049 if (err < 0)
2023 break; 2050 break;
2024 err = __put_user(err, &compat_entry->msg_len); 2051 err = __put_user(err, &compat_entry->msg_len);
2025 ++compat_entry; 2052 ++compat_entry;
2026 } else { 2053 } else {
2027 err = __sys_sendmsg(sock, (struct msghdr __user *)entry, 2054 err = __sys_sendmsg(sock, (struct msghdr __user *)entry,
2028 &msg_sys, flags, datagrams); 2055 &msg_sys, flags, &used_address);
2029 if (err < 0) 2056 if (err < 0)
2030 break; 2057 break;
2031 err = put_user(err, &entry->msg_len); 2058 err = put_user(err, &entry->msg_len);
@@ -2037,29 +2064,11 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
2037 ++datagrams; 2064 ++datagrams;
2038 } 2065 }
2039 2066
2040out_put:
2041 fput_light(sock->file, fput_needed); 2067 fput_light(sock->file, fput_needed);
2042 2068
2043 if (err == 0) 2069 /* We only return an error if no datagrams could be sent */
2044 return datagrams; 2070 if (datagrams != 0)
2045
2046 if (datagrams != 0) {
2047 /*
2048 * We may send less entries than requested (vlen) if the
2049 * sock is non blocking...
2050 */
2051 if (err != -EAGAIN) {
2052 /*
2053 * ... or if sendmsg returns an error after we
2054 * send some datagrams, where we record the
2055 * error to return on the next call or if the
2056 * app asks about it using getsockopt(SO_ERROR).
2057 */
2058 sock->sk->sk_err = -err;
2059 }
2060
2061 return datagrams; 2071 return datagrams;
2062 }
2063 2072
2064 return err; 2073 return err;
2065} 2074}
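The used_address bookkeeping above means sendmmsg() only consults the LSM for the first datagram to each new destination, the vlen cap keeps a single call bounded at UIO_MAXIOV, and the rewritten tail makes the return contract explicit: the count of sent datagrams, or an error only if none were sent. From user space that contract looks like this (loopback discard port chosen purely as an example):

#define _GNU_SOURCE
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in dst = { .sin_family = AF_INET, .sin_port = htons(9) };
	struct mmsghdr msgs[2];
	struct iovec iov[2];
	const char *payload[2] = { "one", "two" };

	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);
	memset(msgs, 0, sizeof(msgs));
	for (int i = 0; i < 2; i++) {
		iov[i].iov_base = (void *)payload[i];
		iov[i].iov_len = strlen(payload[i]);
		msgs[i].msg_hdr.msg_name = &dst;	/* same destination twice:
							 * second send can skip the LSM check */
		msgs[i].msg_hdr.msg_namelen = sizeof(dst);
		msgs[i].msg_hdr.msg_iov = &iov[i];
		msgs[i].msg_hdr.msg_iovlen = 1;
	}

	int sent = sendmmsg(fd, msgs, 2, 0);
	if (sent < 0)			/* error only if nothing was sent */
		perror("sendmmsg");
	else				/* each msg_len holds that datagram's size */
		printf("sent %d datagrams, first was %u bytes\n",
		       sent, msgs[0].msg_len);
	return 0;
}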
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 4cb70dc6e7a..e50502d8ceb 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -129,6 +129,9 @@ unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags)
129 for (i = 0; i < groups ; i++) 129 for (i = 0; i < groups ; i++)
130 if (cred->uc_gids[i] != GROUP_AT(acred->group_info, i)) 130 if (cred->uc_gids[i] != GROUP_AT(acred->group_info, i))
131 return 0; 131 return 0;
132 if (groups < NFS_NGROUPS &&
133 cred->uc_gids[groups] != NOGROUP)
134 return 0;
132 return 1; 135 return 1;
133} 136}
134 137
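The unx_match() hunk closes a false-match hole: when the caller's group list is shorter than NFS_NGROUPS, the cached credential must also be terminated (NOGROUP) at that position, otherwise a credential cached with extra groups would spuriously match. A sketch of the corrected comparison:

#include <stdio.h>

#define NFS_NGROUPS 16
#define NOGROUP ((unsigned int)-1)

/* cached[] is NOGROUP-terminated when it holds fewer than NFS_NGROUPS
 * entries; cred[] holds exactly n groups. */
static int groups_match(const unsigned int *cached, const unsigned int *cred, int n)
{
	for (int i = 0; i < n; i++)
		if (cached[i] != cred[i])
			return 0;
	if (n < NFS_NGROUPS && cached[n] != NOGROUP)
		return 0;	/* cached credential has extra groups */
	return 1;
}

int main(void)
{
	unsigned int cached[NFS_NGROUPS] = { 10, 20, NOGROUP };
	unsigned int cred[] = { 10 };

	printf("%d\n", groups_match(cached, cred, 1));	/* 0: extra group 20 */
	return 0;
}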
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 72ad836e4fe..4530a912b8b 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -828,6 +828,8 @@ static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
828{ 828{
829 ssize_t ret; 829 ssize_t ret;
830 830
831 if (count == 0)
832 return -EINVAL;
831 if (copy_from_user(kaddr, buf, count)) 833 if (copy_from_user(kaddr, buf, count))
832 return -EFAULT; 834 return -EFAULT;
833 kaddr[count] = '\0'; 835 kaddr[count] = '\0';
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index e45d2fbbe5a..bf0a7f64f00 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -193,7 +193,7 @@ static int rpcb_create_local_unix(void)
193 if (IS_ERR(clnt)) { 193 if (IS_ERR(clnt)) {
194 dprintk("RPC: failed to create AF_LOCAL rpcbind " 194 dprintk("RPC: failed to create AF_LOCAL rpcbind "
195 "client (errno %ld).\n", PTR_ERR(clnt)); 195 "client (errno %ld).\n", PTR_ERR(clnt));
196 result = -PTR_ERR(clnt); 196 result = PTR_ERR(clnt);
197 goto out; 197 goto out;
198 } 198 }
199 199
@@ -242,7 +242,7 @@ static int rpcb_create_local_net(void)
242 if (IS_ERR(clnt)) { 242 if (IS_ERR(clnt)) {
243 dprintk("RPC: failed to create local rpcbind " 243 dprintk("RPC: failed to create local rpcbind "
244 "client (errno %ld).\n", PTR_ERR(clnt)); 244 "client (errno %ld).\n", PTR_ERR(clnt));
245 result = -PTR_ERR(clnt); 245 result = PTR_ERR(clnt);
246 goto out; 246 goto out;
247 } 247 }
248 248
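Both rpcb hunks fix the same sign slip: PTR_ERR() already yields a negative errno, so negating it handed callers a positive value that the usual "if (result < 0)" checks would miss. A runnable miniature of the convention:

#include <stdio.h>

#define MAX_ERRNO 4095

/* Miniature ERR_PTR convention, as in the kernel's err.h. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *clnt = ERR_PTR(-111);	/* e.g. -ECONNREFUSED */

	if (IS_ERR(clnt)) {
		long result = PTR_ERR(clnt);	/* already negative */
		printf("result = %ld\n", result);
	}
	return 0;
}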
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 4814e246a87..c57f97f44e6 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -480,14 +480,18 @@ EXPORT_SYMBOL_GPL(rpc_wake_up_next);
480 */ 480 */
481void rpc_wake_up(struct rpc_wait_queue *queue) 481void rpc_wake_up(struct rpc_wait_queue *queue)
482{ 482{
483 struct rpc_task *task, *next;
484 struct list_head *head; 483 struct list_head *head;
485 484
486 spin_lock_bh(&queue->lock); 485 spin_lock_bh(&queue->lock);
487 head = &queue->tasks[queue->maxpriority]; 486 head = &queue->tasks[queue->maxpriority];
488 for (;;) { 487 for (;;) {
489 list_for_each_entry_safe(task, next, head, u.tk_wait.list) 488 while (!list_empty(head)) {
489 struct rpc_task *task;
490 task = list_first_entry(head,
491 struct rpc_task,
492 u.tk_wait.list);
490 rpc_wake_up_task_queue_locked(queue, task); 493 rpc_wake_up_task_queue_locked(queue, task);
494 }
491 if (head == &queue->tasks[0]) 495 if (head == &queue->tasks[0])
492 break; 496 break;
493 head--; 497 head--;
@@ -505,13 +509,16 @@ EXPORT_SYMBOL_GPL(rpc_wake_up);
505 */ 509 */
506void rpc_wake_up_status(struct rpc_wait_queue *queue, int status) 510void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
507{ 511{
508 struct rpc_task *task, *next;
509 struct list_head *head; 512 struct list_head *head;
510 513
511 spin_lock_bh(&queue->lock); 514 spin_lock_bh(&queue->lock);
512 head = &queue->tasks[queue->maxpriority]; 515 head = &queue->tasks[queue->maxpriority];
513 for (;;) { 516 for (;;) {
514 list_for_each_entry_safe(task, next, head, u.tk_wait.list) { 517 while (!list_empty(head)) {
518 struct rpc_task *task;
519 task = list_first_entry(head,
520 struct rpc_task,
521 u.tk_wait.list);
515 task->tk_status = status; 522 task->tk_status = status;
516 rpc_wake_up_task_queue_locked(queue, task); 523 rpc_wake_up_task_queue_locked(queue, task);
517 } 524 }
@@ -706,7 +713,9 @@ void rpc_execute(struct rpc_task *task)
706 713
707static void rpc_async_schedule(struct work_struct *work) 714static void rpc_async_schedule(struct work_struct *work)
708{ 715{
716 current->flags |= PF_FSTRANS;
709 __rpc_execute(container_of(work, struct rpc_task, u.tk_work)); 717 __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
718 current->flags &= ~PF_FSTRANS;
710} 719}
711 720
712/** 721/**
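Both wake-up loops switch from list_for_each_entry_safe() to draining the head, apparently because waking a task removes it from the queue, and on priority queues a single wake-up can delist further tasks grouped with it, leaving the iterator's saved next pointer dangling. The drain idiom is safe against arbitrary removals; a toy model:

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

static struct node *head;

/* Handling one node may also delist and free its neighbour, which is
 * exactly what breaks a cached-next iterator. */
static void handle(struct node *n)
{
	if (n->next && (n->id & 1) == 0) {	/* simulate a grouped wake-up */
		struct node *buddy = n->next;

		n->next = buddy->next;
		free(buddy);
	}
	head = n->next;
	free(n);
}

int main(void)
{
	for (int i = 4; i >= 1; i--) {
		struct node *n = malloc(sizeof(*n));

		n->id = i;
		n->next = head;
		head = n;
	}
	while (head)		/* drain: always re-read the head */
		handle(head);
	puts("drained without touching freed memory");
	return 0;
}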
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 2b90292e950..54c59ab3b10 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -167,6 +167,7 @@ svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
167 167
168fail_free: 168fail_free:
169 kfree(m->to_pool); 169 kfree(m->to_pool);
170 m->to_pool = NULL;
170fail: 171fail:
171 return -ENOMEM; 172 return -ENOMEM;
172} 173}
@@ -287,7 +288,9 @@ svc_pool_map_put(void)
287 if (!--m->count) { 288 if (!--m->count) {
288 m->mode = SVC_POOL_DEFAULT; 289 m->mode = SVC_POOL_DEFAULT;
289 kfree(m->to_pool); 290 kfree(m->to_pool);
291 m->to_pool = NULL;
290 kfree(m->pool_to); 292 kfree(m->pool_to);
293 m->pool_to = NULL;
291 m->npools = 0; 294 m->npools = 0;
292 } 295 }
293 296
@@ -472,17 +475,20 @@ svc_destroy(struct svc_serv *serv)
472 printk("svc_destroy: no threads for serv=%p!\n", serv); 475 printk("svc_destroy: no threads for serv=%p!\n", serv);
473 476
474 del_timer_sync(&serv->sv_temptimer); 477 del_timer_sync(&serv->sv_temptimer);
475 478 /*
476 svc_close_all(&serv->sv_tempsocks); 479 * The set of xprts (contained in the sv_tempsocks and
480 * sv_permsocks lists) is now constant, since it is modified
481 * only by accepting new sockets (done by service threads in
482 * svc_recv) or aging old ones (done by sv_temptimer), or
483 * configuration changes (excluded by whatever locking the
484 * caller is using--nfsd_mutex in the case of nfsd). So it's
485 * safe to traverse those lists and shut everything down:
486 */
487 svc_close_all(serv);
477 488
478 if (serv->sv_shutdown) 489 if (serv->sv_shutdown)
479 serv->sv_shutdown(serv); 490 serv->sv_shutdown(serv);
480 491
481 svc_close_all(&serv->sv_permsocks);
482
483 BUG_ON(!list_empty(&serv->sv_permsocks));
484 BUG_ON(!list_empty(&serv->sv_tempsocks));
485
486 cache_clean_deferred(serv); 492 cache_clean_deferred(serv);
487 493
488 if (svc_serv_is_pooled(serv)) 494 if (svc_serv_is_pooled(serv))
@@ -1296,7 +1302,8 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
1296 sizeof(req->rq_snd_buf)); 1302 sizeof(req->rq_snd_buf));
1297 return bc_send(req); 1303 return bc_send(req);
1298 } else { 1304 } else {
1299 /* Nothing to do to drop request */ 1305 /* drop request */
1306 xprt_free_bc_request(req);
1300 return 0; 1307 return 0;
1301 } 1308 }
1302} 1309}
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index ab86b7927f8..9d7ed0b48b5 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -901,13 +901,7 @@ void svc_delete_xprt(struct svc_xprt *xprt)
901 spin_lock_bh(&serv->sv_lock); 901 spin_lock_bh(&serv->sv_lock);
902 if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags)) 902 if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
903 list_del_init(&xprt->xpt_list); 903 list_del_init(&xprt->xpt_list);
904 /* 904 BUG_ON(!list_empty(&xprt->xpt_ready));
905 * We used to delete the transport from whichever list
906 * it's sk_xprt.xpt_ready node was on, but we don't actually
907 * need to. This is because the only time we're called
908 * while still attached to a queue, the queue itself
909 * is about to be destroyed (in svc_destroy).
910 */
911 if (test_bit(XPT_TEMP, &xprt->xpt_flags)) 905 if (test_bit(XPT_TEMP, &xprt->xpt_flags))
912 serv->sv_tmpcnt--; 906 serv->sv_tmpcnt--;
913 spin_unlock_bh(&serv->sv_lock); 907 spin_unlock_bh(&serv->sv_lock);
@@ -935,22 +929,48 @@ void svc_close_xprt(struct svc_xprt *xprt)
935} 929}
936EXPORT_SYMBOL_GPL(svc_close_xprt); 930EXPORT_SYMBOL_GPL(svc_close_xprt);
937 931
938void svc_close_all(struct list_head *xprt_list) 932static void svc_close_list(struct list_head *xprt_list)
939{ 933{
940 struct svc_xprt *xprt; 934 struct svc_xprt *xprt;
935
936 list_for_each_entry(xprt, xprt_list, xpt_list) {
937 set_bit(XPT_CLOSE, &xprt->xpt_flags);
938 set_bit(XPT_BUSY, &xprt->xpt_flags);
939 }
940}
941
942void svc_close_all(struct svc_serv *serv)
943{
944 struct svc_pool *pool;
945 struct svc_xprt *xprt;
941 struct svc_xprt *tmp; 946 struct svc_xprt *tmp;
947 int i;
948
949 svc_close_list(&serv->sv_tempsocks);
950 svc_close_list(&serv->sv_permsocks);
942 951
952 for (i = 0; i < serv->sv_nrpools; i++) {
953 pool = &serv->sv_pools[i];
954
955 spin_lock_bh(&pool->sp_lock);
956 while (!list_empty(&pool->sp_sockets)) {
957 xprt = list_first_entry(&pool->sp_sockets, struct svc_xprt, xpt_ready);
958 list_del_init(&xprt->xpt_ready);
959 }
960 spin_unlock_bh(&pool->sp_lock);
961 }
943 /* 962 /*
944 * The server is shutting down, and no more threads are running. 963 * At this point the sp_sockets lists will stay empty, since
945 * svc_xprt_enqueue() might still be running, but at worst it 964 * svc_enqueue will not add new entries without taking the
946 * will re-add the xprt to sp_sockets, which will soon get 965 * sp_lock and checking XPT_BUSY.
947 * freed. So we don't bother with any more locking, and don't
948 * leave the close to the (nonexistent) server threads:
949 */ 966 */
950 list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) { 967 list_for_each_entry_safe(xprt, tmp, &serv->sv_tempsocks, xpt_list)
951 set_bit(XPT_CLOSE, &xprt->xpt_flags);
952 svc_delete_xprt(xprt); 968 svc_delete_xprt(xprt);
953 } 969 list_for_each_entry_safe(xprt, tmp, &serv->sv_permsocks, xpt_list)
970 svc_delete_xprt(xprt);
971
972 BUG_ON(!list_empty(&serv->sv_permsocks));
973 BUG_ON(!list_empty(&serv->sv_tempsocks));
954} 974}
955 975
956/* 976/*
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 0867070bb5c..d0b5210d981 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -200,6 +200,7 @@ xprt_rdma_connect_worker(struct work_struct *work)
200 int rc = 0; 200 int rc = 0;
201 201
202 if (!xprt->shutdown) { 202 if (!xprt->shutdown) {
203 current->flags |= PF_FSTRANS;
203 xprt_clear_connected(xprt); 204 xprt_clear_connected(xprt);
204 205
205 dprintk("RPC: %s: %sconnect\n", __func__, 206 dprintk("RPC: %s: %sconnect\n", __func__,
@@ -212,10 +213,10 @@ xprt_rdma_connect_worker(struct work_struct *work)
212 213
213out: 214out:
214 xprt_wake_pending_tasks(xprt, rc); 215 xprt_wake_pending_tasks(xprt, rc);
215
216out_clear: 216out_clear:
217 dprintk("RPC: %s: exit\n", __func__); 217 dprintk("RPC: %s: exit\n", __func__);
218 xprt_clear_connecting(xprt); 218 xprt_clear_connecting(xprt);
219 current->flags &= ~PF_FSTRANS;
219} 220}
220 221
221/* 222/*
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 72abb735893..554111f42b0 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -485,7 +485,7 @@ static int xs_nospace(struct rpc_task *task)
485 struct rpc_rqst *req = task->tk_rqstp; 485 struct rpc_rqst *req = task->tk_rqstp;
486 struct rpc_xprt *xprt = req->rq_xprt; 486 struct rpc_xprt *xprt = req->rq_xprt;
487 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 487 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
488 int ret = 0; 488 int ret = -EAGAIN;
489 489
490 dprintk("RPC: %5u xmit incomplete (%u left of %u)\n", 490 dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
491 task->tk_pid, req->rq_slen - req->rq_bytes_sent, 491 task->tk_pid, req->rq_slen - req->rq_bytes_sent,
@@ -497,7 +497,6 @@ static int xs_nospace(struct rpc_task *task)
497 /* Don't race with disconnect */ 497 /* Don't race with disconnect */
498 if (xprt_connected(xprt)) { 498 if (xprt_connected(xprt)) {
499 if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) { 499 if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
500 ret = -EAGAIN;
501 /* 500 /*
502 * Notify TCP that we're limited by the application 501 * Notify TCP that we're limited by the application
503 * window size 502 * window size
@@ -1883,6 +1882,8 @@ static void xs_local_setup_socket(struct work_struct *work)
1883 if (xprt->shutdown) 1882 if (xprt->shutdown)
1884 goto out; 1883 goto out;
1885 1884
1885 current->flags |= PF_FSTRANS;
1886
1886 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); 1887 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
1887 status = __sock_create(xprt->xprt_net, AF_LOCAL, 1888 status = __sock_create(xprt->xprt_net, AF_LOCAL,
1888 SOCK_STREAM, 0, &sock, 1); 1889 SOCK_STREAM, 0, &sock, 1);
@@ -1916,6 +1917,7 @@ static void xs_local_setup_socket(struct work_struct *work)
1916out: 1917out:
1917 xprt_clear_connecting(xprt); 1918 xprt_clear_connecting(xprt);
1918 xprt_wake_pending_tasks(xprt, status); 1919 xprt_wake_pending_tasks(xprt, status);
1920 current->flags &= ~PF_FSTRANS;
1919} 1921}
1920 1922
1921static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) 1923static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
@@ -1958,6 +1960,8 @@ static void xs_udp_setup_socket(struct work_struct *work)
1958 if (xprt->shutdown) 1960 if (xprt->shutdown)
1959 goto out; 1961 goto out;
1960 1962
1963 current->flags |= PF_FSTRANS;
1964
1961 /* Start by resetting any existing state */ 1965 /* Start by resetting any existing state */
1962 xs_reset_transport(transport); 1966 xs_reset_transport(transport);
1963 sock = xs_create_sock(xprt, transport, 1967 sock = xs_create_sock(xprt, transport,
@@ -1976,6 +1980,7 @@ static void xs_udp_setup_socket(struct work_struct *work)
1976out: 1980out:
1977 xprt_clear_connecting(xprt); 1981 xprt_clear_connecting(xprt);
1978 xprt_wake_pending_tasks(xprt, status); 1982 xprt_wake_pending_tasks(xprt, status);
1983 current->flags &= ~PF_FSTRANS;
1979} 1984}
1980 1985
1981/* 1986/*
@@ -2101,6 +2106,8 @@ static void xs_tcp_setup_socket(struct work_struct *work)
2101 if (xprt->shutdown) 2106 if (xprt->shutdown)
2102 goto out; 2107 goto out;
2103 2108
2109 current->flags |= PF_FSTRANS;
2110
2104 if (!sock) { 2111 if (!sock) {
2105 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); 2112 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
2106 sock = xs_create_sock(xprt, transport, 2113 sock = xs_create_sock(xprt, transport,
@@ -2150,6 +2157,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
2150 case -EINPROGRESS: 2157 case -EINPROGRESS:
2151 case -EALREADY: 2158 case -EALREADY:
2152 xprt_clear_connecting(xprt); 2159 xprt_clear_connecting(xprt);
2160 current->flags &= ~PF_FSTRANS;
2153 return; 2161 return;
2154 case -EINVAL: 2162 case -EINVAL:
2155 /* Happens, for instance, if the user specified a link 2163 /* Happens, for instance, if the user specified a link
@@ -2162,6 +2170,7 @@ out_eagain:
2162out: 2170out:
2163 xprt_clear_connecting(xprt); 2171 xprt_clear_connecting(xprt);
2164 xprt_wake_pending_tasks(xprt, status); 2172 xprt_wake_pending_tasks(xprt, status);
2173 current->flags &= ~PF_FSTRANS;
2165} 2174}
2166 2175
2167/** 2176/**
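The PF_FSTRANS bracketing added throughout these connect workers marks the worker thread as being inside a filesystem transaction, apparently so that memory allocations made while (re)connecting the transport cannot recurse into reclaim that writes back over the same (e.g. NFS) transport and deadlocks. A kernel-style sketch of the bracket, not compilable on its own; a more defensive variant than the bare set/clear in the hunks above would preserve any pre-existing flag:

/* Guard an allocation-heavy reconnect path against reclaim recursion.
 * 'old' preserves a caller's PF_FSTRANS, which an unconditional clear
 * would drop. */
static void connect_worker_body(void)
{
	unsigned int old = current->flags & PF_FSTRANS;

	current->flags |= PF_FSTRANS;
	/* ... create socket, allocate buffers, connect ... */
	if (!old)
		current->flags &= ~PF_FSTRANS;
}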
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index 788a12c1eb5..2ab785064b7 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -602,36 +602,31 @@ static int wanrouter_device_new_if(struct wan_device *wandev,
602 * successfully, add it to the interface list. 602 * successfully, add it to the interface list.
603 */ 603 */
604 604
605 if (dev->name == NULL) { 605#ifdef WANDEBUG
606 err = -EINVAL; 606 printk(KERN_INFO "%s: registering interface %s...\n",
607 } else { 607 wanrouter_modname, dev->name);
608#endif
608 609
609 #ifdef WANDEBUG 610 err = register_netdev(dev);
610 printk(KERN_INFO "%s: registering interface %s...\n", 611 if (!err) {
611 wanrouter_modname, dev->name); 612 struct net_device *slave = NULL;
612 #endif 613 unsigned long smp_flags=0;
613 614
614 err = register_netdev(dev); 615 lock_adapter_irq(&wandev->lock, &smp_flags);
615 if (!err) { 616
616 struct net_device *slave = NULL; 617 if (wandev->dev == NULL) {
617 unsigned long smp_flags=0; 618 wandev->dev = dev;
618 619 } else {
619 lock_adapter_irq(&wandev->lock, &smp_flags); 620 for (slave=wandev->dev;
620 621 DEV_TO_SLAVE(slave);
621 if (wandev->dev == NULL) { 622 slave = DEV_TO_SLAVE(slave))
622 wandev->dev = dev; 623 DEV_TO_SLAVE(slave) = dev;
623 } else {
624 for (slave=wandev->dev;
625 DEV_TO_SLAVE(slave);
626 slave = DEV_TO_SLAVE(slave))
627 DEV_TO_SLAVE(slave) = dev;
628 }
629 ++wandev->ndev;
630
631 unlock_adapter_irq(&wandev->lock, &smp_flags);
632 err = 0; /* done !!! */
633 goto out;
634 } 624 }
625 ++wandev->ndev;
626
627 unlock_adapter_irq(&wandev->lock, &smp_flags);
628 err = 0; /* done !!! */
629 goto out;
635 } 630 }
636 if (wandev->del_if) 631 if (wandev->del_if)
637 wandev->del_if(wandev, dev); 632 wandev->del_if(wandev, dev);
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 1f1ef70f34f..8e2a668c923 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -159,3 +159,14 @@ config LIB80211_DEBUG
159 from lib80211. 159 from lib80211.
160 160
161 If unsure, say N. 161 If unsure, say N.
162
163config CFG80211_ALLOW_RECONNECT
164 bool "Allow reconnect while already connected"
165 depends on CFG80211
166 default n
167 help
168 The cfg80211 stack doesn't allow connecting if you are already
169 connected. This option allows making a new connection in that case.
170
171 Select this option ONLY for wlan drivers that are specifically
172 built for such purposes.
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 880dbe2e6f9..498c760a1d2 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -959,6 +959,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
959 */ 959 */
960 synchronize_rcu(); 960 synchronize_rcu();
961 INIT_LIST_HEAD(&wdev->list); 961 INIT_LIST_HEAD(&wdev->list);
962 /*
963 * Ensure that all events have been processed and
964 * freed.
965 */
966 cfg80211_process_wdev_events(wdev);
962 break; 967 break;
963 case NETDEV_PRE_UP: 968 case NETDEV_PRE_UP:
964 if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype))) 969 if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
diff --git a/net/wireless/core.h b/net/wireless/core.h
index a570ff9214e..83516455a1d 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -426,6 +426,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
426 struct net_device *dev, enum nl80211_iftype ntype, 426 struct net_device *dev, enum nl80211_iftype ntype,
427 u32 *flags, struct vif_params *params); 427 u32 *flags, struct vif_params *params);
428void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev); 428void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
429void cfg80211_process_wdev_events(struct wireless_dev *wdev);
429 430
430int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, 431int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
431 struct wireless_dev *wdev, 432 struct wireless_dev *wdev,
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index cea338150d0..379ed3a1322 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -83,8 +83,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
83 [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 }, 83 [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
84 [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 }, 84 [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 },
85 85
86 [NL80211_ATTR_MAC] = { .type = NLA_BINARY, .len = ETH_ALEN }, 86 [NL80211_ATTR_MAC] = { .len = ETH_ALEN },
87 [NL80211_ATTR_PREV_BSSID] = { .type = NLA_BINARY, .len = ETH_ALEN }, 87 [NL80211_ATTR_PREV_BSSID] = { .len = ETH_ALEN },
88 88
89 [NL80211_ATTR_KEY] = { .type = NLA_NESTED, }, 89 [NL80211_ATTR_KEY] = { .type = NLA_NESTED, },
90 [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY, 90 [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY,
@@ -126,8 +126,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
126 [NL80211_ATTR_MESH_CONFIG] = { .type = NLA_NESTED }, 126 [NL80211_ATTR_MESH_CONFIG] = { .type = NLA_NESTED },
127 [NL80211_ATTR_SUPPORT_MESH_AUTH] = { .type = NLA_FLAG }, 127 [NL80211_ATTR_SUPPORT_MESH_AUTH] = { .type = NLA_FLAG },
128 128
129 [NL80211_ATTR_HT_CAPABILITY] = { .type = NLA_BINARY, 129 [NL80211_ATTR_HT_CAPABILITY] = { .len = NL80211_HT_CAPABILITY_LEN },
130 .len = NL80211_HT_CAPABILITY_LEN },
131 130
132 [NL80211_ATTR_MGMT_SUBTYPE] = { .type = NLA_U8 }, 131 [NL80211_ATTR_MGMT_SUBTYPE] = { .type = NLA_U8 },
133 [NL80211_ATTR_IE] = { .type = NLA_BINARY, 132 [NL80211_ATTR_IE] = { .type = NLA_BINARY,
@@ -1182,6 +1181,11 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
1182 goto bad_res; 1181 goto bad_res;
1183 } 1182 }
1184 1183
1184 if (!netif_running(netdev)) {
1185 result = -ENETDOWN;
1186 goto bad_res;
1187 }
1188
1185 nla_for_each_nested(nl_txq_params, 1189 nla_for_each_nested(nl_txq_params,
1186 info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS], 1190 info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS],
1187 rem_txq_params) { 1191 rem_txq_params) {
@@ -2209,6 +2213,10 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
2209 } 2213 }
2210 nla_nest_end(msg, sinfoattr); 2214 nla_nest_end(msg, sinfoattr);
2211 2215
2216 if (sinfo->assoc_req_ies)
2217 NLA_PUT(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len,
2218 sinfo->assoc_req_ies);
2219
2212 return genlmsg_end(msg, hdr); 2220 return genlmsg_end(msg, hdr);
2213 2221
2214 nla_put_failure: 2222 nla_put_failure:
@@ -2236,6 +2244,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
2236 } 2244 }
2237 2245
2238 while (1) { 2246 while (1) {
2247 memset(&sinfo, 0, sizeof(sinfo));
2239 err = dev->ops->dump_station(&dev->wiphy, netdev, sta_idx, 2248 err = dev->ops->dump_station(&dev->wiphy, netdev, sta_idx,
2240 mac_addr, &sinfo); 2249 mac_addr, &sinfo);
2241 if (err == -ENOENT) 2250 if (err == -ENOENT)
@@ -4044,9 +4053,12 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
4044 if (len % sizeof(u32)) 4053 if (len % sizeof(u32))
4045 return -EINVAL; 4054 return -EINVAL;
4046 4055
4056 if (settings->n_akm_suites > NL80211_MAX_NR_AKM_SUITES)
4057 return -EINVAL;
4058
4047 memcpy(settings->akm_suites, data, len); 4059 memcpy(settings->akm_suites, data, len);
4048 4060
4049 for (i = 0; i < settings->n_ciphers_pairwise; i++) 4061 for (i = 0; i < settings->n_akm_suites; i++)
4050 if (!nl80211_valid_akm_suite(settings->akm_suites[i])) 4062 if (!nl80211_valid_akm_suite(settings->akm_suites[i]))
4051 return -EINVAL; 4063 return -EINVAL;
4052 } 4064 }
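The nl80211_crypto_settings() hunk is a classic validate-before-copy fix: the suite count derived from the attribute length must be checked against the destination array bound before the memcpy(), and the loop now validates the AKM suites themselves rather than reusing the pairwise-cipher count. In miniature:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_NR_AKM_SUITES 2	/* stand-in for NL80211_MAX_NR_AKM_SUITES */

struct crypto_settings {
	int n_akm_suites;
	uint32_t akm_suites[MAX_NR_AKM_SUITES];
};

static int set_akm_suites(struct crypto_settings *s, const void *data, size_t len)
{
	if (len % sizeof(uint32_t))
		return -EINVAL;
	s->n_akm_suites = len / sizeof(uint32_t);
	if (s->n_akm_suites > MAX_NR_AKM_SUITES)
		return -EINVAL;		/* check BEFORE copying */
	memcpy(s->akm_suites, data, len);
	return 0;
}

int main(void)
{
	struct crypto_settings s;
	uint32_t evil[4] = { 0 };	/* would overflow akm_suites[] */

	printf("%d\n", set_akm_suites(&s, evil, sizeof(evil)));	/* -22 */
	return 0;
}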
@@ -5430,7 +5442,7 @@ static struct genl_ops nl80211_ops[] = {
5430 .doit = nl80211_get_key, 5442 .doit = nl80211_get_key,
5431 .policy = nl80211_policy, 5443 .policy = nl80211_policy,
5432 .flags = GENL_ADMIN_PERM, 5444 .flags = GENL_ADMIN_PERM,
5433 .internal_flags = NL80211_FLAG_NEED_NETDEV | 5445 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
5434 NL80211_FLAG_NEED_RTNL, 5446 NL80211_FLAG_NEED_RTNL,
5435 }, 5447 },
5436 { 5448 {
@@ -5462,7 +5474,7 @@ static struct genl_ops nl80211_ops[] = {
5462 .policy = nl80211_policy, 5474 .policy = nl80211_policy,
5463 .flags = GENL_ADMIN_PERM, 5475 .flags = GENL_ADMIN_PERM,
5464 .doit = nl80211_addset_beacon, 5476 .doit = nl80211_addset_beacon,
5465 .internal_flags = NL80211_FLAG_NEED_NETDEV | 5477 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
5466 NL80211_FLAG_NEED_RTNL, 5478 NL80211_FLAG_NEED_RTNL,
5467 }, 5479 },
5468 { 5480 {
@@ -5470,7 +5482,7 @@ static struct genl_ops nl80211_ops[] = {
5470 .policy = nl80211_policy, 5482 .policy = nl80211_policy,
5471 .flags = GENL_ADMIN_PERM, 5483 .flags = GENL_ADMIN_PERM,
5472 .doit = nl80211_addset_beacon, 5484 .doit = nl80211_addset_beacon,
5473 .internal_flags = NL80211_FLAG_NEED_NETDEV | 5485 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
5474 NL80211_FLAG_NEED_RTNL, 5486 NL80211_FLAG_NEED_RTNL,
5475 }, 5487 },
5476 { 5488 {
@@ -5494,7 +5506,7 @@ static struct genl_ops nl80211_ops[] = {
5494 .doit = nl80211_set_station, 5506 .doit = nl80211_set_station,
5495 .policy = nl80211_policy, 5507 .policy = nl80211_policy,
5496 .flags = GENL_ADMIN_PERM, 5508 .flags = GENL_ADMIN_PERM,
5497 .internal_flags = NL80211_FLAG_NEED_NETDEV | 5509 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
5498 NL80211_FLAG_NEED_RTNL, 5510 NL80211_FLAG_NEED_RTNL,
5499 }, 5511 },
5500 { 5512 {
@@ -5510,7 +5522,7 @@ static struct genl_ops nl80211_ops[] = {
5510 .doit = nl80211_del_station, 5522 .doit = nl80211_del_station,
5511 .policy = nl80211_policy, 5523 .policy = nl80211_policy,
5512 .flags = GENL_ADMIN_PERM, 5524 .flags = GENL_ADMIN_PERM,
5513 .internal_flags = NL80211_FLAG_NEED_NETDEV | 5525 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
5514 NL80211_FLAG_NEED_RTNL, 5526 NL80211_FLAG_NEED_RTNL,
5515 }, 5527 },
5516 { 5528 {
@@ -5543,7 +5555,7 @@ static struct genl_ops nl80211_ops[] = {
5543 .doit = nl80211_del_mpath, 5555 .doit = nl80211_del_mpath,
5544 .policy = nl80211_policy, 5556 .policy = nl80211_policy,
5545 .flags = GENL_ADMIN_PERM, 5557 .flags = GENL_ADMIN_PERM,
5546 .internal_flags = NL80211_FLAG_NEED_NETDEV | 5558 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
5547 NL80211_FLAG_NEED_RTNL, 5559 NL80211_FLAG_NEED_RTNL,
5548 }, 5560 },
5549 { 5561 {
@@ -5551,7 +5563,7 @@ static struct genl_ops nl80211_ops[] = {
5551 .doit = nl80211_set_bss, 5563 .doit = nl80211_set_bss,
5552 .policy = nl80211_policy, 5564 .policy = nl80211_policy,
5553 .flags = GENL_ADMIN_PERM, 5565 .flags = GENL_ADMIN_PERM,
5554 .internal_flags = NL80211_FLAG_NEED_NETDEV | 5566 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
5555 NL80211_FLAG_NEED_RTNL, 5567 NL80211_FLAG_NEED_RTNL,
5556 }, 5568 },
5557 { 5569 {
@@ -5577,7 +5589,7 @@ static struct genl_ops nl80211_ops[] = {
5577 .doit = nl80211_get_mesh_config, 5589 .doit = nl80211_get_mesh_config,
5578 .policy = nl80211_policy, 5590 .policy = nl80211_policy,
5579 /* can be retrieved by unprivileged users */ 5591 /* can be retrieved by unprivileged users */
5580 .internal_flags = NL80211_FLAG_NEED_NETDEV | 5592 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
5581 NL80211_FLAG_NEED_RTNL, 5593 NL80211_FLAG_NEED_RTNL,
5582 }, 5594 },
5583 { 5595 {
@@ -5709,7 +5721,7 @@ static struct genl_ops nl80211_ops[] = {
5709 .doit = nl80211_setdel_pmksa, 5721 .doit = nl80211_setdel_pmksa,
5710 .policy = nl80211_policy, 5722 .policy = nl80211_policy,
5711 .flags = GENL_ADMIN_PERM, 5723 .flags = GENL_ADMIN_PERM,
5712 .internal_flags = NL80211_FLAG_NEED_NETDEV | 5724 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
5713 NL80211_FLAG_NEED_RTNL, 5725 NL80211_FLAG_NEED_RTNL,
5714 }, 5726 },
5715 { 5727 {
@@ -5717,7 +5729,7 @@ static struct genl_ops nl80211_ops[] = {
5717 .doit = nl80211_setdel_pmksa, 5729 .doit = nl80211_setdel_pmksa,
5718 .policy = nl80211_policy, 5730 .policy = nl80211_policy,
5719 .flags = GENL_ADMIN_PERM, 5731 .flags = GENL_ADMIN_PERM,
5720 .internal_flags = NL80211_FLAG_NEED_NETDEV | 5732 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
5721 NL80211_FLAG_NEED_RTNL, 5733 NL80211_FLAG_NEED_RTNL,
5722 }, 5734 },
5723 { 5735 {
@@ -5725,7 +5737,7 @@ static struct genl_ops nl80211_ops[] = {
5725 .doit = nl80211_flush_pmksa, 5737 .doit = nl80211_flush_pmksa,
5726 .policy = nl80211_policy, 5738 .policy = nl80211_policy,
5727 .flags = GENL_ADMIN_PERM, 5739 .flags = GENL_ADMIN_PERM,
5728 .internal_flags = NL80211_FLAG_NEED_NETDEV | 5740 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
5729 NL80211_FLAG_NEED_RTNL, 5741 NL80211_FLAG_NEED_RTNL,
5730 }, 5742 },
5731 { 5743 {
@@ -5813,7 +5825,7 @@ static struct genl_ops nl80211_ops[] = {
5813 .doit = nl80211_set_wds_peer, 5825 .doit = nl80211_set_wds_peer,
5814 .policy = nl80211_policy, 5826 .policy = nl80211_policy,
5815 .flags = GENL_ADMIN_PERM, 5827 .flags = GENL_ADMIN_PERM,
5816 .internal_flags = NL80211_FLAG_NEED_NETDEV | 5828 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
5817 NL80211_FLAG_NEED_RTNL, 5829 NL80211_FLAG_NEED_RTNL,
5818 }, 5830 },
5819 { 5831 {
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 1ad0f39fe09..092775af035 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -57,8 +57,17 @@
57#define REG_DBG_PRINT(args...) 57#define REG_DBG_PRINT(args...)
58#endif 58#endif
59 59
60static struct regulatory_request core_request_world = {
61 .initiator = NL80211_REGDOM_SET_BY_CORE,
62 .alpha2[0] = '0',
63 .alpha2[1] = '0',
64 .intersect = false,
65 .processed = true,
66 .country_ie_env = ENVIRON_ANY,
67};
68
60/* Receipt of information from last regulatory request */ 69/* Receipt of information from last regulatory request */
61static struct regulatory_request *last_request; 70static struct regulatory_request *last_request = &core_request_world;
62 71
63/* To trigger userspace events */ 72/* To trigger userspace events */
64static struct platform_device *reg_pdev; 73static struct platform_device *reg_pdev;
@@ -150,7 +159,7 @@ static char user_alpha2[2];
150module_param(ieee80211_regdom, charp, 0444); 159module_param(ieee80211_regdom, charp, 0444);
151MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code"); 160MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
152 161
153static void reset_regdomains(void) 162static void reset_regdomains(bool full_reset)
154{ 163{
155 /* avoid freeing static information or freeing something twice */ 164 /* avoid freeing static information or freeing something twice */
156 if (cfg80211_regdomain == cfg80211_world_regdom) 165 if (cfg80211_regdomain == cfg80211_world_regdom)
@@ -165,6 +174,13 @@ static void reset_regdomains(void)
165 174
166 cfg80211_world_regdom = &world_regdom; 175 cfg80211_world_regdom = &world_regdom;
167 cfg80211_regdomain = NULL; 176 cfg80211_regdomain = NULL;
177
178 if (!full_reset)
179 return;
180
181 if (last_request != &core_request_world)
182 kfree(last_request);
183 last_request = &core_request_world;
168} 184}
169 185
170/* 186/*
@@ -175,7 +191,7 @@ static void update_world_regdomain(const struct ieee80211_regdomain *rd)
175{ 191{
176 BUG_ON(!last_request); 192 BUG_ON(!last_request);
177 193
178 reset_regdomains(); 194 reset_regdomains(false);
179 195
180 cfg80211_world_regdom = rd; 196 cfg80211_world_regdom = rd;
181 cfg80211_regdomain = rd; 197 cfg80211_regdomain = rd;
@@ -363,7 +379,15 @@ static void reg_regdb_query(const char *alpha2)
363 379
364 schedule_work(&reg_regdb_work); 380 schedule_work(&reg_regdb_work);
365} 381}
382
383/* Feel free to add any other sanity checks here */
384static void reg_regdb_size_check(void)
385{
386 /* We should ideally BUILD_BUG_ON() but then random builds would fail */
387 WARN_ONCE(!reg_regdb_size, "db.txt is empty, you should update it...");
388}
366#else 389#else
390static inline void reg_regdb_size_check(void) {}
367static inline void reg_regdb_query(const char *alpha2) {} 391static inline void reg_regdb_query(const char *alpha2) {}
368#endif /* CONFIG_CFG80211_INTERNAL_REGDB */ 392#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
369 393
@@ -852,6 +876,7 @@ static void handle_channel(struct wiphy *wiphy,
852 return; 876 return;
853 } 877 }
854 878
879 chan->beacon_found = false;
855 chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags); 880 chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
856 chan->max_antenna_gain = min(chan->orig_mag, 881 chan->max_antenna_gain = min(chan->orig_mag,
857 (int) MBI_TO_DBI(power_rule->max_antenna_gain)); 882 (int) MBI_TO_DBI(power_rule->max_antenna_gain));
@@ -1125,12 +1150,13 @@ void wiphy_update_regulatory(struct wiphy *wiphy,
1125 enum ieee80211_band band; 1150 enum ieee80211_band band;
1126 1151
1127 if (ignore_reg_update(wiphy, initiator)) 1152 if (ignore_reg_update(wiphy, initiator))
1128 goto out; 1153 return;
1154
1129 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 1155 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1130 if (wiphy->bands[band]) 1156 if (wiphy->bands[band])
1131 handle_band(wiphy, band, initiator); 1157 handle_band(wiphy, band, initiator);
1132 } 1158 }
1133out: 1159
1134 reg_process_beacons(wiphy); 1160 reg_process_beacons(wiphy);
1135 reg_process_ht_flags(wiphy); 1161 reg_process_ht_flags(wiphy);
1136 if (wiphy->reg_notifier) 1162 if (wiphy->reg_notifier)
@@ -1332,7 +1358,7 @@ static void reg_set_request_processed(void)
1332 spin_unlock(&reg_requests_lock); 1358 spin_unlock(&reg_requests_lock);
1333 1359
1334 if (last_request->initiator == NL80211_REGDOM_SET_BY_USER) 1360 if (last_request->initiator == NL80211_REGDOM_SET_BY_USER)
1335 cancel_delayed_work_sync(&reg_timeout); 1361 cancel_delayed_work(&reg_timeout);
1336 1362
1337 if (need_more_processing) 1363 if (need_more_processing)
1338 schedule_work(&reg_work); 1364 schedule_work(&reg_work);
@@ -1394,7 +1420,8 @@ static int __regulatory_hint(struct wiphy *wiphy,
1394 } 1420 }
1395 1421
1396new_request: 1422new_request:
1397 kfree(last_request); 1423 if (last_request != &core_request_world)
1424 kfree(last_request);
1398 1425
1399 last_request = pending_request; 1426 last_request = pending_request;
1400 last_request->intersect = intersect; 1427 last_request->intersect = intersect;
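With last_request now defaulting to the static core_request_world, every site that used to kfree() the old request must first check for the sentinel, as the guard above does; freeing a static object would corrupt the allocator. The pattern in isolation:

#include <stdio.h>
#include <stdlib.h>

struct request { char alpha2[2]; };

static struct request core_request_world = { { '0', '0' } };
static struct request *last_request = &core_request_world;

static void set_last_request(struct request *new_req)
{
	if (last_request != &core_request_world)
		free(last_request);	/* never free the static default */
	last_request = new_req ? new_req : &core_request_world;
}

int main(void)
{
	struct request *r = malloc(sizeof(*r));

	set_last_request(r);	/* heap request installed */
	set_last_request(NULL);	/* frees r, falls back to the sentinel */
	printf("%c%c\n", last_request->alpha2[0], last_request->alpha2[1]);
	return 0;
}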
@@ -1564,9 +1591,6 @@ static int regulatory_hint_core(const char *alpha2)
1564{ 1591{
1565 struct regulatory_request *request; 1592 struct regulatory_request *request;
1566 1593
1567 kfree(last_request);
1568 last_request = NULL;
1569
1570 request = kzalloc(sizeof(struct regulatory_request), 1594 request = kzalloc(sizeof(struct regulatory_request),
1571 GFP_KERNEL); 1595 GFP_KERNEL);
1572 if (!request) 1596 if (!request)
@@ -1757,6 +1781,7 @@ static void restore_alpha2(char *alpha2, bool reset_user)
1757static void restore_regulatory_settings(bool reset_user) 1781static void restore_regulatory_settings(bool reset_user)
1758{ 1782{
1759 char alpha2[2]; 1783 char alpha2[2];
1784 char world_alpha2[2];
1760 struct reg_beacon *reg_beacon, *btmp; 1785 struct reg_beacon *reg_beacon, *btmp;
1761 struct regulatory_request *reg_request, *tmp; 1786 struct regulatory_request *reg_request, *tmp;
1762 LIST_HEAD(tmp_reg_req_list); 1787 LIST_HEAD(tmp_reg_req_list);
@@ -1764,7 +1789,7 @@ static void restore_regulatory_settings(bool reset_user)
1764 mutex_lock(&cfg80211_mutex); 1789 mutex_lock(&cfg80211_mutex);
1765 mutex_lock(&reg_mutex); 1790 mutex_lock(&reg_mutex);
1766 1791
1767 reset_regdomains(); 1792 reset_regdomains(true);
1768 restore_alpha2(alpha2, reset_user); 1793 restore_alpha2(alpha2, reset_user);
1769 1794
1770 /* 1795 /*
@@ -1807,11 +1832,13 @@ static void restore_regulatory_settings(bool reset_user)
1807 1832
1808 /* First restore to the basic regulatory settings */ 1833 /* First restore to the basic regulatory settings */
1809 cfg80211_regdomain = cfg80211_world_regdom; 1834 cfg80211_regdomain = cfg80211_world_regdom;
1835 world_alpha2[0] = cfg80211_regdomain->alpha2[0];
1836 world_alpha2[1] = cfg80211_regdomain->alpha2[1];
1810 1837
1811 mutex_unlock(&reg_mutex); 1838 mutex_unlock(&reg_mutex);
1812 mutex_unlock(&cfg80211_mutex); 1839 mutex_unlock(&cfg80211_mutex);
1813 1840
1814 regulatory_hint_core(cfg80211_regdomain->alpha2); 1841 regulatory_hint_core(world_alpha2);
1815 1842
1816 /* 1843 /*
1817 * This restores the ieee80211_regdom module parameter 1844 * This restores the ieee80211_regdom module parameter
@@ -2024,12 +2051,18 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2024 } 2051 }
2025 2052
2026 request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); 2053 request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
2054 if (!request_wiphy &&
2055 (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER ||
2056 last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)) {
2057 schedule_delayed_work(&reg_timeout, 0);
2058 return -ENODEV;
2059 }
2027 2060
2028 if (!last_request->intersect) { 2061 if (!last_request->intersect) {
2029 int r; 2062 int r;
2030 2063
2031 if (last_request->initiator != NL80211_REGDOM_SET_BY_DRIVER) { 2064 if (last_request->initiator != NL80211_REGDOM_SET_BY_DRIVER) {
2032 reset_regdomains(); 2065 reset_regdomains(false);
2033 cfg80211_regdomain = rd; 2066 cfg80211_regdomain = rd;
2034 return 0; 2067 return 0;
2035 } 2068 }
@@ -2050,7 +2083,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2050 if (r) 2083 if (r)
2051 return r; 2084 return r;
2052 2085
2053 reset_regdomains(); 2086 reset_regdomains(false);
2054 cfg80211_regdomain = rd; 2087 cfg80211_regdomain = rd;
2055 return 0; 2088 return 0;
2056 } 2089 }
@@ -2075,7 +2108,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2075 2108
2076 rd = NULL; 2109 rd = NULL;
2077 2110
2078 reset_regdomains(); 2111 reset_regdomains(false);
2079 cfg80211_regdomain = intersected_rd; 2112 cfg80211_regdomain = intersected_rd;
2080 2113
2081 return 0; 2114 return 0;
@@ -2095,7 +2128,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2095 kfree(rd); 2128 kfree(rd);
2096 rd = NULL; 2129 rd = NULL;
2097 2130
2098 reset_regdomains(); 2131 reset_regdomains(false);
2099 cfg80211_regdomain = intersected_rd; 2132 cfg80211_regdomain = intersected_rd;
2100 2133
2101 return 0; 2134 return 0;
@@ -2203,6 +2236,8 @@ int __init regulatory_init(void)
2203 spin_lock_init(&reg_requests_lock); 2236 spin_lock_init(&reg_requests_lock);
2204 spin_lock_init(&reg_pending_beacons_lock); 2237 spin_lock_init(&reg_pending_beacons_lock);
2205 2238
2239 reg_regdb_size_check();
2240
2206 cfg80211_regdomain = cfg80211_world_regdom; 2241 cfg80211_regdomain = cfg80211_world_regdom;
2207 2242
2208 user_alpha2[0] = '9'; 2243 user_alpha2[0] = '9';
@@ -2248,9 +2283,9 @@ void /* __init_or_exit */ regulatory_exit(void)
2248 mutex_lock(&cfg80211_mutex); 2283 mutex_lock(&cfg80211_mutex);
2249 mutex_lock(&reg_mutex); 2284 mutex_lock(&reg_mutex);
2250 2285
2251 reset_regdomains(); 2286 reset_regdomains(true);
2252 2287
2253 kfree(last_request); 2288 dev_set_uevent_suppress(&reg_pdev->dev, true);
2254 2289
2255 platform_device_unregister(reg_pdev); 2290 platform_device_unregister(reg_pdev);
2256 2291
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index ae0c2256ba3..cbbc92731ec 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -17,7 +17,7 @@
17#include "nl80211.h" 17#include "nl80211.h"
18#include "wext-compat.h" 18#include "wext-compat.h"
19 19
20#define IEEE80211_SCAN_RESULT_EXPIRE (15 * HZ) 20#define IEEE80211_SCAN_RESULT_EXPIRE (3 * HZ)
21 21
22void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) 22void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)
23{ 23{
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index b7b6ff8be55..cf4be21236b 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -659,8 +659,10 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
659 wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) 659 wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
660 return; 660 return;
661 661
662#ifndef CONFIG_CFG80211_ALLOW_RECONNECT
662 if (wdev->sme_state != CFG80211_SME_CONNECTED) 663 if (wdev->sme_state != CFG80211_SME_CONNECTED)
663 return; 664 return;
665#endif
664 666
665 if (wdev->current_bss) { 667 if (wdev->current_bss) {
666 cfg80211_unhold_bss(wdev->current_bss); 668 cfg80211_unhold_bss(wdev->current_bss);
@@ -758,10 +760,14 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev,
758 760
759 ASSERT_WDEV_LOCK(wdev); 761 ASSERT_WDEV_LOCK(wdev);
760 762
763#ifndef CONFIG_CFG80211_ALLOW_RECONNECT
761 if (wdev->sme_state != CFG80211_SME_IDLE) 764 if (wdev->sme_state != CFG80211_SME_IDLE)
762 return -EALREADY; 765 return -EALREADY;
763 766
764 if (WARN_ON(wdev->connect_keys)) { 767 if (WARN_ON(wdev->connect_keys)) {
768#else
769 if (wdev->connect_keys) {
770#endif
765 kfree(wdev->connect_keys); 771 kfree(wdev->connect_keys);
766 wdev->connect_keys = NULL; 772 wdev->connect_keys = NULL;
767 } 773 }
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 4d7b83fbc32..18e22bef386 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -719,7 +719,7 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
719 wdev->connect_keys = NULL; 719 wdev->connect_keys = NULL;
720} 720}
721 721
722static void cfg80211_process_wdev_events(struct wireless_dev *wdev) 722void cfg80211_process_wdev_events(struct wireless_dev *wdev)
723{ 723{
724 struct cfg80211_event *ev; 724 struct cfg80211_event *ev;
725 unsigned long flags; 725 unsigned long flags;
@@ -807,7 +807,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
807 ntype == NL80211_IFTYPE_P2P_CLIENT)) 807 ntype == NL80211_IFTYPE_P2P_CLIENT))
808 return -EBUSY; 808 return -EBUSY;
809 809
810 if (ntype != otype) { 810 if (ntype != otype && netif_running(dev)) {
811 err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr, 811 err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
812 ntype); 812 ntype);
813 if (err) 813 if (err)
@@ -937,6 +937,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
937 enum nl80211_iftype iftype) 937 enum nl80211_iftype iftype)
938{ 938{
939 struct wireless_dev *wdev_iter; 939 struct wireless_dev *wdev_iter;
940 u32 used_iftypes = BIT(iftype);
940 int num[NUM_NL80211_IFTYPES]; 941 int num[NUM_NL80211_IFTYPES];
941 int total = 1; 942 int total = 1;
942 int i, j; 943 int i, j;
@@ -970,12 +971,17 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
970 971
971 num[wdev_iter->iftype]++; 972 num[wdev_iter->iftype]++;
972 total++; 973 total++;
974 used_iftypes |= BIT(wdev_iter->iftype);
973 } 975 }
974 mutex_unlock(&rdev->devlist_mtx); 976 mutex_unlock(&rdev->devlist_mtx);
975 977
978 if (total == 1)
979 return 0;
980
976 for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) { 981 for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
977 const struct ieee80211_iface_combination *c; 982 const struct ieee80211_iface_combination *c;
978 struct ieee80211_iface_limit *limits; 983 struct ieee80211_iface_limit *limits;
984 u32 all_iftypes = 0;
979 985
980 c = &rdev->wiphy.iface_combinations[i]; 986 c = &rdev->wiphy.iface_combinations[i];
981 987
@@ -990,14 +996,28 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
990 if (rdev->wiphy.software_iftypes & BIT(iftype)) 996 if (rdev->wiphy.software_iftypes & BIT(iftype))
991 continue; 997 continue;
992 for (j = 0; j < c->n_limits; j++) { 998 for (j = 0; j < c->n_limits; j++) {
993 if (!(limits[j].types & iftype)) 999 all_iftypes |= limits[j].types;
1000 if (!(limits[j].types & BIT(iftype)))
994 continue; 1001 continue;
995 if (limits[j].max < num[iftype]) 1002 if (limits[j].max < num[iftype])
996 goto cont; 1003 goto cont;
997 limits[j].max -= num[iftype]; 1004 limits[j].max -= num[iftype];
998 } 1005 }
999 } 1006 }
1000 /* yay, it fits */ 1007
1008 /*
1009 * Finally check that all iftypes that we're currently
1010 * using are actually part of this combination. If they
1011 * aren't then we can't use this combination and have
1012 * to continue to the next.
1013 */
1014 if ((all_iftypes & used_iftypes) != used_iftypes)
1015 goto cont;
1016
1017 /*
1018 * This combination covered all interface types and
1019 * supported the requested numbers, so we're good.
1020 */
1001 kfree(limits); 1021 kfree(limits);
1002 return 0; 1022 return 0;
1003 cont: 1023 cont:
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 4680b1e4c79..373e14f21a1 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -295,7 +295,8 @@ static struct sock *x25_find_listener(struct x25_address *addr,
295 * Found a listening socket, now check the incoming 295 * Found a listening socket, now check the incoming
296 * call user data vs this sockets call user data 296 * call user data vs this sockets call user data
297 */ 297 */
298 if(skb->len > 0 && x25_sk(s)->cudmatchlength > 0) { 298 if (x25_sk(s)->cudmatchlength > 0 &&
299 skb->len >= x25_sk(s)->cudmatchlength) {
299 if((memcmp(x25_sk(s)->calluserdata.cuddata, 300 if((memcmp(x25_sk(s)->calluserdata.cuddata,
300 skb->data, 301 skb->data,
301 x25_sk(s)->cudmatchlength)) == 0) { 302 x25_sk(s)->cudmatchlength)) == 0) {
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 58064d9e565..791ab2e77f3 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -462,8 +462,8 @@ static struct xfrm_algo_desc ealg_list[] = {
462 .desc = { 462 .desc = {
463 .sadb_alg_id = SADB_X_EALG_AESCTR, 463 .sadb_alg_id = SADB_X_EALG_AESCTR,
464 .sadb_alg_ivlen = 8, 464 .sadb_alg_ivlen = 8,
465 .sadb_alg_minbits = 128, 465 .sadb_alg_minbits = 160,
466 .sadb_alg_maxbits = 256 466 .sadb_alg_maxbits = 288
467 } 467 }
468}, 468},
469}; 469};
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index a026b0ef244..54a0dc2e2f8 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -212,6 +212,11 @@ resume:
212 /* only the first xfrm gets the encap type */ 212 /* only the first xfrm gets the encap type */
213 encap_type = 0; 213 encap_type = 0;
214 214
215 if (async && x->repl->check(x, skb, seq)) {
216 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
217 goto drop_unlock;
218 }
219
215 x->repl->advance(x, seq); 220 x->repl->advance(x, seq);
216 221
217 x->curlft.bytes += skb->len; 222 x->curlft.bytes += skb->len;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 5ce74a38552..0c0e40e9cfc 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1497,7 +1497,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1497 goto free_dst; 1497 goto free_dst;
1498 1498
1499 /* Copy neighbour for reachability confirmation */ 1499 /* Copy neighbour for reachability confirmation */
1500 dst0->neighbour = neigh_clone(dst->neighbour); 1500 dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour(dst)));
1501 1501
1502 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len); 1502 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
1503 xfrm_init_pmtu(dst_prev); 1503 xfrm_init_pmtu(dst_prev);
@@ -1917,6 +1917,9 @@ no_transform:
1917 } 1917 }
1918ok: 1918ok:
1919 xfrm_pols_put(pols, drop_pols); 1919 xfrm_pols_put(pols, drop_pols);
1920 if (dst && dst->xfrm &&
1921 dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
1922 dst->flags |= DST_XFRM_TUNNEL;
1920 return dst; 1923 return dst;
1921 1924
1922nopol: 1925nopol:
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index b11ea692bd7..3235023eaf4 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -166,7 +166,7 @@ static void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
166 } 166 }
167 167
168 if (xfrm_aevent_is_on(xs_net(x))) 168 if (xfrm_aevent_is_on(xs_net(x)))
169 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); 169 x->repl->notify(x, XFRM_REPLAY_UPDATE);
170} 170}
171 171
172static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb) 172static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb)
@@ -293,7 +293,7 @@ static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
293 } 293 }
294 294
295 if (xfrm_aevent_is_on(xs_net(x))) 295 if (xfrm_aevent_is_on(xs_net(x)))
296 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); 296 x->repl->notify(x, XFRM_REPLAY_UPDATE);
297} 297}
298 298
299static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event) 299static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)
@@ -502,7 +502,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
502 } 502 }
503 503
504 if (xfrm_aevent_is_on(xs_net(x))) 504 if (xfrm_aevent_is_on(xs_net(x)))
505 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); 505 x->repl->notify(x, XFRM_REPLAY_UPDATE);
506} 506}
507 507
508static struct xfrm_replay xfrm_replay_legacy = { 508static struct xfrm_replay xfrm_replay_legacy = {