author		Christopher Kenna <cjk@cs.unc.edu>	2012-09-28 13:46:28 -0400
committer	Christopher Kenna <cjk@cs.unc.edu>	2012-09-28 14:50:15 -0400
commit		daa22703f14c007e93b464c45fa60019a36f546d (patch)
tree		a1a130b6e128dc9d57c35c026977e1b4953105e1 /net
parent		5aa287dcf1b5879aa0150b0511833c52885f5b4c (diff)
Apply k4412 kernel from HardKernel for ODROID-X.
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c                    |    2
-rw-r--r--  net/Kconfig                         |   16
-rw-r--r--  net/Makefile                        |    1
-rw-r--r--  net/activity_stats.c                |  115
-rw-r--r--  net/bluetooth/Kconfig               |    9
-rw-r--r--  net/bluetooth/Makefile              |    2
-rw-r--r--  net/bluetooth/af_bluetooth.c        |   44
-rw-r--r--  net/bluetooth/bnep/bnep.h           |    1
-rw-r--r--  net/bluetooth/bnep/core.c           |   13
-rw-r--r--  net/bluetooth/cmtp/capi.c           |    3
-rw-r--r--  net/bluetooth/hci_conn.c            |  132
-rw-r--r--  net/bluetooth/hci_core.c            |  266
-rw-r--r--  net/bluetooth/hci_event.c           |  311
-rw-r--r--  net/bluetooth/hci_sock.c            |   70
-rw-r--r--  net/bluetooth/hidp/core.c           |   19
-rw-r--r--  net/bluetooth/l2cap_core.c          | 1043
-rw-r--r--  net/bluetooth/l2cap_sock.c          |  472
-rw-r--r--  net/bluetooth/lib.c                 |   23
-rw-r--r--  net/bluetooth/mgmt.c                |  283
-rw-r--r--  net/bluetooth/rfcomm/core.c         |   18
-rw-r--r--  net/bluetooth/rfcomm/sock.c         |   33
-rw-r--r--  net/bluetooth/sco.c                 |   86
-rw-r--r--  net/bluetooth/smp.c                 |  702
-rw-r--r--  net/bridge/br_device.c              |   11
-rw-r--r--  net/ipv4/Makefile                   |    1
-rw-r--r--  net/ipv4/af_inet.c                  |   18
-rw-r--r--  net/ipv4/devinet.c                  |    8
-rw-r--r--  net/ipv4/netfilter/Kconfig          |   12
-rw-r--r--  net/ipv4/netfilter/ipt_REJECT.c     |    8
-rw-r--r--  net/ipv4/sysfs_net_ipv4.c           |   88
-rw-r--r--  net/ipv4/tcp.c                      |  121
-rw-r--r--  net/ipv6/addrconf.c                 |   69
-rw-r--r--  net/ipv6/af_inet6.c                 |   34
-rw-r--r--  net/ipv6/netfilter/Kconfig          |   12
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c     |   14
-rw-r--r--  net/ipv6/netfilter/ip6t_REJECT.c    |    9
-rw-r--r--  net/mac80211/sta_info.c             |    1
-rw-r--r--  net/netfilter/Kconfig               |   42
-rw-r--r--  net/netfilter/Makefile              |    2
-rw-r--r--  net/netfilter/xt_qtaguid.c          | 2785
-rw-r--r--  net/netfilter/xt_qtaguid_internal.h |  330
-rw-r--r--  net/netfilter/xt_qtaguid_print.c    |  556
-rw-r--r--  net/netfilter/xt_qtaguid_print.h    |  120
-rw-r--r--  net/netfilter/xt_quota2.c           |  381
-rw-r--r--  net/netfilter/xt_socket.c           |   70
-rw-r--r--  net/rfkill/Kconfig                  |    5
-rw-r--r--  net/rfkill/core.c                   |    4
-rw-r--r--  net/wireless/Kconfig                |   11
-rw-r--r--  net/wireless/nl80211.c              |    5
-rw-r--r--  net/wireless/reg.c                  |    5
-rw-r--r--  net/wireless/scan.c                 |    2
-rw-r--r--  net/wireless/sme.c                  |    6
52 files changed, 7330 insertions(+), 1064 deletions(-)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 917ecb93ea2..d265c526839 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -438,7 +438,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 		}
 
 		break;
-
+	case NETDEV_GOING_DOWN: // NETDEV_DOWN
 	case NETDEV_DOWN:
 		/* Put all VLANs for this dev in the down state too. */
 		for (i = 0; i < VLAN_N_VID; i++) {
diff --git a/net/Kconfig b/net/Kconfig
index 878151c772c..919cf9a8212 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -79,6 +79,20 @@ source "net/netlabel/Kconfig"
 
 endif # if INET
 
+config ANDROID_PARANOID_NETWORK
+	bool "Only allow certain groups to create sockets"
+	default y
+	help
+	  none
+
+config NET_ACTIVITY_STATS
+	bool "Network activity statistics tracking"
+	default y
+	help
+	  Network activity statistics are useful for tracking wireless
+	  modem activity on 2G, 3G, 4G wireless networks. Counts number of
+	  transmissions and groups them in specified time buckets.
+
 config NETWORK_SECMARK
 	bool "Security Marking"
 	help
@@ -217,7 +231,7 @@ source "net/dns_resolver/Kconfig"
 source "net/batman-adv/Kconfig"
 
 config RPS
-	boolean
+	boolean "RPS"
 	depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
 	default y
 
diff --git a/net/Makefile b/net/Makefile
index a51d9465e62..54808aba6c1 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -68,3 +68,4 @@ obj-$(CONFIG_WIMAX) += wimax/
 obj-$(CONFIG_DNS_RESOLVER)	+= dns_resolver/
 obj-$(CONFIG_CEPH_LIB)		+= ceph/
 obj-$(CONFIG_BATMAN_ADV)	+= batman-adv/
+obj-$(CONFIG_NET_ACTIVITY_STATS) += activity_stats.o
diff --git a/net/activity_stats.c b/net/activity_stats.c
new file mode 100644
index 00000000000..8a3e9347006
--- /dev/null
+++ b/net/activity_stats.c
@@ -0,0 +1,115 @@
+/* net/activity_stats.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ */
+
+#include <linux/proc_fs.h>
+#include <linux/suspend.h>
+#include <net/net_namespace.h>
+
+/*
+ * Track transmission rates in buckets (power of 2).
+ * 1,2,4,8...512 seconds.
+ *
+ * Buckets represent the count of network transmissions at least
+ * N seconds apart, where N is 1 << bucket index.
+ */
+#define BUCKET_MAX 10
+
+/* Track network activity frequency */
+static unsigned long activity_stats[BUCKET_MAX];
+static ktime_t last_transmit;
+static ktime_t suspend_time;
+static DEFINE_SPINLOCK(activity_lock);
+
+void activity_stats_update(void)
+{
+	int i;
+	unsigned long flags;
+	ktime_t now;
+	s64 delta;
+
+	spin_lock_irqsave(&activity_lock, flags);
+	now = ktime_get();
+	delta = ktime_to_ns(ktime_sub(now, last_transmit));
+
+	for (i = BUCKET_MAX - 1; i >= 0; i--) {
+		/*
+		 * Check if the time delta between network activity is within the
+		 * minimum bucket range.
+		 */
+		if (delta < (1000000000ULL << i))
+			continue;
+
+		activity_stats[i]++;
+		last_transmit = now;
+		break;
+	}
+	spin_unlock_irqrestore(&activity_lock, flags);
+}
+
+static int activity_stats_read_proc(char *page, char **start, off_t off,
+					int count, int *eof, void *data)
+{
+	int i;
+	int len;
+	char *p = page;
+
+	/* Only print if offset is 0, or we have enough buffer space */
+	if (off || count < (30 * BUCKET_MAX + 22))
+		return -ENOMEM;
+
+	len = snprintf(p, count, "Min Bucket(sec) Count\n");
+	count -= len;
+	p += len;
+
+	for (i = 0; i < BUCKET_MAX; i++) {
+		len = snprintf(p, count, "%15d %lu\n", 1 << i, activity_stats[i]);
+		count -= len;
+		p += len;
+	}
+	*eof = 1;
+
+	return p - page;
+}
+
+static int activity_stats_notifier(struct notifier_block *nb,
+					unsigned long event, void *dummy)
+{
+	switch (event) {
+	case PM_SUSPEND_PREPARE:
+		suspend_time = ktime_get_real();
+		break;
+
+	case PM_POST_SUSPEND:
+		suspend_time = ktime_sub(ktime_get_real(), suspend_time);
+		last_transmit = ktime_sub(last_transmit, suspend_time);
+	}
+
+	return 0;
+}
+
+static struct notifier_block activity_stats_notifier_block = {
+	.notifier_call = activity_stats_notifier,
+};
+
+static int __init activity_stats_init(void)
+{
+	create_proc_read_entry("activity", S_IRUGO,
+			init_net.proc_net_stat, activity_stats_read_proc, NULL);
+	return register_pm_notifier(&activity_stats_notifier_block);
+}
+
+subsys_initcall(activity_stats_init);
+
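A note on the bucket arithmetic in activity_stats_update() above: the loop scans from the largest bucket down and credits the transmission to the biggest bucket whose span still fits inside the observed gap, where bucket i spans 2^i seconds (the comparison `delta < (1000000000ULL << i)` is in nanoseconds); gaps shorter than one second update neither a bucket nor `last_transmit`. A minimal userspace sketch of the same arithmetic — function and constant names here are illustrative, not kernel API:

```c
#include <stdio.h>

#define BUCKET_MAX 10	/* same bucket count as the kernel file above */

/* Mirror of the kernel's search: largest i with delta >= 2^i seconds. */
static int pick_bucket(long long delta_ns)
{
	int i;

	for (i = BUCKET_MAX - 1; i >= 0; i--)
		if (delta_ns >= (1000000000LL << i))
			return i;
	return -1;	/* gap under one second: nothing is counted */
}

int main(void)
{
	/* A 75 s gap lands in bucket 6, since 64 s <= 75 s < 128 s. */
	printf("bucket for a 75s gap: %d\n",
	       pick_bucket(75LL * 1000000000LL));
	return 0;
}
```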
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 6ae5ec50858..bfb3dc03c9d 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -6,6 +6,7 @@ menuconfig BT
 	tristate "Bluetooth subsystem support"
 	depends on NET && !S390
 	depends on RFKILL || !RFKILL
+	select CRYPTO
 	help
 	  Bluetooth is low-cost, low-power, short-range wireless technology.
 	  It was designed as a replacement for cables and other short-range
@@ -22,6 +23,7 @@ menuconfig BT
 	     BNEP Module (Bluetooth Network Encapsulation Protocol)
 	     CMTP Module (CAPI Message Transport Protocol)
 	     HIDP Module (Human Interface Device Protocol)
+	     SMP Module (Security Manager Protocol)
 
 	  Say Y here to compile Bluetooth support into the kernel or say M to
 	  compile it as module (bluetooth).
@@ -36,11 +38,18 @@ if BT != n
 config BT_L2CAP
 	bool "L2CAP protocol support"
 	select CRC16
+	select CRYPTO
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_AES
+	select CRYPTO_ECB
 	help
 	  L2CAP (Logical Link Control and Adaptation Protocol) provides
 	  connection oriented and connection-less data transport.  L2CAP
 	  support is required for most Bluetooth applications.
 
+	  Also included is support for SMP (Security Manager Protocol) which
+	  is the security layer on top of LE (Low Energy) links.
+
 config BT_SCO
 	bool "SCO links support"
 	help
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index f04fe9a9d63..9b67f3d08fa 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -9,5 +9,5 @@ obj-$(CONFIG_BT_CMTP) += cmtp/
 obj-$(CONFIG_BT_HIDP)	+= hidp/
 
 bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o
-bluetooth-$(CONFIG_BT_L2CAP)	+= l2cap_core.o l2cap_sock.o
+bluetooth-$(CONFIG_BT_L2CAP)	+= l2cap_core.o l2cap_sock.o smp.o
 bluetooth-$(CONFIG_BT_SCO)	+= sco.o
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 8add9b49991..7c73a10d7ed 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -40,6 +40,15 @@
 
 #include <net/bluetooth/bluetooth.h>
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+#endif
+
+#ifndef CONFIG_BT_SOCK_DEBUG
+#undef  BT_DBG
+#define BT_DBG(D...)
+#endif
+
 #define VERSION "2.16"
 
 /* Bluetooth sockets */
@@ -125,11 +134,40 @@ int bt_sock_unregister(int proto)
 }
 EXPORT_SYMBOL(bt_sock_unregister);
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+static inline int current_has_bt_admin(void)
+{
+	return (!current_euid() || in_egroup_p(AID_NET_BT_ADMIN));
+}
+
+static inline int current_has_bt(void)
+{
+	return (current_has_bt_admin() || in_egroup_p(AID_NET_BT));
+}
+#else
+static inline int current_has_bt_admin(void)
+{
+	return 1;
+}
+
+static inline int current_has_bt(void)
+{
+	return 1;
+}
+#endif
+
 static int bt_sock_create(struct net *net, struct socket *sock, int proto,
 			  int kern)
 {
 	int err;
 
+	if (proto == BTPROTO_RFCOMM || proto == BTPROTO_SCO ||
+			proto == BTPROTO_L2CAP) {
+		if (!current_has_bt())
+			return -EPERM;
+	} else if (!current_has_bt_admin())
+		return -EPERM;
+
 	if (net != &init_net)
 		return -EAFNOSUPPORT;
 
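The effect of the check added to bt_sock_create() above: with CONFIG_ANDROID_PARANOID_NETWORK=y, socket(2) fails with EPERM for L2CAP, SCO and RFCOMM unless the caller is root or in the AID_NET_BT group, and any other Bluetooth protocol additionally requires AID_NET_BT_ADMIN. A hypothetical userspace probe of that behaviour, assuming the usual BlueZ header for the AF_BLUETOOTH constants:

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>	/* AF_BLUETOOTH, BTPROTO_RFCOMM */

int main(void)
{
	/* Without membership in AID_NET_BT, bt_sock_create() now rejects
	 * this with -EPERM before any other validation runs. */
	int fd = socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM);

	if (fd < 0 && errno == EPERM)
		printf("blocked by ANDROID_PARANOID_NETWORK: %s\n",
		       strerror(errno));
	return 0;
}
```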
@@ -494,9 +532,8 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
 	BT_DBG("sk %p", sk);
 
 	add_wait_queue(sk_sleep(sk), &wait);
+	set_current_state(TASK_INTERRUPTIBLE);
 	while (sk->sk_state != state) {
-		set_current_state(TASK_INTERRUPTIBLE);
-
 		if (!timeo) {
 			err = -EINPROGRESS;
 			break;
@@ -510,12 +547,13 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
 		release_sock(sk);
 		timeo = schedule_timeout(timeo);
 		lock_sock(sk);
+		set_current_state(TASK_INTERRUPTIBLE);
 
 		err = sock_error(sk);
 		if (err)
 			break;
 	}
-	set_current_state(TASK_RUNNING);
+	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(sk_sleep(sk), &wait);
 	return err;
 }
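The two bt_sock_wait_state() hunks above close a classic lost-wakeup window: previously the task set TASK_INTERRUPTIBLE only inside the loop body, so a wakeup arriving between the `sk->sk_state` test and schedule_timeout() could be missed. The ordering the patch establishes, sketched with stand-in stubs for the kernel primitives (illustrative only, not compilable against the kernel tree):

```c
#include <stdbool.h>

static bool state_reached(void);	/* stands in for sk->sk_state == state */
static void mark_sleeping(void);	/* set_current_state(TASK_INTERRUPTIBLE) */
static void mark_running(void);		/* __set_current_state(TASK_RUNNING) */
static long sleep_for(long timeo);	/* schedule_timeout() */

static long wait_for_state(long timeo)
{
	mark_sleeping();			/* 1: advertise intent to sleep */
	while (!state_reached()) {		/* 2: only then re-check        */
		if (!timeo)
			break;
		timeo = sleep_for(timeo);	/* 3: sleep                     */
		mark_sleeping();		/* 4: re-arm before next check  */
	}
	mark_running();
	return timeo;
}
```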
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index 8e6c06158f8..e7ee5314f39 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -155,6 +155,7 @@ struct bnep_session {
 	unsigned int  role;
 	unsigned long state;
 	unsigned long flags;
+	atomic_t      terminate;
 	struct task_struct *task;
 
 	struct ethhdr eh;
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index ca39fcf010c..d9edfe8bf9d 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -484,9 +484,11 @@ static int bnep_session(void *arg)
 
 	init_waitqueue_entry(&wait, current);
 	add_wait_queue(sk_sleep(sk), &wait);
-	while (!kthread_should_stop()) {
+	while (1) {
 		set_current_state(TASK_INTERRUPTIBLE);
 
+		if (atomic_read(&s->terminate))
+			break;
 		/* RX */
 		while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
 			skb_orphan(skb);
@@ -504,7 +506,7 @@ static int bnep_session(void *arg)
 
 		schedule();
 	}
-	set_current_state(TASK_RUNNING);
+	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(sk_sleep(sk), &wait);
 
 	/* Cleanup session */
@@ -640,9 +642,10 @@ int bnep_del_connection(struct bnep_conndel_req *req)
 	down_read(&bnep_session_sem);
 
 	s = __bnep_get_session(req->dst);
-	if (s)
-		kthread_stop(s->task);
-	else
+	if (s) {
+		atomic_inc(&s->terminate);
+		wake_up_process(s->task);
+	} else
 		err = -ENOENT;
 
 	up_read(&bnep_session_sem);
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 744233cba24..040f67b1297 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -326,7 +326,7 @@ void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb)
 {
 	struct capi_ctr *ctrl = &session->ctrl;
 	struct cmtp_application *application;
-	__u16 cmd, appl;
+	__u16 appl;
 	__u32 contr;
 
 	BT_DBG("session %p skb %p len %d", session, skb, skb->len);
@@ -344,7 +344,6 @@ void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb)
 		return;
 	}
 
-	cmd = CAPICMD(CAPIMSG_COMMAND(skb->data), CAPIMSG_SUBCOMMAND(skb->data));
 	appl = CAPIMSG_APPID(skb->data);
 	contr = CAPIMSG_CONTROL(skb->data);
 
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 4bb16b8decb..33c4e0cd83b 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -53,11 +53,13 @@ static void hci_le_connect(struct hci_conn *conn)
 	conn->state = BT_CONNECT;
 	conn->out = 1;
 	conn->link_mode |= HCI_LM_MASTER;
+	conn->sec_level = BT_SECURITY_LOW;
 
 	memset(&cp, 0, sizeof(cp));
 	cp.scan_interval = cpu_to_le16(0x0004);
 	cp.scan_window = cpu_to_le16(0x0004);
 	bacpy(&cp.peer_addr, &conn->dst);
+	cp.peer_addr_type = conn->dst_type;
 	cp.conn_interval_min = cpu_to_le16(0x0008);
 	cp.conn_interval_max = cpu_to_le16(0x0100);
 	cp.supervision_timeout = cpu_to_le16(0x0064);
@@ -203,6 +205,55 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
 }
 EXPORT_SYMBOL(hci_le_conn_update);
 
+void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
+							__u8 ltk[16])
+{
+	struct hci_dev *hdev = conn->hdev;
+	struct hci_cp_le_start_enc cp;
+
+	BT_DBG("%p", conn);
+
+	memset(&cp, 0, sizeof(cp));
+
+	cp.handle = cpu_to_le16(conn->handle);
+	memcpy(cp.ltk, ltk, sizeof(cp.ltk));
+	cp.ediv = ediv;
+	memcpy(cp.rand, rand, sizeof(cp.rand));
+
+	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
+}
+EXPORT_SYMBOL(hci_le_start_enc);
+
+void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
+{
+	struct hci_dev *hdev = conn->hdev;
+	struct hci_cp_le_ltk_reply cp;
+
+	BT_DBG("%p", conn);
+
+	memset(&cp, 0, sizeof(cp));
+
+	cp.handle = cpu_to_le16(conn->handle);
+	memcpy(cp.ltk, ltk, sizeof(cp.ltk));
+
+	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
+}
+EXPORT_SYMBOL(hci_le_ltk_reply);
+
+void hci_le_ltk_neg_reply(struct hci_conn *conn)
+{
+	struct hci_dev *hdev = conn->hdev;
+	struct hci_cp_le_ltk_neg_reply cp;
+
+	BT_DBG("%p", conn);
+
+	memset(&cp, 0, sizeof(cp));
+
+	cp.handle = cpu_to_le16(conn->handle);
+
+	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(cp), &cp);
+}
+
 /* Device _must_ be locked */
 void hci_sco_setup(struct hci_conn *conn, __u8 status)
@@ -282,7 +333,8 @@ static void hci_conn_auto_accept(unsigned long arg)
 	hci_dev_unlock(hdev);
 }
 
-struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
+struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type,
+					__u16 pkt_type, bdaddr_t *dst)
 {
 	struct hci_conn *conn;
 
@@ -310,14 +362,22 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
 		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
 		break;
 	case SCO_LINK:
-		if (lmp_esco_capable(hdev))
-			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
-					(hdev->esco_type & EDR_ESCO_MASK);
-		else
-			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
-		break;
+		if (!pkt_type)
+			pkt_type = SCO_ESCO_MASK;
 	case ESCO_LINK:
-		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
+		if (!pkt_type)
+			pkt_type = ALL_ESCO_MASK;
+		if (lmp_esco_capable(hdev)) {
+			/* HCI Setup Synchronous Connection Command uses
+			   reverse logic on the EDR_ESCO_MASK bits */
+			conn->pkt_type = (pkt_type ^ EDR_ESCO_MASK) &
+					hdev->esco_type;
+		} else {
+			/* Legacy HCI Add Sco Connection Command uses a
+			   shifted bitmask */
+			conn->pkt_type = (pkt_type << 5) & hdev->pkt_type &
+					SCO_PTYPE_MASK;
+		}
 		break;
 	}
 
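The "reverse logic" comment above refers to the HCI Setup Synchronous Connection command, where the EDR packet-type bits mean "may not be used" rather than "may be used"; a caller mask of allowed types therefore has its EDR bits flipped before being ANDed with the controller's capabilities. A worked example of that computation — the mask values mirror include/net/bluetooth/hci.h, while the esco_type value is invented for the example:

```c
#include <stdio.h>

#define SCO_ESCO_MASK 0x0007	/* ESCO_HV1 | ESCO_HV2 | ESCO_HV3 */
#define EDR_ESCO_MASK 0x03c0	/* 2-EV3 | 3-EV3 | 2-EV5 | 3-EV5 */
#define ALL_ESCO_MASK 0x03ff

int main(void)
{
	unsigned int pkt_type = ALL_ESCO_MASK;	/* caller allows everything */
	unsigned int esco_type = 0x03c7;	/* example controller capabilities */

	/* Flip the "do NOT use" EDR bits, then filter by capability,
	 * exactly as the eSCO branch of hci_conn_add() does above. */
	unsigned int wire = (pkt_type ^ EDR_ESCO_MASK) & esco_type;

	printf("pkt_type sent on the wire: 0x%04x\n", wire);	/* 0x0007 */
	return 0;
}
```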
@@ -441,7 +501,9 @@ EXPORT_SYMBOL(hci_get_route);
 
 /* Create SCO, ACL or LE connection.
  * Device _must_ be locked */
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
+struct hci_conn *hci_connect(struct hci_dev *hdev, int type,
+					__u16 pkt_type, bdaddr_t *dst,
+					__u8 sec_level, __u8 auth_type)
 {
 	struct hci_conn *acl;
 	struct hci_conn *sco;
@@ -450,14 +512,23 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
 	BT_DBG("%s dst %s", hdev->name, batostr(dst));
 
 	if (type == LE_LINK) {
+		struct adv_entry *entry;
+
 		le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
 		if (le)
 			return ERR_PTR(-EBUSY);
-		le = hci_conn_add(hdev, LE_LINK, dst);
+
+		entry = hci_find_adv_entry(hdev, dst);
+		if (!entry)
+			return ERR_PTR(-EHOSTUNREACH);
+
+		le = hci_conn_add(hdev, LE_LINK, 0, dst);
 		if (!le)
 			return ERR_PTR(-ENOMEM);
-		if (le->state == BT_OPEN)
-			hci_le_connect(le);
+
+		le->dst_type = entry->bdaddr_type;
+
+		hci_le_connect(le);
 
 		hci_conn_hold(le);
 
@@ -466,7 +537,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
 
 	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
 	if (!acl) {
-		acl = hci_conn_add(hdev, ACL_LINK, dst);
+		acl = hci_conn_add(hdev, ACL_LINK, 0, dst);
 		if (!acl)
 			return NULL;
 	}
@@ -485,7 +556,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
 
 	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
 	if (!sco) {
-		sco = hci_conn_add(hdev, type, dst);
+		sco = hci_conn_add(hdev, type, pkt_type, dst);
 		if (!sco) {
 			hci_conn_put(acl);
 			return NULL;
@@ -500,7 +571,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
 	if (acl->state == BT_CONNECTED &&
 			(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
 		acl->power_save = 1;
-		hci_conn_enter_active_mode(acl);
+		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
 
 		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
 			/* defer SCO setup until mode change completed */
@@ -555,6 +626,8 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 		cp.handle = cpu_to_le16(conn->handle);
 		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
 							sizeof(cp), &cp);
+		if (conn->key_type != 0xff)
+			set_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
 	}
 
 	return 0;
@@ -638,9 +711,7 @@ int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
 	if (sec_level != BT_SECURITY_HIGH)
 		return 1; /* Accept if non-secure is required */
 
-	if (conn->key_type == HCI_LK_AUTH_COMBINATION ||
-			(conn->key_type == HCI_LK_COMBINATION &&
-			conn->pin_length == 16))
+	if (conn->sec_level == BT_SECURITY_HIGH)
 		return 1;
 
 	return 0; /* Reject not secure link */
@@ -683,7 +754,7 @@ int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
 EXPORT_SYMBOL(hci_conn_switch_role);
 
 /* Enter active mode */
-void hci_conn_enter_active_mode(struct hci_conn *conn)
+void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
 {
 	struct hci_dev *hdev = conn->hdev;
 
@@ -692,7 +763,10 @@ void hci_conn_enter_active_mode(struct hci_conn *conn)
 	if (test_bit(HCI_RAW, &hdev->flags))
 		return;
 
-	if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
+	if (conn->mode != HCI_CM_SNIFF)
+		goto timer;
+
+	if (!conn->power_save && !force_active)
 		goto timer;
 
 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
@@ -833,6 +907,15 @@ int hci_get_conn_list(void __user *arg)
 		(ci + n)->out   = c->out;
 		(ci + n)->state = c->state;
 		(ci + n)->link_mode = c->link_mode;
+		if (c->type == SCO_LINK) {
+			(ci + n)->mtu  = hdev->sco_mtu;
+			(ci + n)->cnt  = hdev->sco_cnt;
+			(ci + n)->pkts = hdev->sco_pkts;
+		} else {
+			(ci + n)->mtu  = hdev->acl_mtu;
+			(ci + n)->cnt  = hdev->acl_cnt;
+			(ci + n)->pkts = hdev->acl_pkts;
+		}
 		if (++n >= req.conn_num)
 			break;
 	}
@@ -869,6 +952,15 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
 		ci.out   = conn->out;
 		ci.state = conn->state;
 		ci.link_mode = conn->link_mode;
+		if (req.type == SCO_LINK) {
+			ci.mtu  = hdev->sco_mtu;
+			ci.cnt  = hdev->sco_cnt;
+			ci.pkts = hdev->sco_pkts;
+		} else {
+			ci.mtu  = hdev->acl_mtu;
+			ci.cnt  = hdev->acl_cnt;
+			ci.pkts = hdev->acl_pkts;
+		}
 	}
 	hci_dev_unlock_bh(hdev);
 
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index cb9cb48c6e8..f38e633c754 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -42,6 +42,7 @@
 #include <linux/notifier.h>
 #include <linux/rfkill.h>
 #include <linux/timer.h>
+#include <linux/crypto.h>
 #include <net/sock.h>
 
 #include <asm/system.h>
@@ -145,7 +146,7 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
 
 	switch (hdev->req_status) {
 	case HCI_REQ_DONE:
-		err = -bt_err(hdev->req_result);
+		err = -bt_to_errno(hdev->req_result);
 		break;
 
 	case HCI_REQ_CANCELED:
@@ -544,7 +545,7 @@ int hci_dev_open(__u16 dev)
 		ret = __hci_request(hdev, hci_init_req, 0,
 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
 
-		if (lmp_le_capable(hdev))
+		if (lmp_host_le_capable(hdev))
 			ret = __hci_request(hdev, hci_le_init_req, 0,
 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
 
@@ -1061,6 +1062,42 @@ static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
 	return 0;
 }
 
+struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
+{
+	struct link_key *k;
+
+	list_for_each_entry(k, &hdev->link_keys, list) {
+		struct key_master_id *id;
+
+		if (k->type != HCI_LK_SMP_LTK)
+			continue;
+
+		if (k->dlen != sizeof(*id))
+			continue;
+
+		id = (void *) &k->data;
+		if (id->ediv == ediv &&
+				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
+			return k;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(hci_find_ltk);
+
+struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
+					bdaddr_t *bdaddr, u8 type)
+{
+	struct link_key *k;
+
+	list_for_each_entry(k, &hdev->link_keys, list)
+		if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
+			return k;
+
+	return NULL;
+}
+EXPORT_SYMBOL(hci_find_link_key_type);
+
 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
 				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
 {
@@ -1116,6 +1153,44 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
 	return 0;
 }
 
+int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
+			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
+{
+	struct link_key *key, *old_key;
+	struct key_master_id *id;
+	u8 old_key_type;
+
+	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
+
+	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
+	if (old_key) {
+		key = old_key;
+		old_key_type = old_key->type;
+	} else {
+		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
+		if (!key)
+			return -ENOMEM;
+		list_add(&key->list, &hdev->link_keys);
+		old_key_type = 0xff;
+	}
+
+	key->dlen = sizeof(*id);
+
+	bacpy(&key->bdaddr, bdaddr);
+	memcpy(key->val, ltk, sizeof(key->val));
+	key->type = HCI_LK_SMP_LTK;
+	key->pin_len = key_size;
+
+	id = (void *) &key->data;
+	id->ediv = ediv;
+	memcpy(id->rand, rand, sizeof(id->rand));
+
+	if (new_key)
+		mgmt_new_key(hdev->id, key, old_key_type);
+
+	return 0;
+}
+
 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
 {
 	struct link_key *key;
@@ -1139,7 +1214,6 @@ static void hci_cmd_timer(unsigned long arg)
 
 	BT_ERR("%s command tx timeout", hdev->name);
 	atomic_set(&hdev->cmd_cnt, 1);
-	clear_bit(HCI_RESET, &hdev->flags);
 	tasklet_schedule(&hdev->cmd_task);
 }
 
@@ -1207,6 +1281,169 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
 	return 0;
 }
 
+struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
+						bdaddr_t *bdaddr)
+{
+	struct list_head *p;
+
+	list_for_each(p, &hdev->blacklist) {
+		struct bdaddr_list *b;
+
+		b = list_entry(p, struct bdaddr_list, list);
+
+		if (bacmp(bdaddr, &b->bdaddr) == 0)
+			return b;
+	}
+
+	return NULL;
+}
+
+int hci_blacklist_clear(struct hci_dev *hdev)
+{
+	struct list_head *p, *n;
+
+	list_for_each_safe(p, n, &hdev->blacklist) {
+		struct bdaddr_list *b;
+
+		b = list_entry(p, struct bdaddr_list, list);
+
+		list_del(p);
+		kfree(b);
+	}
+
+	return 0;
+}
+
+int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+	struct bdaddr_list *entry;
+	int err;
+
+	if (bacmp(bdaddr, BDADDR_ANY) == 0)
+		return -EBADF;
+
+	hci_dev_lock_bh(hdev);
+
+	if (hci_blacklist_lookup(hdev, bdaddr)) {
+		err = -EEXIST;
+		goto err;
+	}
+
+	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
+	if (!entry) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	bacpy(&entry->bdaddr, bdaddr);
+
+	list_add(&entry->list, &hdev->blacklist);
+
+	err = 0;
+
+err:
+	hci_dev_unlock_bh(hdev);
+	return err;
+}
+
+int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+	struct bdaddr_list *entry;
+	int err = 0;
+
+	hci_dev_lock_bh(hdev);
+
+	if (bacmp(bdaddr, BDADDR_ANY) == 0) {
+		hci_blacklist_clear(hdev);
+		goto done;
+	}
+
+	entry = hci_blacklist_lookup(hdev, bdaddr);
+	if (!entry) {
+		err = -ENOENT;
+		goto done;
+	}
+
+	list_del(&entry->list);
+	kfree(entry);
+
+done:
+	hci_dev_unlock_bh(hdev);
+	return err;
+}
+
+static void hci_clear_adv_cache(unsigned long arg)
+{
+	struct hci_dev *hdev = (void *) arg;
+
+	hci_dev_lock(hdev);
+
+	hci_adv_entries_clear(hdev);
+
+	hci_dev_unlock(hdev);
+}
+
+int hci_adv_entries_clear(struct hci_dev *hdev)
+{
+	struct adv_entry *entry, *tmp;
+
+	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
+		list_del(&entry->list);
+		kfree(entry);
+	}
+
+	BT_DBG("%s adv cache cleared", hdev->name);
+
+	return 0;
+}
+
+struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+	struct adv_entry *entry;
+
+	list_for_each_entry(entry, &hdev->adv_entries, list)
+		if (bacmp(bdaddr, &entry->bdaddr) == 0)
+			return entry;
+
+	return NULL;
+}
+
+static inline int is_connectable_adv(u8 evt_type)
+{
+	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
+		return 1;
+
+	return 0;
+}
+
+int hci_add_adv_entry(struct hci_dev *hdev,
+					struct hci_ev_le_advertising_info *ev)
+{
+	struct adv_entry *entry;
+
+	if (!is_connectable_adv(ev->evt_type))
+		return -EINVAL;
+
+	/* Only new entries should be added to adv_entries. So, if
+	 * bdaddr was found, don't add it. */
+	if (hci_find_adv_entry(hdev, &ev->bdaddr))
+		return 0;
+
+	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+	if (!entry)
+		return -ENOMEM;
+
+	bacpy(&entry->bdaddr, &ev->bdaddr);
+	entry->bdaddr_type = ev->bdaddr_type;
+
+	list_add(&entry->list, &hdev->adv_entries);
+
+	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
+				batostr(&entry->bdaddr), entry->bdaddr_type);
+
+	return 0;
+}
+
 /* Register HCI device */
 int hci_register_dev(struct hci_dev *hdev)
 {
@@ -1273,6 +1510,10 @@ int hci_register_dev(struct hci_dev *hdev)
 
 	INIT_LIST_HEAD(&hdev->remote_oob_data);
 
+	INIT_LIST_HEAD(&hdev->adv_entries);
+	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
+						(unsigned long) hdev);
+
 	INIT_WORK(&hdev->power_on, hci_power_on);
 	INIT_WORK(&hdev->power_off, hci_power_off);
 	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
@@ -1287,6 +1528,11 @@ int hci_register_dev(struct hci_dev *hdev)
 	if (!hdev->workqueue)
 		goto nomem;
 
+	hdev->tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(hdev->tfm))
+		BT_INFO("Failed to load transform for ecb(aes): %ld",
+							PTR_ERR(hdev->tfm));
+
 	hci_register_sysfs(hdev);
 
 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
@@ -1337,6 +1583,9 @@ int hci_unregister_dev(struct hci_dev *hdev)
 					!test_bit(HCI_SETUP, &hdev->flags))
 		mgmt_index_removed(hdev->id);
 
+	if (!IS_ERR(hdev->tfm))
+		crypto_free_blkcipher(hdev->tfm);
+
 	hci_notify(hdev, HCI_DEV_UNREG);
 
 	if (hdev->rfkill) {
@@ -1347,6 +1596,7 @@ int hci_unregister_dev(struct hci_dev *hdev)
 	hci_unregister_sysfs(hdev);
 
 	hci_del_off_timer(hdev);
+	del_timer(&hdev->adv_timer);
 
 	destroy_workqueue(hdev->workqueue);
 
@@ -1355,6 +1605,7 @@ int hci_unregister_dev(struct hci_dev *hdev)
 	hci_uuids_clear(hdev);
 	hci_link_keys_clear(hdev);
 	hci_remote_oob_data_clear(hdev);
+	hci_adv_entries_clear(hdev);
 	hci_dev_unlock_bh(hdev);
 
 	__hci_dev_put(hdev);
@@ -1898,7 +2149,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
 			BT_DBG("skb %p len %d", skb, skb->len);
 
-			hci_conn_enter_active_mode(conn);
+			hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
 
 			hci_send_frame(skb);
 			hdev->acl_last_tx = jiffies;
@@ -2037,7 +2288,7 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
 	if (conn) {
 		register struct hci_proto *hp;
 
-		hci_conn_enter_active_mode(conn);
+		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
 
 		/* Send to upper protocol */
 		hp = hci_proto[HCI_PROTO_L2CAP];
@@ -2163,7 +2414,10 @@ static void hci_cmd_task(unsigned long arg)
 		if (hdev->sent_cmd) {
 			atomic_dec(&hdev->cmd_cnt);
 			hci_send_frame(skb);
-			mod_timer(&hdev->cmd_timer,
+			if (test_bit(HCI_RESET, &hdev->flags))
+				del_timer(&hdev->cmd_timer);
+			else
+				mod_timer(&hdev->cmd_timer,
 			jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
 		} else {
 			skb_queue_head(&hdev->cmd_q, skb);
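One detail of the new LTK plumbing in hci_core.c worth calling out: classic link keys are found by remote address (hci_find_link_key_type()), but an LE long-term key is identified by the EDIV/Rand pair the peer presents in its LTK request, which is what hci_find_ltk() matches on. A self-contained userspace mirror of that lookup, with simplified types and illustrative names:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the kernel's link_key + key_master_id pair. */
struct ltk {
	uint16_t ediv;
	uint8_t  rand[8];
	uint8_t  val[16];
};

static const struct ltk *find_ltk(const struct ltk *keys, size_t n,
				  uint16_t ediv, const uint8_t rand[8])
{
	size_t i;

	/* Match on the EDIV/Rand pair, not on the remote address. */
	for (i = 0; i < n; i++)
		if (keys[i].ediv == ediv &&
		    memcmp(keys[i].rand, rand, 8) == 0)
			return &keys[i];
	return NULL;
}

int main(void)
{
	struct ltk keys[1] = { { 0x1234, { 1, 2, 3, 4, 5, 6, 7, 8 }, { 0 } } };
	uint8_t rand[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	printf("LTK found: %s\n",
	       find_ltk(keys, 1, 0x1234, rand) ? "yes" : "no");
	return 0;
}
```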
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 01aa7e73d9b..5a7074a7b5b 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -45,6 +45,8 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
+static int enable_le;
+
 /* Handle HCI Event packets */
 
 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
@@ -525,6 +527,20 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
 	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
 }
 
+static void hci_set_le_support(struct hci_dev *hdev)
+{
+	struct hci_cp_write_le_host_supported cp;
+
+	memset(&cp, 0, sizeof(cp));
+
+	if (enable_le) {
+		cp.le = 1;
+		cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
+	}
+
+	hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
+}
+
 static void hci_setup(struct hci_dev *hdev)
 {
 	hci_setup_event_mask(hdev);
@@ -542,6 +558,17 @@ static void hci_setup(struct hci_dev *hdev)
 
 	if (hdev->features[7] & LMP_INQ_TX_PWR)
 		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
+
+	if (hdev->features[7] & LMP_EXTFEATURES) {
+		struct hci_cp_read_local_ext_features cp;
+
+		cp.page = 0x01;
+		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
+							sizeof(cp), &cp);
+	}
+
+	if (hdev->features[4] & LMP_LE)
+		hci_set_le_support(hdev);
 }
 
 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
@@ -658,6 +685,21 @@ static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb
 					hdev->features[6], hdev->features[7]);
 }
 
+static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
+						struct sk_buff *skb)
+{
+	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
+
+	BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+	if (rp->status)
+		return;
+
+	memcpy(hdev->extfeatures, rp->features, 8);
+
+	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
+}
+
 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
@@ -841,6 +883,72 @@ static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
 						rp->randomizer, rp->status);
 }
 
+static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
+					struct sk_buff *skb)
+{
+	struct hci_cp_le_set_scan_enable *cp;
+	__u8 status = *((__u8 *) skb->data);
+
+	BT_DBG("%s status 0x%x", hdev->name, status);
+
+	if (status)
+		return;
+
+	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
+	if (!cp)
+		return;
+
+	hci_dev_lock(hdev);
+
+	if (cp->enable == 0x01) {
+		del_timer(&hdev->adv_timer);
+		hci_adv_entries_clear(hdev);
+	} else if (cp->enable == 0x00) {
+		mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
+	}
+
+	hci_dev_unlock(hdev);
+}
+
+static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
+
+	BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+	if (rp->status)
+		return;
+
+	hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
+}
+
+static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
+
+	BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+	if (rp->status)
+		return;
+
+	hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
+}
+
+static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
+						struct sk_buff *skb)
+{
+	struct hci_cp_read_local_ext_features cp;
+	__u8 status = *((__u8 *) skb->data);
+
+	BT_DBG("%s status 0x%x", hdev->name, status);
+
+	if (status)
+		return;
+
+	cp.page = 0x01;
+	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
+}
+
 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
 {
 	BT_DBG("%s status 0x%x", hdev->name, status);
@@ -884,7 +992,7 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
 		}
 	} else {
 		if (!conn) {
-			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
+			conn = hci_conn_add(hdev, ACL_LINK, 0, &cp->bdaddr);
 			if (conn) {
 				conn->out = 1;
 				conn->link_mode |= HCI_LM_MASTER;
@@ -1207,17 +1315,24 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
 		}
 	} else {
 		if (!conn) {
-			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
-			if (conn)
+			conn = hci_conn_add(hdev, LE_LINK, 0, &cp->peer_addr);
+			if (conn) {
+				conn->dst_type = cp->peer_addr_type;
 				conn->out = 1;
-			else
+			} else {
 				BT_ERR("No memory for new connection");
+			}
 		}
 	}
 
 	hci_dev_unlock(hdev);
 }
 
+static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
+{
+	BT_DBG("%s status 0x%x", hdev->name, status);
+}
+
 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	__u8 status = *((__u8 *) skb->data);
@@ -1347,6 +1462,15 @@ unlock:
 	hci_conn_check_pending(hdev);
 }
 
+static inline bool is_sco_active(struct hci_dev *hdev)
+{
+	if (hci_conn_hash_lookup_state(hdev, SCO_LINK, BT_CONNECTED) ||
+			(hci_conn_hash_lookup_state(hdev, ESCO_LINK,
+						    BT_CONNECTED)))
+		return true;
+	return false;
+}
+
 static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_conn_request *ev = (void *) skb->data;
@@ -1371,7 +1495,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
 
 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
 	if (!conn) {
-		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
+		/* pkt_type not yet used for incoming connections */
+		conn = hci_conn_add(hdev, ev->link_type, 0, &ev->bdaddr);
 		if (!conn) {
 			BT_ERR("No memory for new connection");
 			hci_dev_unlock(hdev);
@@ -1389,7 +1514,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
 
 		bacpy(&cp.bdaddr, &ev->bdaddr);
 
-		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
+		if (lmp_rswitch_capable(hdev) && ((mask & HCI_LM_MASTER)
+					|| is_sco_active(hdev)))
 			cp.role = 0x00; /* Become master */
 		else
 			cp.role = 0x01; /* Remain slave */
@@ -1461,51 +1587,58 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
 	hci_dev_lock(hdev);
 
 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
-	if (conn) {
-		if (!ev->status) {
+	if (!conn)
+		goto unlock;
+
+	if (!ev->status) {
+		if (!(conn->ssp_mode > 0 && hdev->ssp_mode > 0) &&
+				test_bit(HCI_CONN_REAUTH_PEND, &conn->pend)) {
+			BT_INFO("re-auth of legacy device is not possible.");
+		} else {
 			conn->link_mode |= HCI_LM_AUTH;
 			conn->sec_level = conn->pending_sec_level;
-		} else {
-			mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
 		}
+	} else {
+		mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
+	}
 
-		clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
+	clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
+	clear_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
 
-		if (conn->state == BT_CONFIG) {
-			if (!ev->status && hdev->ssp_mode > 0 &&
-							conn->ssp_mode > 0) {
-				struct hci_cp_set_conn_encrypt cp;
-				cp.handle = ev->handle;
-				cp.encrypt = 0x01;
-				hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
-							sizeof(cp), &cp);
-			} else {
-				conn->state = BT_CONNECTED;
-				hci_proto_connect_cfm(conn, ev->status);
-				hci_conn_put(conn);
-			}
+	if (conn->state == BT_CONFIG) {
+		if (!ev->status && hdev->ssp_mode > 0 && conn->ssp_mode > 0) {
+			struct hci_cp_set_conn_encrypt cp;
+			cp.handle = ev->handle;
+			cp.encrypt = 0x01;
+			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
+									&cp);
 		} else {
-			hci_auth_cfm(conn, ev->status);
-
-			hci_conn_hold(conn);
-			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+			conn->state = BT_CONNECTED;
+			hci_proto_connect_cfm(conn, ev->status);
 			hci_conn_put(conn);
 		}
+	} else {
+		hci_auth_cfm(conn, ev->status);
 
-		if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
-			if (!ev->status) {
-				struct hci_cp_set_conn_encrypt cp;
-				cp.handle = ev->handle;
-				cp.encrypt = 0x01;
-				hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
-							sizeof(cp), &cp);
-			} else {
-				clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
-				hci_encrypt_cfm(conn, ev->status, 0x00);
-			}
+		hci_conn_hold(conn);
+		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+		hci_conn_put(conn);
+	}
+
+	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
+		if (!ev->status) {
+			struct hci_cp_set_conn_encrypt cp;
+			cp.handle = ev->handle;
+			cp.encrypt = 0x01;
+			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
+									&cp);
+		} else {
+			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
+			hci_encrypt_cfm(conn, ev->status, 0x00);
 		}
 	}
 
+unlock:
 	hci_dev_unlock(hdev);
 }
 
@@ -1556,6 +1689,7 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *
 			/* Encryption implies authentication */
 			conn->link_mode |= HCI_LM_AUTH;
 			conn->link_mode |= HCI_LM_ENCRYPT;
+			conn->sec_level = conn->pending_sec_level;
 		} else
 			conn->link_mode &= ~HCI_LM_ENCRYPT;
 	}
@@ -1759,6 +1893,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
 		hci_cc_read_local_features(hdev, skb);
 		break;
 
+	case HCI_OP_READ_LOCAL_EXT_FEATURES:
+		hci_cc_read_local_ext_features(hdev, skb);
+		break;
+
 	case HCI_OP_READ_BUFFER_SIZE:
 		hci_cc_read_buffer_size(hdev, skb);
 		break;
@@ -1815,6 +1953,22 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
 		hci_cc_user_confirm_neg_reply(hdev, skb);
 		break;
 
+	case HCI_OP_LE_SET_SCAN_ENABLE:
+		hci_cc_le_set_scan_enable(hdev, skb);
+		break;
+
+	case HCI_OP_LE_LTK_REPLY:
+		hci_cc_le_ltk_reply(hdev, skb);
+		break;
+
+	case HCI_OP_LE_LTK_NEG_REPLY:
+		hci_cc_le_ltk_neg_reply(hdev, skb);
+		break;
+
+	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
+		hci_cc_write_le_host_supported(hdev, skb);
+		break;
+
 	default:
 		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
 		break;
@@ -1893,6 +2047,10 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
 		hci_cs_le_create_conn(hdev, ev->status);
 		break;
 
+	case HCI_OP_LE_START_ENC:
+		hci_cs_le_start_enc(hdev, ev->status);
+		break;
+
 	default:
 		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
 		break;
@@ -2332,6 +2490,7 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
 		hci_conn_add_sysfs(conn);
 		break;
 
+	case 0x10:	/* Connection Accept Timeout */
 	case 0x11:	/* Unsupported Feature or Parameter Value */
 	case 0x1c:	/* SCO interval rejected */
 	case 0x1a:	/* Unsupported Remote Feature */
@@ -2651,12 +2810,14 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
 
 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
 	if (!conn) {
-		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
+		conn = hci_conn_add(hdev, LE_LINK, 0, &ev->bdaddr);
 		if (!conn) {
 			BT_ERR("No memory for new connection");
 			hci_dev_unlock(hdev);
 			return;
 		}
+
+		conn->dst_type = ev->bdaddr_type;
 	}
 
 	if (ev->status) {
2662 if (ev->status) { 2823 if (ev->status) {
@@ -2669,6 +2830,7 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
2669 2830
2670 mgmt_connected(hdev->id, &ev->bdaddr); 2831 mgmt_connected(hdev->id, &ev->bdaddr);
2671 2832
2833 conn->sec_level = BT_SECURITY_LOW;
2672 conn->handle = __le16_to_cpu(ev->handle); 2834 conn->handle = __le16_to_cpu(ev->handle);
2673 conn->state = BT_CONNECTED; 2835 conn->state = BT_CONNECTED;
2674 2836
@@ -2681,6 +2843,64 @@ unlock:
 	hci_dev_unlock(hdev);
 }
 
+static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
+						struct sk_buff *skb)
+{
+	struct hci_ev_le_advertising_info *ev;
+	u8 num_reports;
+
+	num_reports = skb->data[0];
+	ev = (void *) &skb->data[1];
+
+	hci_dev_lock(hdev);
+
+	hci_add_adv_entry(hdev, ev);
+
+	while (--num_reports) {
+		ev = (void *) (ev->data + ev->length + 1);
+		hci_add_adv_entry(hdev, ev);
+	}
+
+	hci_dev_unlock(hdev);
+}
+
+static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
+						struct sk_buff *skb)
+{
+	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
+	struct hci_cp_le_ltk_reply cp;
+	struct hci_cp_le_ltk_neg_reply neg;
+	struct hci_conn *conn;
+	struct link_key *ltk;
+
+	BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
+
+	hci_dev_lock(hdev);
+
+	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
+	if (conn == NULL)
+		goto not_found;
+
+	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
+	if (ltk == NULL)
+		goto not_found;
+
+	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
+	cp.handle = cpu_to_le16(conn->handle);
+	conn->pin_length = ltk->pin_len;
+
+	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
+
+	hci_dev_unlock(hdev);
+
+	return;
+
+not_found:
+	neg.handle = ev->handle;
+	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
+	hci_dev_unlock(hdev);
+}
+
 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
@@ -2692,6 +2912,14 @@ static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
 		hci_le_conn_complete_evt(hdev, skb);
 		break;
 
+	case HCI_EV_LE_ADVERTISING_REPORT:
+		hci_le_adv_report_evt(hdev, skb);
+		break;
+
+	case HCI_EV_LE_LTK_REQ:
+		hci_le_ltk_request_evt(hdev, skb);
+		break;
+
 	default:
 		break;
 	}
@@ -2885,3 +3113,6 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
2885 hci_send_to_sock(hdev, skb, NULL); 3113 hci_send_to_sock(hdev, skb, NULL);
2886 kfree_skb(skb); 3114 kfree_skb(skb);
2887} 3115}
3116
3117module_param(enable_le, bool, 0444);
3118MODULE_PARM_DESC(enable_le, "Enable LE support");
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 295e4a88fff..ff02cf5e77c 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -180,82 +180,24 @@ static int hci_sock_release(struct socket *sock)
180 return 0; 180 return 0;
181} 181}
182 182
183struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) 183static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
184{
185 struct list_head *p;
186
187 list_for_each(p, &hdev->blacklist) {
188 struct bdaddr_list *b;
189
190 b = list_entry(p, struct bdaddr_list, list);
191
192 if (bacmp(bdaddr, &b->bdaddr) == 0)
193 return b;
194 }
195
196 return NULL;
197}
198
199static int hci_blacklist_add(struct hci_dev *hdev, void __user *arg)
200{ 184{
201 bdaddr_t bdaddr; 185 bdaddr_t bdaddr;
202 struct bdaddr_list *entry;
203 186
204 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr))) 187 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
205 return -EFAULT; 188 return -EFAULT;
206 189
207 if (bacmp(&bdaddr, BDADDR_ANY) == 0) 190 return hci_blacklist_add(hdev, &bdaddr);
208 return -EBADF;
209
210 if (hci_blacklist_lookup(hdev, &bdaddr))
211 return -EEXIST;
212
213 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
214 if (!entry)
215 return -ENOMEM;
216
217 bacpy(&entry->bdaddr, &bdaddr);
218
219 list_add(&entry->list, &hdev->blacklist);
220
221 return 0;
222}
223
224int hci_blacklist_clear(struct hci_dev *hdev)
225{
226 struct list_head *p, *n;
227
228 list_for_each_safe(p, n, &hdev->blacklist) {
229 struct bdaddr_list *b;
230
231 b = list_entry(p, struct bdaddr_list, list);
232
233 list_del(p);
234 kfree(b);
235 }
236
237 return 0;
238} 191}
239 192
240static int hci_blacklist_del(struct hci_dev *hdev, void __user *arg) 193static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
241{ 194{
242 bdaddr_t bdaddr; 195 bdaddr_t bdaddr;
243 struct bdaddr_list *entry;
244 196
245 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr))) 197 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
246 return -EFAULT; 198 return -EFAULT;
247 199
248 if (bacmp(&bdaddr, BDADDR_ANY) == 0) 200 return hci_blacklist_del(hdev, &bdaddr);
249 return hci_blacklist_clear(hdev);
250
251 entry = hci_blacklist_lookup(hdev, &bdaddr);
252 if (!entry)
253 return -ENOENT;
254
255 list_del(&entry->list);
256 kfree(entry);
257
258 return 0;
259} 201}
260 202
261/* Ioctls that require bound socket */ 203/* Ioctls that require bound socket */
@@ -290,12 +232,12 @@ static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsign
290 case HCIBLOCKADDR: 232 case HCIBLOCKADDR:
291 if (!capable(CAP_NET_ADMIN)) 233 if (!capable(CAP_NET_ADMIN))
292 return -EACCES; 234 return -EACCES;
293 return hci_blacklist_add(hdev, (void __user *) arg); 235 return hci_sock_blacklist_add(hdev, (void __user *) arg);
294 236
295 case HCIUNBLOCKADDR: 237 case HCIUNBLOCKADDR:
296 if (!capable(CAP_NET_ADMIN)) 238 if (!capable(CAP_NET_ADMIN))
297 return -EACCES; 239 return -EACCES;
298 return hci_blacklist_del(hdev, (void __user *) arg); 240 return hci_sock_blacklist_del(hdev, (void __user *) arg);
299 241
300 default: 242 default:
301 if (hdev->ioctl) 243 if (hdev->ioctl)
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 43b4c2deb7c..fb68f344c34 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -764,6 +764,7 @@ static int hidp_session(void *arg)
764 764
765 up_write(&hidp_session_sem); 765 up_write(&hidp_session_sem);
766 766
767 kfree(session->rd_data);
767 kfree(session); 768 kfree(session);
768 return 0; 769 return 0;
769} 770}
@@ -841,7 +842,8 @@ static int hidp_setup_input(struct hidp_session *session,
841 842
842 err = input_register_device(input); 843 err = input_register_device(input);
843 if (err < 0) { 844 if (err < 0) {
844 hci_conn_put_device(session->conn); 845 input_free_device(input);
846 session->input = NULL;
845 return err; 847 return err;
846 } 848 }
847 849
@@ -1044,8 +1046,12 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
1044 } 1046 }
1045 1047
1046 err = hid_add_device(session->hid); 1048 err = hid_add_device(session->hid);
1047 if (err < 0) 1049 if (err < 0) {
1048 goto err_add_device; 1050 atomic_inc(&session->terminate);
1051 wake_up_process(session->task);
1052 up_write(&hidp_session_sem);
1053 return err;
1054 }
1049 1055
1050 if (session->input) { 1056 if (session->input) {
1051 hidp_send_ctrl_message(session, 1057 hidp_send_ctrl_message(session,
@@ -1059,12 +1065,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
1059 up_write(&hidp_session_sem); 1065 up_write(&hidp_session_sem);
1060 return 0; 1066 return 0;
1061 1067
1062err_add_device:
1063 hid_destroy_device(session->hid);
1064 session->hid = NULL;
1065 atomic_inc(&session->terminate);
1066 wake_up_process(session->task);
1067
1068unlink: 1068unlink:
1069 hidp_del_timer(session); 1069 hidp_del_timer(session);
1070 1070
@@ -1090,7 +1090,6 @@ purge:
1090failed: 1090failed:
1091 up_write(&hidp_session_sem); 1091 up_write(&hidp_session_sem);
1092 1092
1093 input_free_device(session->input);
1094 kfree(session); 1093 kfree(session);
1095 return err; 1094 return err;
1096} 1095}
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 7705e26e699..5a0ce738751 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -54,26 +54,39 @@
54#include <net/bluetooth/bluetooth.h> 54#include <net/bluetooth/bluetooth.h>
55#include <net/bluetooth/hci_core.h> 55#include <net/bluetooth/hci_core.h>
56#include <net/bluetooth/l2cap.h> 56#include <net/bluetooth/l2cap.h>
57#include <net/bluetooth/smp.h>
57 58
58int disable_ertm; 59int disable_ertm;
59 60
60static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN; 61static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61static u8 l2cap_fixed_chan[8] = { 0x02, }; 62static u8 l2cap_fixed_chan[8] = { 0x02, };
62 63
63static struct workqueue_struct *_busy_wq; 64static LIST_HEAD(chan_list);
64 65static DEFINE_RWLOCK(chan_list_lock);
65LIST_HEAD(chan_list);
66DEFINE_RWLOCK(chan_list_lock);
67
68static void l2cap_busy_work(struct work_struct *work);
69 66
70static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, 67static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
71 u8 code, u8 ident, u16 dlen, void *data); 68 u8 code, u8 ident, u16 dlen, void *data);
69static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
70 void *data);
72static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data); 71static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
73 74
74static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb); 75static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
75 76
76/* ---- L2CAP channels ---- */ 77/* ---- L2CAP channels ---- */
78
79static inline void chan_hold(struct l2cap_chan *c)
80{
81 atomic_inc(&c->refcnt);
82}
83
84static inline void chan_put(struct l2cap_chan *c)
85{
86 if (atomic_dec_and_test(&c->refcnt))
87 kfree(c);
88}
89
77static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) 90static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
78{ 91{
79 struct l2cap_chan *c; 92 struct l2cap_chan *c;
@@ -204,6 +217,62 @@ static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
204 return 0; 217 return 0;
205} 218}
206 219
220static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
221{
222 BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
223
224 if (!mod_timer(timer, jiffies + timeout))
225 chan_hold(chan);
226}
227
228static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
229{
230 BT_DBG("chan %p state %d", chan, chan->state);
231
232 if (timer_pending(timer) && del_timer(timer))
233 chan_put(chan);
234}
235
236static void l2cap_state_change(struct l2cap_chan *chan, int state)
237{
238 chan->state = state;
239 chan->ops->state_change(chan->data, state);
240}
241
242static void l2cap_chan_timeout(unsigned long arg)
243{
244 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
245 struct sock *sk = chan->sk;
246 int reason;
247
248 BT_DBG("chan %p state %d", chan, chan->state);
249
250 bh_lock_sock(sk);
251
252 if (sock_owned_by_user(sk)) {
253 /* sk is owned by user. Try again later */
254 __set_chan_timer(chan, HZ / 5);
255 bh_unlock_sock(sk);
256 chan_put(chan);
257 return;
258 }
259
260 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
261 reason = ECONNREFUSED;
262 else if (chan->state == BT_CONNECT &&
263 chan->sec_level != BT_SECURITY_SDP)
264 reason = ECONNREFUSED;
265 else
266 reason = ETIMEDOUT;
267
268 l2cap_chan_close(chan, reason);
269
270 bh_unlock_sock(sk);
271
272 chan->ops->close(chan->data);
273 chan_put(chan);
274}
275
207struct l2cap_chan *l2cap_chan_create(struct sock *sk) 276struct l2cap_chan *l2cap_chan_create(struct sock *sk)
208{ 277{
209 struct l2cap_chan *chan; 278 struct l2cap_chan *chan;
@@ -218,6 +287,12 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
218 list_add(&chan->global_l, &chan_list); 287 list_add(&chan->global_l, &chan_list);
219 write_unlock_bh(&chan_list_lock); 288 write_unlock_bh(&chan_list_lock);
220 289
290 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
291
292 chan->state = BT_OPEN;
293
294 atomic_set(&chan->refcnt, 1);
295
221 return chan; 296 return chan;
222} 297}
223 298
@@ -227,13 +302,11 @@ void l2cap_chan_destroy(struct l2cap_chan *chan)
227 list_del(&chan->global_l); 302 list_del(&chan->global_l);
228 write_unlock_bh(&chan_list_lock); 303 write_unlock_bh(&chan_list_lock);
229 304
230 kfree(chan); 305 chan_put(chan);
231} 306}
232 307
233static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) 308static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
234{ 309{
235 struct sock *sk = chan->sk;
236
237 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, 310 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
238 chan->psm, chan->dcid); 311 chan->psm, chan->dcid);
239 312
@@ -241,7 +314,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
241 314
242 chan->conn = conn; 315 chan->conn = conn;
243 316
244 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) { 317 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
245 if (conn->hcon->type == LE_LINK) { 318 if (conn->hcon->type == LE_LINK) {
246 /* LE connection */ 319 /* LE connection */
247 chan->omtu = L2CAP_LE_DEFAULT_MTU; 320 chan->omtu = L2CAP_LE_DEFAULT_MTU;
@@ -252,7 +325,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
252 chan->scid = l2cap_alloc_cid(conn); 325 chan->scid = l2cap_alloc_cid(conn);
253 chan->omtu = L2CAP_DEFAULT_MTU; 326 chan->omtu = L2CAP_DEFAULT_MTU;
254 } 327 }
255 } else if (sk->sk_type == SOCK_DGRAM) { 328 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
256 /* Connectionless socket */ 329 /* Connectionless socket */
257 chan->scid = L2CAP_CID_CONN_LESS; 330 chan->scid = L2CAP_CID_CONN_LESS;
258 chan->dcid = L2CAP_CID_CONN_LESS; 331 chan->dcid = L2CAP_CID_CONN_LESS;
@@ -264,20 +337,20 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
264 chan->omtu = L2CAP_DEFAULT_MTU; 337 chan->omtu = L2CAP_DEFAULT_MTU;
265 } 338 }
266 339
267 sock_hold(sk); 340 chan_hold(chan);
268 341
269 list_add(&chan->list, &conn->chan_l); 342 list_add(&chan->list, &conn->chan_l);
270} 343}
271 344
272/* Delete channel. 345/* Delete channel.
273 * Must be called on the locked socket. */ 346 * Must be called on the locked socket. */
274void l2cap_chan_del(struct l2cap_chan *chan, int err) 347static void l2cap_chan_del(struct l2cap_chan *chan, int err)
275{ 348{
276 struct sock *sk = chan->sk; 349 struct sock *sk = chan->sk;
277 struct l2cap_conn *conn = chan->conn; 350 struct l2cap_conn *conn = chan->conn;
278 struct sock *parent = bt_sk(sk)->parent; 351 struct sock *parent = bt_sk(sk)->parent;
279 352
280 l2cap_sock_clear_timer(sk); 353 __clear_chan_timer(chan);
281 354
282 BT_DBG("chan %p, conn %p, err %d", chan, conn, err); 355 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
283 356
@@ -286,13 +359,13 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
286 write_lock_bh(&conn->chan_lock); 359 write_lock_bh(&conn->chan_lock);
287 list_del(&chan->list); 360 list_del(&chan->list);
288 write_unlock_bh(&conn->chan_lock); 361 write_unlock_bh(&conn->chan_lock);
289 __sock_put(sk); 362 chan_put(chan);
290 363
291 chan->conn = NULL; 364 chan->conn = NULL;
292 hci_conn_put(conn->hcon); 365 hci_conn_put(conn->hcon);
293 } 366 }
294 367
295 sk->sk_state = BT_CLOSED; 368 l2cap_state_change(chan, BT_CLOSED);
296 sock_set_flag(sk, SOCK_ZAPPED); 369 sock_set_flag(sk, SOCK_ZAPPED);
297 370
298 if (err) 371 if (err)
@@ -304,8 +377,8 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
304 } else 377 } else
305 sk->sk_state_change(sk); 378 sk->sk_state_change(sk);
306 379
307 if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE && 380 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
308 chan->conf_state & L2CAP_CONF_INPUT_DONE)) 381 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
309 return; 382 return;
310 383
311 skb_queue_purge(&chan->tx_q); 384 skb_queue_purge(&chan->tx_q);
@@ -313,12 +386,11 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
313 if (chan->mode == L2CAP_MODE_ERTM) { 386 if (chan->mode == L2CAP_MODE_ERTM) {
314 struct srej_list *l, *tmp; 387 struct srej_list *l, *tmp;
315 388
316 del_timer(&chan->retrans_timer); 389 __clear_retrans_timer(chan);
317 del_timer(&chan->monitor_timer); 390 __clear_monitor_timer(chan);
318 del_timer(&chan->ack_timer); 391 __clear_ack_timer(chan);
319 392
320 skb_queue_purge(&chan->srej_q); 393 skb_queue_purge(&chan->srej_q);
321 skb_queue_purge(&chan->busy_q);
322 394
323 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { 395 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
324 list_del(&l->list); 396 list_del(&l->list);
@@ -327,11 +399,86 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
327 } 399 }
328} 400}
329 401
330static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan) 402static void l2cap_chan_cleanup_listen(struct sock *parent)
331{ 403{
404 struct sock *sk;
405
406 BT_DBG("parent %p", parent);
407
408 /* Close not yet accepted channels */
409 while ((sk = bt_accept_dequeue(parent, NULL))) {
410 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
411 __clear_chan_timer(chan);
412 lock_sock(sk);
413 l2cap_chan_close(chan, ECONNRESET);
414 release_sock(sk);
415 chan->ops->close(chan->data);
416 }
417}
418
419void l2cap_chan_close(struct l2cap_chan *chan, int reason)
420{
421 struct l2cap_conn *conn = chan->conn;
332 struct sock *sk = chan->sk; 422 struct sock *sk = chan->sk;
333 423
334 if (sk->sk_type == SOCK_RAW) { 424 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
425
426 switch (chan->state) {
427 case BT_LISTEN:
428 l2cap_chan_cleanup_listen(sk);
429
430 l2cap_state_change(chan, BT_CLOSED);
431 sock_set_flag(sk, SOCK_ZAPPED);
432 break;
433
434 case BT_CONNECTED:
435 case BT_CONFIG:
436 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
437 conn->hcon->type == ACL_LINK) {
438 __clear_chan_timer(chan);
439 __set_chan_timer(chan, sk->sk_sndtimeo);
440 l2cap_send_disconn_req(conn, chan, reason);
441 } else
442 l2cap_chan_del(chan, reason);
443 break;
444
445 case BT_CONNECT2:
446 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
447 conn->hcon->type == ACL_LINK) {
448 struct l2cap_conn_rsp rsp;
449 __u16 result;
450
451 if (bt_sk(sk)->defer_setup)
452 result = L2CAP_CR_SEC_BLOCK;
453 else
454 result = L2CAP_CR_BAD_PSM;
455 l2cap_state_change(chan, BT_DISCONN);
456
457 rsp.scid = cpu_to_le16(chan->dcid);
458 rsp.dcid = cpu_to_le16(chan->scid);
459 rsp.result = cpu_to_le16(result);
460 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
461 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
462 sizeof(rsp), &rsp);
463 }
464
465 l2cap_chan_del(chan, reason);
466 break;
467
468 case BT_CONNECT:
469 case BT_DISCONN:
470 l2cap_chan_del(chan, reason);
471 break;
472
473 default:
474 sock_set_flag(sk, SOCK_ZAPPED);
475 break;
476 }
477}
478
479static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
480{
481 if (chan->chan_type == L2CAP_CHAN_RAW) {
335 switch (chan->sec_level) { 482 switch (chan->sec_level) {
336 case BT_SECURITY_HIGH: 483 case BT_SECURITY_HIGH:
337 return HCI_AT_DEDICATED_BONDING_MITM; 484 return HCI_AT_DEDICATED_BONDING_MITM;
@@ -371,7 +518,7 @@ static inline int l2cap_check_security(struct l2cap_chan *chan)
371 return hci_conn_security(conn->hcon, chan->sec_level, auth_type); 518 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
372} 519}
373 520
374u8 l2cap_get_ident(struct l2cap_conn *conn) 521static u8 l2cap_get_ident(struct l2cap_conn *conn)
375{ 522{
376 u8 id; 523 u8 id;
377 524
@@ -393,7 +540,7 @@ u8 l2cap_get_ident(struct l2cap_conn *conn)
393 return id; 540 return id;
394} 541}
395 542
396void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data) 543static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
397{ 544{
398 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data); 545 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
399 u8 flags; 546 u8 flags;
@@ -408,6 +555,8 @@ void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *d
408 else 555 else
409 flags = ACL_START; 556 flags = ACL_START;
410 557
558 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
559
411 hci_send_acl(conn->hcon, skb, flags); 560 hci_send_acl(conn->hcon, skb, flags);
412} 561}
413 562
@@ -415,13 +564,11 @@ static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
415{ 564{
416 struct sk_buff *skb; 565 struct sk_buff *skb;
417 struct l2cap_hdr *lh; 566 struct l2cap_hdr *lh;
418 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
419 struct l2cap_conn *conn = chan->conn; 567 struct l2cap_conn *conn = chan->conn;
420 struct sock *sk = (struct sock *)pi;
421 int count, hlen = L2CAP_HDR_SIZE + 2; 568 int count, hlen = L2CAP_HDR_SIZE + 2;
422 u8 flags; 569 u8 flags;
423 570
424 if (sk->sk_state != BT_CONNECTED) 571 if (chan->state != BT_CONNECTED)
425 return; 572 return;
426 573
427 if (chan->fcs == L2CAP_FCS_CRC16) 574 if (chan->fcs == L2CAP_FCS_CRC16)
@@ -432,15 +579,11 @@ static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
432 count = min_t(unsigned int, conn->mtu, hlen); 579 count = min_t(unsigned int, conn->mtu, hlen);
433 control |= L2CAP_CTRL_FRAME_TYPE; 580 control |= L2CAP_CTRL_FRAME_TYPE;
434 581
435 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) { 582 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
436 control |= L2CAP_CTRL_FINAL; 583 control |= L2CAP_CTRL_FINAL;
437 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
438 }
439 584
440 if (chan->conn_state & L2CAP_CONN_SEND_PBIT) { 585 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
441 control |= L2CAP_CTRL_POLL; 586 control |= L2CAP_CTRL_POLL;
442 chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
443 }
444 587
445 skb = bt_skb_alloc(count, GFP_ATOMIC); 588 skb = bt_skb_alloc(count, GFP_ATOMIC);
446 if (!skb) 589 if (!skb)
@@ -461,14 +604,16 @@ static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
461 else 604 else
462 flags = ACL_START; 605 flags = ACL_START;
463 606
607 bt_cb(skb)->force_active = chan->force_active;
608
464 hci_send_acl(chan->conn->hcon, skb, flags); 609 hci_send_acl(chan->conn->hcon, skb, flags);
465} 610}
466 611
467static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control) 612static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
468{ 613{
469 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) { 614 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
470 control |= L2CAP_SUPER_RCV_NOT_READY; 615 control |= L2CAP_SUPER_RCV_NOT_READY;
471 chan->conn_state |= L2CAP_CONN_RNR_SENT; 616 set_bit(CONN_RNR_SENT, &chan->conn_state);
472 } else 617 } else
473 control |= L2CAP_SUPER_RCV_READY; 618 control |= L2CAP_SUPER_RCV_READY;
474 619
@@ -479,7 +624,7 @@ static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
479 624
480static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan) 625static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
481{ 626{
482 return !(chan->conf_state & L2CAP_CONF_CONNECT_PEND); 627 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
483} 628}
484 629
485static void l2cap_do_start(struct l2cap_chan *chan) 630static void l2cap_do_start(struct l2cap_chan *chan)
@@ -497,7 +642,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
497 req.psm = chan->psm; 642 req.psm = chan->psm;
498 643
499 chan->ident = l2cap_get_ident(conn); 644 chan->ident = l2cap_get_ident(conn);
500 chan->conf_state |= L2CAP_CONF_CONNECT_PEND; 645 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
501 646
502 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, 647 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
503 sizeof(req), &req); 648 sizeof(req), &req);
@@ -533,7 +678,7 @@ static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
533 } 678 }
534} 679}
535 680
536void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err) 681static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
537{ 682{
538 struct sock *sk; 683 struct sock *sk;
539 struct l2cap_disconn_req req; 684 struct l2cap_disconn_req req;
@@ -544,9 +689,9 @@ void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, in
544 sk = chan->sk; 689 sk = chan->sk;
545 690
546 if (chan->mode == L2CAP_MODE_ERTM) { 691 if (chan->mode == L2CAP_MODE_ERTM) {
547 del_timer(&chan->retrans_timer); 692 __clear_retrans_timer(chan);
548 del_timer(&chan->monitor_timer); 693 __clear_monitor_timer(chan);
549 del_timer(&chan->ack_timer); 694 __clear_ack_timer(chan);
550 } 695 }
551 696
552 req.dcid = cpu_to_le16(chan->dcid); 697 req.dcid = cpu_to_le16(chan->dcid);
@@ -554,7 +699,7 @@ void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, in
554 l2cap_send_cmd(conn, l2cap_get_ident(conn), 699 l2cap_send_cmd(conn, l2cap_get_ident(conn),
555 L2CAP_DISCONN_REQ, sizeof(req), &req); 700 L2CAP_DISCONN_REQ, sizeof(req), &req);
556 701
557 sk->sk_state = BT_DISCONN; 702 l2cap_state_change(chan, BT_DISCONN);
558 sk->sk_err = err; 703 sk->sk_err = err;
559} 704}
560 705
@@ -572,13 +717,12 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
572 717
573 bh_lock_sock(sk); 718 bh_lock_sock(sk);
574 719
575 if (sk->sk_type != SOCK_SEQPACKET && 720 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
576 sk->sk_type != SOCK_STREAM) {
577 bh_unlock_sock(sk); 721 bh_unlock_sock(sk);
578 continue; 722 continue;
579 } 723 }
580 724
581 if (sk->sk_state == BT_CONNECT) { 725 if (chan->state == BT_CONNECT) {
582 struct l2cap_conn_req req; 726 struct l2cap_conn_req req;
583 727
584 if (!l2cap_check_security(chan) || 728 if (!l2cap_check_security(chan) ||
@@ -587,15 +731,14 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
587 continue; 731 continue;
588 } 732 }
589 733
590 if (!l2cap_mode_supported(chan->mode, 734 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
591 conn->feat_mask) 735 && test_bit(CONF_STATE2_DEVICE,
592 && chan->conf_state & 736 &chan->conf_state)) {
593 L2CAP_CONF_STATE2_DEVICE) { 737 /* l2cap_chan_close() calls list_del(chan)
594 /* __l2cap_sock_close() calls list_del(chan)
595 * so release the lock */ 738 * so release the lock */
596 read_unlock_bh(&conn->chan_lock); 739 read_unlock(&conn->chan_lock);
597 __l2cap_sock_close(sk, ECONNRESET); 740 l2cap_chan_close(chan, ECONNRESET);
598 read_lock_bh(&conn->chan_lock); 741 read_lock(&conn->chan_lock);
599 bh_unlock_sock(sk); 742 bh_unlock_sock(sk);
600 continue; 743 continue;
601 } 744 }
@@ -604,12 +747,12 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
604 req.psm = chan->psm; 747 req.psm = chan->psm;
605 748
606 chan->ident = l2cap_get_ident(conn); 749 chan->ident = l2cap_get_ident(conn);
607 chan->conf_state |= L2CAP_CONF_CONNECT_PEND; 750 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
608 751
609 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, 752 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
610 sizeof(req), &req); 753 sizeof(req), &req);
611 754
612 } else if (sk->sk_state == BT_CONNECT2) { 755 } else if (chan->state == BT_CONNECT2) {
613 struct l2cap_conn_rsp rsp; 756 struct l2cap_conn_rsp rsp;
614 char buf[128]; 757 char buf[128];
615 rsp.scid = cpu_to_le16(chan->dcid); 758 rsp.scid = cpu_to_le16(chan->dcid);
@@ -624,7 +767,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
624 parent->sk_data_ready(parent, 0); 767 parent->sk_data_ready(parent, 0);
625 768
626 } else { 769 } else {
627 sk->sk_state = BT_CONFIG; 770 l2cap_state_change(chan, BT_CONFIG);
628 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); 771 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
629 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 772 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
630 } 773 }
@@ -636,13 +779,13 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
636 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, 779 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
637 sizeof(rsp), &rsp); 780 sizeof(rsp), &rsp);
638 781
639 if (chan->conf_state & L2CAP_CONF_REQ_SENT || 782 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
640 rsp.result != L2CAP_CR_SUCCESS) { 783 rsp.result != L2CAP_CR_SUCCESS) {
641 bh_unlock_sock(sk); 784 bh_unlock_sock(sk);
642 continue; 785 continue;
643 } 786 }
644 787
645 chan->conf_state |= L2CAP_CONF_REQ_SENT; 788 set_bit(CONF_REQ_SENT, &chan->conf_state);
646 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 789 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
647 l2cap_build_conf_req(chan, buf), buf); 790 l2cap_build_conf_req(chan, buf), buf);
648 chan->num_conf_req++; 791 chan->num_conf_req++;
@@ -666,7 +809,7 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdadd
666 list_for_each_entry(c, &chan_list, global_l) { 809 list_for_each_entry(c, &chan_list, global_l) {
667 struct sock *sk = c->sk; 810 struct sock *sk = c->sk;
668 811
669 if (state && sk->sk_state != state) 812 if (state && c->state != state)
670 continue; 813 continue;
671 814
672 if (c->scid == cid) { 815 if (c->scid == cid) {
@@ -710,24 +853,16 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
710 goto clean; 853 goto clean;
711 } 854 }
712 855
713 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC); 856 chan = pchan->ops->new_connection(pchan->data);
714 if (!sk) 857 if (!chan)
715 goto clean;
716
717 chan = l2cap_chan_create(sk);
718 if (!chan) {
719 l2cap_sock_kill(sk);
720 goto clean; 858 goto clean;
721 }
722 859
723 l2cap_pi(sk)->chan = chan; 860 sk = chan->sk;
724 861
725 write_lock_bh(&conn->chan_lock); 862 write_lock_bh(&conn->chan_lock);
726 863
727 hci_conn_hold(conn->hcon); 864 hci_conn_hold(conn->hcon);
728 865
729 l2cap_sock_init(sk, parent);
730
731 bacpy(&bt_sk(sk)->src, conn->src); 866 bacpy(&bt_sk(sk)->src, conn->src);
732 bacpy(&bt_sk(sk)->dst, conn->dst); 867 bacpy(&bt_sk(sk)->dst, conn->dst);
733 868
@@ -735,9 +870,9 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
735 870
736 __l2cap_chan_add(conn, chan); 871 __l2cap_chan_add(conn, chan);
737 872
738 l2cap_sock_set_timer(sk, sk->sk_sndtimeo); 873 __set_chan_timer(chan, sk->sk_sndtimeo);
739 874
740 sk->sk_state = BT_CONNECTED; 875 l2cap_state_change(chan, BT_CONNECTED);
741 parent->sk_data_ready(parent, 0); 876 parent->sk_data_ready(parent, 0);
742 877
743 write_unlock_bh(&conn->chan_lock); 878 write_unlock_bh(&conn->chan_lock);
@@ -746,6 +881,23 @@ clean:
746 bh_unlock_sock(parent); 881 bh_unlock_sock(parent);
747} 882}
748 883
884static void l2cap_chan_ready(struct sock *sk)
885{
886 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
887 struct sock *parent = bt_sk(sk)->parent;
888
889 BT_DBG("sk %p, parent %p", sk, parent);
890
891 chan->conf_state = 0;
892 __clear_chan_timer(chan);
893
894 l2cap_state_change(chan, BT_CONNECTED);
895 sk->sk_state_change(sk);
896
897 if (parent)
898 parent->sk_data_ready(parent, 0);
899}
900
749static void l2cap_conn_ready(struct l2cap_conn *conn) 901static void l2cap_conn_ready(struct l2cap_conn *conn)
750{ 902{
751 struct l2cap_chan *chan; 903 struct l2cap_chan *chan;
@@ -763,17 +915,15 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
763 bh_lock_sock(sk); 915 bh_lock_sock(sk);
764 916
765 if (conn->hcon->type == LE_LINK) { 917 if (conn->hcon->type == LE_LINK) {
766 l2cap_sock_clear_timer(sk); 918 if (smp_conn_security(conn, chan->sec_level))
767 sk->sk_state = BT_CONNECTED; 919 l2cap_chan_ready(sk);
768 sk->sk_state_change(sk);
769 }
770 920
771 if (sk->sk_type != SOCK_SEQPACKET && 921 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
772 sk->sk_type != SOCK_STREAM) { 922 __clear_chan_timer(chan);
773 l2cap_sock_clear_timer(sk); 923 l2cap_state_change(chan, BT_CONNECTED);
774 sk->sk_state = BT_CONNECTED;
775 sk->sk_state_change(sk); 924 sk->sk_state_change(sk);
776 } else if (sk->sk_state == BT_CONNECT) 925
926 } else if (chan->state == BT_CONNECT)
777 l2cap_do_start(chan); 927 l2cap_do_start(chan);
778 928
779 bh_unlock_sock(sk); 929 bh_unlock_sock(sk);
@@ -811,6 +961,45 @@ static void l2cap_info_timeout(unsigned long arg)
811 l2cap_conn_start(conn); 961 l2cap_conn_start(conn);
812} 962}
813 963
964static void l2cap_conn_del(struct hci_conn *hcon, int err)
965{
966 struct l2cap_conn *conn = hcon->l2cap_data;
967 struct l2cap_chan *chan, *l;
968 struct sock *sk;
969
970 if (!conn)
971 return;
972
973 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
974
975 kfree_skb(conn->rx_skb);
976
977 /* Kill channels */
978 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
979 sk = chan->sk;
980 bh_lock_sock(sk);
981 l2cap_chan_del(chan, err);
982 bh_unlock_sock(sk);
983 chan->ops->close(chan->data);
984 }
985
986 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
987 del_timer_sync(&conn->info_timer);
988
989 if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
990 del_timer(&conn->security_timer);
991
992 hcon->l2cap_data = NULL;
993 kfree(conn);
994}
995
996static void security_timeout(unsigned long arg)
997{
998 struct l2cap_conn *conn = (void *) arg;
999
1000 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1001}
1002
814static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) 1003static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
815{ 1004{
816 struct l2cap_conn *conn = hcon->l2cap_data; 1005 struct l2cap_conn *conn = hcon->l2cap_data;
@@ -842,7 +1031,10 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
842 1031
843 INIT_LIST_HEAD(&conn->chan_l); 1032 INIT_LIST_HEAD(&conn->chan_l);
844 1033
845 if (hcon->type != LE_LINK) 1034 if (hcon->type == LE_LINK)
1035 setup_timer(&conn->security_timer, security_timeout,
1036 (unsigned long) conn);
1037 else
846 setup_timer(&conn->info_timer, l2cap_info_timeout, 1038 setup_timer(&conn->info_timer, l2cap_info_timeout,
847 (unsigned long) conn); 1039 (unsigned long) conn);
848 1040
@@ -851,35 +1043,6 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
851 return conn; 1043 return conn;
852} 1044}
853 1045
854static void l2cap_conn_del(struct hci_conn *hcon, int err)
855{
856 struct l2cap_conn *conn = hcon->l2cap_data;
857 struct l2cap_chan *chan, *l;
858 struct sock *sk;
859
860 if (!conn)
861 return;
862
863 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
864
865 kfree_skb(conn->rx_skb);
866
867 /* Kill channels */
868 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
869 sk = chan->sk;
870 bh_lock_sock(sk);
871 l2cap_chan_del(chan, err);
872 bh_unlock_sock(sk);
873 l2cap_sock_kill(sk);
874 }
875
876 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
877 del_timer_sync(&conn->info_timer);
878
879 hcon->l2cap_data = NULL;
880 kfree(conn);
881}
882
883static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) 1046static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
884{ 1047{
885 write_lock_bh(&conn->chan_lock); 1048 write_lock_bh(&conn->chan_lock);
@@ -901,7 +1064,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
901 list_for_each_entry(c, &chan_list, global_l) { 1064 list_for_each_entry(c, &chan_list, global_l) {
902 struct sock *sk = c->sk; 1065 struct sock *sk = c->sk;
903 1066
904 if (state && sk->sk_state != state) 1067 if (state && c->state != state)
905 continue; 1068 continue;
906 1069
907 if (c->psm == psm) { 1070 if (c->psm == psm) {
@@ -945,10 +1108,10 @@ int l2cap_chan_connect(struct l2cap_chan *chan)
945 auth_type = l2cap_get_auth_type(chan); 1108 auth_type = l2cap_get_auth_type(chan);
946 1109
947 if (chan->dcid == L2CAP_CID_LE_DATA) 1110 if (chan->dcid == L2CAP_CID_LE_DATA)
948 hcon = hci_connect(hdev, LE_LINK, dst, 1111 hcon = hci_connect(hdev, LE_LINK, 0, dst,
949 chan->sec_level, auth_type); 1112 chan->sec_level, auth_type);
950 else 1113 else
951 hcon = hci_connect(hdev, ACL_LINK, dst, 1114 hcon = hci_connect(hdev, ACL_LINK, 0, dst,
952 chan->sec_level, auth_type); 1115 chan->sec_level, auth_type);
953 1116
954 if (IS_ERR(hcon)) { 1117 if (IS_ERR(hcon)) {
@@ -968,15 +1131,14 @@ int l2cap_chan_connect(struct l2cap_chan *chan)
968 1131
969 l2cap_chan_add(conn, chan); 1132 l2cap_chan_add(conn, chan);
970 1133
971 sk->sk_state = BT_CONNECT; 1134 l2cap_state_change(chan, BT_CONNECT);
972 l2cap_sock_set_timer(sk, sk->sk_sndtimeo); 1135 __set_chan_timer(chan, sk->sk_sndtimeo);
973 1136
974 if (hcon->state == BT_CONNECTED) { 1137 if (hcon->state == BT_CONNECTED) {
975 if (sk->sk_type != SOCK_SEQPACKET && 1138 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
976 sk->sk_type != SOCK_STREAM) { 1139 __clear_chan_timer(chan);
977 l2cap_sock_clear_timer(sk);
978 if (l2cap_check_security(chan)) 1140 if (l2cap_check_security(chan))
979 sk->sk_state = BT_CONNECTED; 1141 l2cap_state_change(chan, BT_CONNECTED);
980 } else 1142 } else
981 l2cap_do_start(chan); 1143 l2cap_do_start(chan);
982 } 1144 }
@@ -997,9 +1159,8 @@ int __l2cap_wait_ack(struct sock *sk)
997 int timeo = HZ/5; 1159 int timeo = HZ/5;
998 1160
999 add_wait_queue(sk_sleep(sk), &wait); 1161 add_wait_queue(sk_sleep(sk), &wait);
1000 while ((chan->unacked_frames > 0 && chan->conn)) { 1162 set_current_state(TASK_INTERRUPTIBLE);
1001 set_current_state(TASK_INTERRUPTIBLE); 1163 while (chan->unacked_frames > 0 && chan->conn) {
1002
1003 if (!timeo) 1164 if (!timeo)
1004 timeo = HZ/5; 1165 timeo = HZ/5;
1005 1166
@@ -1011,6 +1172,7 @@ int __l2cap_wait_ack(struct sock *sk)
1011 release_sock(sk); 1172 release_sock(sk);
1012 timeo = schedule_timeout(timeo); 1173 timeo = schedule_timeout(timeo);
1013 lock_sock(sk); 1174 lock_sock(sk);
1175 set_current_state(TASK_INTERRUPTIBLE);
1014 1176
1015 err = sock_error(sk); 1177 err = sock_error(sk);
1016 if (err) 1178 if (err)
@@ -1036,7 +1198,7 @@ static void l2cap_monitor_timeout(unsigned long arg)
1036 } 1198 }
1037 1199
1038 chan->retry_count++; 1200 chan->retry_count++;
1039 __mod_monitor_timer(); 1201 __set_monitor_timer(chan);
1040 1202
1041 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); 1203 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1042 bh_unlock_sock(sk); 1204 bh_unlock_sock(sk);
@@ -1051,9 +1213,9 @@ static void l2cap_retrans_timeout(unsigned long arg)
1051 1213
1052 bh_lock_sock(sk); 1214 bh_lock_sock(sk);
1053 chan->retry_count = 1; 1215 chan->retry_count = 1;
1054 __mod_monitor_timer(); 1216 __set_monitor_timer(chan);
1055 1217
1056 chan->conn_state |= L2CAP_CONN_WAIT_F; 1218 set_bit(CONN_WAIT_F, &chan->conn_state);
1057 1219
1058 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); 1220 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1059 bh_unlock_sock(sk); 1221 bh_unlock_sock(sk);
@@ -1075,7 +1237,7 @@ static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1075 } 1237 }
1076 1238
1077 if (!chan->unacked_frames) 1239 if (!chan->unacked_frames)
1078 del_timer(&chan->retrans_timer); 1240 __clear_retrans_timer(chan);
1079} 1241}
1080 1242
1081void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb) 1243void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
@@ -1090,6 +1252,7 @@ void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1090 else 1252 else
1091 flags = ACL_START; 1253 flags = ACL_START;
1092 1254
1255 bt_cb(skb)->force_active = chan->force_active;
1093 hci_send_acl(hcon, skb, flags); 1256 hci_send_acl(hcon, skb, flags);
1094} 1257}
1095 1258
@@ -1143,10 +1306,8 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1143 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); 1306 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1144 control &= L2CAP_CTRL_SAR; 1307 control &= L2CAP_CTRL_SAR;
1145 1308
1146 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) { 1309 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1147 control |= L2CAP_CTRL_FINAL; 1310 control |= L2CAP_CTRL_FINAL;
1148 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1149 }
1150 1311
1151 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT) 1312 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1152 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); 1313 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
@@ -1164,11 +1325,10 @@ int l2cap_ertm_send(struct l2cap_chan *chan)
1164int l2cap_ertm_send(struct l2cap_chan *chan) 1325int l2cap_ertm_send(struct l2cap_chan *chan)
1165{ 1326{
1166 struct sk_buff *skb, *tx_skb; 1327 struct sk_buff *skb, *tx_skb;
1167 struct sock *sk = chan->sk;
1168 u16 control, fcs; 1328 u16 control, fcs;
1169 int nsent = 0; 1329 int nsent = 0;
1170 1330
1171 if (sk->sk_state != BT_CONNECTED) 1331 if (chan->state != BT_CONNECTED)
1172 return -ENOTCONN; 1332 return -ENOTCONN;
1173 1333
1174 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) { 1334 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
@@ -1186,10 +1346,9 @@ int l2cap_ertm_send(struct l2cap_chan *chan)
1186 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); 1346 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1187 control &= L2CAP_CTRL_SAR; 1347 control &= L2CAP_CTRL_SAR;
1188 1348
1189 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) { 1349 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1190 control |= L2CAP_CTRL_FINAL; 1350 control |= L2CAP_CTRL_FINAL;
1191 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT; 1351
1192 }
1193 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT) 1352 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1194 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); 1353 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1195 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); 1354 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
@@ -1202,7 +1361,7 @@ int l2cap_ertm_send(struct l2cap_chan *chan)
1202 1361
1203 l2cap_do_send(chan, tx_skb); 1362 l2cap_do_send(chan, tx_skb);
1204 1363
1205 __mod_retrans_timer(); 1364 __set_retrans_timer(chan);
1206 1365
1207 bt_cb(skb)->tx_seq = chan->next_tx_seq; 1366 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1208 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64; 1367 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
@@ -1241,9 +1400,9 @@ static void l2cap_send_ack(struct l2cap_chan *chan)
1241 1400
1242 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 1401 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1243 1402
1244 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) { 1403 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1245 control |= L2CAP_SUPER_RCV_NOT_READY; 1404 control |= L2CAP_SUPER_RCV_NOT_READY;
1246 chan->conn_state |= L2CAP_CONN_RNR_SENT; 1405 set_bit(CONN_RNR_SENT, &chan->conn_state);
1247 l2cap_send_sframe(chan, control); 1406 l2cap_send_sframe(chan, control);
1248 return; 1407 return;
1249 } 1408 }
@@ -1451,28 +1610,83 @@ int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t le
1451 return size; 1610 return size;
1452} 1611}
1453 1612
1454static void l2cap_chan_ready(struct sock *sk) 1613int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1455{ 1614{
1456 struct sock *parent = bt_sk(sk)->parent; 1615 struct sk_buff *skb;
1457 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 1616 u16 control;
1617 int err;
1458 1618
1459 BT_DBG("sk %p, parent %p", sk, parent); 1619 /* Connectionless channel */
1620 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1621 skb = l2cap_create_connless_pdu(chan, msg, len);
1622 if (IS_ERR(skb))
1623 return PTR_ERR(skb);
1460 1624
1461 chan->conf_state = 0; 1625 l2cap_do_send(chan, skb);
1462 l2cap_sock_clear_timer(sk); 1626 return len;
1627 }
1463 1628
1464 if (!parent) { 1629 switch (chan->mode) {
1465 /* Outgoing channel. 1630 case L2CAP_MODE_BASIC:
1466 * Wake up socket sleeping on connect. 1631 /* Check outgoing MTU */
1467 */ 1632 if (len > chan->omtu)
1468 sk->sk_state = BT_CONNECTED; 1633 return -EMSGSIZE;
1469 sk->sk_state_change(sk); 1634
1470 } else { 1635 /* Create a basic PDU */
1471 /* Incoming channel. 1636 skb = l2cap_create_basic_pdu(chan, msg, len);
1472 * Wake up socket sleeping on accept. 1637 if (IS_ERR(skb))
1473 */ 1638 return PTR_ERR(skb);
1474 parent->sk_data_ready(parent, 0); 1639
1640 l2cap_do_send(chan, skb);
1641 err = len;
1642 break;
1643
1644 case L2CAP_MODE_ERTM:
1645 case L2CAP_MODE_STREAMING:
1646 /* Entire SDU fits into one PDU */
1647 if (len <= chan->remote_mps) {
1648 control = L2CAP_SDU_UNSEGMENTED;
1649 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1650 0);
1651 if (IS_ERR(skb))
1652 return PTR_ERR(skb);
1653
1654 __skb_queue_tail(&chan->tx_q, skb);
1655
1656 if (chan->tx_send_head == NULL)
1657 chan->tx_send_head = skb;
1658
1659 } else {
1660 /* Segment SDU into multiple PDUs */
1661 err = l2cap_sar_segment_sdu(chan, msg, len);
1662 if (err < 0)
1663 return err;
1664 }
1665
1666 if (chan->mode == L2CAP_MODE_STREAMING) {
1667 l2cap_streaming_send(chan);
1668 err = len;
1669 break;
1670 }
1671
1672 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1673 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1674 err = len;
1675 break;
1676 }
1677
1678 err = l2cap_ertm_send(chan);
1679 if (err >= 0)
1680 err = len;
1681
1682 break;
1683
1684 default:
1685 BT_DBG("bad state %1.1x", chan->mode);
1686 err = -EBADFD;
1475 } 1687 }
1688
1689 return err;
1476} 1690}
1477 1691
1478/* Copy frame to all raw sockets on that connection */ 1692/* Copy frame to all raw sockets on that connection */
@@ -1486,7 +1700,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1486 read_lock(&conn->chan_lock); 1700 read_lock(&conn->chan_lock);
1487 list_for_each_entry(chan, &conn->chan_l, list) { 1701 list_for_each_entry(chan, &conn->chan_l, list) {
1488 struct sock *sk = chan->sk; 1702 struct sock *sk = chan->sk;
1489 if (sk->sk_type != SOCK_RAW) 1703 if (chan->chan_type != L2CAP_CHAN_RAW)
1490 continue; 1704 continue;
1491 1705
1492 /* Don't send frame to the socket it came from */ 1706 /* Don't send frame to the socket it came from */
@@ -1496,7 +1710,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1496 if (!nskb) 1710 if (!nskb)
1497 continue; 1711 continue;
1498 1712
1499 if (sock_queue_rcv_skb(sk, nskb)) 1713 if (chan->ops->recv(chan->data, nskb))
1500 kfree_skb(nskb); 1714 kfree_skb(nskb);
1501 } 1715 }
1502 read_unlock(&conn->chan_lock); 1716 read_unlock(&conn->chan_lock);
@@ -1655,11 +1869,9 @@ static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1655 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan); 1869 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1656 1870
1657 skb_queue_head_init(&chan->srej_q); 1871 skb_queue_head_init(&chan->srej_q);
1658 skb_queue_head_init(&chan->busy_q);
1659 1872
1660 INIT_LIST_HEAD(&chan->srej_l); 1873 INIT_LIST_HEAD(&chan->srej_l);
1661 1874
1662 INIT_WORK(&chan->busy_work, l2cap_busy_work);
1663 1875
1664 sk->sk_backlog_rcv = l2cap_ertm_data_rcv; 1876 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1665} 1877}
@@ -1691,7 +1903,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1691 switch (chan->mode) { 1903 switch (chan->mode) {
1692 case L2CAP_MODE_STREAMING: 1904 case L2CAP_MODE_STREAMING:
1693 case L2CAP_MODE_ERTM: 1905 case L2CAP_MODE_ERTM:
1694 if (chan->conf_state & L2CAP_CONF_STATE2_DEVICE) 1906 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
1695 break; 1907 break;
1696 1908
1697 /* fall through */ 1909 /* fall through */
@@ -1738,7 +1950,7 @@ done:
1738 break; 1950 break;
1739 1951
1740 if (chan->fcs == L2CAP_FCS_NONE || 1952 if (chan->fcs == L2CAP_FCS_NONE ||
1741 chan->conf_state & L2CAP_CONF_NO_FCS_RECV) { 1953 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1742 chan->fcs = L2CAP_FCS_NONE; 1954 chan->fcs = L2CAP_FCS_NONE;
1743 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs); 1955 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1744 } 1956 }
@@ -1761,7 +1973,7 @@ done:
1761 break; 1973 break;
1762 1974
1763 if (chan->fcs == L2CAP_FCS_NONE || 1975 if (chan->fcs == L2CAP_FCS_NONE ||
1764 chan->conf_state & L2CAP_CONF_NO_FCS_RECV) { 1976 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1765 chan->fcs = L2CAP_FCS_NONE; 1977 chan->fcs = L2CAP_FCS_NONE;
1766 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs); 1978 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1767 } 1979 }
@@ -1813,7 +2025,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1813 2025
1814 case L2CAP_CONF_FCS: 2026 case L2CAP_CONF_FCS:
1815 if (val == L2CAP_FCS_NONE) 2027 if (val == L2CAP_FCS_NONE)
1816 chan->conf_state |= L2CAP_CONF_NO_FCS_RECV; 2028 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
1817 2029
1818 break; 2030 break;
1819 2031
@@ -1833,7 +2045,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1833 switch (chan->mode) { 2045 switch (chan->mode) {
1834 case L2CAP_MODE_STREAMING: 2046 case L2CAP_MODE_STREAMING:
1835 case L2CAP_MODE_ERTM: 2047 case L2CAP_MODE_ERTM:
1836 if (!(chan->conf_state & L2CAP_CONF_STATE2_DEVICE)) { 2048 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
1837 chan->mode = l2cap_select_mode(rfc.mode, 2049 chan->mode = l2cap_select_mode(rfc.mode,
1838 chan->conn->feat_mask); 2050 chan->conn->feat_mask);
1839 break; 2051 break;
@@ -1866,14 +2078,14 @@ done:
1866 result = L2CAP_CONF_UNACCEPT; 2078 result = L2CAP_CONF_UNACCEPT;
1867 else { 2079 else {
1868 chan->omtu = mtu; 2080 chan->omtu = mtu;
1869 chan->conf_state |= L2CAP_CONF_MTU_DONE; 2081 set_bit(CONF_MTU_DONE, &chan->conf_state);
1870 } 2082 }
1871 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu); 2083 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
1872 2084
1873 switch (rfc.mode) { 2085 switch (rfc.mode) {
1874 case L2CAP_MODE_BASIC: 2086 case L2CAP_MODE_BASIC:
1875 chan->fcs = L2CAP_FCS_NONE; 2087 chan->fcs = L2CAP_FCS_NONE;
1876 chan->conf_state |= L2CAP_CONF_MODE_DONE; 2088 set_bit(CONF_MODE_DONE, &chan->conf_state);
1877 break; 2089 break;
1878 2090
1879 case L2CAP_MODE_ERTM: 2091 case L2CAP_MODE_ERTM:
@@ -1890,7 +2102,7 @@ done:
1890 rfc.monitor_timeout = 2102 rfc.monitor_timeout =
1891 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO); 2103 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1892 2104
1893 chan->conf_state |= L2CAP_CONF_MODE_DONE; 2105 set_bit(CONF_MODE_DONE, &chan->conf_state);
1894 2106
1895 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 2107 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1896 sizeof(rfc), (unsigned long) &rfc); 2108 sizeof(rfc), (unsigned long) &rfc);
@@ -1903,7 +2115,7 @@ done:
1903 2115
1904 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size); 2116 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1905 2117
1906 chan->conf_state |= L2CAP_CONF_MODE_DONE; 2118 set_bit(CONF_MODE_DONE, &chan->conf_state);
1907 2119
1908 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 2120 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1909 sizeof(rfc), (unsigned long) &rfc); 2121 sizeof(rfc), (unsigned long) &rfc);
@@ -1918,7 +2130,7 @@ done:
1918 } 2130 }
1919 2131
1920 if (result == L2CAP_CONF_SUCCESS) 2132 if (result == L2CAP_CONF_SUCCESS)
1921 chan->conf_state |= L2CAP_CONF_OUTPUT_DONE; 2133 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
1922 } 2134 }
1923 rsp->scid = cpu_to_le16(chan->dcid); 2135 rsp->scid = cpu_to_le16(chan->dcid);
1924 rsp->result = cpu_to_le16(result); 2136 rsp->result = cpu_to_le16(result);
@@ -1960,7 +2172,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
1960 if (olen == sizeof(rfc)) 2172 if (olen == sizeof(rfc))
1961 memcpy(&rfc, (void *)val, olen); 2173 memcpy(&rfc, (void *)val, olen);
1962 2174
1963 if ((chan->conf_state & L2CAP_CONF_STATE2_DEVICE) && 2175 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
1964 rfc.mode != chan->mode) 2176 rfc.mode != chan->mode)
1965 return -ECONNREFUSED; 2177 return -ECONNREFUSED;
1966 2178
@@ -2022,10 +2234,9 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2022 l2cap_send_cmd(conn, chan->ident, 2234 l2cap_send_cmd(conn, chan->ident,
2023 L2CAP_CONN_RSP, sizeof(rsp), &rsp); 2235 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2024 2236
2025 if (chan->conf_state & L2CAP_CONF_REQ_SENT) 2237 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2026 return; 2238 return;
2027 2239
2028 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2029 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2240 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2030 l2cap_build_conf_req(chan, buf), buf); 2241 l2cap_build_conf_req(chan, buf), buf);
2031 chan->num_conf_req++; 2242 chan->num_conf_req++;
@@ -2125,17 +2336,11 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2125 goto response; 2336 goto response;
2126 } 2337 }
2127 2338
2128 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC); 2339 chan = pchan->ops->new_connection(pchan->data);
2129 if (!sk) 2340 if (!chan)
2130 goto response;
2131
2132 chan = l2cap_chan_create(sk);
2133 if (!chan) {
2134 l2cap_sock_kill(sk);
2135 goto response; 2341 goto response;
2136 }
2137 2342
2138 l2cap_pi(sk)->chan = chan; 2343 sk = chan->sk;
2139 2344
2140 write_lock_bh(&conn->chan_lock); 2345 write_lock_bh(&conn->chan_lock);
2141 2346
@@ -2143,13 +2348,12 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2143 if (__l2cap_get_chan_by_dcid(conn, scid)) { 2348 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2144 write_unlock_bh(&conn->chan_lock); 2349 write_unlock_bh(&conn->chan_lock);
2145 sock_set_flag(sk, SOCK_ZAPPED); 2350 sock_set_flag(sk, SOCK_ZAPPED);
2146 l2cap_sock_kill(sk); 2351 chan->ops->close(chan->data);
2147 goto response; 2352 goto response;
2148 } 2353 }
2149 2354
2150 hci_conn_hold(conn->hcon); 2355 hci_conn_hold(conn->hcon);
2151 2356
2152 l2cap_sock_init(sk, parent);
2153 bacpy(&bt_sk(sk)->src, conn->src); 2357 bacpy(&bt_sk(sk)->src, conn->src);
2154 bacpy(&bt_sk(sk)->dst, conn->dst); 2358 bacpy(&bt_sk(sk)->dst, conn->dst);
2155 chan->psm = psm; 2359 chan->psm = psm;
@@ -2161,29 +2365,29 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2161 2365
2162 dcid = chan->scid; 2366 dcid = chan->scid;
2163 2367
2164 l2cap_sock_set_timer(sk, sk->sk_sndtimeo); 2368 __set_chan_timer(chan, sk->sk_sndtimeo);
2165 2369
2166 chan->ident = cmd->ident; 2370 chan->ident = cmd->ident;
2167 2371
2168 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) { 2372 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2169 if (l2cap_check_security(chan)) { 2373 if (l2cap_check_security(chan)) {
2170 if (bt_sk(sk)->defer_setup) { 2374 if (bt_sk(sk)->defer_setup) {
2171 sk->sk_state = BT_CONNECT2; 2375 l2cap_state_change(chan, BT_CONNECT2);
2172 result = L2CAP_CR_PEND; 2376 result = L2CAP_CR_PEND;
2173 status = L2CAP_CS_AUTHOR_PEND; 2377 status = L2CAP_CS_AUTHOR_PEND;
2174 parent->sk_data_ready(parent, 0); 2378 parent->sk_data_ready(parent, 0);
2175 } else { 2379 } else {
2176 sk->sk_state = BT_CONFIG; 2380 l2cap_state_change(chan, BT_CONFIG);
2177 result = L2CAP_CR_SUCCESS; 2381 result = L2CAP_CR_SUCCESS;
2178 status = L2CAP_CS_NO_INFO; 2382 status = L2CAP_CS_NO_INFO;
2179 } 2383 }
2180 } else { 2384 } else {
2181 sk->sk_state = BT_CONNECT2; 2385 l2cap_state_change(chan, BT_CONNECT2);
2182 result = L2CAP_CR_PEND; 2386 result = L2CAP_CR_PEND;
2183 status = L2CAP_CS_AUTHEN_PEND; 2387 status = L2CAP_CS_AUTHEN_PEND;
2184 } 2388 }
2185 } else { 2389 } else {
2186 sk->sk_state = BT_CONNECT2; 2390 l2cap_state_change(chan, BT_CONNECT2);
2187 result = L2CAP_CR_PEND; 2391 result = L2CAP_CR_PEND;
2188 status = L2CAP_CS_NO_INFO; 2392 status = L2CAP_CS_NO_INFO;
2189 } 2393 }
@@ -2214,10 +2418,10 @@ sendresp:
2214 L2CAP_INFO_REQ, sizeof(info), &info); 2418 L2CAP_INFO_REQ, sizeof(info), &info);
2215 } 2419 }
2216 2420
2217 if (chan && !(chan->conf_state & L2CAP_CONF_REQ_SENT) && 2421 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2218 result == L2CAP_CR_SUCCESS) { 2422 result == L2CAP_CR_SUCCESS) {
2219 u8 buf[128]; 2423 u8 buf[128];
2220 chan->conf_state |= L2CAP_CONF_REQ_SENT; 2424 set_bit(CONF_REQ_SENT, &chan->conf_state);
2221 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2425 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2222 l2cap_build_conf_req(chan, buf), buf); 2426 l2cap_build_conf_req(chan, buf), buf);
2223 chan->num_conf_req++; 2427 chan->num_conf_req++;
@@ -2255,31 +2459,29 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
2255 2459
2256 switch (result) { 2460 switch (result) {
2257 case L2CAP_CR_SUCCESS: 2461 case L2CAP_CR_SUCCESS:
2258 sk->sk_state = BT_CONFIG; 2462 l2cap_state_change(chan, BT_CONFIG);
2259 chan->ident = 0; 2463 chan->ident = 0;
2260 chan->dcid = dcid; 2464 chan->dcid = dcid;
2261 chan->conf_state &= ~L2CAP_CONF_CONNECT_PEND; 2465 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2262 2466
2263 if (chan->conf_state & L2CAP_CONF_REQ_SENT) 2467 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2264 break; 2468 break;
2265 2469
2266 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2267
2268 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2470 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2269 l2cap_build_conf_req(chan, req), req); 2471 l2cap_build_conf_req(chan, req), req);
2270 chan->num_conf_req++; 2472 chan->num_conf_req++;
2271 break; 2473 break;
2272 2474
2273 case L2CAP_CR_PEND: 2475 case L2CAP_CR_PEND:
2274 chan->conf_state |= L2CAP_CONF_CONNECT_PEND; 2476 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2275 break; 2477 break;
2276 2478
2277 default: 2479 default:
2278 /* don't delete l2cap channel if sk is owned by user */ 2480 /* don't delete l2cap channel if sk is owned by user */
2279 if (sock_owned_by_user(sk)) { 2481 if (sock_owned_by_user(sk)) {
2280 sk->sk_state = BT_DISCONN; 2482 l2cap_state_change(chan, BT_DISCONN);
2281 l2cap_sock_clear_timer(sk); 2483 __clear_chan_timer(chan);
2282 l2cap_sock_set_timer(sk, HZ / 5); 2484 __set_chan_timer(chan, HZ / 5);
2283 break; 2485 break;
2284 } 2486 }
2285 2487
@@ -2293,14 +2495,12 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
2293 2495
2294static inline void set_default_fcs(struct l2cap_chan *chan) 2496static inline void set_default_fcs(struct l2cap_chan *chan)
2295{ 2497{
2296 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
2297
2298 /* FCS is enabled only in ERTM or streaming mode, if one or both 2498 /* FCS is enabled only in ERTM or streaming mode, if one or both
2299 * sides request it. 2499 * sides request it.
2300 */ 2500 */
2301 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING) 2501 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2302 chan->fcs = L2CAP_FCS_NONE; 2502 chan->fcs = L2CAP_FCS_NONE;
2303 else if (!(pi->chan->conf_state & L2CAP_CONF_NO_FCS_RECV)) 2503 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2304 chan->fcs = L2CAP_FCS_CRC16; 2504 chan->fcs = L2CAP_FCS_CRC16;
2305} 2505}
2306 2506
@@ -2367,13 +2567,13 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2367 /* Reset config buffer. */ 2567 /* Reset config buffer. */
2368 chan->conf_len = 0; 2568 chan->conf_len = 0;
2369 2569
2370 if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE)) 2570 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2371 goto unlock; 2571 goto unlock;
2372 2572
2373 if (chan->conf_state & L2CAP_CONF_INPUT_DONE) { 2573 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2374 set_default_fcs(chan); 2574 set_default_fcs(chan);
2375 2575
2376 sk->sk_state = BT_CONNECTED; 2576 l2cap_state_change(chan, BT_CONNECTED);
2377 2577
2378 chan->next_tx_seq = 0; 2578 chan->next_tx_seq = 0;
2379 chan->expected_tx_seq = 0; 2579 chan->expected_tx_seq = 0;
@@ -2385,9 +2585,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2385 goto unlock; 2585 goto unlock;
2386 } 2586 }
2387 2587
2388 if (!(chan->conf_state & L2CAP_CONF_REQ_SENT)) { 2588 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2389 u8 buf[64]; 2589 u8 buf[64];
2390 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2391 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2590 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2392 l2cap_build_conf_req(chan, buf), buf); 2591 l2cap_build_conf_req(chan, buf), buf);
2393 chan->num_conf_req++; 2592 chan->num_conf_req++;
@@ -2452,7 +2651,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2452 2651
2453 default: 2652 default:
2454 sk->sk_err = ECONNRESET; 2653 sk->sk_err = ECONNRESET;
2455 l2cap_sock_set_timer(sk, HZ * 5); 2654 __set_chan_timer(chan, HZ * 5);
2456 l2cap_send_disconn_req(conn, chan, ECONNRESET); 2655 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2457 goto done; 2656 goto done;
2458 } 2657 }
@@ -2460,12 +2659,12 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2460 if (flags & 0x01) 2659 if (flags & 0x01)
2461 goto done; 2660 goto done;
2462 2661
2463 chan->conf_state |= L2CAP_CONF_INPUT_DONE; 2662 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2464 2663
2465 if (chan->conf_state & L2CAP_CONF_OUTPUT_DONE) { 2664 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2466 set_default_fcs(chan); 2665 set_default_fcs(chan);
2467 2666
2468 sk->sk_state = BT_CONNECTED; 2667 l2cap_state_change(chan, BT_CONNECTED);
2469 chan->next_tx_seq = 0; 2668 chan->next_tx_seq = 0;
2470 chan->expected_tx_seq = 0; 2669 chan->expected_tx_seq = 0;
2471 skb_queue_head_init(&chan->tx_q); 2670 skb_queue_head_init(&chan->tx_q);
@@ -2507,9 +2706,9 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
2507 2706
2508 /* don't delete l2cap channel if sk is owned by user */ 2707 /* don't delete l2cap channel if sk is owned by user */
2509 if (sock_owned_by_user(sk)) { 2708 if (sock_owned_by_user(sk)) {
2510 sk->sk_state = BT_DISCONN; 2709 l2cap_state_change(chan, BT_DISCONN);
2511 l2cap_sock_clear_timer(sk); 2710 __clear_chan_timer(chan);
2512 l2cap_sock_set_timer(sk, HZ / 5); 2711 __set_chan_timer(chan, HZ / 5);
2513 bh_unlock_sock(sk); 2712 bh_unlock_sock(sk);
2514 return 0; 2713 return 0;
2515 } 2714 }
@@ -2517,7 +2716,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
2517 l2cap_chan_del(chan, ECONNRESET); 2716 l2cap_chan_del(chan, ECONNRESET);
2518 bh_unlock_sock(sk); 2717 bh_unlock_sock(sk);
2519 2718
2520 l2cap_sock_kill(sk); 2719 chan->ops->close(chan->data);
2521 return 0; 2720 return 0;
2522} 2721}
2523 2722
@@ -2541,9 +2740,9 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
2541 2740
2542 /* don't delete l2cap channel if sk is owned by user */ 2741 /* don't delete l2cap channel if sk is owned by user */
2543 if (sock_owned_by_user(sk)) { 2742 if (sock_owned_by_user(sk)) {
2544 sk->sk_state = BT_DISCONN; 2743 l2cap_state_change(chan, BT_DISCONN);
2545 l2cap_sock_clear_timer(sk); 2744 __clear_chan_timer(chan);
2546 l2cap_sock_set_timer(sk, HZ / 5); 2745 __set_chan_timer(chan, HZ / 5);
2547 bh_unlock_sock(sk); 2746 bh_unlock_sock(sk);
2548 return 0; 2747 return 0;
2549 } 2748 }
@@ -2551,7 +2750,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
2551 l2cap_chan_del(chan, 0); 2750 l2cap_chan_del(chan, 0);
2552 bh_unlock_sock(sk); 2751 bh_unlock_sock(sk);
2553 2752
2554 l2cap_sock_kill(sk); 2753 chan->ops->close(chan->data);
2555 return 0; 2754 return 0;
2556} 2755}
2557 2756
@@ -2859,18 +3058,18 @@ static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
2859 3058
2860 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 3059 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2861 3060
2862 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) { 3061 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
2863 control |= L2CAP_SUPER_RCV_NOT_READY; 3062 control |= L2CAP_SUPER_RCV_NOT_READY;
2864 l2cap_send_sframe(chan, control); 3063 l2cap_send_sframe(chan, control);
2865 chan->conn_state |= L2CAP_CONN_RNR_SENT; 3064 set_bit(CONN_RNR_SENT, &chan->conn_state);
2866 } 3065 }
2867 3066
2868 if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY) 3067 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2869 l2cap_retransmit_frames(chan); 3068 l2cap_retransmit_frames(chan);
2870 3069
2871 l2cap_ertm_send(chan); 3070 l2cap_ertm_send(chan);
2872 3071
2873 if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) && 3072 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2874 chan->frames_sent == 0) { 3073 chan->frames_sent == 0) {
2875 control |= L2CAP_SUPER_RCV_READY; 3074 control |= L2CAP_SUPER_RCV_READY;
2876 l2cap_send_sframe(chan, control); 3075 l2cap_send_sframe(chan, control);
@@ -2926,17 +3125,13 @@ static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *sk
2926 3125
2927 switch (control & L2CAP_CTRL_SAR) { 3126 switch (control & L2CAP_CTRL_SAR) {
2928 case L2CAP_SDU_UNSEGMENTED: 3127 case L2CAP_SDU_UNSEGMENTED:
2929 if (chan->conn_state & L2CAP_CONN_SAR_SDU) 3128 if (test_bit(CONN_SAR_SDU, &chan->conn_state))
2930 goto drop; 3129 goto drop;
2931 3130
2932 err = sock_queue_rcv_skb(chan->sk, skb); 3131 return chan->ops->recv(chan->data, skb);
2933 if (!err)
2934 return err;
2935
2936 break;
2937 3132
2938 case L2CAP_SDU_START: 3133 case L2CAP_SDU_START:
2939 if (chan->conn_state & L2CAP_CONN_SAR_SDU) 3134 if (test_bit(CONN_SAR_SDU, &chan->conn_state))
2940 goto drop; 3135 goto drop;
2941 3136
2942 chan->sdu_len = get_unaligned_le16(skb->data); 3137 chan->sdu_len = get_unaligned_le16(skb->data);
@@ -2955,12 +3150,12 @@ static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *sk
2955 3150
2956 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len); 3151 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
2957 3152
2958 chan->conn_state |= L2CAP_CONN_SAR_SDU; 3153 set_bit(CONN_SAR_SDU, &chan->conn_state);
2959 chan->partial_sdu_len = skb->len; 3154 chan->partial_sdu_len = skb->len;
2960 break; 3155 break;
2961 3156
2962 case L2CAP_SDU_CONTINUE: 3157 case L2CAP_SDU_CONTINUE:
2963 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU)) 3158 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
2964 goto disconnect; 3159 goto disconnect;
2965 3160
2966 if (!chan->sdu) 3161 if (!chan->sdu)
@@ -2975,39 +3170,34 @@ static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *sk
2975 break; 3170 break;
2976 3171
2977 case L2CAP_SDU_END: 3172 case L2CAP_SDU_END:
2978 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU)) 3173 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
2979 goto disconnect; 3174 goto disconnect;
2980 3175
2981 if (!chan->sdu) 3176 if (!chan->sdu)
2982 goto disconnect; 3177 goto disconnect;
2983 3178
2984 if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) { 3179 chan->partial_sdu_len += skb->len;
2985 chan->partial_sdu_len += skb->len;
2986 3180
2987 if (chan->partial_sdu_len > chan->imtu) 3181 if (chan->partial_sdu_len > chan->imtu)
2988 goto drop; 3182 goto drop;
2989 3183
2990 if (chan->partial_sdu_len != chan->sdu_len) 3184 if (chan->partial_sdu_len != chan->sdu_len)
2991 goto drop; 3185 goto drop;
2992 3186
2993 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len); 3187 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
2994 }
2995 3188
2996 _skb = skb_clone(chan->sdu, GFP_ATOMIC); 3189 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
2997 if (!_skb) { 3190 if (!_skb) {
2998 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
2999 return -ENOMEM; 3191 return -ENOMEM;
3000 } 3192 }
3001 3193
3002 err = sock_queue_rcv_skb(chan->sk, _skb); 3194 err = chan->ops->recv(chan->data, _skb);
3003 if (err < 0) { 3195 if (err < 0) {
3004 kfree_skb(_skb); 3196 kfree_skb(_skb);
3005 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
3006 return err; 3197 return err;
3007 } 3198 }
3008 3199
3009 chan->conn_state &= ~L2CAP_CONN_SAR_RETRY; 3200 clear_bit(CONN_SAR_SDU, &chan->conn_state);
3010 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3011 3201
3012 kfree_skb(chan->sdu); 3202 kfree_skb(chan->sdu);
3013 break; 3203 break;
@@ -3026,128 +3216,55 @@ disconnect:
3026 return 0; 3216 return 0;
3027} 3217}
3028 3218
3029static int l2cap_try_push_rx_skb(struct l2cap_chan *chan) 3219static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3030{ 3220{
3031 struct sk_buff *skb;
3032 u16 control; 3221 u16 control;
3033 int err;
3034 3222
3035 while ((skb = skb_dequeue(&chan->busy_q))) { 3223 BT_DBG("chan %p, Enter local busy", chan);
3036 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3037 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3038 if (err < 0) {
3039 skb_queue_head(&chan->busy_q, skb);
3040 return -EBUSY;
3041 }
3042
3043 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3044 }
3045 3224
3046 if (!(chan->conn_state & L2CAP_CONN_RNR_SENT)) 3225 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3047 goto done;
3048 3226
3049 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 3227 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3050 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL; 3228 control |= L2CAP_SUPER_RCV_NOT_READY;
3051 l2cap_send_sframe(chan, control); 3229 l2cap_send_sframe(chan, control);
3052 chan->retry_count = 1;
3053
3054 del_timer(&chan->retrans_timer);
3055 __mod_monitor_timer();
3056 3230
3057 chan->conn_state |= L2CAP_CONN_WAIT_F; 3231 set_bit(CONN_RNR_SENT, &chan->conn_state);
3058 3232
3059done: 3233 __clear_ack_timer(chan);
3060 chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3061 chan->conn_state &= ~L2CAP_CONN_RNR_SENT;
3062
3063 BT_DBG("chan %p, Exit local busy", chan);
3064
3065 return 0;
3066} 3234}
3067 3235
3068static void l2cap_busy_work(struct work_struct *work) 3236static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3069{ 3237{
3070 DECLARE_WAITQUEUE(wait, current); 3238 u16 control;
3071 struct l2cap_chan *chan =
3072 container_of(work, struct l2cap_chan, busy_work);
3073 struct sock *sk = chan->sk;
3074 int n_tries = 0, timeo = HZ/5, err;
3075 struct sk_buff *skb;
3076
3077 lock_sock(sk);
3078
3079 add_wait_queue(sk_sleep(sk), &wait);
3080 while ((skb = skb_peek(&chan->busy_q))) {
3081 set_current_state(TASK_INTERRUPTIBLE);
3082
3083 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3084 err = -EBUSY;
3085 l2cap_send_disconn_req(chan->conn, chan, EBUSY);
3086 break;
3087 }
3088
3089 if (!timeo)
3090 timeo = HZ/5;
3091 3239
3092 if (signal_pending(current)) { 3240 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3093 err = sock_intr_errno(timeo); 3241 goto done;
3094 break;
3095 }
3096 3242
3097 release_sock(sk); 3243 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3098 timeo = schedule_timeout(timeo); 3244 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3099 lock_sock(sk); 3245 l2cap_send_sframe(chan, control);
3246 chan->retry_count = 1;
3100 3247
3101 err = sock_error(sk); 3248 __clear_retrans_timer(chan);
3102 if (err) 3249 __set_monitor_timer(chan);
3103 break;
3104 3250
3105 if (l2cap_try_push_rx_skb(chan) == 0) 3251 set_bit(CONN_WAIT_F, &chan->conn_state);
3106 break;
3107 }
3108 3252
3109 set_current_state(TASK_RUNNING); 3253done:
3110 remove_wait_queue(sk_sleep(sk), &wait); 3254 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3255 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3111 3256
3112 release_sock(sk); 3257 BT_DBG("chan %p, Exit local busy", chan);
3113} 3258}
3114 3259
3115static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control) 3260void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3116{ 3261{
3117 int sctrl, err; 3262 if (chan->mode == L2CAP_MODE_ERTM) {
3118 3263 if (busy)
3119 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) { 3264 l2cap_ertm_enter_local_busy(chan);
3120 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT; 3265 else
3121 __skb_queue_tail(&chan->busy_q, skb); 3266 l2cap_ertm_exit_local_busy(chan);
3122 return l2cap_try_push_rx_skb(chan);
3123
3124
3125 }
3126
3127 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3128 if (err >= 0) {
3129 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3130 return err;
3131 } 3267 }
3132
3133 /* Busy Condition */
3134 BT_DBG("chan %p, Enter local busy", chan);
3135
3136 chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3137 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3138 __skb_queue_tail(&chan->busy_q, skb);
3139
3140 sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3141 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3142 l2cap_send_sframe(chan, sctrl);
3143
3144 chan->conn_state |= L2CAP_CONN_RNR_SENT;
3145
3146 del_timer(&chan->ack_timer);
3147
3148 queue_work(_busy_wq, &chan->busy_work);
3149
3150 return err;
3151} 3268}
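
The busy-state rework above collapses the old workqueue machinery into a two-call API: l2cap_chan_busy(chan, 1) enters local busy (an RNR S-frame is sent and no further I-frames are acked), while l2cap_chan_busy(chan, 0) leaves it (RR with the poll bit, retransmission/monitor timers rearmed). A minimal sketch of how a channel user drives it, assuming only the signature shown in this hunk; the helper name is illustrative, and the real callers are l2cap_sock_recv_cb() and l2cap_sock_recvmsg() in the l2cap_sock.c hunks below.

/* Illustrative only: react to receive-buffer pressure by toggling
 * the ERTM local-busy state through the new entry point.
 */
static void example_rx_pressure(struct l2cap_chan *chan, int buffer_full)
{
	if (buffer_full)
		l2cap_chan_busy(chan, 1);	/* send RNR, stop acking */
	else
		l2cap_chan_busy(chan, 0);	/* send RR + poll, resume */
}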
3152 3269
3153static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control) 3270static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
@@ -3162,19 +3279,19 @@ static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buf
3162 3279
3163 switch (control & L2CAP_CTRL_SAR) { 3280 switch (control & L2CAP_CTRL_SAR) {
3164 case L2CAP_SDU_UNSEGMENTED: 3281 case L2CAP_SDU_UNSEGMENTED:
3165 if (chan->conn_state & L2CAP_CONN_SAR_SDU) { 3282 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3166 kfree_skb(chan->sdu); 3283 kfree_skb(chan->sdu);
3167 break; 3284 break;
3168 } 3285 }
3169 3286
3170 err = sock_queue_rcv_skb(chan->sk, skb); 3287 err = chan->ops->recv(chan->data, skb);
3171 if (!err) 3288 if (!err)
3172 return 0; 3289 return 0;
3173 3290
3174 break; 3291 break;
3175 3292
3176 case L2CAP_SDU_START: 3293 case L2CAP_SDU_START:
3177 if (chan->conn_state & L2CAP_CONN_SAR_SDU) { 3294 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3178 kfree_skb(chan->sdu); 3295 kfree_skb(chan->sdu);
3179 break; 3296 break;
3180 } 3297 }
@@ -3195,13 +3312,13 @@ static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buf
3195 3312
3196 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len); 3313 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3197 3314
3198 chan->conn_state |= L2CAP_CONN_SAR_SDU; 3315 set_bit(CONN_SAR_SDU, &chan->conn_state);
3199 chan->partial_sdu_len = skb->len; 3316 chan->partial_sdu_len = skb->len;
3200 err = 0; 3317 err = 0;
3201 break; 3318 break;
3202 3319
3203 case L2CAP_SDU_CONTINUE: 3320 case L2CAP_SDU_CONTINUE:
3204 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU)) 3321 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3205 break; 3322 break;
3206 3323
3207 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len); 3324 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
@@ -3215,12 +3332,12 @@ static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buf
3215 break; 3332 break;
3216 3333
3217 case L2CAP_SDU_END: 3334 case L2CAP_SDU_END:
3218 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU)) 3335 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3219 break; 3336 break;
3220 3337
3221 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len); 3338 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3222 3339
3223 chan->conn_state &= ~L2CAP_CONN_SAR_SDU; 3340 clear_bit(CONN_SAR_SDU, &chan->conn_state);
3224 chan->partial_sdu_len += skb->len; 3341 chan->partial_sdu_len += skb->len;
3225 3342
3226 if (chan->partial_sdu_len > chan->imtu) 3343 if (chan->partial_sdu_len > chan->imtu)
@@ -3228,7 +3345,7 @@ static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buf
3228 3345
3229 if (chan->partial_sdu_len == chan->sdu_len) { 3346 if (chan->partial_sdu_len == chan->sdu_len) {
3230 _skb = skb_clone(chan->sdu, GFP_ATOMIC); 3347 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3231 err = sock_queue_rcv_skb(chan->sk, _skb); 3348 err = chan->ops->recv(chan->data, _skb);
3232 if (err < 0) 3349 if (err < 0)
3233 kfree_skb(_skb); 3350 kfree_skb(_skb);
3234 } 3351 }
@@ -3248,13 +3365,22 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3248 struct sk_buff *skb; 3365 struct sk_buff *skb;
3249 u16 control; 3366 u16 control;
3250 3367
3251 while ((skb = skb_peek(&chan->srej_q))) { 3368 while ((skb = skb_peek(&chan->srej_q)) &&
3369 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3370 int err;
3371
3252 if (bt_cb(skb)->tx_seq != tx_seq) 3372 if (bt_cb(skb)->tx_seq != tx_seq)
3253 break; 3373 break;
3254 3374
3255 skb = skb_dequeue(&chan->srej_q); 3375 skb = skb_dequeue(&chan->srej_q);
3256 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT; 3376 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3257 l2cap_ertm_reassembly_sdu(chan, skb, control); 3377 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3378
3379 if (err < 0) {
3380 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3381 break;
3382 }
3383
3258 chan->buffer_seq_srej = 3384 chan->buffer_seq_srej =
3259 (chan->buffer_seq_srej + 1) % 64; 3385 (chan->buffer_seq_srej + 1) % 64;
3260 tx_seq = (tx_seq + 1) % 64; 3386 tx_seq = (tx_seq + 1) % 64;
@@ -3311,19 +3437,16 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
3311 tx_seq, rx_control); 3437 tx_seq, rx_control);
3312 3438
3313 if (L2CAP_CTRL_FINAL & rx_control && 3439 if (L2CAP_CTRL_FINAL & rx_control &&
3314 chan->conn_state & L2CAP_CONN_WAIT_F) { 3440 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3315 del_timer(&chan->monitor_timer); 3441 __clear_monitor_timer(chan);
3316 if (chan->unacked_frames > 0) 3442 if (chan->unacked_frames > 0)
3317 __mod_retrans_timer(); 3443 __set_retrans_timer(chan);
3318 chan->conn_state &= ~L2CAP_CONN_WAIT_F; 3444 clear_bit(CONN_WAIT_F, &chan->conn_state);
3319 } 3445 }
3320 3446
3321 chan->expected_ack_seq = req_seq; 3447 chan->expected_ack_seq = req_seq;
3322 l2cap_drop_acked_frames(chan); 3448 l2cap_drop_acked_frames(chan);
3323 3449
3324 if (tx_seq == chan->expected_tx_seq)
3325 goto expected;
3326
3327 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64; 3450 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3328 if (tx_seq_offset < 0) 3451 if (tx_seq_offset < 0)
3329 tx_seq_offset += 64; 3452 tx_seq_offset += 64;
@@ -3334,10 +3457,13 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
3334 goto drop; 3457 goto drop;
3335 } 3458 }
3336 3459
3337 if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY) 3460 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3338 goto drop; 3461 goto drop;
3339 3462
3340 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) { 3463 if (tx_seq == chan->expected_tx_seq)
3464 goto expected;
3465
3466 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3341 struct srej_list *first; 3467 struct srej_list *first;
3342 3468
3343 first = list_first_entry(&chan->srej_l, 3469 first = list_first_entry(&chan->srej_l,
@@ -3351,7 +3477,7 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
3351 3477
3352 if (list_empty(&chan->srej_l)) { 3478 if (list_empty(&chan->srej_l)) {
3353 chan->buffer_seq = chan->buffer_seq_srej; 3479 chan->buffer_seq = chan->buffer_seq_srej;
3354 chan->conn_state &= ~L2CAP_CONN_SREJ_SENT; 3480 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3355 l2cap_send_ack(chan); 3481 l2cap_send_ack(chan);
3356 BT_DBG("chan %p, Exit SREJ_SENT", chan); 3482 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3357 } 3483 }
@@ -3380,7 +3506,7 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
3380 if (tx_seq_offset < expected_tx_seq_offset) 3506 if (tx_seq_offset < expected_tx_seq_offset)
3381 goto drop; 3507 goto drop;
3382 3508
3383 chan->conn_state |= L2CAP_CONN_SREJ_SENT; 3509 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3384 3510
3385 BT_DBG("chan %p, Enter SREJ", chan); 3511 BT_DBG("chan %p, Enter SREJ", chan);
3386 3512
@@ -3388,39 +3514,39 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
3388 chan->buffer_seq_srej = chan->buffer_seq; 3514 chan->buffer_seq_srej = chan->buffer_seq;
3389 3515
3390 __skb_queue_head_init(&chan->srej_q); 3516 __skb_queue_head_init(&chan->srej_q);
3391 __skb_queue_head_init(&chan->busy_q);
3392 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar); 3517 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3393 3518
3394 chan->conn_state |= L2CAP_CONN_SEND_PBIT; 3519 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3395 3520
3396 l2cap_send_srejframe(chan, tx_seq); 3521 l2cap_send_srejframe(chan, tx_seq);
3397 3522
3398 del_timer(&chan->ack_timer); 3523 __clear_ack_timer(chan);
3399 } 3524 }
3400 return 0; 3525 return 0;
3401 3526
3402expected: 3527expected:
3403 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64; 3528 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3404 3529
3405 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) { 3530 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3406 bt_cb(skb)->tx_seq = tx_seq; 3531 bt_cb(skb)->tx_seq = tx_seq;
3407 bt_cb(skb)->sar = sar; 3532 bt_cb(skb)->sar = sar;
3408 __skb_queue_tail(&chan->srej_q, skb); 3533 __skb_queue_tail(&chan->srej_q, skb);
3409 return 0; 3534 return 0;
3410 } 3535 }
3411 3536
3412 err = l2cap_push_rx_skb(chan, skb, rx_control); 3537 err = l2cap_ertm_reassembly_sdu(chan, skb, rx_control);
3413 if (err < 0) 3538 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3414 return 0; 3539 if (err < 0) {
3540 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3541 return err;
3542 }
3415 3543
3416 if (rx_control & L2CAP_CTRL_FINAL) { 3544 if (rx_control & L2CAP_CTRL_FINAL) {
3417 if (chan->conn_state & L2CAP_CONN_REJ_ACT) 3545 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3418 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3419 else
3420 l2cap_retransmit_frames(chan); 3546 l2cap_retransmit_frames(chan);
3421 } 3547 }
3422 3548
3423 __mod_ack_timer(); 3549 __set_ack_timer(chan);
3424 3550
3425 chan->num_acked = (chan->num_acked + 1) % num_to_ack; 3551 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3426 if (chan->num_acked == num_to_ack - 1) 3552 if (chan->num_acked == num_to_ack - 1)
@@ -3442,33 +3568,31 @@ static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_co
3442 l2cap_drop_acked_frames(chan); 3568 l2cap_drop_acked_frames(chan);
3443 3569
3444 if (rx_control & L2CAP_CTRL_POLL) { 3570 if (rx_control & L2CAP_CTRL_POLL) {
3445 chan->conn_state |= L2CAP_CONN_SEND_FBIT; 3571 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3446 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) { 3572 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3447 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) && 3573 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3448 (chan->unacked_frames > 0)) 3574 (chan->unacked_frames > 0))
3449 __mod_retrans_timer(); 3575 __set_retrans_timer(chan);
3450 3576
3451 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3577 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3452 l2cap_send_srejtail(chan); 3578 l2cap_send_srejtail(chan);
3453 } else { 3579 } else {
3454 l2cap_send_i_or_rr_or_rnr(chan); 3580 l2cap_send_i_or_rr_or_rnr(chan);
3455 } 3581 }
3456 3582
3457 } else if (rx_control & L2CAP_CTRL_FINAL) { 3583 } else if (rx_control & L2CAP_CTRL_FINAL) {
3458 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3584 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3459 3585
3460 if (chan->conn_state & L2CAP_CONN_REJ_ACT) 3586 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3461 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3462 else
3463 l2cap_retransmit_frames(chan); 3587 l2cap_retransmit_frames(chan);
3464 3588
3465 } else { 3589 } else {
3466 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) && 3590 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3467 (chan->unacked_frames > 0)) 3591 (chan->unacked_frames > 0))
3468 __mod_retrans_timer(); 3592 __set_retrans_timer(chan);
3469 3593
3470 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3594 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3471 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) 3595 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3472 l2cap_send_ack(chan); 3596 l2cap_send_ack(chan);
3473 else 3597 else
3474 l2cap_ertm_send(chan); 3598 l2cap_ertm_send(chan);
@@ -3481,21 +3605,19 @@ static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_c
3481 3605
3482 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control); 3606 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3483 3607
3484 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3608 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3485 3609
3486 chan->expected_ack_seq = tx_seq; 3610 chan->expected_ack_seq = tx_seq;
3487 l2cap_drop_acked_frames(chan); 3611 l2cap_drop_acked_frames(chan);
3488 3612
3489 if (rx_control & L2CAP_CTRL_FINAL) { 3613 if (rx_control & L2CAP_CTRL_FINAL) {
3490 if (chan->conn_state & L2CAP_CONN_REJ_ACT) 3614 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3491 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3492 else
3493 l2cap_retransmit_frames(chan); 3615 l2cap_retransmit_frames(chan);
3494 } else { 3616 } else {
3495 l2cap_retransmit_frames(chan); 3617 l2cap_retransmit_frames(chan);
3496 3618
3497 if (chan->conn_state & L2CAP_CONN_WAIT_F) 3619 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3498 chan->conn_state |= L2CAP_CONN_REJ_ACT; 3620 set_bit(CONN_REJ_ACT, &chan->conn_state);
3499 } 3621 }
3500} 3622}
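
The same conversion recurs throughout these hunks: open-coded reads and read-modify-writes on the conf_state/conn_state flag words become atomic bitops, which also lets a separate check-then-clear pair collapse into a single call. An illustrative before/after, restating the REJ_ACT handling just above:

/* Before: non-atomic check and clear; the two steps can race with
 * another context touching conn_state.
 */
if (chan->conn_state & L2CAP_CONN_REJ_ACT)
	chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
else
	l2cap_retransmit_frames(chan);

/* After: test_and_clear_bit() returns the previous bit value and
 * clears it in one atomic step, so the test and the clear can no
 * longer be interleaved.
 */
if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
	l2cap_retransmit_frames(chan);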
3501static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control) 3623static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
@@ -3504,32 +3626,32 @@ static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_
3504 3626
3505 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control); 3627 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3506 3628
3507 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3629 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3508 3630
3509 if (rx_control & L2CAP_CTRL_POLL) { 3631 if (rx_control & L2CAP_CTRL_POLL) {
3510 chan->expected_ack_seq = tx_seq; 3632 chan->expected_ack_seq = tx_seq;
3511 l2cap_drop_acked_frames(chan); 3633 l2cap_drop_acked_frames(chan);
3512 3634
3513 chan->conn_state |= L2CAP_CONN_SEND_FBIT; 3635 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3514 l2cap_retransmit_one_frame(chan, tx_seq); 3636 l2cap_retransmit_one_frame(chan, tx_seq);
3515 3637
3516 l2cap_ertm_send(chan); 3638 l2cap_ertm_send(chan);
3517 3639
3518 if (chan->conn_state & L2CAP_CONN_WAIT_F) { 3640 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3519 chan->srej_save_reqseq = tx_seq; 3641 chan->srej_save_reqseq = tx_seq;
3520 chan->conn_state |= L2CAP_CONN_SREJ_ACT; 3642 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3521 } 3643 }
3522 } else if (rx_control & L2CAP_CTRL_FINAL) { 3644 } else if (rx_control & L2CAP_CTRL_FINAL) {
3523 if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) && 3645 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3524 chan->srej_save_reqseq == tx_seq) 3646 chan->srej_save_reqseq == tx_seq)
3525 chan->conn_state &= ~L2CAP_CONN_SREJ_ACT; 3647 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3526 else 3648 else
3527 l2cap_retransmit_one_frame(chan, tx_seq); 3649 l2cap_retransmit_one_frame(chan, tx_seq);
3528 } else { 3650 } else {
3529 l2cap_retransmit_one_frame(chan, tx_seq); 3651 l2cap_retransmit_one_frame(chan, tx_seq);
3530 if (chan->conn_state & L2CAP_CONN_WAIT_F) { 3652 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3531 chan->srej_save_reqseq = tx_seq; 3653 chan->srej_save_reqseq = tx_seq;
3532 chan->conn_state |= L2CAP_CONN_SREJ_ACT; 3654 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3533 } 3655 }
3534 } 3656 }
3535} 3657}
@@ -3540,15 +3662,15 @@ static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_c
3540 3662
3541 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control); 3663 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3542 3664
3543 chan->conn_state |= L2CAP_CONN_REMOTE_BUSY; 3665 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3544 chan->expected_ack_seq = tx_seq; 3666 chan->expected_ack_seq = tx_seq;
3545 l2cap_drop_acked_frames(chan); 3667 l2cap_drop_acked_frames(chan);
3546 3668
3547 if (rx_control & L2CAP_CTRL_POLL) 3669 if (rx_control & L2CAP_CTRL_POLL)
3548 chan->conn_state |= L2CAP_CONN_SEND_FBIT; 3670 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3549 3671
3550 if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) { 3672 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3551 del_timer(&chan->retrans_timer); 3673 __clear_retrans_timer(chan);
3552 if (rx_control & L2CAP_CTRL_POLL) 3674 if (rx_control & L2CAP_CTRL_POLL)
3553 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL); 3675 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3554 return; 3676 return;
@@ -3565,11 +3687,11 @@ static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_cont
3565 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len); 3687 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3566 3688
3567 if (L2CAP_CTRL_FINAL & rx_control && 3689 if (L2CAP_CTRL_FINAL & rx_control &&
3568 chan->conn_state & L2CAP_CONN_WAIT_F) { 3690 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3569 del_timer(&chan->monitor_timer); 3691 __clear_monitor_timer(chan);
3570 if (chan->unacked_frames > 0) 3692 if (chan->unacked_frames > 0)
3571 __mod_retrans_timer(); 3693 __set_retrans_timer(chan);
3572 chan->conn_state &= ~L2CAP_CONN_WAIT_F; 3694 clear_bit(CONN_WAIT_F, &chan->conn_state);
3573 } 3695 }
3574 3696
3575 switch (rx_control & L2CAP_CTRL_SUPERVISE) { 3697 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
@@ -3668,7 +3790,6 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
3668{ 3790{
3669 struct l2cap_chan *chan; 3791 struct l2cap_chan *chan;
3670 struct sock *sk = NULL; 3792 struct sock *sk = NULL;
3671 struct l2cap_pinfo *pi;
3672 u16 control; 3793 u16 control;
3673 u8 tx_seq; 3794 u8 tx_seq;
3674 int len; 3795 int len;
@@ -3680,11 +3801,10 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
3680 } 3801 }
3681 3802
3682 sk = chan->sk; 3803 sk = chan->sk;
3683 pi = l2cap_pi(sk);
3684 3804
3685 BT_DBG("chan %p, len %d", chan, skb->len); 3805 BT_DBG("chan %p, len %d", chan, skb->len);
3686 3806
3687 if (sk->sk_state != BT_CONNECTED) 3807 if (chan->state != BT_CONNECTED)
3688 goto drop; 3808 goto drop;
3689 3809
3690 switch (chan->mode) { 3810 switch (chan->mode) {
@@ -3697,7 +3817,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
3697 if (chan->imtu < skb->len) 3817 if (chan->imtu < skb->len)
3698 goto drop; 3818 goto drop;
3699 3819
3700 if (!sock_queue_rcv_skb(sk, skb)) 3820 if (!chan->ops->recv(chan->data, skb))
3701 goto done; 3821 goto done;
3702 break; 3822 break;
3703 3823
@@ -3769,13 +3889,13 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
3769 3889
3770 BT_DBG("sk %p, len %d", sk, skb->len); 3890 BT_DBG("sk %p, len %d", sk, skb->len);
3771 3891
3772 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED) 3892 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3773 goto drop; 3893 goto drop;
3774 3894
3775 if (l2cap_pi(sk)->chan->imtu < skb->len) 3895 if (chan->imtu < skb->len)
3776 goto drop; 3896 goto drop;
3777 3897
3778 if (!sock_queue_rcv_skb(sk, skb)) 3898 if (!chan->ops->recv(chan->data, skb))
3779 goto done; 3899 goto done;
3780 3900
3781drop: 3901drop:
@@ -3802,13 +3922,13 @@ static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct
3802 3922
3803 BT_DBG("sk %p, len %d", sk, skb->len); 3923 BT_DBG("sk %p, len %d", sk, skb->len);
3804 3924
3805 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED) 3925 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3806 goto drop; 3926 goto drop;
3807 3927
3808 if (l2cap_pi(sk)->chan->imtu < skb->len) 3928 if (chan->imtu < skb->len)
3809 goto drop; 3929 goto drop;
3810 3930
3811 if (!sock_queue_rcv_skb(sk, skb)) 3931 if (!chan->ops->recv(chan->data, skb))
3812 goto done; 3932 goto done;
3813 3933
3814drop: 3934drop:
@@ -3853,6 +3973,11 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3853 l2cap_att_channel(conn, cid, skb); 3973 l2cap_att_channel(conn, cid, skb);
3854 break; 3974 break;
3855 3975
3976 case L2CAP_CID_SMP:
3977 if (smp_sig_channel(conn, skb))
3978 l2cap_conn_del(conn->hcon, EACCES);
3979 break;
3980
3856 default: 3981 default:
3857 l2cap_data_channel(conn, cid, skb); 3982 l2cap_data_channel(conn, cid, skb);
3858 break; 3983 break;
@@ -3876,7 +4001,7 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3876 list_for_each_entry(c, &chan_list, global_l) { 4001 list_for_each_entry(c, &chan_list, global_l) {
3877 struct sock *sk = c->sk; 4002 struct sock *sk = c->sk;
3878 4003
3879 if (sk->sk_state != BT_LISTEN) 4004 if (c->state != BT_LISTEN)
3880 continue; 4005 continue;
3881 4006
3882 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) { 4007 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
@@ -3909,7 +4034,7 @@ static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3909 if (conn) 4034 if (conn)
3910 l2cap_conn_ready(conn); 4035 l2cap_conn_ready(conn);
3911 } else 4036 } else
3912 l2cap_conn_del(hcon, bt_err(status)); 4037 l2cap_conn_del(hcon, bt_to_errno(status));
3913 4038
3914 return 0; 4039 return 0;
3915} 4040}
@@ -3920,7 +4045,7 @@ static int l2cap_disconn_ind(struct hci_conn *hcon)
3920 4045
3921 BT_DBG("hcon %p", hcon); 4046 BT_DBG("hcon %p", hcon);
3922 4047
3923 if (hcon->type != ACL_LINK || !conn) 4048 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
3924 return 0x13; 4049 return 0x13;
3925 4050
3926 return conn->disc_reason; 4051 return conn->disc_reason;
@@ -3933,27 +4058,25 @@ static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3933 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK)) 4058 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3934 return -EINVAL; 4059 return -EINVAL;
3935 4060
3936 l2cap_conn_del(hcon, bt_err(reason)); 4061 l2cap_conn_del(hcon, bt_to_errno(reason));
3937 4062
3938 return 0; 4063 return 0;
3939} 4064}
3940 4065
3941static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt) 4066static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
3942{ 4067{
3943 struct sock *sk = chan->sk; 4068 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
3944
3945 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3946 return; 4069 return;
3947 4070
3948 if (encrypt == 0x00) { 4071 if (encrypt == 0x00) {
3949 if (chan->sec_level == BT_SECURITY_MEDIUM) { 4072 if (chan->sec_level == BT_SECURITY_MEDIUM) {
3950 l2cap_sock_clear_timer(sk); 4073 __clear_chan_timer(chan);
3951 l2cap_sock_set_timer(sk, HZ * 5); 4074 __set_chan_timer(chan, HZ * 5);
3952 } else if (chan->sec_level == BT_SECURITY_HIGH) 4075 } else if (chan->sec_level == BT_SECURITY_HIGH)
3953 __l2cap_sock_close(sk, ECONNREFUSED); 4076 l2cap_chan_close(chan, ECONNREFUSED);
3954 } else { 4077 } else {
3955 if (chan->sec_level == BT_SECURITY_MEDIUM) 4078 if (chan->sec_level == BT_SECURITY_MEDIUM)
3956 l2cap_sock_clear_timer(sk); 4079 __clear_chan_timer(chan);
3957 } 4080 }
3958} 4081}
3959 4082
@@ -3974,34 +4097,48 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3974 4097
3975 bh_lock_sock(sk); 4098 bh_lock_sock(sk);
3976 4099
3977 if (chan->conf_state & L2CAP_CONF_CONNECT_PEND) { 4100 BT_DBG("chan->scid %d", chan->scid);
4101
4102 if (chan->scid == L2CAP_CID_LE_DATA) {
4103 if (!status && encrypt) {
4104 chan->sec_level = hcon->sec_level;
4105 del_timer(&conn->security_timer);
4106 l2cap_chan_ready(sk);
4107 smp_distribute_keys(conn, 0);
4108 }
4109
4110 bh_unlock_sock(sk);
4111 continue;
4112 }
4113
4114 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
3978 bh_unlock_sock(sk); 4115 bh_unlock_sock(sk);
3979 continue; 4116 continue;
3980 } 4117 }
3981 4118
3982 if (!status && (sk->sk_state == BT_CONNECTED || 4119 if (!status && (chan->state == BT_CONNECTED ||
3983 sk->sk_state == BT_CONFIG)) { 4120 chan->state == BT_CONFIG)) {
3984 l2cap_check_encryption(chan, encrypt); 4121 l2cap_check_encryption(chan, encrypt);
3985 bh_unlock_sock(sk); 4122 bh_unlock_sock(sk);
3986 continue; 4123 continue;
3987 } 4124 }
3988 4125
3989 if (sk->sk_state == BT_CONNECT) { 4126 if (chan->state == BT_CONNECT) {
3990 if (!status) { 4127 if (!status) {
3991 struct l2cap_conn_req req; 4128 struct l2cap_conn_req req;
3992 req.scid = cpu_to_le16(chan->scid); 4129 req.scid = cpu_to_le16(chan->scid);
3993 req.psm = chan->psm; 4130 req.psm = chan->psm;
3994 4131
3995 chan->ident = l2cap_get_ident(conn); 4132 chan->ident = l2cap_get_ident(conn);
3996 chan->conf_state |= L2CAP_CONF_CONNECT_PEND; 4133 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3997 4134
3998 l2cap_send_cmd(conn, chan->ident, 4135 l2cap_send_cmd(conn, chan->ident,
3999 L2CAP_CONN_REQ, sizeof(req), &req); 4136 L2CAP_CONN_REQ, sizeof(req), &req);
4000 } else { 4137 } else {
4001 l2cap_sock_clear_timer(sk); 4138 __clear_chan_timer(chan);
4002 l2cap_sock_set_timer(sk, HZ / 10); 4139 __set_chan_timer(chan, HZ / 10);
4003 } 4140 }
4004 } else if (sk->sk_state == BT_CONNECT2) { 4141 } else if (chan->state == BT_CONNECT2) {
4005 struct l2cap_conn_rsp rsp; 4142 struct l2cap_conn_rsp rsp;
4006 __u16 res, stat; 4143 __u16 res, stat;
4007 4144
@@ -4013,13 +4150,13 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4013 if (parent) 4150 if (parent)
4014 parent->sk_data_ready(parent, 0); 4151 parent->sk_data_ready(parent, 0);
4015 } else { 4152 } else {
4016 sk->sk_state = BT_CONFIG; 4153 l2cap_state_change(chan, BT_CONFIG);
4017 res = L2CAP_CR_SUCCESS; 4154 res = L2CAP_CR_SUCCESS;
4018 stat = L2CAP_CS_NO_INFO; 4155 stat = L2CAP_CS_NO_INFO;
4019 } 4156 }
4020 } else { 4157 } else {
4021 sk->sk_state = BT_DISCONN; 4158 l2cap_state_change(chan, BT_DISCONN);
4022 l2cap_sock_set_timer(sk, HZ / 10); 4159 __set_chan_timer(chan, HZ / 10);
4023 res = L2CAP_CR_SEC_BLOCK; 4160 res = L2CAP_CR_SEC_BLOCK;
4024 stat = L2CAP_CS_NO_INFO; 4161 stat = L2CAP_CS_NO_INFO;
4025 } 4162 }
@@ -4163,7 +4300,7 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)
4163 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n", 4300 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4164 batostr(&bt_sk(sk)->src), 4301 batostr(&bt_sk(sk)->src),
4165 batostr(&bt_sk(sk)->dst), 4302 batostr(&bt_sk(sk)->dst),
4166 sk->sk_state, __le16_to_cpu(c->psm), 4303 c->state, __le16_to_cpu(c->psm),
4167 c->scid, c->dcid, c->imtu, c->omtu, 4304 c->scid, c->dcid, c->imtu, c->omtu,
4168 c->sec_level, c->mode); 4305 c->sec_level, c->mode);
4169 } 4306 }
@@ -4206,12 +4343,6 @@ int __init l2cap_init(void)
4206 if (err < 0) 4343 if (err < 0)
4207 return err; 4344 return err;
4208 4345
4209 _busy_wq = create_singlethread_workqueue("l2cap");
4210 if (!_busy_wq) {
4211 err = -ENOMEM;
4212 goto error;
4213 }
4214
4215 err = hci_register_proto(&l2cap_hci_proto); 4346 err = hci_register_proto(&l2cap_hci_proto);
4216 if (err < 0) { 4347 if (err < 0) {
4217 BT_ERR("L2CAP protocol registration failed"); 4348 BT_ERR("L2CAP protocol registration failed");
@@ -4229,7 +4360,6 @@ int __init l2cap_init(void)
4229 return 0; 4360 return 0;
4230 4361
4231error: 4362error:
4232 destroy_workqueue(_busy_wq);
4233 l2cap_cleanup_sockets(); 4363 l2cap_cleanup_sockets();
4234 return err; 4364 return err;
4235} 4365}
@@ -4238,9 +4368,6 @@ void l2cap_exit(void)
4238{ 4368{
4239 debugfs_remove(l2cap_debugfs); 4369 debugfs_remove(l2cap_debugfs);
4240 4370
4241 flush_workqueue(_busy_wq);
4242 destroy_workqueue(_busy_wq);
4243
4244 if (hci_unregister_proto(&l2cap_hci_proto) < 0) 4371 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4245 BT_ERR("L2CAP protocol unregistration failed"); 4372 BT_ERR("L2CAP protocol unregistration failed");
4246 4373
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 8248303f44e..61f1f623091 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -29,54 +29,11 @@
29#include <net/bluetooth/bluetooth.h> 29#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h> 30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/l2cap.h> 31#include <net/bluetooth/l2cap.h>
32#include <net/bluetooth/smp.h>
32 33
33static const struct proto_ops l2cap_sock_ops; 34static const struct proto_ops l2cap_sock_ops;
34 35static void l2cap_sock_init(struct sock *sk, struct sock *parent);
35/* ---- L2CAP timers ---- */ 36static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio);
36static void l2cap_sock_timeout(unsigned long arg)
37{
38 struct sock *sk = (struct sock *) arg;
39 int reason;
40
41 BT_DBG("sock %p state %d", sk, sk->sk_state);
42
43 bh_lock_sock(sk);
44
45 if (sock_owned_by_user(sk)) {
46 /* sk is owned by user. Try again later */
47 l2cap_sock_set_timer(sk, HZ / 5);
48 bh_unlock_sock(sk);
49 sock_put(sk);
50 return;
51 }
52
53 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
54 reason = ECONNREFUSED;
55 else if (sk->sk_state == BT_CONNECT &&
56 l2cap_pi(sk)->chan->sec_level != BT_SECURITY_SDP)
57 reason = ECONNREFUSED;
58 else
59 reason = ETIMEDOUT;
60
61 __l2cap_sock_close(sk, reason);
62
63 bh_unlock_sock(sk);
64
65 l2cap_sock_kill(sk);
66 sock_put(sk);
67}
68
69void l2cap_sock_set_timer(struct sock *sk, long timeout)
70{
71 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
72 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
73}
74
75void l2cap_sock_clear_timer(struct sock *sk)
76{
77 BT_DBG("sock %p state %d", sk, sk->sk_state);
78 sk_stop_timer(sk, &sk->sk_timer);
79}
80 37
81static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) 38static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
82{ 39{
@@ -133,6 +90,8 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
133 chan->sec_level = BT_SECURITY_SDP; 90 chan->sec_level = BT_SECURITY_SDP;
134 91
135 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr); 92 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
93
94 chan->state = BT_BOUND;
136 sk->sk_state = BT_BOUND; 95 sk->sk_state = BT_BOUND;
137 96
138done: 97done:
@@ -162,7 +121,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
162 121
163 lock_sock(sk); 122 lock_sock(sk);
164 123
165 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) 124 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED
166 && !(la.l2_psm || la.l2_cid)) { 125 && !(la.l2_psm || la.l2_cid)) {
167 err = -EINVAL; 126 err = -EINVAL;
168 goto done; 127 goto done;
@@ -204,8 +163,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
204 } 163 }
205 164
206 /* PSM must be odd and lsb of upper byte must be 0 */ 165 /* PSM must be odd and lsb of upper byte must be 0 */
207 if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 && 166 if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 && !la.l2_cid &&
208 sk->sk_type != SOCK_RAW && !la.l2_cid) { 167 chan->chan_type != L2CAP_CHAN_RAW) {
209 err = -EINVAL; 168 err = -EINVAL;
210 goto done; 169 goto done;
211 } 170 }
@@ -258,6 +217,8 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
258 217
259 sk->sk_max_ack_backlog = backlog; 218 sk->sk_max_ack_backlog = backlog;
260 sk->sk_ack_backlog = 0; 219 sk->sk_ack_backlog = 0;
220
221 chan->state = BT_LISTEN;
261 sk->sk_state = BT_LISTEN; 222 sk->sk_state = BT_LISTEN;
262 223
263done: 224done:
@@ -274,30 +235,26 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
274 235
275 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); 236 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
276 237
277 if (sk->sk_state != BT_LISTEN) {
278 err = -EBADFD;
279 goto done;
280 }
281
282 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 238 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
283 239
284 BT_DBG("sk %p timeo %ld", sk, timeo); 240 BT_DBG("sk %p timeo %ld", sk, timeo);
285 241
286 /* Wait for an incoming connection. (wake-one). */ 242 /* Wait for an incoming connection. (wake-one). */
287 add_wait_queue_exclusive(sk_sleep(sk), &wait); 243 add_wait_queue_exclusive(sk_sleep(sk), &wait);
288 while (!(nsk = bt_accept_dequeue(sk, newsock))) { 244 while (1) {
289 set_current_state(TASK_INTERRUPTIBLE); 245 set_current_state(TASK_INTERRUPTIBLE);
290 if (!timeo) { 246
291 err = -EAGAIN; 247 if (sk->sk_state != BT_LISTEN) {
248 err = -EBADFD;
292 break; 249 break;
293 } 250 }
294 251
295 release_sock(sk); 252 nsk = bt_accept_dequeue(sk, newsock);
296 timeo = schedule_timeout(timeo); 253 if (nsk)
297 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); 254 break;
298 255
299 if (sk->sk_state != BT_LISTEN) { 256 if (!timeo) {
300 err = -EBADFD; 257 err = -EAGAIN;
301 break; 258 break;
302 } 259 }
303 260
@@ -305,8 +262,12 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
305 err = sock_intr_errno(timeo); 262 err = sock_intr_errno(timeo);
306 break; 263 break;
307 } 264 }
265
266 release_sock(sk);
267 timeo = schedule_timeout(timeo);
268 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
308 } 269 }
309 set_current_state(TASK_RUNNING); 270 __set_current_state(TASK_RUNNING);
310 remove_wait_queue(sk_sleep(sk), &wait); 271 remove_wait_queue(sk_sleep(sk), &wait);
311 272
312 if (err) 273 if (err)
@@ -437,6 +398,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
437 struct sock *sk = sock->sk; 398 struct sock *sk = sock->sk;
438 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 399 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
439 struct bt_security sec; 400 struct bt_security sec;
401 struct bt_power pwr;
440 int len, err = 0; 402 int len, err = 0;
441 403
442 BT_DBG("sk %p", sk); 404 BT_DBG("sk %p", sk);
@@ -454,14 +416,18 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
454 416
455 switch (optname) { 417 switch (optname) {
456 case BT_SECURITY: 418 case BT_SECURITY:
457 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM 419 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
458 && sk->sk_type != SOCK_RAW) { 420 chan->chan_type != L2CAP_CHAN_RAW) {
459 err = -EINVAL; 421 err = -EINVAL;
460 break; 422 break;
461 } 423 }
462 424
425 memset(&sec, 0, sizeof(sec));
463 sec.level = chan->sec_level; 426 sec.level = chan->sec_level;
464 427
428 if (sk->sk_state == BT_CONNECTED)
429 sec.key_size = chan->conn->hcon->enc_key_size;
430
465 len = min_t(unsigned int, len, sizeof(sec)); 431 len = min_t(unsigned int, len, sizeof(sec));
466 if (copy_to_user(optval, (char *) &sec, len)) 432 if (copy_to_user(optval, (char *) &sec, len))
467 err = -EFAULT; 433 err = -EFAULT;
@@ -485,6 +451,21 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
485 451
486 break; 452 break;
487 453
454 case BT_POWER:
455 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
456 && sk->sk_type != SOCK_RAW) {
457 err = -EINVAL;
458 break;
459 }
460
461 pwr.force_active = chan->force_active;
462
463 len = min_t(unsigned int, len, sizeof(pwr));
464 if (copy_to_user(optval, (char *) &pwr, len))
465 err = -EFAULT;
466
467 break;
468
488 default: 469 default:
489 err = -ENOPROTOOPT; 470 err = -ENOPROTOOPT;
490 break; 471 break;
@@ -535,7 +516,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
535 chan->mode = opts.mode; 516 chan->mode = opts.mode;
536 switch (chan->mode) { 517 switch (chan->mode) {
537 case L2CAP_MODE_BASIC: 518 case L2CAP_MODE_BASIC:
538 chan->conf_state &= ~L2CAP_CONF_STATE2_DEVICE; 519 clear_bit(CONF_STATE2_DEVICE, &chan->conf_state);
539 break; 520 break;
540 case L2CAP_MODE_ERTM: 521 case L2CAP_MODE_ERTM:
541 case L2CAP_MODE_STREAMING: 522 case L2CAP_MODE_STREAMING:
@@ -585,6 +566,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
585 struct sock *sk = sock->sk; 566 struct sock *sk = sock->sk;
586 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 567 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
587 struct bt_security sec; 568 struct bt_security sec;
569 struct bt_power pwr;
570 struct l2cap_conn *conn;
588 int len, err = 0; 571 int len, err = 0;
589 u32 opt; 572 u32 opt;
590 573
@@ -600,8 +583,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
600 583
601 switch (optname) { 584 switch (optname) {
602 case BT_SECURITY: 585 case BT_SECURITY:
603 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM 586 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
604 && sk->sk_type != SOCK_RAW) { 587 chan->chan_type != L2CAP_CHAN_RAW) {
605 err = -EINVAL; 588 err = -EINVAL;
606 break; 589 break;
607 } 590 }
@@ -621,6 +604,20 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
621 } 604 }
622 605
623 chan->sec_level = sec.level; 606 chan->sec_level = sec.level;
607
608 conn = chan->conn;
609 if (conn && chan->scid == L2CAP_CID_LE_DATA) {
610 if (!conn->hcon->out) {
611 err = -EINVAL;
612 break;
613 }
614
615 if (smp_conn_security(conn, sec.level))
616 break;
617
618 err = 0;
619 sk->sk_state = BT_CONFIG;
620 }
624 break; 621 break;
625 622
626 case BT_DEFER_SETUP: 623 case BT_DEFER_SETUP:
@@ -661,6 +658,23 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
661 chan->flushable = opt; 658 chan->flushable = opt;
662 break; 659 break;
663 660
661 case BT_POWER:
662 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
663 chan->chan_type != L2CAP_CHAN_RAW) {
664 err = -EINVAL;
665 break;
666 }
667
668 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
669
670 len = min_t(unsigned int, sizeof(pwr), optlen);
671 if (copy_from_user((char *) &pwr, optval, len)) {
672 err = -EFAULT;
673 break;
674 }
675 chan->force_active = pwr.force_active;
676 break;
677
664 default: 678 default:
665 err = -ENOPROTOOPT; 679 err = -ENOPROTOOPT;
666 break; 680 break;
@@ -674,8 +688,6 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
674{ 688{
675 struct sock *sk = sock->sk; 689 struct sock *sk = sock->sk;
676 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 690 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
677 struct sk_buff *skb;
678 u16 control;
679 int err; 691 int err;
680 692
681 BT_DBG("sock %p, sk %p", sock, sk); 693 BT_DBG("sock %p, sk %p", sock, sk);
@@ -690,87 +702,12 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
690 lock_sock(sk); 702 lock_sock(sk);
691 703
692 if (sk->sk_state != BT_CONNECTED) { 704 if (sk->sk_state != BT_CONNECTED) {
693 err = -ENOTCONN; 705 release_sock(sk);
694 goto done; 706 return -ENOTCONN;
695 }
696
697 /* Connectionless channel */
698 if (sk->sk_type == SOCK_DGRAM) {
699 skb = l2cap_create_connless_pdu(chan, msg, len);
700 if (IS_ERR(skb)) {
701 err = PTR_ERR(skb);
702 } else {
703 l2cap_do_send(chan, skb);
704 err = len;
705 }
706 goto done;
707 } 707 }
708 708
709 switch (chan->mode) { 709 err = l2cap_chan_send(chan, msg, len);
710 case L2CAP_MODE_BASIC:
711 /* Check outgoing MTU */
712 if (len > chan->omtu) {
713 err = -EMSGSIZE;
714 goto done;
715 }
716
717 /* Create a basic PDU */
718 skb = l2cap_create_basic_pdu(chan, msg, len);
719 if (IS_ERR(skb)) {
720 err = PTR_ERR(skb);
721 goto done;
722 }
723
724 l2cap_do_send(chan, skb);
725 err = len;
726 break;
727
728 case L2CAP_MODE_ERTM:
729 case L2CAP_MODE_STREAMING:
730 /* Entire SDU fits into one PDU */
731 if (len <= chan->remote_mps) {
732 control = L2CAP_SDU_UNSEGMENTED;
733 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
734 0);
735 if (IS_ERR(skb)) {
736 err = PTR_ERR(skb);
737 goto done;
738 }
739 __skb_queue_tail(&chan->tx_q, skb);
740
741 if (chan->tx_send_head == NULL)
742 chan->tx_send_head = skb;
743
744 } else {
745 /* Segment SDU into multiples PDUs */
746 err = l2cap_sar_segment_sdu(chan, msg, len);
747 if (err < 0)
748 goto done;
749 }
750
751 if (chan->mode == L2CAP_MODE_STREAMING) {
752 l2cap_streaming_send(chan);
753 err = len;
754 break;
755 }
756
757 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
758 (chan->conn_state & L2CAP_CONN_WAIT_F)) {
759 err = len;
760 break;
761 }
762 err = l2cap_ertm_send(chan);
763
764 if (err >= 0)
765 err = len;
766 break;
767
768 default:
769 BT_DBG("bad state %1.1x", chan->mode);
770 err = -EBADFD;
771 }
772 710
773done:
774 release_sock(sk); 711 release_sock(sk);
775 return err; 712 return err;
776} 713}
@@ -778,13 +715,15 @@ done:
778static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) 715static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
779{ 716{
780 struct sock *sk = sock->sk; 717 struct sock *sk = sock->sk;
718 struct l2cap_pinfo *pi = l2cap_pi(sk);
719 int err;
781 720
782 lock_sock(sk); 721 lock_sock(sk);
783 722
784 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) { 723 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
785 sk->sk_state = BT_CONFIG; 724 sk->sk_state = BT_CONFIG;
786 725
787 __l2cap_connect_rsp_defer(l2cap_pi(sk)->chan); 726 __l2cap_connect_rsp_defer(pi->chan);
788 release_sock(sk); 727 release_sock(sk);
789 return 0; 728 return 0;
790 } 729 }
@@ -792,15 +731,43 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms
792 release_sock(sk); 731 release_sock(sk);
793 732
794 if (sock->type == SOCK_STREAM) 733 if (sock->type == SOCK_STREAM)
795 return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags); 734 err = bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
735 else
736 err = bt_sock_recvmsg(iocb, sock, msg, len, flags);
737
738 if (pi->chan->mode != L2CAP_MODE_ERTM)
739 return err;
740
741 /* Attempt to put pending rx data in the socket buffer */
742
743 lock_sock(sk);
744
745 if (!test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state))
746 goto done;
747
748 if (pi->rx_busy_skb) {
749 if (!sock_queue_rcv_skb(sk, pi->rx_busy_skb))
750 pi->rx_busy_skb = NULL;
751 else
752 goto done;
753 }
796 754
797 return bt_sock_recvmsg(iocb, sock, msg, len, flags); 755 /* Restore data flow when half of the receive buffer is
756 * available. This avoids resending large numbers of
757 * frames.
758 */
759 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1)
760 l2cap_chan_busy(pi->chan, 0);
761
762done:
763 release_sock(sk);
764 return err;
798} 765}
799 766
800/* Kill socket (only if zapped and orphan) 767/* Kill socket (only if zapped and orphan)
801 * Must be called on unlocked socket. 768 * Must be called on unlocked socket.
802 */ 769 */
803void l2cap_sock_kill(struct sock *sk) 770static void l2cap_sock_kill(struct sock *sk)
804{ 771{
805 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) 772 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
806 return; 773 return;
@@ -814,87 +781,6 @@ void l2cap_sock_kill(struct sock *sk)
814 sock_put(sk); 781 sock_put(sk);
815} 782}
816 783
817/* Must be called on unlocked socket. */
818static void l2cap_sock_close(struct sock *sk)
819{
820 l2cap_sock_clear_timer(sk);
821 lock_sock(sk);
822 __l2cap_sock_close(sk, ECONNRESET);
823 release_sock(sk);
824 l2cap_sock_kill(sk);
825}
826
827static void l2cap_sock_cleanup_listen(struct sock *parent)
828{
829 struct sock *sk;
830
831 BT_DBG("parent %p", parent);
832
833 /* Close not yet accepted channels */
834 while ((sk = bt_accept_dequeue(parent, NULL)))
835 l2cap_sock_close(sk);
836
837 parent->sk_state = BT_CLOSED;
838 sock_set_flag(parent, SOCK_ZAPPED);
839}
840
841void __l2cap_sock_close(struct sock *sk, int reason)
842{
843 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
844 struct l2cap_conn *conn = chan->conn;
845
846 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
847
848 switch (sk->sk_state) {
849 case BT_LISTEN:
850 l2cap_sock_cleanup_listen(sk);
851 break;
852
853 case BT_CONNECTED:
854 case BT_CONFIG:
855 if ((sk->sk_type == SOCK_SEQPACKET ||
856 sk->sk_type == SOCK_STREAM) &&
857 conn->hcon->type == ACL_LINK) {
858 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
859 l2cap_send_disconn_req(conn, chan, reason);
860 } else
861 l2cap_chan_del(chan, reason);
862 break;
863
864 case BT_CONNECT2:
865 if ((sk->sk_type == SOCK_SEQPACKET ||
866 sk->sk_type == SOCK_STREAM) &&
867 conn->hcon->type == ACL_LINK) {
868 struct l2cap_conn_rsp rsp;
869 __u16 result;
870
871 if (bt_sk(sk)->defer_setup)
872 result = L2CAP_CR_SEC_BLOCK;
873 else
874 result = L2CAP_CR_BAD_PSM;
875
876 rsp.scid = cpu_to_le16(chan->dcid);
877 rsp.dcid = cpu_to_le16(chan->scid);
878 rsp.result = cpu_to_le16(result);
879 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
880 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
881 sizeof(rsp), &rsp);
882 }
883
884 l2cap_chan_del(chan, reason);
885 break;
886
887 case BT_CONNECT:
888 case BT_DISCONN:
889 l2cap_chan_del(chan, reason);
890 break;
891
892 default:
893 sock_set_flag(sk, SOCK_ZAPPED);
894 break;
895 }
896}
897
898static int l2cap_sock_shutdown(struct socket *sock, int how) 784static int l2cap_sock_shutdown(struct socket *sock, int how)
899{ 785{
900 struct sock *sk = sock->sk; 786 struct sock *sk = sock->sk;
@@ -912,8 +798,7 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
912 err = __l2cap_wait_ack(sk); 798 err = __l2cap_wait_ack(sk);
913 799
914 sk->sk_shutdown = SHUTDOWN_MASK; 800 sk->sk_shutdown = SHUTDOWN_MASK;
915 l2cap_sock_clear_timer(sk); 801 l2cap_chan_close(chan, 0);
916 __l2cap_sock_close(sk, 0);
917 802
918 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) 803 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
919 err = bt_sock_wait_state(sk, BT_CLOSED, 804 err = bt_sock_wait_state(sk, BT_CLOSED,
@@ -944,15 +829,85 @@ static int l2cap_sock_release(struct socket *sock)
944 return err; 829 return err;
945} 830}
946 831
832static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data)
833{
834 struct sock *sk, *parent = data;
835
836 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
837 GFP_ATOMIC);
838 if (!sk)
839 return NULL;
840
841 l2cap_sock_init(sk, parent);
842
843 return l2cap_pi(sk)->chan;
844}
845
846static int l2cap_sock_recv_cb(void *data, struct sk_buff *skb)
847{
848 int err;
849 struct sock *sk = data;
850 struct l2cap_pinfo *pi = l2cap_pi(sk);
851
852 if (pi->rx_busy_skb)
853 return -ENOMEM;
854
855 err = sock_queue_rcv_skb(sk, skb);
856
857 /* For ERTM, handle one skb that doesn't fit into the recv
858 * buffer. This is important to do because the data frames
859 * have already been acked, so the skb cannot be discarded.
860 *
861 * Notify the l2cap core that the buffer is full, so the
862 * LOCAL_BUSY state is entered and no more frames are
863 * acked and reassembled until there is buffer space
864 * available.
865 */
866 if (err < 0 && pi->chan->mode == L2CAP_MODE_ERTM) {
867 pi->rx_busy_skb = skb;
868 l2cap_chan_busy(pi->chan, 1);
869 err = 0;
870 }
871
872 return err;
873}
874
875static void l2cap_sock_close_cb(void *data)
876{
877 struct sock *sk = data;
878
879 l2cap_sock_kill(sk);
880}
881
882static void l2cap_sock_state_change_cb(void *data, int state)
883{
884 struct sock *sk = data;
885
886 sk->sk_state = state;
887}
888
889static struct l2cap_ops l2cap_chan_ops = {
890 .name = "L2CAP Socket Interface",
891 .new_connection = l2cap_sock_new_connection_cb,
892 .recv = l2cap_sock_recv_cb,
893 .close = l2cap_sock_close_cb,
894 .state_change = l2cap_sock_state_change_cb,
895};
896
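The new l2cap_chan_ops table is the seam that lets the channel core stay ignorant of struct sock: the core keeps only the opaque chan->data cookie and reaches the socket layer through the callbacks. Core-side dispatch would then look roughly like this (function names here are illustrative, not the core's actual ones):

/* Sketch: how the l2cap core hands events to whichever front end
 * registered the channel; chan->data is never dereferenced by the core.
 */
static int chan_deliver(struct l2cap_chan *chan, struct sk_buff *skb)
{
	return chan->ops->recv(chan->data, skb);
}

static void chan_set_state(struct l2cap_chan *chan, int state)
{
	chan->ops->state_change(chan->data, state);
}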
947static void l2cap_sock_destruct(struct sock *sk) 897static void l2cap_sock_destruct(struct sock *sk)
948{ 898{
949 BT_DBG("sk %p", sk); 899 BT_DBG("sk %p", sk);
950 900
901 if (l2cap_pi(sk)->rx_busy_skb) {
902 kfree_skb(l2cap_pi(sk)->rx_busy_skb);
903 l2cap_pi(sk)->rx_busy_skb = NULL;
904 }
905
951 skb_queue_purge(&sk->sk_receive_queue); 906 skb_queue_purge(&sk->sk_receive_queue);
952 skb_queue_purge(&sk->sk_write_queue); 907 skb_queue_purge(&sk->sk_write_queue);
953} 908}
954 909
955void l2cap_sock_init(struct sock *sk, struct sock *parent) 910static void l2cap_sock_init(struct sock *sk, struct sock *parent)
956{ 911{
957 struct l2cap_pinfo *pi = l2cap_pi(sk); 912 struct l2cap_pinfo *pi = l2cap_pi(sk);
958 struct l2cap_chan *chan = pi->chan; 913 struct l2cap_chan *chan = pi->chan;
@@ -965,6 +920,7 @@ void l2cap_sock_init(struct sock *sk, struct sock *parent)
965 sk->sk_type = parent->sk_type; 920 sk->sk_type = parent->sk_type;
966 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup; 921 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
967 922
923 chan->chan_type = pchan->chan_type;
968 chan->imtu = pchan->imtu; 924 chan->imtu = pchan->imtu;
969 chan->omtu = pchan->omtu; 925 chan->omtu = pchan->omtu;
970 chan->conf_state = pchan->conf_state; 926 chan->conf_state = pchan->conf_state;
@@ -976,12 +932,27 @@ void l2cap_sock_init(struct sock *sk, struct sock *parent)
976 chan->role_switch = pchan->role_switch; 932 chan->role_switch = pchan->role_switch;
977 chan->force_reliable = pchan->force_reliable; 933 chan->force_reliable = pchan->force_reliable;
978 chan->flushable = pchan->flushable; 934 chan->flushable = pchan->flushable;
935 chan->force_active = pchan->force_active;
979 } else { 936 } else {
937
938 switch (sk->sk_type) {
939 case SOCK_RAW:
940 chan->chan_type = L2CAP_CHAN_RAW;
941 break;
942 case SOCK_DGRAM:
943 chan->chan_type = L2CAP_CHAN_CONN_LESS;
944 break;
945 case SOCK_SEQPACKET:
946 case SOCK_STREAM:
947 chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
948 break;
949 }
950
980 chan->imtu = L2CAP_DEFAULT_MTU; 951 chan->imtu = L2CAP_DEFAULT_MTU;
981 chan->omtu = 0; 952 chan->omtu = 0;
982 if (!disable_ertm && sk->sk_type == SOCK_STREAM) { 953 if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
983 chan->mode = L2CAP_MODE_ERTM; 954 chan->mode = L2CAP_MODE_ERTM;
984 chan->conf_state |= L2CAP_CONF_STATE2_DEVICE; 955 set_bit(CONF_STATE2_DEVICE, &chan->conf_state);
985 } else { 956 } else {
986 chan->mode = L2CAP_MODE_BASIC; 957 chan->mode = L2CAP_MODE_BASIC;
987 } 958 }
@@ -992,10 +963,15 @@ void l2cap_sock_init(struct sock *sk, struct sock *parent)
992 chan->role_switch = 0; 963 chan->role_switch = 0;
993 chan->force_reliable = 0; 964 chan->force_reliable = 0;
994 chan->flushable = BT_FLUSHABLE_OFF; 965 chan->flushable = BT_FLUSHABLE_OFF;
966 chan->force_active = BT_POWER_FORCE_ACTIVE_ON;
967
995 } 968 }
996 969
997 /* Default config options */ 970 /* Default config options */
998 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO; 971 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
972
973 chan->data = sk;
974 chan->ops = &l2cap_chan_ops;
999} 975}
1000 976
1001static struct proto l2cap_proto = { 977static struct proto l2cap_proto = {
@@ -1004,9 +980,10 @@ static struct proto l2cap_proto = {
1004 .obj_size = sizeof(struct l2cap_pinfo) 980 .obj_size = sizeof(struct l2cap_pinfo)
1005}; 981};
1006 982
1007struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio) 983static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
1008{ 984{
1009 struct sock *sk; 985 struct sock *sk;
986 struct l2cap_chan *chan;
1010 987
1011 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto); 988 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
1012 if (!sk) 989 if (!sk)
@@ -1016,14 +993,20 @@ struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, g
1016 INIT_LIST_HEAD(&bt_sk(sk)->accept_q); 993 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
1017 994
1018 sk->sk_destruct = l2cap_sock_destruct; 995 sk->sk_destruct = l2cap_sock_destruct;
1019 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT); 996 sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
1020 997
1021 sock_reset_flag(sk, SOCK_ZAPPED); 998 sock_reset_flag(sk, SOCK_ZAPPED);
1022 999
1023 sk->sk_protocol = proto; 1000 sk->sk_protocol = proto;
1024 sk->sk_state = BT_OPEN; 1001 sk->sk_state = BT_OPEN;
1025 1002
1026 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk); 1003 chan = l2cap_chan_create(sk);
1004 if (!chan) {
1005 l2cap_sock_kill(sk);
1006 return NULL;
1007 }
1008
1009 l2cap_pi(sk)->chan = chan;
1027 1010
1028 return sk; 1011 return sk;
1029} 1012}
@@ -1032,7 +1015,6 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
1032 int kern) 1015 int kern)
1033{ 1016{
1034 struct sock *sk; 1017 struct sock *sk;
1035 struct l2cap_chan *chan;
1036 1018
1037 BT_DBG("sock %p", sock); 1019 BT_DBG("sock %p", sock);
1038 1020
@@ -1051,14 +1033,6 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
1051 if (!sk) 1033 if (!sk)
1052 return -ENOMEM; 1034 return -ENOMEM;
1053 1035
1054 chan = l2cap_chan_create(sk);
1055 if (!chan) {
1056 l2cap_sock_kill(sk);
1057 return -ENOMEM;
1058 }
1059
1060 l2cap_pi(sk)->chan = chan;
1061
1062 l2cap_sock_init(sk, NULL); 1036 l2cap_sock_init(sk, NULL);
1063 return 0; 1037 return 0;
1064} 1038}
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index b826d1bf10d..86a6bed229d 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -59,7 +59,7 @@ char *batostr(bdaddr_t *ba)
59EXPORT_SYMBOL(batostr); 59EXPORT_SYMBOL(batostr);
60 60
61/* Bluetooth error codes to Unix errno mapping */ 61/* Bluetooth error codes to Unix errno mapping */
62int bt_err(__u16 code) 62int bt_to_errno(__u16 code)
63{ 63{
64 switch (code) { 64 switch (code) {
65 case 0: 65 case 0:
@@ -149,4 +149,23 @@ int bt_err(__u16 code)
149 return ENOSYS; 149 return ENOSYS;
150 } 150 }
151} 151}
152EXPORT_SYMBOL(bt_err); 152EXPORT_SYMBOL(bt_to_errno);
153
154int bt_printk(const char *level, const char *format, ...)
155{
156 struct va_format vaf;
157 va_list args;
158 int r;
159
160 va_start(args, format);
161
162 vaf.fmt = format;
163 vaf.va = &args;
164
165 r = printk("%sBluetooth: %pV\n", level, &vaf);
166
167 va_end(args);
168
169 return r;
170}
171EXPORT_SYMBOL(bt_printk);
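bt_printk() collapses the "Bluetooth: " prefix and the caller's format into a single printk() via the recursive %pV specifier, so messages cannot interleave. The BT_INFO/BT_ERR wrappers are expected to sit on top of it roughly like this (sketch; the real macros live in the matching bluetooth.h change, which this diff does not show):

/* Sketch of the logging macros layered over bt_printk(). */
#define BT_INFO(fmt, ...)	bt_printk(KERN_INFO, fmt, ##__VA_ARGS__)
#define BT_ERR(fmt, ...)	bt_printk(KERN_ERR, fmt, ##__VA_ARGS__)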
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index dae382ce702..98327213d93 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -41,7 +41,7 @@ struct pending_cmd {
41 void *user_data; 41 void *user_data;
42}; 42};
43 43
44LIST_HEAD(cmd_list); 44static LIST_HEAD(cmd_list);
45 45
46static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) 46static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
47{ 47{
@@ -179,7 +179,7 @@ static int read_controller_info(struct sock *sk, u16 index)
179 179
180 hci_del_off_timer(hdev); 180 hci_del_off_timer(hdev);
181 181
182 hci_dev_lock(hdev); 182 hci_dev_lock_bh(hdev);
183 183
184 set_bit(HCI_MGMT, &hdev->flags); 184 set_bit(HCI_MGMT, &hdev->flags);
185 185
@@ -208,7 +208,7 @@ static int read_controller_info(struct sock *sk, u16 index)
208 208
209 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name)); 209 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
210 210
211 hci_dev_unlock(hdev); 211 hci_dev_unlock_bh(hdev);
212 hci_dev_put(hdev); 212 hci_dev_put(hdev);
213 213
214 return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp)); 214 return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp));
@@ -316,7 +316,7 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
316 if (!hdev) 316 if (!hdev)
317 return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV); 317 return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV);
318 318
319 hci_dev_lock(hdev); 319 hci_dev_lock_bh(hdev);
320 320
321 up = test_bit(HCI_UP, &hdev->flags); 321 up = test_bit(HCI_UP, &hdev->flags);
322 if ((cp->val && up) || (!cp->val && !up)) { 322 if ((cp->val && up) || (!cp->val && !up)) {
@@ -343,7 +343,7 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
343 err = 0; 343 err = 0;
344 344
345failed: 345failed:
346 hci_dev_unlock(hdev); 346 hci_dev_unlock_bh(hdev);
347 hci_dev_put(hdev); 347 hci_dev_put(hdev);
348 return err; 348 return err;
349} 349}
@@ -368,7 +368,7 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
368 if (!hdev) 368 if (!hdev)
369 return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV); 369 return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV);
370 370
371 hci_dev_lock(hdev); 371 hci_dev_lock_bh(hdev);
372 372
373 if (!test_bit(HCI_UP, &hdev->flags)) { 373 if (!test_bit(HCI_UP, &hdev->flags)) {
374 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN); 374 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN);
@@ -403,7 +403,7 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
403 mgmt_pending_remove(cmd); 403 mgmt_pending_remove(cmd);
404 404
405failed: 405failed:
406 hci_dev_unlock(hdev); 406 hci_dev_unlock_bh(hdev);
407 hci_dev_put(hdev); 407 hci_dev_put(hdev);
408 408
409 return err; 409 return err;
@@ -429,7 +429,7 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
429 if (!hdev) 429 if (!hdev)
430 return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV); 430 return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV);
431 431
432 hci_dev_lock(hdev); 432 hci_dev_lock_bh(hdev);
433 433
434 if (!test_bit(HCI_UP, &hdev->flags)) { 434 if (!test_bit(HCI_UP, &hdev->flags)) {
435 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN); 435 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN);
@@ -463,7 +463,7 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
463 mgmt_pending_remove(cmd); 463 mgmt_pending_remove(cmd);
464 464
465failed: 465failed:
466 hci_dev_unlock(hdev); 466 hci_dev_unlock_bh(hdev);
467 hci_dev_put(hdev); 467 hci_dev_put(hdev);
468 468
469 return err; 469 return err;
@@ -522,7 +522,7 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
522 if (!hdev) 522 if (!hdev)
523 return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV); 523 return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV);
524 524
525 hci_dev_lock(hdev); 525 hci_dev_lock_bh(hdev);
526 526
527 if (cp->val) 527 if (cp->val)
528 set_bit(HCI_PAIRABLE, &hdev->flags); 528 set_bit(HCI_PAIRABLE, &hdev->flags);
@@ -538,7 +538,7 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
538 err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk); 538 err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk);
539 539
540failed: 540failed:
541 hci_dev_unlock(hdev); 541 hci_dev_unlock_bh(hdev);
542 hci_dev_put(hdev); 542 hci_dev_put(hdev);
543 543
544 return err; 544 return err;
@@ -739,7 +739,7 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
739 if (!hdev) 739 if (!hdev)
740 return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV); 740 return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV);
741 741
742 hci_dev_lock(hdev); 742 hci_dev_lock_bh(hdev);
743 743
744 uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC); 744 uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
745 if (!uuid) { 745 if (!uuid) {
@@ -763,7 +763,7 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
763 err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0); 763 err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0);
764 764
765failed: 765failed:
766 hci_dev_unlock(hdev); 766 hci_dev_unlock_bh(hdev);
767 hci_dev_put(hdev); 767 hci_dev_put(hdev);
768 768
769 return err; 769 return err;
@@ -788,7 +788,7 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
788 if (!hdev) 788 if (!hdev)
789 return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV); 789 return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV);
790 790
791 hci_dev_lock(hdev); 791 hci_dev_lock_bh(hdev);
792 792
793 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) { 793 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
794 err = hci_uuids_clear(hdev); 794 err = hci_uuids_clear(hdev);
@@ -823,7 +823,7 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
823 err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0); 823 err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0);
824 824
825unlock: 825unlock:
826 hci_dev_unlock(hdev); 826 hci_dev_unlock_bh(hdev);
827 hci_dev_put(hdev); 827 hci_dev_put(hdev);
828 828
829 return err; 829 return err;
@@ -847,7 +847,7 @@ static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
847 if (!hdev) 847 if (!hdev)
848 return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV); 848 return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV);
849 849
850 hci_dev_lock(hdev); 850 hci_dev_lock_bh(hdev);
851 851
852 hdev->major_class = cp->major; 852 hdev->major_class = cp->major;
853 hdev->minor_class = cp->minor; 853 hdev->minor_class = cp->minor;
@@ -857,7 +857,7 @@ static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
857 if (err == 0) 857 if (err == 0)
858 err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0); 858 err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0);
859 859
860 hci_dev_unlock(hdev); 860 hci_dev_unlock_bh(hdev);
861 hci_dev_put(hdev); 861 hci_dev_put(hdev);
862 862
863 return err; 863 return err;
@@ -879,7 +879,7 @@ static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
879 if (!hdev) 879 if (!hdev)
880 return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV); 880 return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV);
881 881
882 hci_dev_lock(hdev); 882 hci_dev_lock_bh(hdev);
883 883
884 BT_DBG("hci%u enable %d", index, cp->enable); 884 BT_DBG("hci%u enable %d", index, cp->enable);
885 885
@@ -897,7 +897,7 @@ static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
897 err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL, 897 err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL,
898 0); 898 0);
899 899
900 hci_dev_unlock(hdev); 900 hci_dev_unlock_bh(hdev);
901 hci_dev_put(hdev); 901 hci_dev_put(hdev);
902 902
903 return err; 903 return err;
@@ -908,7 +908,7 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
908 struct hci_dev *hdev; 908 struct hci_dev *hdev;
909 struct mgmt_cp_load_keys *cp; 909 struct mgmt_cp_load_keys *cp;
910 u16 key_count, expected_len; 910 u16 key_count, expected_len;
911 int i; 911 int i, err;
912 912
913 cp = (void *) data; 913 cp = (void *) data;
914 914
@@ -918,9 +918,9 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
918 key_count = get_unaligned_le16(&cp->key_count); 918 key_count = get_unaligned_le16(&cp->key_count);
919 919
920 expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info); 920 expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info);
921 if (expected_len != len) { 921 if (expected_len > len) {
922 BT_ERR("load_keys: expected %u bytes, got %u bytes", 922 BT_ERR("load_keys: expected at least %u bytes, got %u bytes",
923 len, expected_len); 923 expected_len, len);
924 return -EINVAL; 924 return -EINVAL;
925 } 925 }
926 926
@@ -931,7 +931,7 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
931 BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys, 931 BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
932 key_count); 932 key_count);
933 933
934 hci_dev_lock(hdev); 934 hci_dev_lock_bh(hdev);
935 935
936 hci_link_keys_clear(hdev); 936 hci_link_keys_clear(hdev);
937 937
@@ -942,17 +942,36 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
942 else 942 else
943 clear_bit(HCI_DEBUG_KEYS, &hdev->flags); 943 clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
944 944
945 for (i = 0; i < key_count; i++) { 945 len -= sizeof(*cp);
946 struct mgmt_key_info *key = &cp->keys[i]; 946 i = 0;
947
948 while (i < len) {
949 struct mgmt_key_info *key = (void *) cp->keys + i;
950
951 i += sizeof(*key) + key->dlen;
952
953 if (key->type == HCI_LK_SMP_LTK) {
954 struct key_master_id *id = (void *) key->data;
955
956 if (key->dlen != sizeof(struct key_master_id))
957 continue;
958
959 hci_add_ltk(hdev, 0, &key->bdaddr, key->pin_len,
960 id->ediv, id->rand, key->val);
961
962 continue;
963 }
947 964
948 hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type, 965 hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type,
949 key->pin_len); 966 key->pin_len);
950 } 967 }
951 968
952 hci_dev_unlock(hdev); 969 err = cmd_complete(sk, index, MGMT_OP_LOAD_KEYS, NULL, 0);
970
971 hci_dev_unlock_bh(hdev);
953 hci_dev_put(hdev); 972 hci_dev_put(hdev);
954 973
955 return 0; 974 return err;
956} 975}
957 976
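load_keys() now parses a packed, variable-length record array rather than indexing fixed-size entries: each mgmt_key_info carries key->dlen trailing bytes, which for HCI_LK_SMP_LTK hold a key_master_id. The walk, reduced to its skeleton (helper name illustrative; void-pointer arithmetic as the kernel allows):

/* Sketch: iterate back-to-back variable-length key records. 'len' is
 * the byte count of the record area only, as computed in the hunk.
 */
static void for_each_key(struct mgmt_key_info *keys, u16 len)
{
	u16 off = 0;

	while (off < len) {
		struct mgmt_key_info *key = (void *) keys + off;

		off += sizeof(*key) + key->dlen;	/* step over header + payload */

		/* dispatch on key->type here */
	}
}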
958static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len) 977static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
@@ -971,7 +990,7 @@ static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
971 if (!hdev) 990 if (!hdev)
972 return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV); 991 return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV);
973 992
974 hci_dev_lock(hdev); 993 hci_dev_lock_bh(hdev);
975 994
976 err = hci_remove_link_key(hdev, &cp->bdaddr); 995 err = hci_remove_link_key(hdev, &cp->bdaddr);
977 if (err < 0) { 996 if (err < 0) {
@@ -990,11 +1009,11 @@ static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
990 1009
991 put_unaligned_le16(conn->handle, &dc.handle); 1010 put_unaligned_le16(conn->handle, &dc.handle);
992 dc.reason = 0x13; /* Remote User Terminated Connection */ 1011 dc.reason = 0x13; /* Remote User Terminated Connection */
993 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, 0, NULL); 1012 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
994 } 1013 }
995 1014
996unlock: 1015unlock:
997 hci_dev_unlock(hdev); 1016 hci_dev_unlock_bh(hdev);
998 hci_dev_put(hdev); 1017 hci_dev_put(hdev);
999 1018
1000 return err; 1019 return err;
@@ -1020,7 +1039,7 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
1020 if (!hdev) 1039 if (!hdev)
1021 return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV); 1040 return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV);
1022 1041
1023 hci_dev_lock(hdev); 1042 hci_dev_lock_bh(hdev);
1024 1043
1025 if (!test_bit(HCI_UP, &hdev->flags)) { 1044 if (!test_bit(HCI_UP, &hdev->flags)) {
1026 err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN); 1045 err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN);
@@ -1055,7 +1074,7 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
1055 mgmt_pending_remove(cmd); 1074 mgmt_pending_remove(cmd);
1056 1075
1057failed: 1076failed:
1058 hci_dev_unlock(hdev); 1077 hci_dev_unlock_bh(hdev);
1059 hci_dev_put(hdev); 1078 hci_dev_put(hdev);
1060 1079
1061 return err; 1080 return err;
@@ -1076,7 +1095,7 @@ static int get_connections(struct sock *sk, u16 index)
1076 if (!hdev) 1095 if (!hdev)
1077 return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV); 1096 return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV);
1078 1097
1079 hci_dev_lock(hdev); 1098 hci_dev_lock_bh(hdev);
1080 1099
1081 count = 0; 1100 count = 0;
1082 list_for_each(p, &hdev->conn_hash.list) { 1101 list_for_each(p, &hdev->conn_hash.list) {
@@ -1092,8 +1111,6 @@ static int get_connections(struct sock *sk, u16 index)
1092 1111
1093 put_unaligned_le16(count, &rp->conn_count); 1112 put_unaligned_le16(count, &rp->conn_count);
1094 1113
1095 read_lock(&hci_dev_list_lock);
1096
1097 i = 0; 1114 i = 0;
1098 list_for_each(p, &hdev->conn_hash.list) { 1115 list_for_each(p, &hdev->conn_hash.list) {
1099 struct hci_conn *c = list_entry(p, struct hci_conn, list); 1116 struct hci_conn *c = list_entry(p, struct hci_conn, list);
@@ -1101,22 +1118,41 @@ static int get_connections(struct sock *sk, u16 index)
1101 bacpy(&rp->conn[i++], &c->dst); 1118 bacpy(&rp->conn[i++], &c->dst);
1102 } 1119 }
1103 1120
1104 read_unlock(&hci_dev_list_lock);
1105
1106 err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len); 1121 err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len);
1107 1122
1108unlock: 1123unlock:
1109 kfree(rp); 1124 kfree(rp);
1110 hci_dev_unlock(hdev); 1125 hci_dev_unlock_bh(hdev);
1111 hci_dev_put(hdev); 1126 hci_dev_put(hdev);
1112 return err; 1127 return err;
1113} 1128}
1114 1129
1130static int send_pin_code_neg_reply(struct sock *sk, u16 index,
1131 struct hci_dev *hdev, struct mgmt_cp_pin_code_neg_reply *cp)
1132{
1133 struct pending_cmd *cmd;
1134 int err;
1135
1136 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index, cp,
1137 sizeof(*cp));
1138 if (!cmd)
1139 return -ENOMEM;
1140
1141 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr),
1142 &cp->bdaddr);
1143 if (err < 0)
1144 mgmt_pending_remove(cmd);
1145
1146 return err;
1147}
1148
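send_pin_code_neg_reply() factors out the pattern nearly every mgmt handler follows: record the pending management command first, fire the HCI command, and unwind the pending entry if the send fails so no reply is ever left dangling. Generalized into a sketch (the helper and its parameter split are illustrative):

/* Sketch: pair a pending mgmt command with the HCI command it waits on. */
static int mgmt_send_with_pending(struct sock *sk, struct hci_dev *hdev,
				  u16 index, u16 mgmt_op, void *mgmt_param,
				  u16 mgmt_len, u16 hci_op, u16 hci_plen,
				  void *hci_param)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, mgmt_op, index, mgmt_param, mgmt_len);
	if (!cmd)
		return -ENOMEM;

	err = hci_send_cmd(hdev, hci_op, hci_plen, hci_param);
	if (err < 0)
		mgmt_pending_remove(cmd);	/* never leave a stale entry */

	return err;
}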
1115static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data, 1149static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
1116 u16 len) 1150 u16 len)
1117{ 1151{
1118 struct hci_dev *hdev; 1152 struct hci_dev *hdev;
1153 struct hci_conn *conn;
1119 struct mgmt_cp_pin_code_reply *cp; 1154 struct mgmt_cp_pin_code_reply *cp;
1155 struct mgmt_cp_pin_code_neg_reply ncp;
1120 struct hci_cp_pin_code_reply reply; 1156 struct hci_cp_pin_code_reply reply;
1121 struct pending_cmd *cmd; 1157 struct pending_cmd *cmd;
1122 int err; 1158 int err;
@@ -1132,13 +1168,32 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
1132 if (!hdev) 1168 if (!hdev)
1133 return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV); 1169 return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV);
1134 1170
1135 hci_dev_lock(hdev); 1171 hci_dev_lock_bh(hdev);
1136 1172
1137 if (!test_bit(HCI_UP, &hdev->flags)) { 1173 if (!test_bit(HCI_UP, &hdev->flags)) {
1138 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN); 1174 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN);
1139 goto failed; 1175 goto failed;
1140 } 1176 }
1141 1177
1178 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1179 if (!conn) {
1180 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENOTCONN);
1181 goto failed;
1182 }
1183
1184 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
1185 bacpy(&ncp.bdaddr, &cp->bdaddr);
1186
1187 BT_ERR("PIN code is not 16 bytes long");
1188
1189 err = send_pin_code_neg_reply(sk, index, hdev, &ncp);
1190 if (err >= 0)
1191 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
1192 EINVAL);
1193
1194 goto failed;
1195 }
1196
1142 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len); 1197 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len);
1143 if (!cmd) { 1198 if (!cmd) {
1144 err = -ENOMEM; 1199 err = -ENOMEM;
@@ -1147,14 +1202,14 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
1147 1202
1148 bacpy(&reply.bdaddr, &cp->bdaddr); 1203 bacpy(&reply.bdaddr, &cp->bdaddr);
1149 reply.pin_len = cp->pin_len; 1204 reply.pin_len = cp->pin_len;
1150 memcpy(reply.pin_code, cp->pin_code, 16); 1205 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
1151 1206
1152 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply); 1207 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
1153 if (err < 0) 1208 if (err < 0)
1154 mgmt_pending_remove(cmd); 1209 mgmt_pending_remove(cmd);
1155 1210
1156failed: 1211failed:
1157 hci_dev_unlock(hdev); 1212 hci_dev_unlock_bh(hdev);
1158 hci_dev_put(hdev); 1213 hci_dev_put(hdev);
1159 1214
1160 return err; 1215 return err;
@@ -1165,7 +1220,6 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
1165{ 1220{
1166 struct hci_dev *hdev; 1221 struct hci_dev *hdev;
1167 struct mgmt_cp_pin_code_neg_reply *cp; 1222 struct mgmt_cp_pin_code_neg_reply *cp;
1168 struct pending_cmd *cmd;
1169 int err; 1223 int err;
1170 1224
1171 BT_DBG(""); 1225 BT_DBG("");
@@ -1181,7 +1235,7 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
1181 return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, 1235 return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
1182 ENODEV); 1236 ENODEV);
1183 1237
1184 hci_dev_lock(hdev); 1238 hci_dev_lock_bh(hdev);
1185 1239
1186 if (!test_bit(HCI_UP, &hdev->flags)) { 1240 if (!test_bit(HCI_UP, &hdev->flags)) {
1187 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, 1241 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
@@ -1189,20 +1243,10 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
1189 goto failed; 1243 goto failed;
1190 } 1244 }
1191 1245
1192 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index, 1246 err = send_pin_code_neg_reply(sk, index, hdev, cp);
1193 data, len);
1194 if (!cmd) {
1195 err = -ENOMEM;
1196 goto failed;
1197 }
1198
1199 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr),
1200 &cp->bdaddr);
1201 if (err < 0)
1202 mgmt_pending_remove(cmd);
1203 1247
1204failed: 1248failed:
1205 hci_dev_unlock(hdev); 1249 hci_dev_unlock_bh(hdev);
1206 hci_dev_put(hdev); 1250 hci_dev_put(hdev);
1207 1251
1208 return err; 1252 return err;
@@ -1225,14 +1269,14 @@ static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
1225 if (!hdev) 1269 if (!hdev)
1226 return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV); 1270 return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV);
1227 1271
1228 hci_dev_lock(hdev); 1272 hci_dev_lock_bh(hdev);
1229 1273
1230 hdev->io_capability = cp->io_capability; 1274 hdev->io_capability = cp->io_capability;
1231 1275
1232 BT_DBG("%s IO capability set to 0x%02x", hdev->name, 1276 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
1233 hdev->io_capability); 1277 hdev->io_capability);
1234 1278
1235 hci_dev_unlock(hdev); 1279 hci_dev_unlock_bh(hdev);
1236 hci_dev_put(hdev); 1280 hci_dev_put(hdev);
1237 1281
1238 return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0); 1282 return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0);
@@ -1318,7 +1362,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1318 if (!hdev) 1362 if (!hdev)
1319 return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV); 1363 return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV);
1320 1364
1321 hci_dev_lock(hdev); 1365 hci_dev_lock_bh(hdev);
1322 1366
1323 if (cp->io_cap == 0x03) { 1367 if (cp->io_cap == 0x03) {
1324 sec_level = BT_SECURITY_MEDIUM; 1368 sec_level = BT_SECURITY_MEDIUM;
@@ -1328,7 +1372,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1328 auth_type = HCI_AT_DEDICATED_BONDING_MITM; 1372 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1329 } 1373 }
1330 1374
1331 conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level, auth_type); 1375 conn = hci_connect(hdev, ACL_LINK, 0, &cp->bdaddr, sec_level, auth_type);
1332 if (IS_ERR(conn)) { 1376 if (IS_ERR(conn)) {
1333 err = PTR_ERR(conn); 1377 err = PTR_ERR(conn);
1334 goto unlock; 1378 goto unlock;
@@ -1360,7 +1404,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1360 err = 0; 1404 err = 0;
1361 1405
1362unlock: 1406unlock:
1363 hci_dev_unlock(hdev); 1407 hci_dev_unlock_bh(hdev);
1364 hci_dev_put(hdev); 1408 hci_dev_put(hdev);
1365 1409
1366 return err; 1410 return err;
@@ -1392,7 +1436,7 @@ static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
1392 if (!hdev) 1436 if (!hdev)
1393 return cmd_status(sk, index, mgmt_op, ENODEV); 1437 return cmd_status(sk, index, mgmt_op, ENODEV);
1394 1438
1395 hci_dev_lock(hdev); 1439 hci_dev_lock_bh(hdev);
1396 1440
1397 if (!test_bit(HCI_UP, &hdev->flags)) { 1441 if (!test_bit(HCI_UP, &hdev->flags)) {
1398 err = cmd_status(sk, index, mgmt_op, ENETDOWN); 1442 err = cmd_status(sk, index, mgmt_op, ENETDOWN);
@@ -1410,7 +1454,7 @@ static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
1410 mgmt_pending_remove(cmd); 1454 mgmt_pending_remove(cmd);
1411 1455
1412failed: 1456failed:
1413 hci_dev_unlock(hdev); 1457 hci_dev_unlock_bh(hdev);
1414 hci_dev_put(hdev); 1458 hci_dev_put(hdev);
1415 1459
1416 return err; 1460 return err;
@@ -1434,7 +1478,7 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
1434 if (!hdev) 1478 if (!hdev)
1435 return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, ENODEV); 1479 return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, ENODEV);
1436 1480
1437 hci_dev_lock(hdev); 1481 hci_dev_lock_bh(hdev);
1438 1482
1439 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len); 1483 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len);
1440 if (!cmd) { 1484 if (!cmd) {
@@ -1449,7 +1493,7 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
1449 mgmt_pending_remove(cmd); 1493 mgmt_pending_remove(cmd);
1450 1494
1451failed: 1495failed:
1452 hci_dev_unlock(hdev); 1496 hci_dev_unlock_bh(hdev);
1453 hci_dev_put(hdev); 1497 hci_dev_put(hdev);
1454 1498
1455 return err; 1499 return err;
@@ -1468,7 +1512,7 @@ static int read_local_oob_data(struct sock *sk, u16 index)
1468 return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, 1512 return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
1469 ENODEV); 1513 ENODEV);
1470 1514
1471 hci_dev_lock(hdev); 1515 hci_dev_lock_bh(hdev);
1472 1516
1473 if (!test_bit(HCI_UP, &hdev->flags)) { 1517 if (!test_bit(HCI_UP, &hdev->flags)) {
1474 err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, 1518 err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
@@ -1498,7 +1542,7 @@ static int read_local_oob_data(struct sock *sk, u16 index)
1498 mgmt_pending_remove(cmd); 1542 mgmt_pending_remove(cmd);
1499 1543
1500unlock: 1544unlock:
1501 hci_dev_unlock(hdev); 1545 hci_dev_unlock_bh(hdev);
1502 hci_dev_put(hdev); 1546 hci_dev_put(hdev);
1503 1547
1504 return err; 1548 return err;
@@ -1522,7 +1566,7 @@ static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data,
1522 return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, 1566 return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
1523 ENODEV); 1567 ENODEV);
1524 1568
1525 hci_dev_lock(hdev); 1569 hci_dev_lock_bh(hdev);
1526 1570
1527 err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash, 1571 err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash,
1528 cp->randomizer); 1572 cp->randomizer);
@@ -1532,7 +1576,7 @@ static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data,
1532 err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL, 1576 err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL,
1533 0); 1577 0);
1534 1578
1535 hci_dev_unlock(hdev); 1579 hci_dev_unlock_bh(hdev);
1536 hci_dev_put(hdev); 1580 hci_dev_put(hdev);
1537 1581
1538 return err; 1582 return err;
@@ -1556,7 +1600,7 @@ static int remove_remote_oob_data(struct sock *sk, u16 index,
1556 return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, 1600 return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
1557 ENODEV); 1601 ENODEV);
1558 1602
1559 hci_dev_lock(hdev); 1603 hci_dev_lock_bh(hdev);
1560 1604
1561 err = hci_remove_remote_oob_data(hdev, &cp->bdaddr); 1605 err = hci_remove_remote_oob_data(hdev, &cp->bdaddr);
1562 if (err < 0) 1606 if (err < 0)
@@ -1566,7 +1610,7 @@ static int remove_remote_oob_data(struct sock *sk, u16 index,
1566 err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, 1610 err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
1567 NULL, 0); 1611 NULL, 0);
1568 1612
1569 hci_dev_unlock(hdev); 1613 hci_dev_unlock_bh(hdev);
1570 hci_dev_put(hdev); 1614 hci_dev_put(hdev);
1571 1615
1572 return err; 1616 return err;
@@ -1641,6 +1685,70 @@ failed:
1641 return err; 1685 return err;
1642} 1686}
1643 1687
1688static int block_device(struct sock *sk, u16 index, unsigned char *data,
1689 u16 len)
1690{
1691 struct hci_dev *hdev;
1692 struct mgmt_cp_block_device *cp;
1693 int err;
1694
1695 BT_DBG("hci%u", index);
1696
1697 cp = (void *) data;
1698
1699 if (len != sizeof(*cp))
1700 return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
1701 EINVAL);
1702
1703 hdev = hci_dev_get(index);
1704 if (!hdev)
1705 return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
1706 ENODEV);
1707
1708 err = hci_blacklist_add(hdev, &cp->bdaddr);
1709
1710 if (err < 0)
1711 err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, -err);
1712 else
1713 err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE,
1714 NULL, 0);
1715 hci_dev_put(hdev);
1716
1717 return err;
1718}
1719
1720static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
1721 u16 len)
1722{
1723 struct hci_dev *hdev;
1724 struct mgmt_cp_unblock_device *cp;
1725 int err;
1726
1727 BT_DBG("hci%u", index);
1728
1729 cp = (void *) data;
1730
1731 if (len != sizeof(*cp))
1732 return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
1733 EINVAL);
1734
1735 hdev = hci_dev_get(index);
1736 if (!hdev)
1737 return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
1738 ENODEV);
1739
1740 err = hci_blacklist_del(hdev, &cp->bdaddr);
1741
1742 if (err < 0)
1743 err = cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, -err);
1744 else
1745 err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE,
1746 NULL, 0);
1747 hci_dev_put(hdev);
1748
1749 return err;
1750}
1751
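block_device() and unblock_device() are mirror images: check the fixed-size payload, resolve the controller, apply the blacklist operation, and translate the result into a status or an empty complete. Sketched as one helper (illustrative; assumes the two command structs share the single-bdaddr layout, as the hunk suggests):

/* Sketch: shared skeleton of the two blacklist handlers. */
static int handle_bdaddr_op(struct sock *sk, u16 index, u16 op,
			    unsigned char *data, u16 len,
			    int (*action)(struct hci_dev *, bdaddr_t *))
{
	struct mgmt_cp_block_device *cp = (void *) data;
	struct hci_dev *hdev;
	int err;

	if (len != sizeof(*cp))
		return cmd_status(sk, index, op, EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, op, ENODEV);

	err = action(hdev, &cp->bdaddr);
	if (err < 0)
		err = cmd_status(sk, index, op, -err);
	else
		err = cmd_complete(sk, index, op, NULL, 0);

	hci_dev_put(hdev);
	return err;
}

Usage would then be handle_bdaddr_op(sk, index, MGMT_OP_BLOCK_DEVICE, data, len, hci_blacklist_add), with the hci_blacklist_del counterpart for unblocking.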
1644int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) 1752int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
1645{ 1753{
1646 unsigned char *buf; 1754 unsigned char *buf;
@@ -1755,6 +1863,12 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
1755 case MGMT_OP_STOP_DISCOVERY: 1863 case MGMT_OP_STOP_DISCOVERY:
1756 err = stop_discovery(sk, index); 1864 err = stop_discovery(sk, index);
1757 break; 1865 break;
1866 case MGMT_OP_BLOCK_DEVICE:
1867 err = block_device(sk, index, buf + sizeof(*hdr), len);
1868 break;
1869 case MGMT_OP_UNBLOCK_DEVICE:
1870 err = unblock_device(sk, index, buf + sizeof(*hdr), len);
1871 break;
1758 default: 1872 default:
1759 BT_DBG("Unknown op %u", opcode); 1873 BT_DBG("Unknown op %u", opcode);
1760 err = cmd_status(sk, index, opcode, 0x01); 1874 err = cmd_status(sk, index, opcode, 0x01);
@@ -1863,17 +1977,28 @@ int mgmt_connectable(u16 index, u8 connectable)
1863 1977
1864int mgmt_new_key(u16 index, struct link_key *key, u8 persistent) 1978int mgmt_new_key(u16 index, struct link_key *key, u8 persistent)
1865{ 1979{
1866 struct mgmt_ev_new_key ev; 1980 struct mgmt_ev_new_key *ev;
1981 int err, total;
1867 1982
1868 memset(&ev, 0, sizeof(ev)); 1983 total = sizeof(struct mgmt_ev_new_key) + key->dlen;
1984 ev = kzalloc(total, GFP_ATOMIC);
1985 if (!ev)
1986 return -ENOMEM;
1869 1987
1870 ev.store_hint = persistent; 1988 bacpy(&ev->key.bdaddr, &key->bdaddr);
1871 bacpy(&ev.key.bdaddr, &key->bdaddr); 1989 ev->key.type = key->type;
1872 ev.key.type = key->type; 1990 memcpy(ev->key.val, key->val, 16);
1873 memcpy(ev.key.val, key->val, 16); 1991 ev->key.pin_len = key->pin_len;
1874 ev.key.pin_len = key->pin_len; 1992 ev->key.dlen = key->dlen;
1993 ev->store_hint = persistent;
1875 1994
1876 return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL); 1995 memcpy(ev->key.data, key->data, key->dlen);
1996
1997 err = mgmt_event(MGMT_EV_NEW_KEY, index, ev, total, NULL);
1998
1999 kfree(ev);
2000
2001 return err;
1877} 2002}
1878 2003
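mgmt_new_key() moves from a stack struct to kzalloc() because the event is now variable-length: key->dlen bytes of key data (the LTK master id, for SMP keys) trail the fixed header. Consumers must size the event the same way; a sketch of the length computation (helper name illustrative):

/* Sketch: a MGMT_EV_NEW_KEY event ends key.dlen bytes past the struct. */
static size_t new_key_ev_len(const struct mgmt_ev_new_key *ev)
{
	return sizeof(*ev) + ev->key.dlen;
}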
1879int mgmt_connected(u16 index, bdaddr_t *bdaddr) 2004int mgmt_connected(u16 index, bdaddr_t *bdaddr)
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 5759bb7054f..c2486a53714 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -62,7 +62,6 @@ static DEFINE_MUTEX(rfcomm_mutex);
62#define rfcomm_lock() mutex_lock(&rfcomm_mutex) 62#define rfcomm_lock() mutex_lock(&rfcomm_mutex)
63#define rfcomm_unlock() mutex_unlock(&rfcomm_mutex) 63#define rfcomm_unlock() mutex_unlock(&rfcomm_mutex)
64 64
65static unsigned long rfcomm_event;
66 65
67static LIST_HEAD(session_list); 66static LIST_HEAD(session_list);
68 67
@@ -120,7 +119,6 @@ static inline void rfcomm_schedule(void)
120{ 119{
121 if (!rfcomm_thread) 120 if (!rfcomm_thread)
122 return; 121 return;
123 set_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
124 wake_up_process(rfcomm_thread); 122 wake_up_process(rfcomm_thread);
125} 123}
126 124
@@ -466,7 +464,6 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
466 464
467 switch (d->state) { 465 switch (d->state) {
468 case BT_CONNECT: 466 case BT_CONNECT:
469 case BT_CONFIG:
470 if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { 467 if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
471 set_bit(RFCOMM_AUTH_REJECT, &d->flags); 468 set_bit(RFCOMM_AUTH_REJECT, &d->flags);
472 rfcomm_schedule(); 469 rfcomm_schedule();
@@ -2038,19 +2035,18 @@ static int rfcomm_run(void *unused)
2038 2035
2039 rfcomm_add_listener(BDADDR_ANY); 2036 rfcomm_add_listener(BDADDR_ANY);
2040 2037
2041 while (!kthread_should_stop()) { 2038 while (1) {
2042 set_current_state(TASK_INTERRUPTIBLE); 2039 set_current_state(TASK_INTERRUPTIBLE);
2043 if (!test_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event)) { 2040
2044 /* No pending events. Let's sleep. 2041 if (kthread_should_stop())
2045 * Incoming connections and data will wake us up. */ 2042 break;
2046 schedule();
2047 }
2048 set_current_state(TASK_RUNNING);
2049 2043
2050 /* Process stuff */ 2044 /* Process stuff */
2051 clear_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
2052 rfcomm_process_sessions(); 2045 rfcomm_process_sessions();
2046
2047 schedule();
2053 } 2048 }
2049 __set_current_state(TASK_RUNNING);
2054 2050
2055 rfcomm_kill_listener(); 2051 rfcomm_kill_listener();
2056 2052
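The rewritten rfcomm_run() loop closes a shutdown race and drops private state at once: kthread_should_stop() is now tested only after the task is marked TASK_INTERRUPTIBLE, so a concurrent kthread_stop() cannot fall between check and sleep, and the RFCOMM_SCHED_WAKEUP bit becomes unnecessary because wake_up_process() on its own already makes the following schedule() return immediately. The canonical shape, as a sketch:

#include <linux/kthread.h>
#include <linux/sched.h>

/* Sketch: stop-safe kthread main loop. Marking the task INTERRUPTIBLE
 * before testing the exit condition means a wakeup that lands after the
 * test still cancels the sleep instead of being lost.
 */
static int worker_thread(void *unused)
{
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop())
			break;

		/* process pending work, then sleep until the next wakeup */
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}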
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 1b10727ce52..b02f0d47ab8 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -485,11 +485,6 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
485 485
486 lock_sock(sk); 486 lock_sock(sk);
487 487
488 if (sk->sk_state != BT_LISTEN) {
489 err = -EBADFD;
490 goto done;
491 }
492
493 if (sk->sk_type != SOCK_STREAM) { 488 if (sk->sk_type != SOCK_STREAM) {
494 err = -EINVAL; 489 err = -EINVAL;
495 goto done; 490 goto done;
@@ -501,19 +496,20 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
501 496
502 /* Wait for an incoming connection. (wake-one). */ 497 /* Wait for an incoming connection. (wake-one). */
503 add_wait_queue_exclusive(sk_sleep(sk), &wait); 498 add_wait_queue_exclusive(sk_sleep(sk), &wait);
504 while (!(nsk = bt_accept_dequeue(sk, newsock))) { 499 while (1) {
505 set_current_state(TASK_INTERRUPTIBLE); 500 set_current_state(TASK_INTERRUPTIBLE);
506 if (!timeo) { 501
507 err = -EAGAIN; 502 if (sk->sk_state != BT_LISTEN) {
503 err = -EBADFD;
508 break; 504 break;
509 } 505 }
510 506
511 release_sock(sk); 507 nsk = bt_accept_dequeue(sk, newsock);
512 timeo = schedule_timeout(timeo); 508 if (nsk)
513 lock_sock(sk); 509 break;
514 510
515 if (sk->sk_state != BT_LISTEN) { 511 if (!timeo) {
516 err = -EBADFD; 512 err = -EAGAIN;
517 break; 513 break;
518 } 514 }
519 515
@@ -521,8 +517,12 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
521 err = sock_intr_errno(timeo); 517 err = sock_intr_errno(timeo);
522 break; 518 break;
523 } 519 }
520
521 release_sock(sk);
522 timeo = schedule_timeout(timeo);
523 lock_sock(sk);
524 } 524 }
525 set_current_state(TASK_RUNNING); 525 __set_current_state(TASK_RUNNING);
526 remove_wait_queue(sk_sleep(sk), &wait); 526 remove_wait_queue(sk_sleep(sk), &wait);
527 527
528 if (err) 528 if (err)
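rfcomm_sock_accept() gets the same ordering fix: every exit condition, including the BT_LISTEN check that used to sit before the loop, is re-evaluated after setting TASK_INTERRUPTIBLE, since the state can change while the socket lock is dropped around schedule_timeout(). sco_sock_accept() below is restructured identically; the shared skeleton:

/* Sketch: accept wait loop as used by the rfcomm and sco paths. All
 * tests happen with the task already INTERRUPTIBLE, and the socket lock
 * is released only for the actual sleep.
 */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (1) {
	set_current_state(TASK_INTERRUPTIBLE);

	if (sk->sk_state != BT_LISTEN) {	/* may change while unlocked */
		err = -EBADFD;
		break;
	}

	nsk = bt_accept_dequeue(sk, newsock);
	if (nsk)
		break;

	if (!timeo) {
		err = -EAGAIN;
		break;
	}

	if (signal_pending(current)) {
		err = sock_intr_errno(timeo);
		break;
	}

	release_sock(sk);
	timeo = schedule_timeout(timeo);
	lock_sock(sk);
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);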
@@ -679,7 +679,8 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
679{ 679{
680 struct sock *sk = sock->sk; 680 struct sock *sk = sock->sk;
681 struct bt_security sec; 681 struct bt_security sec;
682 int len, err = 0; 682 int err = 0;
683 size_t len;
683 u32 opt; 684 u32 opt;
684 685
685 BT_DBG("sk %p", sk); 686 BT_DBG("sk %p", sk);
@@ -741,7 +742,6 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
741static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) 742static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
742{ 743{
743 struct sock *sk = sock->sk; 744 struct sock *sk = sock->sk;
744 struct sock *l2cap_sk;
745 struct rfcomm_conninfo cinfo; 745 struct rfcomm_conninfo cinfo;
746 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn; 746 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
747 int len, err = 0; 747 int len, err = 0;
@@ -786,7 +786,6 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
786 break; 786 break;
787 } 787 }
788 788
789 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
790 789
791 memset(&cinfo, 0, sizeof(cinfo)); 790 memset(&cinfo, 0, sizeof(cinfo));
792 cinfo.hci_handle = conn->hcon->handle; 791 cinfo.hci_handle = conn->hcon->handle;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index cb4fb7837e5..d3d48b5b542 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -177,6 +177,7 @@ static int sco_connect(struct sock *sk)
177{ 177{
178 bdaddr_t *src = &bt_sk(sk)->src; 178 bdaddr_t *src = &bt_sk(sk)->src;
179 bdaddr_t *dst = &bt_sk(sk)->dst; 179 bdaddr_t *dst = &bt_sk(sk)->dst;
180 __u16 pkt_type = sco_pi(sk)->pkt_type;
180 struct sco_conn *conn; 181 struct sco_conn *conn;
181 struct hci_conn *hcon; 182 struct hci_conn *hcon;
182 struct hci_dev *hdev; 183 struct hci_dev *hdev;
@@ -192,10 +193,12 @@ static int sco_connect(struct sock *sk)
192 193
193 if (lmp_esco_capable(hdev) && !disable_esco) 194 if (lmp_esco_capable(hdev) && !disable_esco)
194 type = ESCO_LINK; 195 type = ESCO_LINK;
195 else 196 else {
196 type = SCO_LINK; 197 type = SCO_LINK;
198 pkt_type &= SCO_ESCO_MASK;
199 }
197 200
198 hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING); 201 hcon = hci_connect(hdev, type, pkt_type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
199 if (IS_ERR(hcon)) { 202 if (IS_ERR(hcon)) {
200 err = PTR_ERR(hcon); 203 err = PTR_ERR(hcon);
201 goto done; 204 goto done;
@@ -460,18 +463,22 @@ static int sco_sock_create(struct net *net, struct socket *sock, int protocol,
460 return 0; 463 return 0;
461} 464}
462 465
463static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) 466static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
464{ 467{
465 struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; 468 struct sockaddr_sco sa;
466 struct sock *sk = sock->sk; 469 struct sock *sk = sock->sk;
467 bdaddr_t *src = &sa->sco_bdaddr; 470 bdaddr_t *src = &sa.sco_bdaddr;
468 int err = 0; 471 int len, err = 0;
469 472
470 BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr)); 473 BT_DBG("sk %p %s", sk, batostr(&sa.sco_bdaddr));
471 474
472 if (!addr || addr->sa_family != AF_BLUETOOTH) 475 if (!addr || addr->sa_family != AF_BLUETOOTH)
473 return -EINVAL; 476 return -EINVAL;
474 477
478 memset(&sa, 0, sizeof(sa));
479 len = min_t(unsigned int, sizeof(sa), alen);
480 memcpy(&sa, addr, len);
481
475 lock_sock(sk); 482 lock_sock(sk);
476 483
477 if (sk->sk_state != BT_OPEN) { 484 if (sk->sk_state != BT_OPEN) {
@@ -485,7 +492,8 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
485 err = -EADDRINUSE; 492 err = -EADDRINUSE;
486 } else { 493 } else {
487 /* Save source address */ 494 /* Save source address */
488 bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr); 495 bacpy(&bt_sk(sk)->src, &sa.sco_bdaddr);
496 sco_pi(sk)->pkt_type = sa.sco_pkt_type;
489 sk->sk_state = BT_BOUND; 497 sk->sk_state = BT_BOUND;
490 } 498 }
491 499
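sco_sock_bind() and sco_sock_connect() now copy the user sockaddr into a zeroed local struct through min_t() instead of casting the caller's pointer: a short address reads as zeroes rather than adjacent memory, an over-long one is truncated, and the new sco_pkt_type tail field is picked up when the caller provides it. The defensive-copy idiom in isolation:

/* Sketch: tolerant ingestion of a sockaddr with optional tail fields. */
struct sockaddr_sco sa;
int len;

memset(&sa, 0, sizeof(sa));			/* absent fields read as zero */
len = min_t(unsigned int, sizeof(sa), alen);	/* bounded by both sides */
memcpy(&sa, addr, len);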
@@ -498,27 +506,34 @@ done:
498 506
499static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) 507static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
500{ 508{
501 struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
502 struct sock *sk = sock->sk; 509 struct sock *sk = sock->sk;
503 int err = 0; 510 struct sockaddr_sco sa;
504 511 int len, err = 0;
505 512
506 BT_DBG("sk %p", sk); 513 BT_DBG("sk %p", sk);
507 514
508 if (alen < sizeof(struct sockaddr_sco) || 515 if (!addr || addr->sa_family != AF_BLUETOOTH)
509 addr->sa_family != AF_BLUETOOTH)
510 return -EINVAL; 516 return -EINVAL;
511 517
512 if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) 518 memset(&sa, 0, sizeof(sa));
513 return -EBADFD; 519 len = min_t(unsigned int, sizeof(sa), alen);
514 520 memcpy(&sa, addr, len);
515 if (sk->sk_type != SOCK_SEQPACKET)
516 return -EINVAL;
517 521
518 lock_sock(sk); 522 lock_sock(sk);
519 523
524 if (sk->sk_type != SOCK_SEQPACKET) {
525 err = -EINVAL;
526 goto done;
527 }
528
529 if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
530 err = -EBADFD;
531 goto done;
532 }
533
520 /* Set destination address and psm */ 534 /* Set destination address and psm */
521 bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr); 535 bacpy(&bt_sk(sk)->dst, &sa.sco_bdaddr);
536 sco_pi(sk)->pkt_type = sa.sco_pkt_type;
522 537
523 err = sco_connect(sk); 538 err = sco_connect(sk);
524 if (err) 539 if (err)
@@ -564,30 +579,26 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
564 579
565 lock_sock(sk); 580 lock_sock(sk);
566 581
567 if (sk->sk_state != BT_LISTEN) {
568 err = -EBADFD;
569 goto done;
570 }
571
572 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 582 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
573 583
574 BT_DBG("sk %p timeo %ld", sk, timeo); 584 BT_DBG("sk %p timeo %ld", sk, timeo);
575 585
576 /* Wait for an incoming connection. (wake-one). */ 586 /* Wait for an incoming connection. (wake-one). */
577 add_wait_queue_exclusive(sk_sleep(sk), &wait); 587 add_wait_queue_exclusive(sk_sleep(sk), &wait);
578 while (!(ch = bt_accept_dequeue(sk, newsock))) { 588 while (1) {
579 set_current_state(TASK_INTERRUPTIBLE); 589 set_current_state(TASK_INTERRUPTIBLE);
580 if (!timeo) { 590
581 err = -EAGAIN; 591 if (sk->sk_state != BT_LISTEN) {
592 err = -EBADFD;
582 break; 593 break;
583 } 594 }
584 595
585 release_sock(sk); 596 ch = bt_accept_dequeue(sk, newsock);
586 timeo = schedule_timeout(timeo); 597 if (ch)
587 lock_sock(sk); 598 break;
588 599
589 if (sk->sk_state != BT_LISTEN) { 600 if (!timeo) {
590 err = -EBADFD; 601 err = -EAGAIN;
591 break; 602 break;
592 } 603 }
593 604
@@ -595,8 +606,12 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
595 err = sock_intr_errno(timeo); 606 err = sock_intr_errno(timeo);
596 break; 607 break;
597 } 608 }
609
610 release_sock(sk);
611 timeo = schedule_timeout(timeo);
612 lock_sock(sk);
598 } 613 }
599 set_current_state(TASK_RUNNING); 614 __set_current_state(TASK_RUNNING);
600 remove_wait_queue(sk_sleep(sk), &wait); 615 remove_wait_queue(sk_sleep(sk), &wait);
601 616
602 if (err) 617 if (err)
@@ -625,6 +640,7 @@ static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int *len
625 bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst); 640 bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst);
626 else 641 else
627 bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src); 642 bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src);
643 sa->sco_pkt_type = sco_pi(sk)->pkt_type;
628 644
629 return 0; 645 return 0;
630} 646}
@@ -932,7 +948,7 @@ static int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
932 if (conn) 948 if (conn)
933 sco_conn_ready(conn); 949 sco_conn_ready(conn);
934 } else 950 } else
935 sco_conn_del(hcon, bt_err(status)); 951 sco_conn_del(hcon, bt_to_errno(status));
936 952
937 return 0; 953 return 0;
938} 954}
@@ -944,7 +960,7 @@ static int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
944 if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) 960 if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
945 return -EINVAL; 961 return -EINVAL;
946 962
947 sco_conn_del(hcon, bt_err(reason)); 963 sco_conn_del(hcon, bt_to_errno(reason));
948 964
949 return 0; 965 return 0;
950} 966}
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
new file mode 100644
index 00000000000..391888b88a9
--- /dev/null
+++ b/net/bluetooth/smp.c
@@ -0,0 +1,702 @@
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License version 2 as
7 published by the Free Software Foundation;
8
9 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
10 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
11 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
12 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
13 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17
18 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
19 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
20 SOFTWARE IS DISCLAIMED.
21*/
22
23#include <net/bluetooth/bluetooth.h>
24#include <net/bluetooth/hci_core.h>
25#include <net/bluetooth/l2cap.h>
26#include <net/bluetooth/smp.h>
27#include <linux/crypto.h>
28#include <linux/scatterlist.h>
29#include <crypto/b128ops.h>
30
31#define SMP_TIMEOUT 30000 /* 30 seconds */
32
33static inline void swap128(u8 src[16], u8 dst[16])
34{
35 int i;
36 for (i = 0; i < 16; i++)
37 dst[15 - i] = src[i];
38}
39
40static inline void swap56(u8 src[7], u8 dst[7])
41{
42 int i;
43 for (i = 0; i < 7; i++)
44 dst[6 - i] = src[i];
45}
46
47static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
48{
49 struct blkcipher_desc desc;
50 struct scatterlist sg;
51 int err, iv_len;
52 unsigned char iv[128];
53
54 if (tfm == NULL) {
55 BT_ERR("tfm %p", tfm);
56 return -EINVAL;
57 }
58
59 desc.tfm = tfm;
60 desc.flags = 0;
61
62 err = crypto_blkcipher_setkey(tfm, k, 16);
63 if (err) {
64 BT_ERR("cipher setkey failed: %d", err);
65 return err;
66 }
67
68 sg_init_one(&sg, r, 16);
69
70 iv_len = crypto_blkcipher_ivsize(tfm);
71 if (iv_len) {
72 memset(&iv, 0xff, iv_len);
73 crypto_blkcipher_set_iv(tfm, iv, iv_len);
74 }
75
76 err = crypto_blkcipher_encrypt(&desc, &sg, &sg, 16);
77 if (err)
78 BT_ERR("Encrypt data error %d", err);
79
80 return err;
81}
82
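smp_e() is the SMP security function e: one AES-128 block encryption through the kernel crypto API, done in place on r (source and destination scatterlist are the same). The tfm comes from hdev->tfm, allocated elsewhere in this patch set, presumably as ecb(aes); for ECB, crypto_blkcipher_ivsize() returns 0, so the IV branch only fires if a chaining mode were ever substituted. One-shot usage, as a sketch:

/* Sketch: r is overwritten with AES-128_k(r) on success. */
u8 k[16] = { 0 }, r[16] = { 0 };
int err;

err = smp_e(conn->hcon->hdev->tfm, k, r);	/* on success r := AES-128_k(r) */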
83static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16],
84 u8 preq[7], u8 pres[7], u8 _iat, bdaddr_t *ia,
85 u8 _rat, bdaddr_t *ra, u8 res[16])
86{
87 u8 p1[16], p2[16];
88 int err;
89
90 memset(p1, 0, 16);
91
92 /* p1 = pres || preq || _rat || _iat */
93 swap56(pres, p1);
94 swap56(preq, p1 + 7);
95 p1[14] = _rat;
96 p1[15] = _iat;
97
98 memset(p2, 0, 16);
99
100 /* p2 = padding || ia || ra */
101 baswap((bdaddr_t *) (p2 + 4), ia);
102 baswap((bdaddr_t *) (p2 + 10), ra);
103
104 /* res = r XOR p1 */
105 u128_xor((u128 *) res, (u128 *) r, (u128 *) p1);
106
107 /* res = e(k, res) */
108 err = smp_e(tfm, k, res);
109 if (err) {
110 BT_ERR("Encrypt data error");
111 return err;
112 }
113
114 /* res = res XOR p2 */
115 u128_xor((u128 *) res, (u128 *) res, (u128 *) p2);
116
117 /* res = e(k, res) */
118 err = smp_e(tfm, k, res);
119 if (err)
120 BT_ERR("Encrypt data error");
121
122 return err;
123}
124
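smp_c1() transcribes the confirm-value function c1 from the SMP specification; the byte swaps exist because SMP transmits values little-endian while the crypto layer works on big-endian buffers. Written out:

	p1 = pres || preq || rat' || iat'	(padded to 16 octets)
	p2 = padding || ia || ra		(16 octets)
	c1(k, r) = e(k, e(k, r XOR p1) XOR p2)

with e(k, x) the AES-128 encryption of block x under key k, i.e. smp_e() above. The two u128_xor()/smp_e() pairs in the body are exactly these two rounds.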
125static int smp_s1(struct crypto_blkcipher *tfm, u8 k[16],
126 u8 r1[16], u8 r2[16], u8 _r[16])
127{
128 int err;
129
130 /* Only the least significant octets of r1 and r2 are used */
131 memcpy(_r, r1 + 8, 8);
132 memcpy(_r + 8, r2 + 8, 8);
133
134 err = smp_e(tfm, k, _r);
135 if (err)
136 BT_ERR("Encrypt data error");
137
138 return err;
139}
140
141static int smp_rand(u8 *buf)
142{
143 get_random_bytes(buf, 16);
144
145 return 0;
146}
147
148static struct sk_buff *smp_build_cmd(struct l2cap_conn *conn, u8 code,
149 u16 dlen, void *data)
150{
151 struct sk_buff *skb;
152 struct l2cap_hdr *lh;
153 int len;
154
155 len = L2CAP_HDR_SIZE + sizeof(code) + dlen;
156
157 if (len > conn->mtu)
158 return NULL;
159
160 skb = bt_skb_alloc(len, GFP_ATOMIC);
161 if (!skb)
162 return NULL;
163
164 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
165 lh->len = cpu_to_le16(sizeof(code) + dlen);
166 lh->cid = cpu_to_le16(L2CAP_CID_SMP);
167
168 memcpy(skb_put(skb, sizeof(code)), &code, sizeof(code));
169
170 memcpy(skb_put(skb, dlen), data, dlen);
171
172 return skb;
173}
174
175static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
176{
177 struct sk_buff *skb = smp_build_cmd(conn, code, len, data);
178
179 BT_DBG("code 0x%2.2x", code);
180
181 if (!skb)
182 return;
183
184 hci_send_acl(conn->hcon, skb, 0);
185}
186
187static __u8 seclevel_to_authreq(__u8 level)
188{
189 switch (level) {
190 case BT_SECURITY_HIGH:
191 /* Right now we don't support bonding */
192 return SMP_AUTH_MITM;
193
194 default:
195 return SMP_AUTH_NONE;
196 }
197}
198
199static void build_pairing_cmd(struct l2cap_conn *conn,
200 struct smp_cmd_pairing *req,
201 struct smp_cmd_pairing *rsp,
202 __u8 authreq)
203{
204 u8 dist_keys;
205
206 dist_keys = 0;
207 if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->flags)) {
208 dist_keys = SMP_DIST_ENC_KEY | SMP_DIST_ID_KEY | SMP_DIST_SIGN;
209 authreq |= SMP_AUTH_BONDING;
210 }
211
212 if (rsp == NULL) {
213 req->io_capability = conn->hcon->io_capability;
214 req->oob_flag = SMP_OOB_NOT_PRESENT;
215 req->max_key_size = SMP_MAX_ENC_KEY_SIZE;
216 req->init_key_dist = dist_keys;
217 req->resp_key_dist = dist_keys;
218 req->auth_req = authreq;
219 return;
220 }
221
222 rsp->io_capability = conn->hcon->io_capability;
223 rsp->oob_flag = SMP_OOB_NOT_PRESENT;
224 rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
225 rsp->init_key_dist = req->init_key_dist & dist_keys;
226 rsp->resp_key_dist = req->resp_key_dist & dist_keys;
227 rsp->auth_req = authreq;
228}
229
230static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
231{
232 if ((max_key_size > SMP_MAX_ENC_KEY_SIZE) ||
233 (max_key_size < SMP_MIN_ENC_KEY_SIZE))
234 return SMP_ENC_KEY_SIZE;
235
236 conn->smp_key_size = max_key_size;
237
238 return 0;
239}
240
241static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
242{
243 struct smp_cmd_pairing rsp, *req = (void *) skb->data;
244 u8 key_size;
245
246 BT_DBG("conn %p", conn);
247
248 conn->preq[0] = SMP_CMD_PAIRING_REQ;
249 memcpy(&conn->preq[1], req, sizeof(*req));
250 skb_pull(skb, sizeof(*req));
251
252 if (req->oob_flag)
253 return SMP_OOB_NOT_AVAIL;
254
255 /* We didn't start the pairing, so no requirements */
256 build_pairing_cmd(conn, req, &rsp, SMP_AUTH_NONE);
257
258 key_size = min(req->max_key_size, rsp.max_key_size);
259 if (check_enc_key_size(conn, key_size))
260 return SMP_ENC_KEY_SIZE;
261
262 /* Just works */
263 memset(conn->tk, 0, sizeof(conn->tk));
264
265 conn->prsp[0] = SMP_CMD_PAIRING_RSP;
266 memcpy(&conn->prsp[1], &rsp, sizeof(rsp));
267
268 smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp);
269
270 mod_timer(&conn->security_timer, jiffies +
271 msecs_to_jiffies(SMP_TIMEOUT));
272
273 return 0;
274}
275
276static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
277{
278 struct smp_cmd_pairing *req, *rsp = (void *) skb->data;
279 struct smp_cmd_pairing_confirm cp;
280 struct crypto_blkcipher *tfm = conn->hcon->hdev->tfm;
281 int ret;
282 u8 res[16], key_size;
283
284 BT_DBG("conn %p", conn);
285
286 skb_pull(skb, sizeof(*rsp));
287
288 req = (void *) &conn->preq[1];
289
290 key_size = min(req->max_key_size, rsp->max_key_size);
291 if (check_enc_key_size(conn, key_size))
292 return SMP_ENC_KEY_SIZE;
293
294 if (rsp->oob_flag)
295 return SMP_OOB_NOT_AVAIL;
296
297 /* Just works */
298 memset(conn->tk, 0, sizeof(conn->tk));
299
300 conn->prsp[0] = SMP_CMD_PAIRING_RSP;
301 memcpy(&conn->prsp[1], rsp, sizeof(*rsp));
302
303 ret = smp_rand(conn->prnd);
304 if (ret)
305 return SMP_UNSPECIFIED;
306
307 ret = smp_c1(tfm, conn->tk, conn->prnd, conn->preq, conn->prsp, 0,
308 conn->src, conn->hcon->dst_type, conn->dst, res);
309 if (ret)
310 return SMP_UNSPECIFIED;
311
312 swap128(res, cp.confirm_val);
313
314 smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
315
316 return 0;
317}
318
319static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
320{
321 struct crypto_blkcipher *tfm = conn->hcon->hdev->tfm;
322
323 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
324
325 memcpy(conn->pcnf, skb->data, sizeof(conn->pcnf));
326 skb_pull(skb, sizeof(conn->pcnf));
327
328 if (conn->hcon->out) {
329 u8 random[16];
330
331 swap128(conn->prnd, random);
332 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(random),
333 random);
334 } else {
335 struct smp_cmd_pairing_confirm cp;
336 int ret;
337 u8 res[16];
338
339 ret = smp_rand(conn->prnd);
340 if (ret)
341 return SMP_UNSPECIFIED;
342
343 ret = smp_c1(tfm, conn->tk, conn->prnd, conn->preq, conn->prsp,
344 conn->hcon->dst_type, conn->dst,
345 0, conn->src, res);
346 if (ret)
347 return SMP_CONFIRM_FAILED;
348
349 swap128(res, cp.confirm_val);
350
351 smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
352 }
353
354 mod_timer(&conn->security_timer, jiffies +
355 msecs_to_jiffies(SMP_TIMEOUT));
356
357 return 0;
358}
359
360static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
361{
362 struct hci_conn *hcon = conn->hcon;
363 struct crypto_blkcipher *tfm = hcon->hdev->tfm;
364 int ret;
365 u8 key[16], res[16], random[16], confirm[16];
366
367 swap128(skb->data, random);
368 skb_pull(skb, sizeof(random));
369
370 if (conn->hcon->out)
371 ret = smp_c1(tfm, conn->tk, random, conn->preq, conn->prsp, 0,
372 conn->src, conn->hcon->dst_type, conn->dst,
373 res);
374 else
375 ret = smp_c1(tfm, conn->tk, random, conn->preq, conn->prsp,
376 conn->hcon->dst_type, conn->dst, 0, conn->src,
377 res);
378 if (ret)
379 return SMP_UNSPECIFIED;
380
381 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
382
383 swap128(res, confirm);
384
385 if (memcmp(conn->pcnf, confirm, sizeof(conn->pcnf)) != 0) {
386 BT_ERR("Pairing failed (confirmation values mismatch)");
387 return SMP_CONFIRM_FAILED;
388 }
389
390 if (conn->hcon->out) {
391 u8 stk[16], rand[8];
392 __le16 ediv;
393
394 memset(rand, 0, sizeof(rand));
395 ediv = 0;
396
397 smp_s1(tfm, conn->tk, random, conn->prnd, key);
398 swap128(key, stk);
399
400 memset(stk + conn->smp_key_size, 0,
401 SMP_MAX_ENC_KEY_SIZE - conn->smp_key_size);
402
403 hci_le_start_enc(hcon, ediv, rand, stk);
404 hcon->enc_key_size = conn->smp_key_size;
405 } else {
406 u8 stk[16], r[16], rand[8];
407 __le16 ediv;
408
409 memset(rand, 0, sizeof(rand));
410 ediv = 0;
411
412 swap128(conn->prnd, r);
413 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(r), r);
414
415 smp_s1(tfm, conn->tk, conn->prnd, random, key);
416 swap128(key, stk);
417
418 memset(stk + conn->smp_key_size, 0,
419 SMP_MAX_ENC_KEY_SIZE - conn->smp_key_size);
420
421 hci_add_ltk(conn->hcon->hdev, 0, conn->dst, conn->smp_key_size,
422 ediv, rand, stk);
423 }
424
425 return 0;
426}
427
428static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
429{
430 struct smp_cmd_security_req *rp = (void *) skb->data;
431 struct smp_cmd_pairing cp;
432 struct hci_conn *hcon = conn->hcon;
433
434 BT_DBG("conn %p", conn);
435
436 if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
437 return 0;
438
439 skb_pull(skb, sizeof(*rp));
440
441 memset(&cp, 0, sizeof(cp));
442 build_pairing_cmd(conn, &cp, NULL, rp->auth_req);
443
444 conn->preq[0] = SMP_CMD_PAIRING_REQ;
445 memcpy(&conn->preq[1], &cp, sizeof(cp));
446
447 smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
448
449 mod_timer(&conn->security_timer, jiffies +
450 msecs_to_jiffies(SMP_TIMEOUT));
451
452 set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
453
454 return 0;
455}
456
457int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
458{
459 struct hci_conn *hcon = conn->hcon;
460 __u8 authreq;
461
462 BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level);
463
464 if (!lmp_host_le_capable(hcon->hdev))
465 return 1;
466
467 if (IS_ERR(hcon->hdev->tfm))
468 return 1;
469
470 if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
471 return 0;
472
473 if (sec_level == BT_SECURITY_LOW)
474 return 1;
475
476 if (hcon->sec_level >= sec_level)
477 return 1;
478
479 authreq = seclevel_to_authreq(sec_level);
480
481 if (hcon->link_mode & HCI_LM_MASTER) {
482 struct smp_cmd_pairing cp;
483 struct link_key *key;
484
485 key = hci_find_link_key_type(hcon->hdev, conn->dst,
486 HCI_LK_SMP_LTK);
487 if (key) {
488 struct key_master_id *master = (void *) key->data;
489
490 hci_le_start_enc(hcon, master->ediv, master->rand,
491 key->val);
492 hcon->enc_key_size = key->pin_len;
493
494 goto done;
495 }
496
497 build_pairing_cmd(conn, &cp, NULL, authreq);
498 conn->preq[0] = SMP_CMD_PAIRING_REQ;
499 memcpy(&conn->preq[1], &cp, sizeof(cp));
500
501 mod_timer(&conn->security_timer, jiffies +
502 msecs_to_jiffies(SMP_TIMEOUT));
503
504 smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
505 } else {
506 struct smp_cmd_security_req cp;
507 cp.auth_req = authreq;
508 smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
509 }
510
511done:
512 hcon->pending_sec_level = sec_level;
513 set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
514
515 return 0;
516}
517
518static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
519{
520 struct smp_cmd_encrypt_info *rp = (void *) skb->data;
521
522 skb_pull(skb, sizeof(*rp));
523
524 memcpy(conn->tk, rp->ltk, sizeof(conn->tk));
525
526 return 0;
527}
528
529static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
530{
531 struct smp_cmd_master_ident *rp = (void *) skb->data;
532
533 skb_pull(skb, sizeof(*rp));
534
535 hci_add_ltk(conn->hcon->hdev, 1, conn->src, conn->smp_key_size,
536 rp->ediv, rp->rand, conn->tk);
537
538 smp_distribute_keys(conn, 1);
539
540 return 0;
541}
542
543int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
544{
545 __u8 code = skb->data[0];
546 __u8 reason;
547 int err = 0;
548
549 if (!lmp_host_le_capable(conn->hcon->hdev)) {
550 err = -ENOTSUPP;
551 reason = SMP_PAIRING_NOTSUPP;
552 goto done;
553 }
554
555 if (IS_ERR(conn->hcon->hdev->tfm)) {
556 err = PTR_ERR(conn->hcon->hdev->tfm);
557 reason = SMP_PAIRING_NOTSUPP;
558 goto done;
559 }
560
561 skb_pull(skb, sizeof(code));
562
563 switch (code) {
564 case SMP_CMD_PAIRING_REQ:
565 reason = smp_cmd_pairing_req(conn, skb);
566 break;
567
568 case SMP_CMD_PAIRING_FAIL:
569 reason = 0;
570 err = -EPERM;
571 break;
572
573 case SMP_CMD_PAIRING_RSP:
574 reason = smp_cmd_pairing_rsp(conn, skb);
575 break;
576
577 case SMP_CMD_SECURITY_REQ:
578 reason = smp_cmd_security_req(conn, skb);
579 break;
580
581 case SMP_CMD_PAIRING_CONFIRM:
582 reason = smp_cmd_pairing_confirm(conn, skb);
583 break;
584
585 case SMP_CMD_PAIRING_RANDOM:
586 reason = smp_cmd_pairing_random(conn, skb);
587 break;
588
589 case SMP_CMD_ENCRYPT_INFO:
590 reason = smp_cmd_encrypt_info(conn, skb);
591 break;
592
593 case SMP_CMD_MASTER_IDENT:
594 reason = smp_cmd_master_ident(conn, skb);
595 break;
596
597 case SMP_CMD_IDENT_INFO:
598 case SMP_CMD_IDENT_ADDR_INFO:
599 case SMP_CMD_SIGN_INFO:
600 /* Just ignored */
601 reason = 0;
602 break;
603
604 default:
605 BT_DBG("Unknown command code 0x%2.2x", code);
606
607 reason = SMP_CMD_NOTSUPP;
608 err = -EOPNOTSUPP;
609 goto done;
610 }
611
612done:
613 if (reason)
614 smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
615 &reason);
616
617 kfree_skb(skb);
618 return err;
619}
620
621int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
622{
623 struct smp_cmd_pairing *req, *rsp;
624 __u8 *keydist;
625
626 BT_DBG("conn %p force %d", conn, force);
627
628 if (IS_ERR(conn->hcon->hdev->tfm))
629 return PTR_ERR(conn->hcon->hdev->tfm);
630
631 rsp = (void *) &conn->prsp[1];
632
633 /* The responder sends its keys first */
634 if (!force && conn->hcon->out && (rsp->resp_key_dist & 0x07))
635 return 0;
636
637 req = (void *) &conn->preq[1];
638
639 if (conn->hcon->out) {
640 keydist = &rsp->init_key_dist;
641 *keydist &= req->init_key_dist;
642 } else {
643 keydist = &rsp->resp_key_dist;
644 *keydist &= req->resp_key_dist;
645 }
646
647
648 BT_DBG("keydist 0x%x", *keydist);
649
650 if (*keydist & SMP_DIST_ENC_KEY) {
651 struct smp_cmd_encrypt_info enc;
652 struct smp_cmd_master_ident ident;
653 __le16 ediv;
654
655 get_random_bytes(enc.ltk, sizeof(enc.ltk));
656 get_random_bytes(&ediv, sizeof(ediv));
657 get_random_bytes(ident.rand, sizeof(ident.rand));
658
659 smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc);
660
661 hci_add_ltk(conn->hcon->hdev, 1, conn->dst, conn->smp_key_size,
662 ediv, ident.rand, enc.ltk);
663
664 ident.ediv = cpu_to_le16(ediv);
665
666 smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident);
667
668 *keydist &= ~SMP_DIST_ENC_KEY;
669 }
670
671 if (*keydist & SMP_DIST_ID_KEY) {
672 struct smp_cmd_ident_addr_info addrinfo;
673 struct smp_cmd_ident_info idinfo;
674
675 /* Send a dummy key */
676 get_random_bytes(idinfo.irk, sizeof(idinfo.irk));
677
678 smp_send_cmd(conn, SMP_CMD_IDENT_INFO, sizeof(idinfo), &idinfo);
679
680 /* Just public address */
681 memset(&addrinfo, 0, sizeof(addrinfo));
682 bacpy(&addrinfo.bdaddr, conn->src);
683
684 smp_send_cmd(conn, SMP_CMD_IDENT_ADDR_INFO, sizeof(addrinfo),
685 &addrinfo);
686
687 *keydist &= ~SMP_DIST_ID_KEY;
688 }
689
690 if (*keydist & SMP_DIST_SIGN) {
691 struct smp_cmd_sign_info sign;
692
693 /* Send a dummy key */
694 get_random_bytes(sign.csrk, sizeof(sign.csrk));
695
696 smp_send_cmd(conn, SMP_CMD_SIGN_INFO, sizeof(sign), &sign);
697
698 *keydist &= ~SMP_DIST_SIGN;
699 }
700
701 return 0;
702}
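The two smp_e() rounds in smp_c1() above implement the confirm-value function c1 from the Bluetooth Core Specification (Vol 3, Part H): res = e(k, e(k, r XOR p1) XOR p2), where p1 packs the pairing request/response and address types and p2 packs the two device addresses. A minimal host-side sketch of the same construction, assuming a hypothetical aes128_ecb() helper in place of the kernel's "ecb(aes)" blkcipher transform:

/*
 * Hedged sketch of SMP c1; aes128_ecb() is an assumed stand-in for the
 * kernel crypto transform used by smp_e() above.
 */
#include <stdint.h>

void aes128_ecb(const uint8_t key[16], uint8_t block[16]); /* assumed */

static void xor128(uint8_t *dst, const uint8_t *a, const uint8_t *b)
{
        int i;

        for (i = 0; i < 16; i++)
                dst[i] = a[i] ^ b[i];
}

void c1(const uint8_t k[16], const uint8_t r[16],
        const uint8_t p1[16], const uint8_t p2[16], uint8_t res[16])
{
        xor128(res, r, p1);     /* res = r XOR p1 */
        aes128_ecb(k, res);     /* res = e(k, res) */
        xor128(res, res, p2);   /* res = res XOR p2 */
        aes128_ecb(k, res);     /* res = e(k, res), the confirm value */
}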
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index ff3ed6086ce..dac6a214746 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -38,16 +38,17 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
38 } 38 }
39#endif 39#endif
40 40
41 u64_stats_update_begin(&brstats->syncp);
42 brstats->tx_packets++;
43 brstats->tx_bytes += skb->len;
44 u64_stats_update_end(&brstats->syncp);
45
46 BR_INPUT_SKB_CB(skb)->brdev = dev; 41 BR_INPUT_SKB_CB(skb)->brdev = dev;
47 42
48 skb_reset_mac_header(skb); 43 skb_reset_mac_header(skb);
49 skb_pull(skb, ETH_HLEN); 44 skb_pull(skb, ETH_HLEN);
50 45
46 u64_stats_update_begin(&brstats->syncp);
47 brstats->tx_packets++;
48 /* Exclude ETH_HLEN from byte stats for consistency with Rx chain */
49 brstats->tx_bytes += skb->len;
50 u64_stats_update_end(&brstats->syncp);
51
51 rcu_read_lock(); 52 rcu_read_lock();
52 if (is_broadcast_ether_addr(dest)) 53 if (is_broadcast_ether_addr(dest))
53 br_flood_deliver(br, skb); 54 br_flood_deliver(br, skb);
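This hunk moves the per-CPU accounting below skb_pull(skb, ETH_HLEN), so tx_bytes no longer includes the Ethernet header. The u64_stats_update_begin()/u64_stats_update_end() pair marks a writer section so readers on 32-bit systems can fetch a consistent 64-bit snapshot. A minimal sketch of that writer/reader pattern, using an illustrative stats struct rather than the bridge's real one:

/* Hedged sketch of the u64_stats_sync pattern used above;
 * struct demo_stats is illustrative only. */
#include <linux/u64_stats_sync.h>

struct demo_stats {
        u64                     tx_packets;
        u64                     tx_bytes;
        struct u64_stats_sync   syncp;
};

static void demo_tx_account(struct demo_stats *s, unsigned int len)
{
        u64_stats_update_begin(&s->syncp);      /* writer enters */
        s->tx_packets++;
        s->tx_bytes += len;
        u64_stats_update_end(&s->syncp);        /* writer leaves */
}

static u64 demo_read_bytes(struct demo_stats *s)
{
        unsigned int start;
        u64 bytes;

        do {                                    /* retry if a writer raced us */
                start = u64_stats_fetch_begin(&s->syncp);
                bytes = s->tx_bytes;
        } while (u64_stats_fetch_retry(&s->syncp, start));

        return bytes;
}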
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index f2dc69cffb5..681084d76a9 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -14,6 +14,7 @@ obj-y := route.o inetpeer.o protocol.o \
14 inet_fragment.o ping.o 14 inet_fragment.o ping.o
15 15
16obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o 16obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
17obj-$(CONFIG_SYSFS) += sysfs_net_ipv4.o
17obj-$(CONFIG_PROC_FS) += proc.o 18obj-$(CONFIG_PROC_FS) += proc.o
18obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o 19obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
19obj-$(CONFIG_IP_MROUTE) += ipmr.o 20obj-$(CONFIG_IP_MROUTE) += ipmr.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index ef1528af7ab..4d60f12c7b6 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -118,6 +118,19 @@
118#include <linux/mroute.h> 118#include <linux/mroute.h>
119#endif 119#endif
120 120
121#ifdef CONFIG_ANDROID_PARANOID_NETWORK
122#include <linux/android_aid.h>
123
124static inline int current_has_network(void)
125{
126 return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
127}
128#else
129static inline int current_has_network(void)
130{
131 return 1;
132}
133#endif
121 134
122/* The inetsw table contains everything that inet_create needs to 135/* The inetsw table contains everything that inet_create needs to
123 * build a new socket. 136 * build a new socket.
@@ -258,6 +271,7 @@ static inline int inet_netns_ok(struct net *net, int protocol)
258 return ipprot->netns_ok; 271 return ipprot->netns_ok;
259} 272}
260 273
274
261/* 275/*
262 * Create an inet socket. 276 * Create an inet socket.
263 */ 277 */
@@ -274,6 +288,9 @@ static int inet_create(struct net *net, struct socket *sock, int protocol,
274 int try_loading_module = 0; 288 int try_loading_module = 0;
275 int err; 289 int err;
276 290
291 if (!current_has_network())
292 return -EACCES;
293
277 if (unlikely(!inet_ehash_secret)) 294 if (unlikely(!inet_ehash_secret))
278 if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM) 295 if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
279 build_ehash_secret(); 296 build_ehash_secret();
@@ -874,6 +891,7 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
874 case SIOCSIFPFLAGS: 891 case SIOCSIFPFLAGS:
875 case SIOCGIFPFLAGS: 892 case SIOCGIFPFLAGS:
876 case SIOCSIFFLAGS: 893 case SIOCSIFFLAGS:
894 case SIOCKILLADDR:
877 err = devinet_ioctl(net, cmd, (void __user *)arg); 895 err = devinet_ioctl(net, cmd, (void __user *)arg);
878 break; 896 break;
879 default: 897 default:
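With CONFIG_ANDROID_PARANOID_NETWORK enabled, inet_create() now rejects callers that are neither in the AID_INET group nor holders of CAP_NET_RAW, before any protocol lookup happens. From user space this surfaces as socket() failing outright; a hedged illustration (AID_INET is Android's "inet" group, gid 3003):

/* Userspace view of the current_has_network() gate added above. */
#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0 && errno == EACCES)
                printf("not in the inet group and no CAP_NET_RAW\n");

        return 0;
}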
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 7d7fb20b0a1..c48323ad268 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -59,6 +59,7 @@
59 59
60#include <net/arp.h> 60#include <net/arp.h>
61#include <net/ip.h> 61#include <net/ip.h>
62#include <net/tcp.h>
62#include <net/route.h> 63#include <net/route.h>
63#include <net/ip_fib.h> 64#include <net/ip_fib.h>
64#include <net/rtnetlink.h> 65#include <net/rtnetlink.h>
@@ -735,6 +736,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
735 case SIOCSIFBRDADDR: /* Set the broadcast address */ 736 case SIOCSIFBRDADDR: /* Set the broadcast address */
736 case SIOCSIFDSTADDR: /* Set the destination address */ 737 case SIOCSIFDSTADDR: /* Set the destination address */
737 case SIOCSIFNETMASK: /* Set the netmask for the interface */ 738 case SIOCSIFNETMASK: /* Set the netmask for the interface */
739 case SIOCKILLADDR: /* Nuke all sockets on this address */
738 ret = -EACCES; 740 ret = -EACCES;
739 if (!capable(CAP_NET_ADMIN)) 741 if (!capable(CAP_NET_ADMIN))
740 goto out; 742 goto out;
@@ -786,7 +788,8 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
786 } 788 }
787 789
788 ret = -EADDRNOTAVAIL; 790 ret = -EADDRNOTAVAIL;
789 if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS) 791 if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS
792 && cmd != SIOCKILLADDR)
790 goto done; 793 goto done;
791 794
792 switch (cmd) { 795 switch (cmd) {
@@ -912,6 +915,9 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
912 inet_insert_ifa(ifa); 915 inet_insert_ifa(ifa);
913 } 916 }
914 break; 917 break;
918 case SIOCKILLADDR: /* Nuke all connections on this address */
919 ret = tcp_nuke_addr(net, (struct sockaddr *) sin);
920 break;
915 } 921 }
916done: 922done:
917 rtnl_unlock(); 923 rtnl_unlock();
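SIOCKILLADDR travels the same path as the other address ioctls: devinet_ioctl() enforces CAP_NET_ADMIN, then hands the sockaddr to tcp_nuke_addr(). A hedged sketch of issuing it from user space; the ioctl number is Android-specific (0x8939 in the Android-patched <linux/sockios.h>), so verify it against the headers shipped with this kernel:

/* Hedged sketch: ask the kernel to kill all TCP sockets bound to addr. */
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

#ifndef SIOCKILLADDR
#define SIOCKILLADDR 0x8939     /* Android-patched sockios.h; verify locally */
#endif

int kill_addr(int fd, const char *ifname, const char *addr)
{
        struct ifreq ifr;
        struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        sin->sin_family = AF_INET;
        inet_pton(AF_INET, addr, &sin->sin_addr);

        return ioctl(fd, SIOCKILLADDR, &ifr);   /* requires CAP_NET_ADMIN */
}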
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 1dfc18a03fd..73b4e91a87e 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -113,6 +113,18 @@ config IP_NF_TARGET_REJECT
113 113
114 To compile it as a module, choose M here. If unsure, say N. 114 To compile it as a module, choose M here. If unsure, say N.
115 115
116config IP_NF_TARGET_REJECT_SKERR
117 bool "Force socket error when rejecting with icmp*"
118 depends on IP_NF_TARGET_REJECT
119 default n
120 help
 121 This option additionally turns a "--reject-with icmp*" reject into
 122 a matching error on the local socket.
 123 The REJECT target normally only sends an ICMP message and leaves
 124 the local socket unaware of any ingress rejects.
125
126 If unsure, say N.
127
116config IP_NF_TARGET_LOG 128config IP_NF_TARGET_LOG
117 tristate "LOG target support" 129 tristate "LOG target support"
118 default m if NETFILTER_ADVANCED=n 130 default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 51f13f8ec72..9dd754c7f2b 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -128,6 +128,14 @@ static void send_reset(struct sk_buff *oldskb, int hook)
128static inline void send_unreach(struct sk_buff *skb_in, int code) 128static inline void send_unreach(struct sk_buff *skb_in, int code)
129{ 129{
130 icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0); 130 icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
131#ifdef CONFIG_IP_NF_TARGET_REJECT_SKERR
132 if (skb_in->sk) {
133 skb_in->sk->sk_err = icmp_err_convert[code].errno;
134 skb_in->sk->sk_error_report(skb_in->sk);
135 pr_debug("ipt_REJECT: sk_err=%d for skb=%p sk=%p\n",
136 skb_in->sk->sk_err, skb_in, skb_in->sk);
137 }
138#endif
131} 139}
132 140
133static unsigned int 141static unsigned int
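The effect of IP_NF_TARGET_REJECT_SKERR is visible directly in blocking socket calls: instead of the sender waiting out SYN retransmissions after its packet is REJECTed, the errno converted from the ICMP code (icmp_err_convert[code].errno, e.g. ECONNREFUSED for port-unreachable) is delivered to the socket immediately. A hedged userspace illustration:

/* With REJECT_SKERR compiled in and a matching REJECT rule installed,
 * connect() fails promptly instead of timing out. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>

int try_connect(const char *ip, unsigned short port)
{
        struct sockaddr_in dst = {
                .sin_family = AF_INET,
                .sin_port   = htons(port),
        };
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0)
                return -1;

        inet_pton(AF_INET, ip, &dst.sin_addr);
        if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
                printf("connect: %s\n", strerror(errno)); /* e.g. ECONNREFUSED */

        return fd;
}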
diff --git a/net/ipv4/sysfs_net_ipv4.c b/net/ipv4/sysfs_net_ipv4.c
new file mode 100644
index 00000000000..0cbbf10026a
--- /dev/null
+++ b/net/ipv4/sysfs_net_ipv4.c
@@ -0,0 +1,88 @@
1/*
2 * net/ipv4/sysfs_net_ipv4.c
3 *
4 * sysfs-based networking knobs (so we can, unlike with sysctl, control perms)
5 *
6 * Copyright (C) 2008 Google, Inc.
7 *
8 * Robert Love <rlove@google.com>
9 *
10 * This software is licensed under the terms of the GNU General Public
11 * License version 2, as published by the Free Software Foundation, and
12 * may be copied, distributed, and modified under those terms.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20#include <linux/kobject.h>
21#include <linux/string.h>
22#include <linux/sysfs.h>
23#include <linux/init.h>
24#include <net/tcp.h>
25
26#define CREATE_IPV4_FILE(_name, _var) \
27static ssize_t _name##_show(struct kobject *kobj, \
28 struct kobj_attribute *attr, char *buf) \
29{ \
30 return sprintf(buf, "%d\n", _var); \
31} \
32static ssize_t _name##_store(struct kobject *kobj, \
33 struct kobj_attribute *attr, \
34 const char *buf, size_t count) \
35{ \
36 int val, ret; \
37 ret = sscanf(buf, "%d", &val); \
38 if (ret != 1) \
39 return -EINVAL; \
40 if (val < 0) \
41 return -EINVAL; \
42 _var = val; \
43 return count; \
44} \
45static struct kobj_attribute _name##_attr = \
46 __ATTR(_name, 0644, _name##_show, _name##_store)
47
48CREATE_IPV4_FILE(tcp_wmem_min, sysctl_tcp_wmem[0]);
49CREATE_IPV4_FILE(tcp_wmem_def, sysctl_tcp_wmem[1]);
50CREATE_IPV4_FILE(tcp_wmem_max, sysctl_tcp_wmem[2]);
51
52CREATE_IPV4_FILE(tcp_rmem_min, sysctl_tcp_rmem[0]);
53CREATE_IPV4_FILE(tcp_rmem_def, sysctl_tcp_rmem[1]);
54CREATE_IPV4_FILE(tcp_rmem_max, sysctl_tcp_rmem[2]);
55
56static struct attribute *ipv4_attrs[] = {
57 &tcp_wmem_min_attr.attr,
58 &tcp_wmem_def_attr.attr,
59 &tcp_wmem_max_attr.attr,
60 &tcp_rmem_min_attr.attr,
61 &tcp_rmem_def_attr.attr,
62 &tcp_rmem_max_attr.attr,
63 NULL
64};
65
66static struct attribute_group ipv4_attr_group = {
67 .attrs = ipv4_attrs,
68};
69
70static __init int sysfs_ipv4_init(void)
71{
72 struct kobject *ipv4_kobject;
73 int ret;
74
75 ipv4_kobject = kobject_create_and_add("ipv4", kernel_kobj);
76 if (!ipv4_kobject)
77 return -ENOMEM;
78
79 ret = sysfs_create_group(ipv4_kobject, &ipv4_attr_group);
80 if (ret) {
81 kobject_put(ipv4_kobject);
82 return ret;
83 }
84
85 return 0;
86}
87
88subsys_initcall(sysfs_ipv4_init);
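Each CREATE_IPV4_FILE() invocation stamps out a show/store pair plus a kobj_attribute; since the kobject hangs off kernel_kobj, the six files land in /sys/kernel/ipv4/ with 0644 permissions that init can then chown, which is exactly the control plain sysctl does not give. For reference, CREATE_IPV4_FILE(tcp_wmem_min, sysctl_tcp_wmem[0]) expands to roughly:

/* Approximate expansion of CREATE_IPV4_FILE(tcp_wmem_min, sysctl_tcp_wmem[0]) */
static ssize_t tcp_wmem_min_show(struct kobject *kobj,
                                 struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", sysctl_tcp_wmem[0]);
}

static ssize_t tcp_wmem_min_store(struct kobject *kobj,
                                  struct kobj_attribute *attr,
                                  const char *buf, size_t count)
{
        int val, ret;

        ret = sscanf(buf, "%d", &val);
        if (ret != 1)
                return -EINVAL;
        if (val < 0)
                return -EINVAL;
        sysctl_tcp_wmem[0] = val;
        return count;
}

static struct kobj_attribute tcp_wmem_min_attr =
        __ATTR(tcp_wmem_min, 0644, tcp_wmem_min_show, tcp_wmem_min_store);

Writing a value into, say, /sys/kernel/ipv4/tcp_wmem_max then behaves like setting the corresponding sysctl, but the file's ownership and mode remain under init's control.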
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b6ec23c7ffc..31741cf9bb6 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -266,11 +266,15 @@
266#include <linux/crypto.h> 266#include <linux/crypto.h>
267#include <linux/time.h> 267#include <linux/time.h>
268#include <linux/slab.h> 268#include <linux/slab.h>
269#include <linux/uid_stat.h>
269 270
270#include <net/icmp.h> 271#include <net/icmp.h>
271#include <net/tcp.h> 272#include <net/tcp.h>
272#include <net/xfrm.h> 273#include <net/xfrm.h>
273#include <net/ip.h> 274#include <net/ip.h>
275#include <net/ip6_route.h>
276#include <net/ipv6.h>
277#include <net/transp_v6.h>
274#include <net/netdma.h> 278#include <net/netdma.h>
275#include <net/sock.h> 279#include <net/sock.h>
276 280
@@ -1111,6 +1115,9 @@ out:
1111 if (copied) 1115 if (copied)
1112 tcp_push(sk, flags, mss_now, tp->nonagle); 1116 tcp_push(sk, flags, mss_now, tp->nonagle);
1113 release_sock(sk); 1117 release_sock(sk);
1118
1119 if (copied > 0)
1120 uid_stat_tcp_snd(current_uid(), copied);
1114 return copied; 1121 return copied;
1115 1122
1116do_fault: 1123do_fault:
@@ -1387,8 +1394,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1387 tcp_rcv_space_adjust(sk); 1394 tcp_rcv_space_adjust(sk);
1388 1395
1389 /* Clean up data we have read: This will do ACK frames. */ 1396 /* Clean up data we have read: This will do ACK frames. */
1390 if (copied > 0) 1397 if (copied > 0) {
1391 tcp_cleanup_rbuf(sk, copied); 1398 tcp_cleanup_rbuf(sk, copied);
1399 uid_stat_tcp_rcv(current_uid(), copied);
1400 }
1401
1392 return copied; 1402 return copied;
1393} 1403}
1394EXPORT_SYMBOL(tcp_read_sock); 1404EXPORT_SYMBOL(tcp_read_sock);
@@ -1770,6 +1780,9 @@ skip_copy:
1770 tcp_cleanup_rbuf(sk, copied); 1780 tcp_cleanup_rbuf(sk, copied);
1771 1781
1772 release_sock(sk); 1782 release_sock(sk);
1783
1784 if (copied > 0)
1785 uid_stat_tcp_rcv(current_uid(), copied);
1773 return copied; 1786 return copied;
1774 1787
1775out: 1788out:
@@ -1778,6 +1791,8 @@ out:
1778 1791
1779recv_urg: 1792recv_urg:
1780 err = tcp_recv_urg(sk, msg, len, flags); 1793 err = tcp_recv_urg(sk, msg, len, flags);
1794 if (err > 0)
1795 uid_stat_tcp_rcv(current_uid(), err);
1781 goto out; 1796 goto out;
1782} 1797}
1783EXPORT_SYMBOL(tcp_recvmsg); 1798EXPORT_SYMBOL(tcp_recvmsg);
@@ -3313,3 +3328,107 @@ void __init tcp_init(void)
3313 tcp_secret_retiring = &tcp_secret_two; 3328 tcp_secret_retiring = &tcp_secret_two;
3314 tcp_secret_secondary = &tcp_secret_two; 3329 tcp_secret_secondary = &tcp_secret_two;
3315} 3330}
3331
3332static int tcp_is_local(struct net *net, __be32 addr) {
3333 struct rtable *rt;
3334 struct flowi4 fl4 = { .daddr = addr };
3335 rt = ip_route_output_key(net, &fl4);
3336 if (IS_ERR_OR_NULL(rt))
3337 return 0;
3338 return rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK);
3339}
3340
3341#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3342static int tcp_is_local6(struct net *net, struct in6_addr *addr) {
3343 struct rt6_info *rt6 = rt6_lookup(net, addr, addr, 0, 0);
3344 return rt6 && rt6->rt6i_dev && (rt6->rt6i_dev->flags & IFF_LOOPBACK);
3345}
3346#endif
3347
3348/*
3349 * tcp_nuke_addr - destroy all sockets on the given local address
3350 * if local address is the unspecified address (0.0.0.0 or ::), destroy all
3351 * sockets with local addresses that are not configured.
3352 */
3353int tcp_nuke_addr(struct net *net, struct sockaddr *addr)
3354{
3355 int family = addr->sa_family;
3356 unsigned int bucket;
3357
3358 struct in_addr *in;
3359#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3360 struct in6_addr *in6;
3361#endif
3362 if (family == AF_INET) {
3363 in = &((struct sockaddr_in *)addr)->sin_addr;
3364#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3365 } else if (family == AF_INET6) {
3366 in6 = &((struct sockaddr_in6 *)addr)->sin6_addr;
3367#endif
3368 } else {
3369 return -EAFNOSUPPORT;
3370 }
3371
3372 for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
3373 struct hlist_nulls_node *node;
3374 struct sock *sk;
3375 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
3376
3377restart:
3378 spin_lock_bh(lock);
3379 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
3380 struct inet_sock *inet = inet_sk(sk);
3381
3382 if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
3383 continue;
3384 if (sock_flag(sk, SOCK_DEAD))
3385 continue;
3386
3387 if (family == AF_INET) {
3388 __be32 s4 = inet->inet_rcv_saddr;
3389 if (s4 == LOOPBACK4_IPV6)
3390 continue;
3391
3392 if (in->s_addr != s4 &&
3393 !(in->s_addr == INADDR_ANY &&
3394 !tcp_is_local(net, s4)))
3395 continue;
3396 }
3397
3398#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3399 if (family == AF_INET6) {
3400 struct in6_addr *s6;
3401 if (!inet->pinet6)
3402 continue;
3403
3404 s6 = &inet->pinet6->rcv_saddr;
3405 if (ipv6_addr_type(s6) == IPV6_ADDR_MAPPED)
3406 continue;
3407
3408 if (!ipv6_addr_equal(in6, s6) &&
3409 !(ipv6_addr_equal(in6, &in6addr_any) &&
3410 !tcp_is_local6(net, s6)))
3411 continue;
3412 }
3413#endif
3414
3415 sock_hold(sk);
3416 spin_unlock_bh(lock);
3417
3418 local_bh_disable();
3419 bh_lock_sock(sk);
3420 sk->sk_err = ETIMEDOUT;
3421 sk->sk_error_report(sk);
3422
3423 tcp_done(sk);
3424 bh_unlock_sock(sk);
3425 local_bh_enable();
3426 sock_put(sk);
3427
3428 goto restart;
3429 }
3430 spin_unlock_bh(lock);
3431 }
3432
3433 return 0;
3434}
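Note the locking discipline in the bucket walk above: tcp_done() cannot run with the ehash bucket lock held, so for every victim the loop pins the socket, drops the lock, kills the socket, and restarts the scan of that bucket, since the chain may have changed in the meantime. A condensed, hedged sketch of that idiom (candidate() stands in for the address/family checks above):

/* Hedged condensation of the tcp_nuke_addr() bucket walk. */
#include <net/sock.h>
#include <net/tcp.h>

static bool candidate(struct sock *sk);         /* assumed: checks above */

static void nuke_bucket(spinlock_t *lock, struct hlist_nulls_head *chain)
{
        struct hlist_nulls_node *node;
        struct sock *sk;

restart:
        spin_lock_bh(lock);
        sk_nulls_for_each(sk, node, chain) {
                if (!candidate(sk))
                        continue;

                sock_hold(sk);                  /* keep sk valid after unlock */
                spin_unlock_bh(lock);

                local_bh_disable();
                bh_lock_sock(sk);
                sk->sk_err = ETIMEDOUT;         /* wake any blocked callers */
                sk->sk_error_report(sk);
                tcp_done(sk);                   /* closes and unhashes sk */
                bh_unlock_sock(sk);
                local_bh_enable();
                sock_put(sk);

                goto restart;                   /* chain may have changed */
        }
        spin_unlock_bh(lock);
}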
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index be29337ea39..8a4bf719c25 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -828,12 +828,13 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
828{ 828{
829 struct inet6_dev *idev = ifp->idev; 829 struct inet6_dev *idev = ifp->idev;
830 struct in6_addr addr, *tmpaddr; 830 struct in6_addr addr, *tmpaddr;
831 unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_cstamp, tmp_tstamp, age; 831 unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_tstamp, age;
832 unsigned long regen_advance; 832 unsigned long regen_advance;
833 int tmp_plen; 833 int tmp_plen;
834 int ret = 0; 834 int ret = 0;
835 int max_addresses; 835 int max_addresses;
836 u32 addr_flags; 836 u32 addr_flags;
837 unsigned long now = jiffies;
837 838
838 write_lock(&idev->lock); 839 write_lock(&idev->lock);
839 if (ift) { 840 if (ift) {
@@ -878,7 +879,7 @@ retry:
878 goto out; 879 goto out;
879 } 880 }
880 memcpy(&addr.s6_addr[8], idev->rndid, 8); 881 memcpy(&addr.s6_addr[8], idev->rndid, 8);
881 age = (jiffies - ifp->tstamp) / HZ; 882 age = (now - ifp->tstamp) / HZ;
882 tmp_valid_lft = min_t(__u32, 883 tmp_valid_lft = min_t(__u32,
883 ifp->valid_lft, 884 ifp->valid_lft,
884 idev->cnf.temp_valid_lft + age); 885 idev->cnf.temp_valid_lft + age);
@@ -888,7 +889,6 @@ retry:
888 idev->cnf.max_desync_factor); 889 idev->cnf.max_desync_factor);
889 tmp_plen = ifp->prefix_len; 890 tmp_plen = ifp->prefix_len;
890 max_addresses = idev->cnf.max_addresses; 891 max_addresses = idev->cnf.max_addresses;
891 tmp_cstamp = ifp->cstamp;
892 tmp_tstamp = ifp->tstamp; 892 tmp_tstamp = ifp->tstamp;
893 spin_unlock_bh(&ifp->lock); 893 spin_unlock_bh(&ifp->lock);
894 894
@@ -933,7 +933,7 @@ retry:
933 ift->ifpub = ifp; 933 ift->ifpub = ifp;
934 ift->valid_lft = tmp_valid_lft; 934 ift->valid_lft = tmp_valid_lft;
935 ift->prefered_lft = tmp_prefered_lft; 935 ift->prefered_lft = tmp_prefered_lft;
936 ift->cstamp = tmp_cstamp; 936 ift->cstamp = now;
937 ift->tstamp = tmp_tstamp; 937 ift->tstamp = tmp_tstamp;
938 spin_unlock_bh(&ift->lock); 938 spin_unlock_bh(&ift->lock);
939 939
@@ -1992,25 +1992,50 @@ ok:
1992#ifdef CONFIG_IPV6_PRIVACY 1992#ifdef CONFIG_IPV6_PRIVACY
1993 read_lock_bh(&in6_dev->lock); 1993 read_lock_bh(&in6_dev->lock);
1994 /* update all temporary addresses in the list */ 1994 /* update all temporary addresses in the list */
1995 list_for_each_entry(ift, &in6_dev->tempaddr_list, tmp_list) { 1995 list_for_each_entry(ift, &in6_dev->tempaddr_list,
1996 /* 1996 tmp_list) {
1997 * When adjusting the lifetimes of an existing 1997 int age, max_valid, max_prefered;
1998 * temporary address, only lower the lifetimes. 1998
1999 * Implementations must not increase the
2000 * lifetimes of an existing temporary address
2001 * when processing a Prefix Information Option.
2002 */
2003 if (ifp != ift->ifpub) 1999 if (ifp != ift->ifpub)
2004 continue; 2000 continue;
2005 2001
2002 /*
2003 * RFC 4941 section 3.3:
2004 * If a received option will extend the lifetime
2005 * of a public address, the lifetimes of
2006 * temporary addresses should be extended,
2007 * subject to the overall constraint that no
2008 * temporary addresses should ever remain
2009 * "valid" or "preferred" for a time longer than
2010 * (TEMP_VALID_LIFETIME) or
2011 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR),
2012 * respectively.
2013 */
2014 age = (now - ift->cstamp) / HZ;
2015 max_valid = in6_dev->cnf.temp_valid_lft - age;
2016 if (max_valid < 0)
2017 max_valid = 0;
2018
2019 max_prefered = in6_dev->cnf.temp_prefered_lft -
2020 in6_dev->cnf.max_desync_factor -
2021 age;
2022 if (max_prefered < 0)
2023 max_prefered = 0;
2024
2025 if (valid_lft > max_valid)
2026 valid_lft = max_valid;
2027
2028 if (prefered_lft > max_prefered)
2029 prefered_lft = max_prefered;
2030
2006 spin_lock(&ift->lock); 2031 spin_lock(&ift->lock);
2007 flags = ift->flags; 2032 flags = ift->flags;
2008 if (ift->valid_lft > valid_lft && 2033 ift->valid_lft = valid_lft;
2009 ift->valid_lft - valid_lft > (jiffies - ift->tstamp) / HZ) 2034 ift->prefered_lft = prefered_lft;
2010 ift->valid_lft = valid_lft + (jiffies - ift->tstamp) / HZ; 2035 ift->tstamp = now;
2011 if (ift->prefered_lft > prefered_lft && 2036 if (prefered_lft > 0)
2012 ift->prefered_lft - prefered_lft > (jiffies - ift->tstamp) / HZ) 2037 ift->flags &= ~IFA_F_DEPRECATED;
2013 ift->prefered_lft = prefered_lft + (jiffies - ift->tstamp) / HZ; 2038
2014 spin_unlock(&ift->lock); 2039 spin_unlock(&ift->lock);
2015 if (!(flags&IFA_F_TENTATIVE)) 2040 if (!(flags&IFA_F_TENTATIVE))
2016 ipv6_ifa_notify(0, ift); 2041 ipv6_ifa_notify(0, ift);
@@ -2018,9 +2043,11 @@ ok:
2018 2043
2019 if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) { 2044 if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) {
2020 /* 2045 /*
2021 * When a new public address is created as described in [ADDRCONF], 2046 * When a new public address is created as
2022 * also create a new temporary address. Also create a temporary 2047 * described in [ADDRCONF], also create a new
2023 * address if it's enabled but no temporary address currently exists. 2048 * temporary address. Also create a temporary
2049 * address if it's enabled but no temporary
2050 * address currently exists.
2024 */ 2051 */
2025 read_unlock_bh(&in6_dev->lock); 2052 read_unlock_bh(&in6_dev->lock);
2026 ipv6_create_tempaddr(ifp, NULL); 2053 ipv6_create_tempaddr(ifp, NULL);
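The addrconf change implements the RFC 4941 section 3.3 rule quoted in the new comment: a temporary address's lifetimes may be extended, but never past TEMP_VALID_LIFETIME (resp. TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR) measured from the address's creation. For example, with temp_valid_lft of 7 days and an address created 2 days ago, an advertised valid lifetime of 7 days is clamped to 5. A minimal restatement of the clamp, all values in seconds:

/* Hedged restatement of the clamping logic added above. */
static unsigned long clamp_temp_lft(unsigned long advertised,
                                    long budget_from_creation, long age)
{
        long max_lft = budget_from_creation - age;

        if (max_lft < 0)
                max_lft = 0;

        return advertised > (unsigned long)max_lft ?
                        (unsigned long)max_lft : advertised;
}

/* valid_lft    = clamp_temp_lft(valid_lft, temp_valid_lft, age);
 * prefered_lft = clamp_temp_lft(prefered_lft,
 *                               temp_prefered_lft - max_desync_factor, age); */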
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 559123644e5..7e8340ef5a2 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -63,6 +63,20 @@
63#include <asm/system.h> 63#include <asm/system.h>
64#include <linux/mroute6.h> 64#include <linux/mroute6.h>
65 65
66#ifdef CONFIG_ANDROID_PARANOID_NETWORK
67#include <linux/android_aid.h>
68
69static inline int current_has_network(void)
70{
71 return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
72}
73#else
74static inline int current_has_network(void)
75{
76 return 1;
77}
78#endif
79
66MODULE_AUTHOR("Cast of dozens"); 80MODULE_AUTHOR("Cast of dozens");
67MODULE_DESCRIPTION("IPv6 protocol stack for Linux"); 81MODULE_DESCRIPTION("IPv6 protocol stack for Linux");
68MODULE_LICENSE("GPL"); 82MODULE_LICENSE("GPL");
@@ -109,6 +123,9 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
109 int try_loading_module = 0; 123 int try_loading_module = 0;
110 int err; 124 int err;
111 125
126 if (!current_has_network())
127 return -EACCES;
128
112 if (sock->type != SOCK_RAW && 129 if (sock->type != SOCK_RAW &&
113 sock->type != SOCK_DGRAM && 130 sock->type != SOCK_DGRAM &&
114 !inet_ehash_secret) 131 !inet_ehash_secret)
@@ -477,6 +494,21 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
477 494
478EXPORT_SYMBOL(inet6_getname); 495EXPORT_SYMBOL(inet6_getname);
479 496
497int inet6_killaddr_ioctl(struct net *net, void __user *arg) {
498 struct in6_ifreq ireq;
499 struct sockaddr_in6 sin6;
500
501 if (!capable(CAP_NET_ADMIN))
502 return -EACCES;
503
504 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
505 return -EFAULT;
506
507 sin6.sin6_family = AF_INET6;
508 ipv6_addr_copy(&sin6.sin6_addr, &ireq.ifr6_addr);
509 return tcp_nuke_addr(net, (struct sockaddr *) &sin6);
510}
511
480int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 512int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
481{ 513{
482 struct sock *sk = sock->sk; 514 struct sock *sk = sock->sk;
@@ -501,6 +533,8 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
501 return addrconf_del_ifaddr(net, (void __user *) arg); 533 return addrconf_del_ifaddr(net, (void __user *) arg);
502 case SIOCSIFDSTADDR: 534 case SIOCSIFDSTADDR:
503 return addrconf_set_dstaddr(net, (void __user *) arg); 535 return addrconf_set_dstaddr(net, (void __user *) arg);
536 case SIOCKILLADDR:
537 return inet6_killaddr_ioctl(net, (void __user *) arg);
504 default: 538 default:
505 if (!sk->sk_prot->ioctl) 539 if (!sk->sk_prot->ioctl)
506 return -ENOIOCTLCMD; 540 return -ENOIOCTLCMD;
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 448464844a2..5bbf5316920 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -174,6 +174,18 @@ config IP6_NF_TARGET_REJECT
174 174
175 To compile it as a module, choose M here. If unsure, say N. 175 To compile it as a module, choose M here. If unsure, say N.
176 176
177config IP6_NF_TARGET_REJECT_SKERR
178 bool "Force socket error when rejecting with icmp*"
179 depends on IP6_NF_TARGET_REJECT
180 default n
181 help
 182 This option additionally turns a "--reject-with icmp*" reject into
 183 a matching error on the local socket.
 184 The REJECT target normally only sends an ICMP message and leaves
 185 the local socket unaware of any ingress rejects.
186
187 If unsure, say N.
188
177config IP6_NF_MANGLE 189config IP6_NF_MANGLE
178 tristate "Packet mangling" 190 tristate "Packet mangling"
179 default m if NETFILTER_ADVANCED=n 191 default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 94874b0bdcd..14cb310064f 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -2292,16 +2292,15 @@ static void __exit ip6_tables_fini(void)
2292 * "No next header". 2292 * "No next header".
2293 * 2293 *
2294 * If target header is found, its offset is set in *offset and return protocol 2294 * If target header is found, its offset is set in *offset and return protocol
2295 * number. Otherwise, return -1. 2295 * number. Otherwise, return -ENOENT or -EBADMSG.
2296 * 2296 *
2297 * If the first fragment doesn't contain the final protocol header or 2297 * If the first fragment doesn't contain the final protocol header or
2298 * NEXTHDR_NONE it is considered invalid. 2298 * NEXTHDR_NONE it is considered invalid.
2299 * 2299 *
2300 * Note that non-1st fragment is special case that "the protocol number 2300 * Note that non-1st fragment is special case that "the protocol number
2301 * of last header" is "next header" field in Fragment header. In this case, 2301 * of last header" is "next header" field in Fragment header. In this case,
2302 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff 2302 * *offset is meaningless. If fragoff is not NULL, the fragment offset is
2303 * isn't NULL. 2303 * stored in *fragoff; if it is NULL, return -EINVAL.
2304 *
2305 */ 2304 */
2306int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, 2305int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2307 int target, unsigned short *fragoff) 2306 int target, unsigned short *fragoff)
@@ -2342,9 +2341,12 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2342 if (target < 0 && 2341 if (target < 0 &&
2343 ((!ipv6_ext_hdr(hp->nexthdr)) || 2342 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2344 hp->nexthdr == NEXTHDR_NONE)) { 2343 hp->nexthdr == NEXTHDR_NONE)) {
2345 if (fragoff) 2344 if (fragoff) {
2346 *fragoff = _frag_off; 2345 *fragoff = _frag_off;
2347 return hp->nexthdr; 2346 return hp->nexthdr;
2347 } else {
2348 return -EINVAL;
2349 }
2348 } 2350 }
2349 return -ENOENT; 2351 return -ENOENT;
2350 } 2352 }
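Callers of ipv6_find_hdr() must now treat a negative return as one of several distinct cases rather than a bare -1. A hedged caller sketch following the documented semantics:

/* Hedged sketch of a netfilter-style caller of the patched ipv6_find_hdr(). */
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6/ip6_tables.h>

static unsigned int classify_transport(const struct sk_buff *skb)
{
        unsigned int thoff = 0;
        unsigned short frag_off;
        int proto;

        proto = ipv6_find_hdr(skb, &thoff, -1, &frag_off);
        if (proto == -EBADMSG)          /* malformed extension header chain */
                return NF_DROP;
        if (proto < 0)                  /* -ENOENT / -EINVAL: nothing usable */
                return NF_ACCEPT;

        /* proto is the final protocol number, thoff its offset */
        return NF_ACCEPT;
}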
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index a5a4c5dd539..09d30498c92 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -177,6 +177,15 @@ send_unreach(struct net *net, struct sk_buff *skb_in, unsigned char code,
177 skb_in->dev = net->loopback_dev; 177 skb_in->dev = net->loopback_dev;
178 178
179 icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0); 179 icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
180#ifdef CONFIG_IP6_NF_TARGET_REJECT_SKERR
181 if (skb_in->sk) {
182 icmpv6_err_convert(ICMPV6_DEST_UNREACH, code,
183 &skb_in->sk->sk_err);
184 skb_in->sk->sk_error_report(skb_in->sk);
185 pr_debug("ip6t_REJECT: sk_err=%d for skb=%p sk=%p\n",
186 skb_in->sk->sk_err, skb_in, skb_in->sk);
187 }
188#endif
180} 189}
181 190
182static unsigned int 191static unsigned int
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index ca7bf1052eb..3ff633e81b6 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -334,6 +334,7 @@ static int sta_info_finish_insert(struct sta_info *sta, bool async)
334 ieee80211_sta_debugfs_add(sta); 334 ieee80211_sta_debugfs_add(sta);
335 rate_control_add_sta_debugfs(sta); 335 rate_control_add_sta_debugfs(sta);
336 336
337 memset(&sinfo, 0, sizeof(sinfo));
337 sinfo.filled = 0; 338 sinfo.filled = 0;
338 sinfo.generation = local->sta_generation; 339 sinfo.generation = local->sta_generation;
339 cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL); 340 cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
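The sta_info fix is one line but worth spelling out: struct station_info is a large on-stack struct, and only two of its members were set before it was handed to cfg80211_new_sta(), which copies many members into a netlink event; the rest were stack garbage. Zeroing first is the usual idiom:

/* Illustrative restatement of the bug class fixed above: a partially
 * initialized on-stack struct passed to a consumer that reads more
 * members than the caller set. */
struct station_info sinfo;

memset(&sinfo, 0, sizeof(sinfo));       /* every member in a known state */
sinfo.filled = 0;                       /* only these two carry data here */
sinfo.generation = local->sta_generation;
cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);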
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 32bff6d86cb..5bd5c612a9b 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -902,6 +902,8 @@ config NETFILTER_XT_MATCH_OWNER
902 based on who created the socket: the user or group. It is also 902 based on who created the socket: the user or group. It is also
903 possible to check whether a socket actually exists. 903 possible to check whether a socket actually exists.
904 904
905 Conflicts with '"quota, tag, uid" match'
906
905config NETFILTER_XT_MATCH_POLICY 907config NETFILTER_XT_MATCH_POLICY
906 tristate 'IPsec "policy" match support' 908 tristate 'IPsec "policy" match support'
907 depends on XFRM 909 depends on XFRM
@@ -935,6 +937,22 @@ config NETFILTER_XT_MATCH_PKTTYPE
935 937
936 To compile it as a module, choose M here. If unsure, say N. 938 To compile it as a module, choose M here. If unsure, say N.
937 939
940config NETFILTER_XT_MATCH_QTAGUID
941 bool '"quota, tag, owner" match and stats support'
942 depends on NETFILTER_XT_MATCH_SOCKET
943 depends on NETFILTER_XT_MATCH_OWNER=n
944 help
945 This option replaces the `owner' match. In addition to matching
946 on uid, it keeps stats based on a tag assigned to a socket.
947 The full tag is comprised of a UID and an accounting tag.
948 The tags are assignable to sockets from user space (e.g. a download
949 manager can assign the socket to another UID for accounting).
950 Stats and control are done via /proc/net/xt_qtaguid/.
951 It replaces owner as it takes the same arguments, but should
952 really be recognized by the iptables tool.
953
954 If unsure, say `N'.
955
938config NETFILTER_XT_MATCH_QUOTA 956config NETFILTER_XT_MATCH_QUOTA
939 tristate '"quota" match support' 957 tristate '"quota" match support'
940 depends on NETFILTER_ADVANCED 958 depends on NETFILTER_ADVANCED
@@ -945,6 +963,30 @@ config NETFILTER_XT_MATCH_QUOTA
945 If you want to compile it as a module, say M here and read 963 If you want to compile it as a module, say M here and read
946 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'. 964 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
947 965
966config NETFILTER_XT_MATCH_QUOTA2
967 tristate '"quota2" match support'
968 depends on NETFILTER_ADVANCED
969 help
 970	  This option adds a `quota2' match, which allows matching on a
 971	  byte counter correctly and not per CPU.
972 It allows naming the quotas.
973 This is based on http://xtables-addons.git.sourceforge.net
974
975 If you want to compile it as a module, say M here and read
976 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
977
978config NETFILTER_XT_MATCH_QUOTA2_LOG
979 bool '"quota2" Netfilter LOG support'
980 depends on NETFILTER_XT_MATCH_QUOTA2
981 depends on IP_NF_TARGET_ULOG=n # not yes, not module, just no
982 default n
983 help
984 This option allows `quota2' to log ONCE when a quota limit
985 is passed. It logs via NETLINK using the NETLINK_NFLOG family.
 986	  It logs similarly to how ipt_ULOG would, but without the packet data.
987
988 If unsure, say `N'.
989
948config NETFILTER_XT_MATCH_RATEEST 990config NETFILTER_XT_MATCH_RATEEST
949 tristate '"rateest" match support' 991 tristate '"rateest" match support'
950 depends on NETFILTER_ADVANCED 992 depends on NETFILTER_ADVANCED
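The "full tag" the qtaguid help text mentions is a 64-bit value: the user-assigned accounting tag occupies the upper 32 bits and the UID the lower 32. A hedged sketch of the arithmetic (the real helpers live in xt_qtaguid_internal.h and may differ in naming and detail):

/* Hedged sketch of qtaguid tag packing. */
#include <stdint.h>

typedef uint64_t tag_t;

static inline tag_t make_tag(uint32_t acct_tag, uint32_t uid)
{
        return ((tag_t)acct_tag << 32) | uid;   /* acct tag high, uid low */
}

static inline uint32_t get_uid_from_tag(tag_t tag)
{
        return (uint32_t)tag;                   /* low 32 bits */
}

static inline uint32_t get_atag_from_tag(tag_t tag)
{
        return (uint32_t)(tag >> 32);           /* high 32 bits */
}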
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 1a02853df86..6d917176c3b 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -95,7 +95,9 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_OWNER) += xt_owner.o
95obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o 95obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
96obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o 96obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o
97obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o 97obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o
98obj-$(CONFIG_NETFILTER_XT_MATCH_QTAGUID) += xt_qtaguid_print.o xt_qtaguid.o
98obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o 99obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o
100obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA2) += xt_quota2.o
99obj-$(CONFIG_NETFILTER_XT_MATCH_RATEEST) += xt_rateest.o 101obj-$(CONFIG_NETFILTER_XT_MATCH_RATEEST) += xt_rateest.o
100obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o 102obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o
101obj-$(CONFIG_NETFILTER_XT_MATCH_RECENT) += xt_recent.o 103obj-$(CONFIG_NETFILTER_XT_MATCH_RECENT) += xt_recent.o
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
new file mode 100644
index 00000000000..08086d680c2
--- /dev/null
+++ b/net/netfilter/xt_qtaguid.c
@@ -0,0 +1,2785 @@
1/*
2 * Kernel iptables module to track stats for packets based on user tags.
3 *
4 * (C) 2011 Google, Inc
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11/*
12 * There are run-time debug flags enabled via the debug_mask module param, or
13 * via the DEFAULT_DEBUG_MASK. See xt_qtaguid_internal.h.
14 */
15#define DEBUG
16
17#include <linux/file.h>
18#include <linux/inetdevice.h>
19#include <linux/module.h>
20#include <linux/netfilter/x_tables.h>
21#include <linux/netfilter/xt_qtaguid.h>
22#include <linux/skbuff.h>
23#include <linux/workqueue.h>
24#include <net/addrconf.h>
25#include <net/sock.h>
26#include <net/tcp.h>
27#include <net/udp.h>
28
29#include <linux/netfilter/xt_socket.h>
30#include "xt_qtaguid_internal.h"
31#include "xt_qtaguid_print.h"
32
33/*
34 * We only use the xt_socket funcs within a similar context to avoid unexpected
35 * return values.
36 */
37#define XT_SOCKET_SUPPORTED_HOOKS \
38 ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN))
39
40
41static const char *module_procdirname = "xt_qtaguid";
42static struct proc_dir_entry *xt_qtaguid_procdir;
43
44static unsigned int proc_iface_perms = S_IRUGO;
45module_param_named(iface_perms, proc_iface_perms, uint, S_IRUGO | S_IWUSR);
46
47static struct proc_dir_entry *xt_qtaguid_stats_file;
48static unsigned int proc_stats_perms = S_IRUGO;
49module_param_named(stats_perms, proc_stats_perms, uint, S_IRUGO | S_IWUSR);
50
51static struct proc_dir_entry *xt_qtaguid_ctrl_file;
52#ifdef CONFIG_ANDROID_PARANOID_NETWORK
53static unsigned int proc_ctrl_perms = S_IRUGO | S_IWUGO;
54#else
55static unsigned int proc_ctrl_perms = S_IRUGO | S_IWUSR;
56#endif
57module_param_named(ctrl_perms, proc_ctrl_perms, uint, S_IRUGO | S_IWUSR);
58
59#ifdef CONFIG_ANDROID_PARANOID_NETWORK
60#include <linux/android_aid.h>
61static gid_t proc_stats_readall_gid = AID_NET_BW_STATS;
62static gid_t proc_ctrl_write_gid = AID_NET_BW_ACCT;
63#else
 64/* 0 means don't limit anybody */
65static gid_t proc_stats_readall_gid;
66static gid_t proc_ctrl_write_gid;
67#endif
68module_param_named(stats_readall_gid, proc_stats_readall_gid, uint,
69 S_IRUGO | S_IWUSR);
70module_param_named(ctrl_write_gid, proc_ctrl_write_gid, uint,
71 S_IRUGO | S_IWUSR);
72
73/*
74 * Limit the number of active tags (via socket tags) for a given UID.
75 * Multiple processes could share the UID.
76 */
77static int max_sock_tags = DEFAULT_MAX_SOCK_TAGS;
78module_param(max_sock_tags, int, S_IRUGO | S_IWUSR);
79
80/*
 81 * After the kernel has initialized this module, it is still possible
82 * to make it passive.
83 * Setting passive to Y:
84 * - the iface stats handling will not act on notifications.
85 * - iptables matches will never match.
86 * - ctrl commands silently succeed.
87 * - stats are always empty.
 88 * This is mostly useful when a bug is suspected.
89 */
90static bool module_passive;
91module_param_named(passive, module_passive, bool, S_IRUGO | S_IWUSR);
92
93/*
94 * Control how qtaguid data is tracked per proc/uid.
95 * Setting tag_tracking_passive to Y:
96 * - don't create proc specific structs to track tags
97 * - don't check that active tag stats exceed some limits.
98 * - don't clean up socket tags on process exits.
 99 * This is mostly useful when a bug is suspected.
100 */
101static bool qtu_proc_handling_passive;
102module_param_named(tag_tracking_passive, qtu_proc_handling_passive, bool,
103 S_IRUGO | S_IWUSR);
104
105#define QTU_DEV_NAME "xt_qtaguid"
106
107uint qtaguid_debug_mask = DEFAULT_DEBUG_MASK;
108module_param_named(debug_mask, qtaguid_debug_mask, uint, S_IRUGO | S_IWUSR);
109
110/*---------------------------------------------------------------------------*/
111static const char *iface_stat_procdirname = "iface_stat";
112static struct proc_dir_entry *iface_stat_procdir;
113static const char *iface_stat_all_procfilename = "iface_stat_all";
114static struct proc_dir_entry *iface_stat_all_procfile;
115
116/*
117 * Ordering of locks:
118 * outer locks:
119 * iface_stat_list_lock
120 * sock_tag_list_lock
121 * inner locks:
122 * uid_tag_data_tree_lock
123 * tag_counter_set_list_lock
124 * Notice how sock_tag_list_lock is held sometimes when uid_tag_data_tree_lock
125 * is acquired.
126 *
127 * Call tree with all lock holders as of 2011-09-25:
128 *
129 * iface_stat_all_proc_read()
130 * iface_stat_list_lock
131 * (struct iface_stat)
132 *
133 * qtaguid_ctrl_proc_read()
134 * sock_tag_list_lock
135 * (sock_tag_tree)
136 * (struct proc_qtu_data->sock_tag_list)
137 * prdebug_full_state()
138 * sock_tag_list_lock
139 * (sock_tag_tree)
140 * uid_tag_data_tree_lock
141 * (uid_tag_data_tree)
142 * (proc_qtu_data_tree)
143 * iface_stat_list_lock
144 *
145 * qtaguid_stats_proc_read()
146 * iface_stat_list_lock
147 * struct iface_stat->tag_stat_list_lock
148 *
149 * qtudev_open()
150 * uid_tag_data_tree_lock
151 *
152 * qtudev_release()
153 * sock_tag_data_list_lock
154 * uid_tag_data_tree_lock
155 * prdebug_full_state()
156 * sock_tag_list_lock
157 * uid_tag_data_tree_lock
158 * iface_stat_list_lock
159 *
160 * iface_netdev_event_handler()
161 * iface_stat_create()
162 * iface_stat_list_lock
163 * iface_stat_update()
164 * iface_stat_list_lock
165 *
166 * iface_inetaddr_event_handler()
167 * iface_stat_create()
168 * iface_stat_list_lock
169 * iface_stat_update()
170 * iface_stat_list_lock
171 *
172 * iface_inet6addr_event_handler()
173 * iface_stat_create_ipv6()
174 * iface_stat_list_lock
175 * iface_stat_update()
176 * iface_stat_list_lock
177 *
178 * qtaguid_mt()
179 * account_for_uid()
180 * if_tag_stat_update()
181 * get_sock_stat()
182 * sock_tag_list_lock
183 * struct iface_stat->tag_stat_list_lock
184 * tag_stat_update()
185 * get_active_counter_set()
186 * tag_counter_set_list_lock
187 * tag_stat_update()
188 * get_active_counter_set()
189 * tag_counter_set_list_lock
190 *
191 *
192 * qtaguid_ctrl_parse()
193 * ctrl_cmd_delete()
194 * sock_tag_list_lock
195 * tag_counter_set_list_lock
196 * iface_stat_list_lock
197 * struct iface_stat->tag_stat_list_lock
198 * uid_tag_data_tree_lock
199 * ctrl_cmd_counter_set()
200 * tag_counter_set_list_lock
201 * ctrl_cmd_tag()
202 * sock_tag_list_lock
203 * (sock_tag_tree)
204 * get_tag_ref()
205 * uid_tag_data_tree_lock
206 * (uid_tag_data_tree)
207 * uid_tag_data_tree_lock
208 * (proc_qtu_data_tree)
209 * ctrl_cmd_untag()
210 * sock_tag_list_lock
211 * uid_tag_data_tree_lock
212 *
213 */
214static LIST_HEAD(iface_stat_list);
215static DEFINE_SPINLOCK(iface_stat_list_lock);
216
217static struct rb_root sock_tag_tree = RB_ROOT;
218static DEFINE_SPINLOCK(sock_tag_list_lock);
219
220static struct rb_root tag_counter_set_tree = RB_ROOT;
221static DEFINE_SPINLOCK(tag_counter_set_list_lock);
222
223static struct rb_root uid_tag_data_tree = RB_ROOT;
224static DEFINE_SPINLOCK(uid_tag_data_tree_lock);
225
226static struct rb_root proc_qtu_data_tree = RB_ROOT;
227/* No proc_qtu_data_tree_lock; use uid_tag_data_tree_lock */
228
229static struct qtaguid_event_counts qtu_events;
230/*----------------------------------------------*/
231static bool can_manipulate_uids(void)
232{
233 /* root pwnd */
234 return unlikely(!current_fsuid()) || unlikely(!proc_ctrl_write_gid)
235 || in_egroup_p(proc_ctrl_write_gid);
236}
237
238static bool can_impersonate_uid(uid_t uid)
239{
240 return uid == current_fsuid() || can_manipulate_uids();
241}
242
243static bool can_read_other_uid_stats(uid_t uid)
244{
245 /* root pwnd */
246 return unlikely(!current_fsuid()) || uid == current_fsuid()
247 || unlikely(!proc_stats_readall_gid)
248 || in_egroup_p(proc_stats_readall_gid);
249}
250
251static inline void dc_add_byte_packets(struct data_counters *counters, int set,
252 enum ifs_tx_rx direction,
253 enum ifs_proto ifs_proto,
254 int bytes,
255 int packets)
256{
257 counters->bpc[set][direction][ifs_proto].bytes += bytes;
258 counters->bpc[set][direction][ifs_proto].packets += packets;
259}
260
261static inline uint64_t dc_sum_bytes(struct data_counters *counters,
262 int set,
263 enum ifs_tx_rx direction)
264{
265 return counters->bpc[set][direction][IFS_TCP].bytes
266 + counters->bpc[set][direction][IFS_UDP].bytes
267 + counters->bpc[set][direction][IFS_PROTO_OTHER].bytes;
268}
269
270static inline uint64_t dc_sum_packets(struct data_counters *counters,
271 int set,
272 enum ifs_tx_rx direction)
273{
274 return counters->bpc[set][direction][IFS_TCP].packets
275 + counters->bpc[set][direction][IFS_UDP].packets
276 + counters->bpc[set][direction][IFS_PROTO_OTHER].packets;
277}
278
279static struct tag_node *tag_node_tree_search(struct rb_root *root, tag_t tag)
280{
281 struct rb_node *node = root->rb_node;
282
283 while (node) {
284 struct tag_node *data = rb_entry(node, struct tag_node, node);
285 int result;
286 RB_DEBUG("qtaguid: tag_node_tree_search(0x%llx): "
287 " node=%p data=%p\n", tag, node, data);
288 result = tag_compare(tag, data->tag);
289 RB_DEBUG("qtaguid: tag_node_tree_search(0x%llx): "
290 " data.tag=0x%llx (uid=%u) res=%d\n",
291 tag, data->tag, get_uid_from_tag(data->tag), result);
292 if (result < 0)
293 node = node->rb_left;
294 else if (result > 0)
295 node = node->rb_right;
296 else
297 return data;
298 }
299 return NULL;
300}
301
302static void tag_node_tree_insert(struct tag_node *data, struct rb_root *root)
303{
304 struct rb_node **new = &(root->rb_node), *parent = NULL;
305
306 /* Figure out where to put new node */
307 while (*new) {
308 struct tag_node *this = rb_entry(*new, struct tag_node,
309 node);
310 int result = tag_compare(data->tag, this->tag);
311 RB_DEBUG("qtaguid: %s(): tag=0x%llx"
312 " (uid=%u)\n", __func__,
313 this->tag,
314 get_uid_from_tag(this->tag));
315 parent = *new;
316 if (result < 0)
317 new = &((*new)->rb_left);
318 else if (result > 0)
319 new = &((*new)->rb_right);
320 else
321 BUG();
322 }
323
324 /* Add new node and rebalance tree. */
325 rb_link_node(&data->node, parent, new);
326 rb_insert_color(&data->node, root);
327}
328
329static void tag_stat_tree_insert(struct tag_stat *data, struct rb_root *root)
330{
331 tag_node_tree_insert(&data->tn, root);
332}
333
334static struct tag_stat *tag_stat_tree_search(struct rb_root *root, tag_t tag)
335{
336 struct tag_node *node = tag_node_tree_search(root, tag);
337 if (!node)
338 return NULL;
339 return rb_entry(&node->node, struct tag_stat, tn.node);
340}
341
342static void tag_counter_set_tree_insert(struct tag_counter_set *data,
343 struct rb_root *root)
344{
345 tag_node_tree_insert(&data->tn, root);
346}
347
348static struct tag_counter_set *tag_counter_set_tree_search(struct rb_root *root,
349 tag_t tag)
350{
351 struct tag_node *node = tag_node_tree_search(root, tag);
352 if (!node)
353 return NULL;
354 return rb_entry(&node->node, struct tag_counter_set, tn.node);
355
356}
357
358static void tag_ref_tree_insert(struct tag_ref *data, struct rb_root *root)
359{
360 tag_node_tree_insert(&data->tn, root);
361}
362
363static struct tag_ref *tag_ref_tree_search(struct rb_root *root, tag_t tag)
364{
365 struct tag_node *node = tag_node_tree_search(root, tag);
366 if (!node)
367 return NULL;
368 return rb_entry(&node->node, struct tag_ref, tn.node);
369}
370
371static struct sock_tag *sock_tag_tree_search(struct rb_root *root,
372 const struct sock *sk)
373{
374 struct rb_node *node = root->rb_node;
375
376 while (node) {
377 struct sock_tag *data = rb_entry(node, struct sock_tag,
378 sock_node);
379 if (sk < data->sk)
380 node = node->rb_left;
381 else if (sk > data->sk)
382 node = node->rb_right;
383 else
384 return data;
385 }
386 return NULL;
387}
388
389static void sock_tag_tree_insert(struct sock_tag *data, struct rb_root *root)
390{
391 struct rb_node **new = &(root->rb_node), *parent = NULL;
392
393 /* Figure out where to put new node */
394 while (*new) {
395 struct sock_tag *this = rb_entry(*new, struct sock_tag,
396 sock_node);
397 parent = *new;
398 if (data->sk < this->sk)
399 new = &((*new)->rb_left);
400 else if (data->sk > this->sk)
401 new = &((*new)->rb_right);
402 else
403 BUG();
404 }
405
406 /* Add new node and rebalance tree. */
407 rb_link_node(&data->sock_node, parent, new);
408 rb_insert_color(&data->sock_node, root);
409}
410
411static void sock_tag_tree_erase(struct rb_root *st_to_free_tree)
412{
413 struct rb_node *node;
414 struct sock_tag *st_entry;
415
416 node = rb_first(st_to_free_tree);
417 while (node) {
418 st_entry = rb_entry(node, struct sock_tag, sock_node);
419 node = rb_next(node);
420 CT_DEBUG("qtaguid: %s(): "
421 "erase st: sk=%p tag=0x%llx (uid=%u)\n", __func__,
422 st_entry->sk,
423 st_entry->tag,
424 get_uid_from_tag(st_entry->tag));
425 rb_erase(&st_entry->sock_node, st_to_free_tree);
426 sockfd_put(st_entry->socket);
427 kfree(st_entry);
428 }
429}
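
/*
 * sock_tag_tree_erase() drains a private tree that callers such as
 * ctrl_cmd_delete() fill while holding sock_tag_list_lock: since
 * sockfd_put() must not run under that spinlock, entries are moved to
 * a temporary tree and released here, outside the lock.
 */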
430
431static struct proc_qtu_data *proc_qtu_data_tree_search(struct rb_root *root,
432 const pid_t pid)
433{
434 struct rb_node *node = root->rb_node;
435
436 while (node) {
437 struct proc_qtu_data *data = rb_entry(node,
438 struct proc_qtu_data,
439 node);
440 if (pid < data->pid)
441 node = node->rb_left;
442 else if (pid > data->pid)
443 node = node->rb_right;
444 else
445 return data;
446 }
447 return NULL;
448}
449
450static void proc_qtu_data_tree_insert(struct proc_qtu_data *data,
451 struct rb_root *root)
452{
453 struct rb_node **new = &(root->rb_node), *parent = NULL;
454
455 /* Figure out where to put new node */
456 while (*new) {
457 struct proc_qtu_data *this = rb_entry(*new,
458 struct proc_qtu_data,
459 node);
460 parent = *new;
461 if (data->pid < this->pid)
462 new = &((*new)->rb_left);
463 else if (data->pid > this->pid)
464 new = &((*new)->rb_right);
465 else
466 BUG();
467 }
468
469 /* Add new node and rebalance tree. */
470 rb_link_node(&data->node, parent, new);
471 rb_insert_color(&data->node, root);
472}
473
474static void uid_tag_data_tree_insert(struct uid_tag_data *data,
475 struct rb_root *root)
476{
477 struct rb_node **new = &(root->rb_node), *parent = NULL;
478
479 /* Figure out where to put new node */
480 while (*new) {
481 struct uid_tag_data *this = rb_entry(*new,
482 struct uid_tag_data,
483 node);
484 parent = *new;
485 if (data->uid < this->uid)
486 new = &((*new)->rb_left);
487 else if (data->uid > this->uid)
488 new = &((*new)->rb_right);
489 else
490 BUG();
491 }
492
493 /* Add new node and rebalance tree. */
494 rb_link_node(&data->node, parent, new);
495 rb_insert_color(&data->node, root);
496}
497
498static struct uid_tag_data *uid_tag_data_tree_search(struct rb_root *root,
499 uid_t uid)
500{
501 struct rb_node *node = root->rb_node;
502
503 while (node) {
504 struct uid_tag_data *data = rb_entry(node,
505 struct uid_tag_data,
506 node);
507 if (uid < data->uid)
508 node = node->rb_left;
509 else if (uid > data->uid)
510 node = node->rb_right;
511 else
512 return data;
513 }
514 return NULL;
515}
516
517/*
518 * Looks up the uid_tag_data for the given uid, allocating a new entry
519 * if none exists. Returns the found or allocated uid_tag_data, or a
520 * PTR_ERR on failure (in which case the lock is not held).
521 * If found_res is not NULL:
522 *   sets *found_res to true if an existing entry was found,
523 *   and to false if a new entry had to be allocated.
524 */
525struct uid_tag_data *get_uid_data(uid_t uid, bool *found_res)
526{
527 struct uid_tag_data *utd_entry;
528
529 /* Look for top level uid_tag_data for the UID */
530 utd_entry = uid_tag_data_tree_search(&uid_tag_data_tree, uid);
531 DR_DEBUG("qtaguid: get_uid_data(%u) utd=%p\n", uid, utd_entry);
532
533 if (found_res)
534		*found_res = utd_entry != NULL;
535 if (utd_entry)
536 return utd_entry;
537
538 utd_entry = kzalloc(sizeof(*utd_entry), GFP_ATOMIC);
539 if (!utd_entry) {
540 pr_err("qtaguid: get_uid_data(%u): "
541 "tag data alloc failed\n", uid);
542 return ERR_PTR(-ENOMEM);
543 }
544
545 utd_entry->uid = uid;
546 utd_entry->tag_ref_tree = RB_ROOT;
547 uid_tag_data_tree_insert(utd_entry, &uid_tag_data_tree);
548 DR_DEBUG("qtaguid: get_uid_data(%u) new utd=%p\n", uid, utd_entry);
549 return utd_entry;
550}
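
/*
 * A minimal call sketch, mirroring get_tag_ref() below; the caller
 * holds uid_tag_data_tree_lock across the call, which is why the
 * allocation above uses GFP_ATOMIC:
 *
 *	bool found;
 *	uid_t uid = current_fsuid();
 *	struct uid_tag_data *utd;
 *
 *	spin_lock_bh(&uid_tag_data_tree_lock);
 *	utd = get_uid_data(uid, &found);
 *	if (!IS_ERR_OR_NULL(utd)) {
 *		... use utd while the lock is held ...
 *	}
 *	spin_unlock_bh(&uid_tag_data_tree_lock);
 */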
551
552/* Never returns NULL. Either PTR_ERR or a valid ptr. */
553static struct tag_ref *new_tag_ref(tag_t new_tag,
554 struct uid_tag_data *utd_entry)
555{
556 struct tag_ref *tr_entry;
557 int res;
558
559 if (utd_entry->num_active_tags + 1 > max_sock_tags) {
560 pr_info("qtaguid: new_tag_ref(0x%llx): "
561 "tag ref alloc quota exceeded. max=%d\n",
562 new_tag, max_sock_tags);
563 res = -EMFILE;
564 goto err_res;
565
566 }
567
568 tr_entry = kzalloc(sizeof(*tr_entry), GFP_ATOMIC);
569 if (!tr_entry) {
570 pr_err("qtaguid: new_tag_ref(0x%llx): "
571 "tag ref alloc failed\n",
572 new_tag);
573 res = -ENOMEM;
574 goto err_res;
575 }
576 tr_entry->tn.tag = new_tag;
577 /* tr_entry->num_sock_tags handled by caller */
578 utd_entry->num_active_tags++;
579 tag_ref_tree_insert(tr_entry, &utd_entry->tag_ref_tree);
580 DR_DEBUG("qtaguid: new_tag_ref(0x%llx): "
581 " inserted new tag ref %p\n",
582 new_tag, tr_entry);
583 return tr_entry;
584
585err_res:
586 return ERR_PTR(res);
587}
588
589static struct tag_ref *lookup_tag_ref(tag_t full_tag,
590 struct uid_tag_data **utd_res)
591{
592 struct uid_tag_data *utd_entry;
593 struct tag_ref *tr_entry;
594 bool found_utd;
595 uid_t uid = get_uid_from_tag(full_tag);
596
597 DR_DEBUG("qtaguid: lookup_tag_ref(tag=0x%llx (uid=%u))\n",
598 full_tag, uid);
599
600 utd_entry = get_uid_data(uid, &found_utd);
601 if (IS_ERR_OR_NULL(utd_entry)) {
602 if (utd_res)
603 *utd_res = utd_entry;
604 return NULL;
605 }
606
607 tr_entry = tag_ref_tree_search(&utd_entry->tag_ref_tree, full_tag);
608 if (utd_res)
609 *utd_res = utd_entry;
610 DR_DEBUG("qtaguid: lookup_tag_ref(0x%llx) utd_entry=%p tr_entry=%p\n",
611 full_tag, utd_entry, tr_entry);
612 return tr_entry;
613}
614
615/* Never returns NULL. Either PTR_ERR or a valid ptr. */
616static struct tag_ref *get_tag_ref(tag_t full_tag,
617 struct uid_tag_data **utd_res)
618{
619 struct uid_tag_data *utd_entry;
620 struct tag_ref *tr_entry;
621
622 DR_DEBUG("qtaguid: get_tag_ref(0x%llx)\n",
623 full_tag);
624 spin_lock_bh(&uid_tag_data_tree_lock);
625 tr_entry = lookup_tag_ref(full_tag, &utd_entry);
626 BUG_ON(IS_ERR_OR_NULL(utd_entry));
627 if (!tr_entry)
628 tr_entry = new_tag_ref(full_tag, utd_entry);
629
630 spin_unlock_bh(&uid_tag_data_tree_lock);
631 if (utd_res)
632 *utd_res = utd_entry;
633 DR_DEBUG("qtaguid: get_tag_ref(0x%llx) utd=%p tr=%p\n",
634 full_tag, utd_entry, tr_entry);
635 return tr_entry;
636}
637
638/* Checks and maybe frees the UID Tag Data entry */
639static void put_utd_entry(struct uid_tag_data *utd_entry)
640{
641 /* Are we done with the UID tag data entry? */
642 if (RB_EMPTY_ROOT(&utd_entry->tag_ref_tree) &&
643 !utd_entry->num_pqd) {
644 DR_DEBUG("qtaguid: %s(): "
645 "erase utd_entry=%p uid=%u "
646 "by pid=%u tgid=%u uid=%u\n", __func__,
647 utd_entry, utd_entry->uid,
648 current->pid, current->tgid, current_fsuid());
649 BUG_ON(utd_entry->num_active_tags);
650 rb_erase(&utd_entry->node, &uid_tag_data_tree);
651 kfree(utd_entry);
652 } else {
653 DR_DEBUG("qtaguid: %s(): "
654 "utd_entry=%p still has %d tags %d proc_qtu_data\n",
655 __func__, utd_entry, utd_entry->num_active_tags,
656 utd_entry->num_pqd);
657 BUG_ON(!(utd_entry->num_active_tags ||
658 utd_entry->num_pqd));
659 }
660}
661
662/*
663 * If no sock_tags are using this tag_ref,
664 * decrements refcount of utd_entry, removes tr_entry
665 * from utd_entry->tag_ref_tree and frees.
666 */
667static void free_tag_ref_from_utd_entry(struct tag_ref *tr_entry,
668 struct uid_tag_data *utd_entry)
669{
670 DR_DEBUG("qtaguid: %s(): %p tag=0x%llx (uid=%u)\n", __func__,
671 tr_entry, tr_entry->tn.tag,
672 get_uid_from_tag(tr_entry->tn.tag));
673 if (!tr_entry->num_sock_tags) {
674 BUG_ON(!utd_entry->num_active_tags);
675 utd_entry->num_active_tags--;
676 rb_erase(&tr_entry->tn.node, &utd_entry->tag_ref_tree);
677 DR_DEBUG("qtaguid: %s(): erased %p\n", __func__, tr_entry);
678 kfree(tr_entry);
679 }
680}
681
682static void put_tag_ref_tree(tag_t full_tag, struct uid_tag_data *utd_entry)
683{
684 struct rb_node *node;
685 struct tag_ref *tr_entry;
686 tag_t acct_tag;
687
688 DR_DEBUG("qtaguid: %s(tag=0x%llx (uid=%u))\n", __func__,
689 full_tag, get_uid_from_tag(full_tag));
690 acct_tag = get_atag_from_tag(full_tag);
691 node = rb_first(&utd_entry->tag_ref_tree);
692 while (node) {
693 tr_entry = rb_entry(node, struct tag_ref, tn.node);
694 node = rb_next(node);
695 if (!acct_tag || tr_entry->tn.tag == full_tag)
696 free_tag_ref_from_utd_entry(tr_entry, utd_entry);
697 }
698}
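
/*
 * Note the filter above: with acct_tag == 0 every tag_ref of the uid
 * is offered for freeing (free_tag_ref_from_utd_entry() still skips
 * refs that sock_tags are using); with a non-zero acct_tag only the
 * exact full_tag is offered.
 */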
699
700static int read_proc_u64(char *page, char **start, off_t off,
701 int count, int *eof, void *data)
702{
703 int len;
704 uint64_t value;
705 char *p = page;
706 uint64_t *iface_entry = data;
707
708 if (!data)
709 return 0;
710
711 value = *iface_entry;
712 p += sprintf(p, "%llu\n", value);
713 len = (p - page) - off;
714 *eof = (len <= count) ? 1 : 0;
715 *start = page + off;
716 return len;
717}
718
719static int read_proc_bool(char *page, char **start, off_t off,
720 int count, int *eof, void *data)
721{
722 int len;
723 bool value;
724 char *p = page;
725 bool *bool_entry = data;
726
727 if (!data)
728 return 0;
729
730 value = *bool_entry;
731 p += sprintf(p, "%u\n", value);
732 len = (p - page) - off;
733 *eof = (len <= count) ? 1 : 0;
734 *start = page + off;
735 return len;
736}
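
/*
 * Both handlers above implement the legacy read_proc interface taken
 * by create_proc_read_entry(): print into *page, return the length
 * past off, and set *eof once the value fits in the caller's buffer.
 */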
737
738static int get_active_counter_set(tag_t tag)
739{
740 int active_set = 0;
741 struct tag_counter_set *tcs;
742
743 MT_DEBUG("qtaguid: get_active_counter_set(tag=0x%llx)"
744 " (uid=%u)\n",
745 tag, get_uid_from_tag(tag));
746 /* For now we only handle UID tags for active sets */
747 tag = get_utag_from_tag(tag);
748 spin_lock_bh(&tag_counter_set_list_lock);
749 tcs = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
750 if (tcs)
751 active_set = tcs->active_set;
752 spin_unlock_bh(&tag_counter_set_list_lock);
753 return active_set;
754}
755
756/*
757 * Find the entry for tracking the specified interface.
758 * Caller must hold iface_stat_list_lock
759 */
760static struct iface_stat *get_iface_entry(const char *ifname)
761{
762 struct iface_stat *iface_entry;
763
764	/* Find the entry for tracking the specified interface */
765 if (ifname == NULL) {
766 pr_info("qtaguid: iface_stat: get() NULL device name\n");
767 return NULL;
768 }
769
770 /* Iterate over interfaces */
771 list_for_each_entry(iface_entry, &iface_stat_list, list) {
772 if (!strcmp(ifname, iface_entry->ifname))
773 goto done;
774 }
775 iface_entry = NULL;
776done:
777 return iface_entry;
778}
779
780static int iface_stat_all_proc_read(char *page, char **num_items_returned,
781 off_t items_to_skip, int char_count,
782 int *eof, void *data)
783{
784 char *outp = page;
785 int item_index = 0;
786 int len;
787 struct iface_stat *iface_entry;
788 struct rtnl_link_stats64 dev_stats, *stats;
789 struct rtnl_link_stats64 no_dev_stats = {0};
790
791 if (unlikely(module_passive)) {
792 *eof = 1;
793 return 0;
794 }
795
796 CT_DEBUG("qtaguid:proc iface_stat_all "
797 "page=%p *num_items_returned=%p off=%ld "
798 "char_count=%d *eof=%d\n", page, *num_items_returned,
799 items_to_skip, char_count, *eof);
800
801 if (*eof)
802 return 0;
803
804 /*
805 * This lock will prevent iface_stat_update() from changing active,
806 * and in turn prevent an interface from unregistering itself.
807 */
808 spin_lock_bh(&iface_stat_list_lock);
809 list_for_each_entry(iface_entry, &iface_stat_list, list) {
810 if (item_index++ < items_to_skip)
811 continue;
812
813 if (iface_entry->active) {
814 stats = dev_get_stats(iface_entry->net_dev,
815 &dev_stats);
816 } else {
817 stats = &no_dev_stats;
818 }
819 len = snprintf(outp, char_count,
820 "%s %d "
821 "%llu %llu %llu %llu "
822 "%llu %llu %llu %llu\n",
823 iface_entry->ifname,
824 iface_entry->active,
825 iface_entry->totals[IFS_RX].bytes,
826 iface_entry->totals[IFS_RX].packets,
827 iface_entry->totals[IFS_TX].bytes,
828 iface_entry->totals[IFS_TX].packets,
829 stats->rx_bytes, stats->rx_packets,
830 stats->tx_bytes, stats->tx_packets);
831 if (len >= char_count) {
832 spin_unlock_bh(&iface_stat_list_lock);
833 *outp = '\0';
834 return outp - page;
835 }
836 outp += len;
837 char_count -= len;
838 (*num_items_returned)++;
839 }
840 spin_unlock_bh(&iface_stat_list_lock);
841
842 *eof = 1;
843 return outp - page;
844}
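
/*
 * Each line emitted above has the fixed layout:
 *
 *   ifname active rx_bytes rx_packets tx_bytes tx_packets
 *	dev_rx_bytes dev_rx_packets dev_tx_bytes dev_tx_packets
 *
 * where the first four counters are this module's accumulated totals
 * and the last four are the live (or zeroed, when inactive) device
 * stats.
 */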
845
846static void iface_create_proc_worker(struct work_struct *work)
847{
848 struct proc_dir_entry *proc_entry;
849 struct iface_stat_work *isw = container_of(work, struct iface_stat_work,
850 iface_work);
851 struct iface_stat *new_iface = isw->iface_entry;
852
853 /* iface_entries are not deleted, so safe to manipulate. */
854 proc_entry = proc_mkdir(new_iface->ifname, iface_stat_procdir);
855 if (IS_ERR_OR_NULL(proc_entry)) {
856 pr_err("qtaguid: iface_stat: create_proc(): alloc failed.\n");
857 kfree(isw);
858 return;
859 }
860
861 new_iface->proc_ptr = proc_entry;
862
863 create_proc_read_entry("tx_bytes", proc_iface_perms, proc_entry,
864 read_proc_u64, &new_iface->totals[IFS_TX].bytes);
865 create_proc_read_entry("rx_bytes", proc_iface_perms, proc_entry,
866 read_proc_u64, &new_iface->totals[IFS_RX].bytes);
867 create_proc_read_entry("tx_packets", proc_iface_perms, proc_entry,
868 read_proc_u64, &new_iface->totals[IFS_TX].packets);
869 create_proc_read_entry("rx_packets", proc_iface_perms, proc_entry,
870 read_proc_u64, &new_iface->totals[IFS_RX].packets);
871 create_proc_read_entry("active", proc_iface_perms, proc_entry,
872 read_proc_bool, &new_iface->active);
873
874 IF_DEBUG("qtaguid: iface_stat: create_proc(): done "
875 "entry=%p dev=%s\n", new_iface, new_iface->ifname);
876 kfree(isw);
877}
878
879/*
880 * Will set the entry's active state, and
881 * update the net_dev accordingly also.
882 */
883static void _iface_stat_set_active(struct iface_stat *entry,
884 struct net_device *net_dev,
885 bool activate)
886{
887 if (activate) {
888 entry->net_dev = net_dev;
889 entry->active = true;
890 IF_DEBUG("qtaguid: %s(%s): "
891 "enable tracking. rfcnt=%d\n", __func__,
892 entry->ifname,
893 percpu_read(*net_dev->pcpu_refcnt));
894 } else {
895 entry->active = false;
896 entry->net_dev = NULL;
897 IF_DEBUG("qtaguid: %s(%s): "
898 "disable tracking. rfcnt=%d\n", __func__,
899 entry->ifname,
900 percpu_read(*net_dev->pcpu_refcnt));
901
902 }
903}
904
905/* Caller must hold iface_stat_list_lock */
906static struct iface_stat *iface_alloc(struct net_device *net_dev)
907{
908 struct iface_stat *new_iface;
909 struct iface_stat_work *isw;
910
911 new_iface = kzalloc(sizeof(*new_iface), GFP_ATOMIC);
912 if (new_iface == NULL) {
913 pr_err("qtaguid: iface_stat: create(%s): "
914 "iface_stat alloc failed\n", net_dev->name);
915 return NULL;
916 }
917 new_iface->ifname = kstrdup(net_dev->name, GFP_ATOMIC);
918 if (new_iface->ifname == NULL) {
919 pr_err("qtaguid: iface_stat: create(%s): "
920 "ifname alloc failed\n", net_dev->name);
921 kfree(new_iface);
922 return NULL;
923 }
924 spin_lock_init(&new_iface->tag_stat_list_lock);
925 new_iface->tag_stat_tree = RB_ROOT;
926 _iface_stat_set_active(new_iface, net_dev, true);
927
928	/*
929	 * ipv6 notifier chains run in atomic context, so proc entry
930	 * creation (which may sleep) is deferred to a workqueue.
931	 */
932 isw = kmalloc(sizeof(*isw), GFP_ATOMIC);
933 if (!isw) {
934 pr_err("qtaguid: iface_stat: create(%s): "
935 "work alloc failed\n", new_iface->ifname);
936 _iface_stat_set_active(new_iface, net_dev, false);
937 kfree(new_iface->ifname);
938 kfree(new_iface);
939 return NULL;
940 }
941 isw->iface_entry = new_iface;
942 INIT_WORK(&isw->iface_work, iface_create_proc_worker);
943 schedule_work(&isw->iface_work);
944 list_add(&new_iface->list, &iface_stat_list);
945 return new_iface;
946}
947
948static void iface_check_stats_reset_and_adjust(struct net_device *net_dev,
949 struct iface_stat *iface)
950{
951 struct rtnl_link_stats64 dev_stats, *stats;
952 bool stats_rewound;
953
954 stats = dev_get_stats(net_dev, &dev_stats);
955	/* Detect counters that went backwards: the device reset its stats */
956 stats_rewound =
957 (stats->rx_bytes < iface->last_known[IFS_RX].bytes)
958 || (stats->tx_bytes < iface->last_known[IFS_TX].bytes);
959
960 IF_DEBUG("qtaguid: %s(%s): iface=%p netdev=%p "
961 "bytes rx/tx=%llu/%llu "
962 "active=%d last_known=%d "
963 "stats_rewound=%d\n", __func__,
964 net_dev ? net_dev->name : "?",
965 iface, net_dev,
966 stats->rx_bytes, stats->tx_bytes,
967 iface->active, iface->last_known_valid, stats_rewound);
968
969 if (iface->active && iface->last_known_valid && stats_rewound) {
970 pr_warn_once("qtaguid: iface_stat: %s(%s): "
971 "iface reset its stats unexpectedly\n", __func__,
972 net_dev->name);
973
974 iface->totals[IFS_TX].bytes += iface->last_known[IFS_TX].bytes;
975 iface->totals[IFS_TX].packets +=
976 iface->last_known[IFS_TX].packets;
977 iface->totals[IFS_RX].bytes += iface->last_known[IFS_RX].bytes;
978 iface->totals[IFS_RX].packets +=
979 iface->last_known[IFS_RX].packets;
980 iface->last_known_valid = false;
981 IF_DEBUG("qtaguid: %s(%s): iface=%p "
982 "used last known bytes rx/tx=%llu/%llu\n", __func__,
983 iface->ifname, iface, iface->last_known[IFS_RX].bytes,
984 iface->last_known[IFS_TX].bytes);
985 }
986}
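
/*
 * Rationale for the adjustment above: some drivers zero their counters
 * when an interface bounces. Folding the values stashed in last_known[]
 * (saved on NETDEV_DOWN by iface_stat_update()) back into totals[]
 * keeps the per-interface counters monotonic across such resets.
 */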
987
988/*
989 * Create a new entry for tracking the specified interface.
990 * Do nothing if the entry already exists.
991 * Called when an interface is configured with a valid IP address.
992 */
993static void iface_stat_create(struct net_device *net_dev,
994 struct in_ifaddr *ifa)
995{
996 struct in_device *in_dev = NULL;
997 const char *ifname;
998 struct iface_stat *entry;
999 __be32 ipaddr = 0;
1000 struct iface_stat *new_iface;
1001
1002 IF_DEBUG("qtaguid: iface_stat: create(%s): ifa=%p netdev=%p\n",
1003 net_dev ? net_dev->name : "?",
1004 ifa, net_dev);
1005 if (!net_dev) {
1006 pr_err("qtaguid: iface_stat: create(): no net dev\n");
1007 return;
1008 }
1009
1010 ifname = net_dev->name;
1011 if (!ifa) {
1012 in_dev = in_dev_get(net_dev);
1013 if (!in_dev) {
1014 pr_err("qtaguid: iface_stat: create(%s): no inet dev\n",
1015 ifname);
1016 return;
1017 }
1018 IF_DEBUG("qtaguid: iface_stat: create(%s): in_dev=%p\n",
1019 ifname, in_dev);
1020 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
1021 IF_DEBUG("qtaguid: iface_stat: create(%s): "
1022 "ifa=%p ifa_label=%s\n",
1023 ifname, ifa,
1024 ifa->ifa_label ? ifa->ifa_label : "(null)");
1025 if (ifa->ifa_label && !strcmp(ifname, ifa->ifa_label))
1026 break;
1027 }
1028 }
1029
1030 if (!ifa) {
1031 IF_DEBUG("qtaguid: iface_stat: create(%s): no matching IP\n",
1032 ifname);
1033 goto done_put;
1034 }
1035 ipaddr = ifa->ifa_local;
1036
1037 spin_lock_bh(&iface_stat_list_lock);
1038 entry = get_iface_entry(ifname);
1039 if (entry != NULL) {
1040 bool activate = !ipv4_is_loopback(ipaddr);
1041 IF_DEBUG("qtaguid: iface_stat: create(%s): entry=%p\n",
1042 ifname, entry);
1043 iface_check_stats_reset_and_adjust(net_dev, entry);
1044 _iface_stat_set_active(entry, net_dev, activate);
1045 IF_DEBUG("qtaguid: %s(%s): "
1046 "tracking now %d on ip=%pI4\n", __func__,
1047 entry->ifname, activate, &ipaddr);
1048 goto done_unlock_put;
1049 } else if (ipv4_is_loopback(ipaddr)) {
1050 IF_DEBUG("qtaguid: iface_stat: create(%s): "
1051 "ignore loopback dev. ip=%pI4\n", ifname, &ipaddr);
1052 goto done_unlock_put;
1053 }
1054
1055 new_iface = iface_alloc(net_dev);
1056 IF_DEBUG("qtaguid: iface_stat: create(%s): done "
1057 "entry=%p ip=%pI4\n", ifname, new_iface, &ipaddr);
1058done_unlock_put:
1059 spin_unlock_bh(&iface_stat_list_lock);
1060done_put:
1061 if (in_dev)
1062 in_dev_put(in_dev);
1063}
1064
1065static void iface_stat_create_ipv6(struct net_device *net_dev,
1066 struct inet6_ifaddr *ifa)
1067{
1068 struct in_device *in_dev;
1069 const char *ifname;
1070 struct iface_stat *entry;
1071 struct iface_stat *new_iface;
1072 int addr_type;
1073
1074 IF_DEBUG("qtaguid: iface_stat: create6(): ifa=%p netdev=%p->name=%s\n",
1075 ifa, net_dev, net_dev ? net_dev->name : "");
1076 if (!net_dev) {
1077 pr_err("qtaguid: iface_stat: create6(): no net dev!\n");
1078 return;
1079 }
1080 ifname = net_dev->name;
1081
1082 in_dev = in_dev_get(net_dev);
1083 if (!in_dev) {
1084 pr_err("qtaguid: iface_stat: create6(%s): no inet dev\n",
1085 ifname);
1086 return;
1087 }
1088
1089 IF_DEBUG("qtaguid: iface_stat: create6(%s): in_dev=%p\n",
1090 ifname, in_dev);
1091
1092 if (!ifa) {
1093 IF_DEBUG("qtaguid: iface_stat: create6(%s): no matching IP\n",
1094 ifname);
1095 goto done_put;
1096 }
1097 addr_type = ipv6_addr_type(&ifa->addr);
1098
1099 spin_lock_bh(&iface_stat_list_lock);
1100 entry = get_iface_entry(ifname);
1101 if (entry != NULL) {
1102 bool activate = !(addr_type & IPV6_ADDR_LOOPBACK);
1103 IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
1104 ifname, entry);
1105 iface_check_stats_reset_and_adjust(net_dev, entry);
1106 _iface_stat_set_active(entry, net_dev, activate);
1107 IF_DEBUG("qtaguid: %s(%s): "
1108 "tracking now %d on ip=%pI6c\n", __func__,
1109 entry->ifname, activate, &ifa->addr);
1110 goto done_unlock_put;
1111 } else if (addr_type & IPV6_ADDR_LOOPBACK) {
1112 IF_DEBUG("qtaguid: %s(%s): "
1113 "ignore loopback dev. ip=%pI6c\n", __func__,
1114 ifname, &ifa->addr);
1115 goto done_unlock_put;
1116 }
1117
1118 new_iface = iface_alloc(net_dev);
1119 IF_DEBUG("qtaguid: iface_stat: create6(%s): done "
1120 "entry=%p ip=%pI6c\n", ifname, new_iface, &ifa->addr);
1121
1122done_unlock_put:
1123 spin_unlock_bh(&iface_stat_list_lock);
1124done_put:
1125 in_dev_put(in_dev);
1126}
1127
1128static struct sock_tag *get_sock_stat_nl(const struct sock *sk)
1129{
1130 MT_DEBUG("qtaguid: get_sock_stat_nl(sk=%p)\n", sk);
1131 return sock_tag_tree_search(&sock_tag_tree, sk);
1132}
1133
1134static struct sock_tag *get_sock_stat(const struct sock *sk)
1135{
1136 struct sock_tag *sock_tag_entry;
1137 MT_DEBUG("qtaguid: get_sock_stat(sk=%p)\n", sk);
1138 if (!sk)
1139 return NULL;
1140 spin_lock_bh(&sock_tag_list_lock);
1141 sock_tag_entry = get_sock_stat_nl(sk);
1142 spin_unlock_bh(&sock_tag_list_lock);
1143 return sock_tag_entry;
1144}
1145
1146static void
1147data_counters_update(struct data_counters *dc, int set,
1148 enum ifs_tx_rx direction, int proto, int bytes)
1149{
1150 switch (proto) {
1151 case IPPROTO_TCP:
1152 dc_add_byte_packets(dc, set, direction, IFS_TCP, bytes, 1);
1153 break;
1154 case IPPROTO_UDP:
1155 dc_add_byte_packets(dc, set, direction, IFS_UDP, bytes, 1);
1156 break;
1157 case IPPROTO_IP:
1158 default:
1159 dc_add_byte_packets(dc, set, direction, IFS_PROTO_OTHER, bytes,
1160 1);
1161 break;
1162 }
1163}
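
/*
 * Every IP protocol other than TCP and UDP lands in the IFS_PROTO_OTHER
 * bucket, so e.g. one 84-byte ICMP echo is accounted as a single
 * IFS_PROTO_OTHER packet of 84 bytes in the active counter set.
 */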
1164
1165/*
1166 * Update stats for the specified interface. Do nothing if the entry
1167 * does not exist (when a device was never configured with an IP address).
1168 * Called when an device is being unregistered.
1169 */
1170static void iface_stat_update(struct net_device *net_dev, bool stash_only)
1171{
1172 struct rtnl_link_stats64 dev_stats, *stats;
1173 struct iface_stat *entry;
1174
1175 stats = dev_get_stats(net_dev, &dev_stats);
1176 spin_lock_bh(&iface_stat_list_lock);
1177 entry = get_iface_entry(net_dev->name);
1178 if (entry == NULL) {
1179 IF_DEBUG("qtaguid: iface_stat: update(%s): not tracked\n",
1180 net_dev->name);
1181 spin_unlock_bh(&iface_stat_list_lock);
1182 return;
1183 }
1184
1185 IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
1186 net_dev->name, entry);
1187 if (!entry->active) {
1188 IF_DEBUG("qtaguid: %s(%s): already disabled\n", __func__,
1189 net_dev->name);
1190 spin_unlock_bh(&iface_stat_list_lock);
1191 return;
1192 }
1193
1194 if (stash_only) {
1195 entry->last_known[IFS_TX].bytes = stats->tx_bytes;
1196 entry->last_known[IFS_TX].packets = stats->tx_packets;
1197 entry->last_known[IFS_RX].bytes = stats->rx_bytes;
1198 entry->last_known[IFS_RX].packets = stats->rx_packets;
1199 entry->last_known_valid = true;
1200 IF_DEBUG("qtaguid: %s(%s): "
1201 "dev stats stashed rx/tx=%llu/%llu\n", __func__,
1202 net_dev->name, stats->rx_bytes, stats->tx_bytes);
1203 spin_unlock_bh(&iface_stat_list_lock);
1204 return;
1205 }
1206 entry->totals[IFS_TX].bytes += stats->tx_bytes;
1207 entry->totals[IFS_TX].packets += stats->tx_packets;
1208 entry->totals[IFS_RX].bytes += stats->rx_bytes;
1209 entry->totals[IFS_RX].packets += stats->rx_packets;
1210 /* We don't need the last_known[] anymore */
1211 entry->last_known_valid = false;
1212 _iface_stat_set_active(entry, net_dev, false);
1213 IF_DEBUG("qtaguid: %s(%s): "
1214 "disable tracking. rx/tx=%llu/%llu\n", __func__,
1215 net_dev->name, stats->rx_bytes, stats->tx_bytes);
1216 spin_unlock_bh(&iface_stat_list_lock);
1217}
1218
1219static void tag_stat_update(struct tag_stat *tag_entry,
1220 enum ifs_tx_rx direction, int proto, int bytes)
1221{
1222 int active_set;
1223 active_set = get_active_counter_set(tag_entry->tn.tag);
1224 MT_DEBUG("qtaguid: tag_stat_update(tag=0x%llx (uid=%u) set=%d "
1225 "dir=%d proto=%d bytes=%d)\n",
1226 tag_entry->tn.tag, get_uid_from_tag(tag_entry->tn.tag),
1227 active_set, direction, proto, bytes);
1228 data_counters_update(&tag_entry->counters, active_set, direction,
1229 proto, bytes);
1230 if (tag_entry->parent_counters)
1231 data_counters_update(tag_entry->parent_counters, active_set,
1232 direction, proto, bytes);
1233}
1234
1235/*
1236 * Create a new entry for tracking the specified {acct_tag,uid_tag} within
1237 * the interface.
1238 * iface_entry->tag_stat_list_lock should be held.
1239 */
1240static struct tag_stat *create_if_tag_stat(struct iface_stat *iface_entry,
1241 tag_t tag)
1242{
1243 struct tag_stat *new_tag_stat_entry = NULL;
1244 IF_DEBUG("qtaguid: iface_stat: %s(): ife=%p tag=0x%llx"
1245 " (uid=%u)\n", __func__,
1246 iface_entry, tag, get_uid_from_tag(tag));
1247 new_tag_stat_entry = kzalloc(sizeof(*new_tag_stat_entry), GFP_ATOMIC);
1248 if (!new_tag_stat_entry) {
1249 pr_err("qtaguid: iface_stat: tag stat alloc failed\n");
1250 goto done;
1251 }
1252 new_tag_stat_entry->tn.tag = tag;
1253 tag_stat_tree_insert(new_tag_stat_entry, &iface_entry->tag_stat_tree);
1254done:
1255 return new_tag_stat_entry;
1256}
1257
1258static void if_tag_stat_update(const char *ifname, uid_t uid,
1259 const struct sock *sk, enum ifs_tx_rx direction,
1260 int proto, int bytes)
1261{
1262 struct tag_stat *tag_stat_entry;
1263 tag_t tag, acct_tag;
1264 tag_t uid_tag;
1265 struct data_counters *uid_tag_counters;
1266 struct sock_tag *sock_tag_entry;
1267 struct iface_stat *iface_entry;
1268 struct tag_stat *new_tag_stat;
1269 MT_DEBUG("qtaguid: if_tag_stat_update(ifname=%s "
1270 "uid=%u sk=%p dir=%d proto=%d bytes=%d)\n",
1271 ifname, uid, sk, direction, proto, bytes);
1272
1274 iface_entry = get_iface_entry(ifname);
1275 if (!iface_entry) {
1276 pr_err("qtaguid: iface_stat: stat_update() %s not found\n",
1277 ifname);
1278 return;
1279 }
1280 /* It is ok to process data when an iface_entry is inactive */
1281
1282 MT_DEBUG("qtaguid: iface_stat: stat_update() dev=%s entry=%p\n",
1283 ifname, iface_entry);
1284
1285 /*
1286 * Look for a tagged sock.
1287 * It will have an acct_uid.
1288 */
1289 sock_tag_entry = get_sock_stat(sk);
1290 if (sock_tag_entry) {
1291 tag = sock_tag_entry->tag;
1292 acct_tag = get_atag_from_tag(tag);
1293 uid_tag = get_utag_from_tag(tag);
1294 } else {
1295 acct_tag = make_atag_from_value(0);
1296 tag = combine_atag_with_uid(acct_tag, uid);
1297 uid_tag = make_tag_from_uid(uid);
1298 }
1299 MT_DEBUG("qtaguid: iface_stat: stat_update(): "
1300 " looking for tag=0x%llx (uid=%u) in ife=%p\n",
1301 tag, get_uid_from_tag(tag), iface_entry);
1302 /* Loop over tag list under this interface for {acct_tag,uid_tag} */
1303 spin_lock_bh(&iface_entry->tag_stat_list_lock);
1304
1305 tag_stat_entry = tag_stat_tree_search(&iface_entry->tag_stat_tree,
1306 tag);
1307 if (tag_stat_entry) {
1308 /*
1309 * Updating the {acct_tag, uid_tag} entry handles both stats:
1310 * {0, uid_tag} will also get updated.
1311 */
1312 tag_stat_update(tag_stat_entry, direction, proto, bytes);
1313 spin_unlock_bh(&iface_entry->tag_stat_list_lock);
1314 return;
1315 }
1316
1317 /* Loop over tag list under this interface for {0,uid_tag} */
1318 tag_stat_entry = tag_stat_tree_search(&iface_entry->tag_stat_tree,
1319 uid_tag);
1320 if (!tag_stat_entry) {
1321 /* Here: the base uid_tag did not exist */
1322 /*
1323 * No parent counters. So
1324 * - No {0, uid_tag} stats and no {acc_tag, uid_tag} stats.
1325 */
1326 new_tag_stat = create_if_tag_stat(iface_entry, uid_tag);
1327 uid_tag_counters = &new_tag_stat->counters;
1328 } else {
1329 uid_tag_counters = &tag_stat_entry->counters;
1330 }
1331
1332 if (acct_tag) {
1333 new_tag_stat = create_if_tag_stat(iface_entry, tag);
1334 new_tag_stat->parent_counters = uid_tag_counters;
1335 }
1336 tag_stat_update(new_tag_stat, direction, proto, bytes);
1337 spin_unlock_bh(&iface_entry->tag_stat_list_lock);
1338}
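
/*
 * Accounting layout built above: every uid gets a base {0, uid_tag}
 * tag_stat per interface, and each tagged socket additionally gets a
 * {acct_tag, uid_tag} tag_stat whose parent_counters points at the
 * base entry, so one tag_stat_update() bumps both the per-tag and the
 * per-uid counters.
 */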
1339
1340static int iface_netdev_event_handler(struct notifier_block *nb,
1341 unsigned long event, void *ptr) {
1342 struct net_device *dev = ptr;
1343
1344 if (unlikely(module_passive))
1345 return NOTIFY_DONE;
1346
1347 IF_DEBUG("qtaguid: iface_stat: netdev_event(): "
1348 "ev=0x%lx/%s netdev=%p->name=%s\n",
1349 event, netdev_evt_str(event), dev, dev ? dev->name : "");
1350
1351 switch (event) {
1352 case NETDEV_UP:
1353 iface_stat_create(dev, NULL);
1354 atomic64_inc(&qtu_events.iface_events);
1355 break;
1356 case NETDEV_DOWN:
1357 case NETDEV_UNREGISTER:
1358 iface_stat_update(dev, event == NETDEV_DOWN);
1359 atomic64_inc(&qtu_events.iface_events);
1360 break;
1361 }
1362 return NOTIFY_DONE;
1363}
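
/*
 * Event mapping above: NETDEV_UP creates or reactivates the tracking
 * entry; NETDEV_DOWN only stashes the current device stats (stash_only)
 * in case the device resets them; NETDEV_UNREGISTER folds the stats
 * into the totals and deactivates the entry.
 */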
1364
1365static int iface_inet6addr_event_handler(struct notifier_block *nb,
1366 unsigned long event, void *ptr)
1367{
1368 struct inet6_ifaddr *ifa = ptr;
1369 struct net_device *dev;
1370
1371 if (unlikely(module_passive))
1372 return NOTIFY_DONE;
1373
1374 IF_DEBUG("qtaguid: iface_stat: inet6addr_event(): "
1375 "ev=0x%lx/%s ifa=%p\n",
1376 event, netdev_evt_str(event), ifa);
1377
1378 switch (event) {
1379 case NETDEV_UP:
1380 BUG_ON(!ifa || !ifa->idev);
1381 dev = (struct net_device *)ifa->idev->dev;
1382 iface_stat_create_ipv6(dev, ifa);
1383 atomic64_inc(&qtu_events.iface_events);
1384 break;
1385 case NETDEV_DOWN:
1386 case NETDEV_UNREGISTER:
1387 BUG_ON(!ifa || !ifa->idev);
1388 dev = (struct net_device *)ifa->idev->dev;
1389 iface_stat_update(dev, event == NETDEV_DOWN);
1390 atomic64_inc(&qtu_events.iface_events);
1391 break;
1392 }
1393 return NOTIFY_DONE;
1394}
1395
1396static int iface_inetaddr_event_handler(struct notifier_block *nb,
1397 unsigned long event, void *ptr)
1398{
1399 struct in_ifaddr *ifa = ptr;
1400 struct net_device *dev;
1401
1402 if (unlikely(module_passive))
1403 return NOTIFY_DONE;
1404
1405 IF_DEBUG("qtaguid: iface_stat: inetaddr_event(): "
1406 "ev=0x%lx/%s ifa=%p\n",
1407 event, netdev_evt_str(event), ifa);
1408
1409 switch (event) {
1410 case NETDEV_UP:
1411 BUG_ON(!ifa || !ifa->ifa_dev);
1412 dev = ifa->ifa_dev->dev;
1413 iface_stat_create(dev, ifa);
1414 atomic64_inc(&qtu_events.iface_events);
1415 break;
1416 case NETDEV_DOWN:
1417 case NETDEV_UNREGISTER:
1418 BUG_ON(!ifa || !ifa->ifa_dev);
1419 dev = ifa->ifa_dev->dev;
1420 iface_stat_update(dev, event == NETDEV_DOWN);
1421 atomic64_inc(&qtu_events.iface_events);
1422 break;
1423 }
1424 return NOTIFY_DONE;
1425}
1426
1427static struct notifier_block iface_netdev_notifier_blk = {
1428 .notifier_call = iface_netdev_event_handler,
1429};
1430
1431static struct notifier_block iface_inetaddr_notifier_blk = {
1432 .notifier_call = iface_inetaddr_event_handler,
1433};
1434
1435static struct notifier_block iface_inet6addr_notifier_blk = {
1436 .notifier_call = iface_inet6addr_event_handler,
1437};
1438
1439static int __init iface_stat_init(struct proc_dir_entry *parent_procdir)
1440{
1441 int err;
1442
1443 iface_stat_procdir = proc_mkdir(iface_stat_procdirname, parent_procdir);
1444 if (!iface_stat_procdir) {
1445 pr_err("qtaguid: iface_stat: init failed to create proc entry\n");
1446 err = -1;
1447 goto err;
1448 }
1449
1450 iface_stat_all_procfile = create_proc_entry(iface_stat_all_procfilename,
1451 proc_iface_perms,
1452 parent_procdir);
1453 if (!iface_stat_all_procfile) {
1454		pr_err("qtaguid: iface_stat: init "
1455		       "failed to create stat_all proc entry\n");
1456 err = -1;
1457 goto err_zap_entry;
1458 }
1459 iface_stat_all_procfile->read_proc = iface_stat_all_proc_read;
1460
1462 err = register_netdevice_notifier(&iface_netdev_notifier_blk);
1463 if (err) {
1464 pr_err("qtaguid: iface_stat: init "
1465 "failed to register dev event handler\n");
1466 goto err_zap_all_stats_entry;
1467 }
1468 err = register_inetaddr_notifier(&iface_inetaddr_notifier_blk);
1469 if (err) {
1470 pr_err("qtaguid: iface_stat: init "
1471 "failed to register ipv4 dev event handler\n");
1472 goto err_unreg_nd;
1473 }
1474
1475 err = register_inet6addr_notifier(&iface_inet6addr_notifier_blk);
1476 if (err) {
1477 pr_err("qtaguid: iface_stat: init "
1478 "failed to register ipv6 dev event handler\n");
1479 goto err_unreg_ip4_addr;
1480 }
1481 return 0;
1482
1483err_unreg_ip4_addr:
1484 unregister_inetaddr_notifier(&iface_inetaddr_notifier_blk);
1485err_unreg_nd:
1486 unregister_netdevice_notifier(&iface_netdev_notifier_blk);
1487err_zap_all_stats_entry:
1488 remove_proc_entry(iface_stat_all_procfilename, parent_procdir);
1489err_zap_entry:
1490 remove_proc_entry(iface_stat_procdirname, parent_procdir);
1491err:
1492 return err;
1493}
1494
1495static struct sock *qtaguid_find_sk(const struct sk_buff *skb,
1496 struct xt_action_param *par)
1497{
1498 struct sock *sk;
1499 unsigned int hook_mask = (1 << par->hooknum);
1500
1501 MT_DEBUG("qtaguid: find_sk(skb=%p) hooknum=%d family=%d\n", skb,
1502 par->hooknum, par->family);
1503
1504 /*
1505	 * Let's not abuse the xt_socket_get*_sk(), or else it will
1506 * return garbage SKs.
1507 */
1508 if (!(hook_mask & XT_SOCKET_SUPPORTED_HOOKS))
1509 return NULL;
1510
1511 switch (par->family) {
1512 case NFPROTO_IPV6:
1513 sk = xt_socket_get6_sk(skb, par);
1514 break;
1515 case NFPROTO_IPV4:
1516 sk = xt_socket_get4_sk(skb, par);
1517 break;
1518 default:
1519 return NULL;
1520 }
1521
1522 /*
1523 * Seems to be issues on the file ptr for TCP_TIME_WAIT SKs.
1524 * http://kerneltrap.org/mailarchive/linux-netdev/2010/10/21/6287959
1525 * Not fixed in 3.0-r3 :(
1526 */
1527 if (sk) {
1528 MT_DEBUG("qtaguid: %p->sk_proto=%u "
1529 "->sk_state=%d\n", sk, sk->sk_protocol, sk->sk_state);
1530 if (sk->sk_state == TCP_TIME_WAIT) {
1531 xt_socket_put_sk(sk);
1532 sk = NULL;
1533 }
1534 }
1535 return sk;
1536}
1537
1538static void account_for_uid(const struct sk_buff *skb,
1539 const struct sock *alternate_sk, uid_t uid,
1540 struct xt_action_param *par)
1541{
1542 const struct net_device *el_dev;
1543
1544 if (!skb->dev) {
1545 MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum);
1546 el_dev = par->in ? : par->out;
1547 } else {
1548 const struct net_device *other_dev;
1549 el_dev = skb->dev;
1550 other_dev = par->in ? : par->out;
1551 if (el_dev != other_dev) {
1552 MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs "
1553 "par->(in/out)=%p %s\n",
1554 par->hooknum, el_dev, el_dev->name, other_dev,
1555 other_dev->name);
1556 }
1557 }
1558
1559 if (unlikely(!el_dev)) {
1560 pr_info("qtaguid[%d]: no par->in/out?!!\n", par->hooknum);
1561 } else if (unlikely(!el_dev->name)) {
1562 pr_info("qtaguid[%d]: no dev->name?!!\n", par->hooknum);
1563 } else {
1564 MT_DEBUG("qtaguid[%d]: dev name=%s type=%d\n",
1565 par->hooknum,
1566 el_dev->name,
1567 el_dev->type);
1568
1569 if_tag_stat_update(el_dev->name, uid,
1570 skb->sk ? skb->sk : alternate_sk,
1571 par->in ? IFS_RX : IFS_TX,
1572 ip_hdr(skb)->protocol, skb->len);
1573 }
1574}
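
/*
 * Device selection above: prefer skb->dev when present, otherwise fall
 * back to the hook's par->in/par->out. The MT_DEBUG only reports a
 * mismatch between the two; it does not change the choice.
 */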
1575
1576static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
1577{
1578 const struct xt_qtaguid_match_info *info = par->matchinfo;
1579 const struct file *filp;
1580 bool got_sock = false;
1581 struct sock *sk;
1582 uid_t sock_uid;
1583 bool res;
1584
1585 if (unlikely(module_passive))
1586 return (info->match ^ info->invert) == 0;
1587
1588 MT_DEBUG("qtaguid[%d]: entered skb=%p par->in=%p/out=%p fam=%d\n",
1589 par->hooknum, skb, par->in, par->out, par->family);
1590
1591 atomic64_inc(&qtu_events.match_calls);
1592 if (skb == NULL) {
1593 res = (info->match ^ info->invert) == 0;
1594 goto ret_res;
1595 }
1596
1597 sk = skb->sk;
1598
1599 if (sk == NULL) {
1600 /*
1601		 * A missing sk happens when packets are in-flight and the
1602		 * matching socket has already been closed and freed.
1603 */
1604 sk = qtaguid_find_sk(skb, par);
1605 /*
1606 * If we got the socket from the find_sk(), we will need to put
1607 * it back, as nf_tproxy_get_sock_v4() got it.
1608 */
1609 got_sock = sk;
1610 if (sk)
1611 atomic64_inc(&qtu_events.match_found_sk_in_ct);
1612 else
1613 atomic64_inc(&qtu_events.match_found_no_sk_in_ct);
1614 } else {
1615 atomic64_inc(&qtu_events.match_found_sk);
1616 }
1617 MT_DEBUG("qtaguid[%d]: sk=%p got_sock=%d proto=%d\n",
1618 par->hooknum, sk, got_sock, ip_hdr(skb)->protocol);
1619 if (sk != NULL) {
1620 MT_DEBUG("qtaguid[%d]: sk=%p->sk_socket=%p->file=%p\n",
1621 par->hooknum, sk, sk->sk_socket,
1622 sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
1623 filp = sk->sk_socket ? sk->sk_socket->file : NULL;
1624 MT_DEBUG("qtaguid[%d]: filp...uid=%u\n",
1625 par->hooknum, filp ? filp->f_cred->fsuid : -1);
1626 }
1627
1628 if (sk == NULL || sk->sk_socket == NULL) {
1629 /*
1630 * Here, the qtaguid_find_sk() using connection tracking
1631 * couldn't find the owner, so for now we just count them
1632 * against the system.
1633 */
1634 /*
1635 * TODO: unhack how to force just accounting.
1636 * For now we only do iface stats when the uid-owner is not
1637 * requested.
1638 */
1639 if (!(info->match & XT_QTAGUID_UID))
1640 account_for_uid(skb, sk, 0, par);
1641 MT_DEBUG("qtaguid[%d]: leaving (sk?sk->sk_socket)=%p\n",
1642 par->hooknum,
1643 sk ? sk->sk_socket : NULL);
1644 res = (info->match ^ info->invert) == 0;
1645 atomic64_inc(&qtu_events.match_no_sk);
1646 goto put_sock_ret_res;
1647 } else if (info->match & info->invert & XT_QTAGUID_SOCKET) {
1648 res = false;
1649 goto put_sock_ret_res;
1650 }
1651 filp = sk->sk_socket->file;
1652 if (filp == NULL) {
1653 MT_DEBUG("qtaguid[%d]: leaving filp=NULL\n", par->hooknum);
1654 account_for_uid(skb, sk, 0, par);
1655 res = ((info->match ^ info->invert) &
1656 (XT_QTAGUID_UID | XT_QTAGUID_GID)) == 0;
1657 atomic64_inc(&qtu_events.match_no_sk_file);
1658 goto put_sock_ret_res;
1659 }
1660 sock_uid = filp->f_cred->fsuid;
1661 /*
1662 * TODO: unhack how to force just accounting.
1663 * For now we only do iface stats when the uid-owner is not requested
1664 */
1665 if (!(info->match & XT_QTAGUID_UID))
1666 account_for_uid(skb, sk, sock_uid, par);
1667
1668 /*
1669 * The following two tests fail the match when:
1670 * id not in range AND no inverted condition requested
1671 * or id in range AND inverted condition requested
1672 * Thus (!a && b) || (a && !b) == a ^ b
1673 */
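	/*
	 * Worked truth table, with a = "id in range" and b = "no
	 * inverted condition requested" (!invert); the tests below fail
	 * the match exactly when a ^ b:
	 *
	 *   in range  inverted  a ^ b  fails?
	 *      no        no       1     yes
	 *      yes       no       0     no
	 *      no        yes      0     no
	 *      yes       yes      1     yes
	 */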
1674 if (info->match & XT_QTAGUID_UID)
1675 if ((filp->f_cred->fsuid >= info->uid_min &&
1676 filp->f_cred->fsuid <= info->uid_max) ^
1677 !(info->invert & XT_QTAGUID_UID)) {
1678 MT_DEBUG("qtaguid[%d]: leaving uid not matching\n",
1679 par->hooknum);
1680 res = false;
1681 goto put_sock_ret_res;
1682 }
1683 if (info->match & XT_QTAGUID_GID)
1684 if ((filp->f_cred->fsgid >= info->gid_min &&
1685 filp->f_cred->fsgid <= info->gid_max) ^
1686 !(info->invert & XT_QTAGUID_GID)) {
1687 MT_DEBUG("qtaguid[%d]: leaving gid not matching\n",
1688 par->hooknum);
1689 res = false;
1690 goto put_sock_ret_res;
1691 }
1692
1693 MT_DEBUG("qtaguid[%d]: leaving matched\n", par->hooknum);
1694 res = true;
1695
1696put_sock_ret_res:
1697 if (got_sock)
1698 xt_socket_put_sk(sk);
1699ret_res:
1700 MT_DEBUG("qtaguid[%d]: left %d\n", par->hooknum, res);
1701 return res;
1702}
1703
1704#ifdef DDEBUG
1705/* This function is not in xt_qtaguid_print.c because of locks visibility */
1706static void prdebug_full_state(int indent_level, const char *fmt, ...)
1707{
1708 va_list args;
1709 char *fmt_buff;
1710 char *buff;
1711
1712	if (likely(!(qtaguid_debug_mask & DDEBUG_MASK)))
1713 return;
1714
1715 fmt_buff = kasprintf(GFP_ATOMIC,
1716 "qtaguid: %s(): %s {\n", __func__, fmt);
1717 BUG_ON(!fmt_buff);
1718 va_start(args, fmt);
1719 buff = kvasprintf(GFP_ATOMIC,
1720 fmt_buff, args);
1721 BUG_ON(!buff);
1722 pr_debug("%s", buff);
1723 kfree(fmt_buff);
1724 kfree(buff);
1725 va_end(args);
1726
1727 spin_lock_bh(&sock_tag_list_lock);
1728 prdebug_sock_tag_tree(indent_level, &sock_tag_tree);
1729 spin_unlock_bh(&sock_tag_list_lock);
1730
1731 spin_lock_bh(&sock_tag_list_lock);
1732 spin_lock_bh(&uid_tag_data_tree_lock);
1733 prdebug_uid_tag_data_tree(indent_level, &uid_tag_data_tree);
1734 prdebug_proc_qtu_data_tree(indent_level, &proc_qtu_data_tree);
1735 spin_unlock_bh(&uid_tag_data_tree_lock);
1736 spin_unlock_bh(&sock_tag_list_lock);
1737
1738 spin_lock_bh(&iface_stat_list_lock);
1739 prdebug_iface_stat_list(indent_level, &iface_stat_list);
1740 spin_unlock_bh(&iface_stat_list_lock);
1741
1742 pr_debug("qtaguid: %s(): }\n", __func__);
1743}
1744#else
1745static void prdebug_full_state(int indent_level, const char *fmt, ...) {}
1746#endif
1747
1748/*
1749 * Procfs reader to get all active socket tags using style "1)" as described in
1750 * fs/proc/generic.c
1751 */
1752static int qtaguid_ctrl_proc_read(char *page, char **num_items_returned,
1753 off_t items_to_skip, int char_count, int *eof,
1754 void *data)
1755{
1756 char *outp = page;
1757 int len;
1758 uid_t uid;
1759 struct rb_node *node;
1760 struct sock_tag *sock_tag_entry;
1761 int item_index = 0;
1762 int indent_level = 0;
1763 long f_count;
1764
1765 if (unlikely(module_passive)) {
1766 *eof = 1;
1767 return 0;
1768 }
1769
1770 if (*eof)
1771 return 0;
1772
1773 CT_DEBUG("qtaguid: proc ctrl page=%p off=%ld char_count=%d *eof=%d\n",
1774 page, items_to_skip, char_count, *eof);
1775
1776 spin_lock_bh(&sock_tag_list_lock);
1777 for (node = rb_first(&sock_tag_tree);
1778 node;
1779 node = rb_next(node)) {
1780 if (item_index++ < items_to_skip)
1781 continue;
1782 sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
1783 uid = get_uid_from_tag(sock_tag_entry->tag);
1784 CT_DEBUG("qtaguid: proc_read(): sk=%p tag=0x%llx (uid=%u) "
1785 "pid=%u\n",
1786 sock_tag_entry->sk,
1787 sock_tag_entry->tag,
1788 uid,
1789 sock_tag_entry->pid
1790 );
1791 f_count = atomic_long_read(
1792 &sock_tag_entry->socket->file->f_count);
1793 len = snprintf(outp, char_count,
1794 "sock=%p tag=0x%llx (uid=%u) pid=%u "
1795 "f_count=%lu\n",
1796 sock_tag_entry->sk,
1797 sock_tag_entry->tag, uid,
1798 sock_tag_entry->pid, f_count);
1799 if (len >= char_count) {
1800 spin_unlock_bh(&sock_tag_list_lock);
1801 *outp = '\0';
1802 return outp - page;
1803 }
1804 outp += len;
1805 char_count -= len;
1806 (*num_items_returned)++;
1807 }
1808 spin_unlock_bh(&sock_tag_list_lock);
1809
1810 if (item_index++ >= items_to_skip) {
1811 len = snprintf(outp, char_count,
1812 "events: sockets_tagged=%llu "
1813 "sockets_untagged=%llu "
1814 "counter_set_changes=%llu "
1815 "delete_cmds=%llu "
1816 "iface_events=%llu "
1817 "match_calls=%llu "
1818 "match_found_sk=%llu "
1819 "match_found_sk_in_ct=%llu "
1820 "match_found_no_sk_in_ct=%llu "
1821 "match_no_sk=%llu "
1822 "match_no_sk_file=%llu\n",
1823 atomic64_read(&qtu_events.sockets_tagged),
1824 atomic64_read(&qtu_events.sockets_untagged),
1825 atomic64_read(&qtu_events.counter_set_changes),
1826 atomic64_read(&qtu_events.delete_cmds),
1827 atomic64_read(&qtu_events.iface_events),
1828 atomic64_read(&qtu_events.match_calls),
1829 atomic64_read(&qtu_events.match_found_sk),
1830 atomic64_read(&qtu_events.match_found_sk_in_ct),
1831 atomic64_read(
1832 &qtu_events.match_found_no_sk_in_ct),
1833 atomic64_read(&qtu_events.match_no_sk),
1834 atomic64_read(&qtu_events.match_no_sk_file));
1835 if (len >= char_count) {
1836 *outp = '\0';
1837 return outp - page;
1838 }
1839 outp += len;
1840 char_count -= len;
1841 (*num_items_returned)++;
1842 }
1843
1844 /* Count the following as part of the last item_index */
1845 if (item_index > items_to_skip) {
1846 prdebug_full_state(indent_level, "proc ctrl");
1847 }
1848
1849 *eof = 1;
1850 return outp - page;
1851}
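
/*
 * Reader output: one line per tagged socket, of the form
 *   sock=<ptr> tag=0x<tag> (uid=<uid>) pid=<pid> f_count=<refs>
 * followed by a single "events: ..." line carrying the module-wide
 * counters, both produced by the snprintf() calls above.
 */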
1852
1853/*
1854 * Delete socket tags and stat tags associated with a given
1855 * accounting tag and uid.
1856 */
1857static int ctrl_cmd_delete(const char *input)
1858{
1859 char cmd;
1860 uid_t uid;
1861 uid_t entry_uid;
1862 tag_t acct_tag;
1863 tag_t tag;
1864 int res, argc;
1865 struct iface_stat *iface_entry;
1866 struct rb_node *node;
1867 struct sock_tag *st_entry;
1868 struct rb_root st_to_free_tree = RB_ROOT;
1869 struct tag_stat *ts_entry;
1870 struct tag_counter_set *tcs_entry;
1871 struct tag_ref *tr_entry;
1872 struct uid_tag_data *utd_entry;
1873
1874 argc = sscanf(input, "%c %llu %u", &cmd, &acct_tag, &uid);
1875 CT_DEBUG("qtaguid: ctrl_delete(%s): argc=%d cmd=%c "
1876 "user_tag=0x%llx uid=%u\n", input, argc, cmd,
1877 acct_tag, uid);
1878 if (argc < 2) {
1879 res = -EINVAL;
1880 goto err;
1881 }
1882 if (!valid_atag(acct_tag)) {
1883 pr_info("qtaguid: ctrl_delete(%s): invalid tag\n", input);
1884 res = -EINVAL;
1885 goto err;
1886 }
1887 if (argc < 3) {
1888 uid = current_fsuid();
1889 } else if (!can_impersonate_uid(uid)) {
1890 pr_info("qtaguid: ctrl_delete(%s): "
1891 "insufficient priv from pid=%u tgid=%u uid=%u\n",
1892 input, current->pid, current->tgid, current_fsuid());
1893 res = -EPERM;
1894 goto err;
1895 }
1896
1897 tag = combine_atag_with_uid(acct_tag, uid);
1898 CT_DEBUG("qtaguid: ctrl_delete(%s): "
1899 "looking for tag=0x%llx (uid=%u)\n",
1900 input, tag, uid);
1901
1902 /* Delete socket tags */
1903 spin_lock_bh(&sock_tag_list_lock);
1904 node = rb_first(&sock_tag_tree);
1905 while (node) {
1906 st_entry = rb_entry(node, struct sock_tag, sock_node);
1907 entry_uid = get_uid_from_tag(st_entry->tag);
1908 node = rb_next(node);
1909 if (entry_uid != uid)
1910 continue;
1911
1912 CT_DEBUG("qtaguid: ctrl_delete(%s): st tag=0x%llx (uid=%u)\n",
1913 input, st_entry->tag, entry_uid);
1914
1915 if (!acct_tag || st_entry->tag == tag) {
1916 rb_erase(&st_entry->sock_node, &sock_tag_tree);
1917 /* Can't sockfd_put() within spinlock, do it later. */
1918 sock_tag_tree_insert(st_entry, &st_to_free_tree);
1919 tr_entry = lookup_tag_ref(st_entry->tag, NULL);
1920 BUG_ON(tr_entry->num_sock_tags <= 0);
1921 tr_entry->num_sock_tags--;
1922 /*
1923 * TODO: remove if, and start failing.
1924 * This is a hack to work around the fact that in some
1925 * places we have "if (IS_ERR_OR_NULL(pqd_entry))"
1926 * and are trying to work around apps
1927 * that didn't open the /dev/xt_qtaguid.
1928 */
1929 if (st_entry->list.next && st_entry->list.prev)
1930 list_del(&st_entry->list);
1931 }
1932 }
1933 spin_unlock_bh(&sock_tag_list_lock);
1934
1935 sock_tag_tree_erase(&st_to_free_tree);
1936
1937 /* Delete tag counter-sets */
1938 spin_lock_bh(&tag_counter_set_list_lock);
1939 /* Counter sets are only on the uid tag, not full tag */
1940 tcs_entry = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
1941 if (tcs_entry) {
1942 CT_DEBUG("qtaguid: ctrl_delete(%s): "
1943 "erase tcs: tag=0x%llx (uid=%u) set=%d\n",
1944 input,
1945 tcs_entry->tn.tag,
1946 get_uid_from_tag(tcs_entry->tn.tag),
1947 tcs_entry->active_set);
1948 rb_erase(&tcs_entry->tn.node, &tag_counter_set_tree);
1949 kfree(tcs_entry);
1950 }
1951 spin_unlock_bh(&tag_counter_set_list_lock);
1952
1953 /*
1954 * If acct_tag is 0, then all entries belonging to uid are
1955 * erased.
1956 */
1957 spin_lock_bh(&iface_stat_list_lock);
1958 list_for_each_entry(iface_entry, &iface_stat_list, list) {
1959 spin_lock_bh(&iface_entry->tag_stat_list_lock);
1960 node = rb_first(&iface_entry->tag_stat_tree);
1961 while (node) {
1962 ts_entry = rb_entry(node, struct tag_stat, tn.node);
1963 entry_uid = get_uid_from_tag(ts_entry->tn.tag);
1964 node = rb_next(node);
1965
1966 CT_DEBUG("qtaguid: ctrl_delete(%s): "
1967 "ts tag=0x%llx (uid=%u)\n",
1968 input, ts_entry->tn.tag, entry_uid);
1969
1970 if (entry_uid != uid)
1971 continue;
1972 if (!acct_tag || ts_entry->tn.tag == tag) {
1973 CT_DEBUG("qtaguid: ctrl_delete(%s): "
1974 "erase ts: %s 0x%llx %u\n",
1975 input, iface_entry->ifname,
1976 get_atag_from_tag(ts_entry->tn.tag),
1977 entry_uid);
1978 rb_erase(&ts_entry->tn.node,
1979 &iface_entry->tag_stat_tree);
1980 kfree(ts_entry);
1981 }
1982 }
1983 spin_unlock_bh(&iface_entry->tag_stat_list_lock);
1984 }
1985 spin_unlock_bh(&iface_stat_list_lock);
1986
1987 /* Cleanup the uid_tag_data */
1988 spin_lock_bh(&uid_tag_data_tree_lock);
1989 node = rb_first(&uid_tag_data_tree);
1990 while (node) {
1991 utd_entry = rb_entry(node, struct uid_tag_data, node);
1992 entry_uid = utd_entry->uid;
1993 node = rb_next(node);
1994
1995 CT_DEBUG("qtaguid: ctrl_delete(%s): "
1996 "utd uid=%u\n",
1997 input, entry_uid);
1998
1999 if (entry_uid != uid)
2000 continue;
2001 /*
2002 * Go over the tag_refs, and those that don't have
2003 * sock_tags using them are freed.
2004 */
2005 put_tag_ref_tree(tag, utd_entry);
2006 put_utd_entry(utd_entry);
2007 }
2008 spin_unlock_bh(&uid_tag_data_tree_lock);
2009
2010 atomic64_inc(&qtu_events.delete_cmds);
2011 res = 0;
2012
2013err:
2014 return res;
2015}
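
/*
 * Deletion order used above, each stage under its own lock:
 *   1. sock_tag_tree (sock_tag_list_lock; sockfd_put() deferred)
 *   2. tag_counter_set_tree (tag_counter_set_list_lock)
 *   3. per-interface tag_stat trees (iface_stat_list_lock plus each
 *	iface's tag_stat_list_lock)
 *   4. uid_tag_data tree (uid_tag_data_tree_lock)
 */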
2016
2017static int ctrl_cmd_counter_set(const char *input)
2018{
2019 char cmd;
2020 uid_t uid = 0;
2021 tag_t tag;
2022 int res, argc;
2023 struct tag_counter_set *tcs;
2024 int counter_set;
2025
2026 argc = sscanf(input, "%c %d %u", &cmd, &counter_set, &uid);
2027 CT_DEBUG("qtaguid: ctrl_counterset(%s): argc=%d cmd=%c "
2028 "set=%d uid=%u\n", input, argc, cmd,
2029 counter_set, uid);
2030 if (argc != 3) {
2031 res = -EINVAL;
2032 goto err;
2033 }
2034 if (counter_set < 0 || counter_set >= IFS_MAX_COUNTER_SETS) {
2035 pr_info("qtaguid: ctrl_counterset(%s): invalid counter_set range\n",
2036 input);
2037 res = -EINVAL;
2038 goto err;
2039 }
2040 if (!can_manipulate_uids()) {
2041 pr_info("qtaguid: ctrl_counterset(%s): "
2042 "insufficient priv from pid=%u tgid=%u uid=%u\n",
2043 input, current->pid, current->tgid, current_fsuid());
2044 res = -EPERM;
2045 goto err;
2046 }
2047
2048 tag = make_tag_from_uid(uid);
2049 spin_lock_bh(&tag_counter_set_list_lock);
2050 tcs = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
2051 if (!tcs) {
2052 tcs = kzalloc(sizeof(*tcs), GFP_ATOMIC);
2053 if (!tcs) {
2054 spin_unlock_bh(&tag_counter_set_list_lock);
2055 pr_err("qtaguid: ctrl_counterset(%s): "
2056 "failed to alloc counter set\n",
2057 input);
2058 res = -ENOMEM;
2059 goto err;
2060 }
2061 tcs->tn.tag = tag;
2062 tag_counter_set_tree_insert(tcs, &tag_counter_set_tree);
2063 CT_DEBUG("qtaguid: ctrl_counterset(%s): added tcs tag=0x%llx "
2064 "(uid=%u) set=%d\n",
2065 input, tag, get_uid_from_tag(tag), counter_set);
2066 }
2067 tcs->active_set = counter_set;
2068 spin_unlock_bh(&tag_counter_set_list_lock);
2069 atomic64_inc(&qtu_events.counter_set_changes);
2070 res = 0;
2071
2072err:
2073 return res;
2074}
2075
2076static int ctrl_cmd_tag(const char *input)
2077{
2078 char cmd;
2079 int sock_fd = 0;
2080 uid_t uid = 0;
2081 tag_t acct_tag = make_atag_from_value(0);
2082 tag_t full_tag;
2083 struct socket *el_socket;
2084 int res, argc;
2085 struct sock_tag *sock_tag_entry;
2086 struct tag_ref *tag_ref_entry;
2087 struct uid_tag_data *uid_tag_data_entry;
2088 struct proc_qtu_data *pqd_entry;
2089
2090 /* Unassigned args will get defaulted later. */
2091 argc = sscanf(input, "%c %d %llu %u", &cmd, &sock_fd, &acct_tag, &uid);
2092 CT_DEBUG("qtaguid: ctrl_tag(%s): argc=%d cmd=%c sock_fd=%d "
2093 "acct_tag=0x%llx uid=%u\n", input, argc, cmd, sock_fd,
2094 acct_tag, uid);
2095 if (argc < 2) {
2096 res = -EINVAL;
2097 goto err;
2098 }
2099 el_socket = sockfd_lookup(sock_fd, &res); /* This locks the file */
2100 if (!el_socket) {
2101 pr_info("qtaguid: ctrl_tag(%s): failed to lookup"
2102 " sock_fd=%d err=%d\n", input, sock_fd, res);
2103 goto err;
2104 }
2105 CT_DEBUG("qtaguid: ctrl_tag(%s): socket->...->f_count=%ld ->sk=%p\n",
2106 input, atomic_long_read(&el_socket->file->f_count),
2107 el_socket->sk);
2108 if (argc < 3) {
2109 acct_tag = make_atag_from_value(0);
2110 } else if (!valid_atag(acct_tag)) {
2111 pr_info("qtaguid: ctrl_tag(%s): invalid tag\n", input);
2112 res = -EINVAL;
2113 goto err_put;
2114 }
2115 CT_DEBUG("qtaguid: ctrl_tag(%s): "
2116 "pid=%u tgid=%u uid=%u euid=%u fsuid=%u "
2117 "in_group=%d in_egroup=%d\n",
2118 input, current->pid, current->tgid, current_uid(),
2119 current_euid(), current_fsuid(),
2120 in_group_p(proc_ctrl_write_gid),
2121 in_egroup_p(proc_ctrl_write_gid));
2122 if (argc < 4) {
2123 uid = current_fsuid();
2124 } else if (!can_impersonate_uid(uid)) {
2125 pr_info("qtaguid: ctrl_tag(%s): "
2126 "insufficient priv from pid=%u tgid=%u uid=%u\n",
2127 input, current->pid, current->tgid, current_fsuid());
2128 res = -EPERM;
2129 goto err_put;
2130 }
2131 full_tag = combine_atag_with_uid(acct_tag, uid);
2132
2133 spin_lock_bh(&sock_tag_list_lock);
2134 sock_tag_entry = get_sock_stat_nl(el_socket->sk);
2135 tag_ref_entry = get_tag_ref(full_tag, &uid_tag_data_entry);
2136 if (IS_ERR(tag_ref_entry)) {
2137 res = PTR_ERR(tag_ref_entry);
2138 spin_unlock_bh(&sock_tag_list_lock);
2139 goto err_put;
2140 }
2141 tag_ref_entry->num_sock_tags++;
2142 if (sock_tag_entry) {
2143 struct tag_ref *prev_tag_ref_entry;
2144
2145 CT_DEBUG("qtaguid: ctrl_tag(%s): retag for sk=%p "
2146 "st@%p ...->f_count=%ld\n",
2147 input, el_socket->sk, sock_tag_entry,
2148 atomic_long_read(&el_socket->file->f_count));
2149 /*
2150 * This is a re-tagging, so release the sock_fd that was
2151 * locked at the time of the 1st tagging.
2152 * There is still the ref from this call's sockfd_lookup() so
2153 * it can be done within the spinlock.
2154 */
2155 sockfd_put(sock_tag_entry->socket);
2156 prev_tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag,
2157 &uid_tag_data_entry);
2158 BUG_ON(IS_ERR_OR_NULL(prev_tag_ref_entry));
2159 BUG_ON(prev_tag_ref_entry->num_sock_tags <= 0);
2160 prev_tag_ref_entry->num_sock_tags--;
2161 sock_tag_entry->tag = full_tag;
2162 } else {
2163 CT_DEBUG("qtaguid: ctrl_tag(%s): newtag for sk=%p\n",
2164 input, el_socket->sk);
2165 sock_tag_entry = kzalloc(sizeof(*sock_tag_entry),
2166 GFP_ATOMIC);
2167 if (!sock_tag_entry) {
2168 pr_err("qtaguid: ctrl_tag(%s): "
2169 "socket tag alloc failed\n",
2170 input);
2171 spin_unlock_bh(&sock_tag_list_lock);
2172 res = -ENOMEM;
2173 goto err_tag_unref_put;
2174 }
2175 sock_tag_entry->sk = el_socket->sk;
2176 sock_tag_entry->socket = el_socket;
2177 sock_tag_entry->pid = current->tgid;
2178 sock_tag_entry->tag = combine_atag_with_uid(acct_tag,
2179 uid);
2180 spin_lock_bh(&uid_tag_data_tree_lock);
2181 pqd_entry = proc_qtu_data_tree_search(
2182 &proc_qtu_data_tree, current->tgid);
2183 /*
2184 * TODO: remove if, and start failing.
2185 * At first, we want to catch user-space code that is not
2186 * opening the /dev/xt_qtaguid.
2187 */
2188 if (IS_ERR_OR_NULL(pqd_entry))
2189 pr_warn_once(
2190 "qtaguid: %s(): "
2191 "User space forgot to open /dev/xt_qtaguid? "
2192 "pid=%u tgid=%u uid=%u\n", __func__,
2193 current->pid, current->tgid,
2194 current_fsuid());
2195 else
2196 list_add(&sock_tag_entry->list,
2197 &pqd_entry->sock_tag_list);
2198 spin_unlock_bh(&uid_tag_data_tree_lock);
2199
2200 sock_tag_tree_insert(sock_tag_entry, &sock_tag_tree);
2201 atomic64_inc(&qtu_events.sockets_tagged);
2202 }
2203 spin_unlock_bh(&sock_tag_list_lock);
2204 /* We keep the ref to the socket (file) until it is untagged */
2205 CT_DEBUG("qtaguid: ctrl_tag(%s): done st@%p ...->f_count=%ld\n",
2206 input, sock_tag_entry,
2207 atomic_long_read(&el_socket->file->f_count));
2208 return 0;
2209
2210err_tag_unref_put:
2211 BUG_ON(tag_ref_entry->num_sock_tags <= 0);
2212 tag_ref_entry->num_sock_tags--;
2213 free_tag_ref_from_utd_entry(tag_ref_entry, uid_tag_data_entry);
2214err_put:
2215 CT_DEBUG("qtaguid: ctrl_tag(%s): done. ...->f_count=%ld\n",
2216 input, atomic_long_read(&el_socket->file->f_count) - 1);
2217 /* Release the sock_fd that was grabbed by sockfd_lookup(). */
2218 sockfd_put(el_socket);
2219 return res;
2220
2221err:
2222 CT_DEBUG("qtaguid: ctrl_tag(%s): done.\n", input);
2223 return res;
2224}
2225
2226static int ctrl_cmd_untag(const char *input)
2227{
2228 char cmd;
2229 int sock_fd = 0;
2230 struct socket *el_socket;
2231 int res, argc;
2232 struct sock_tag *sock_tag_entry;
2233 struct tag_ref *tag_ref_entry;
2234 struct uid_tag_data *utd_entry;
2235 struct proc_qtu_data *pqd_entry;
2236
2237 argc = sscanf(input, "%c %d", &cmd, &sock_fd);
2238 CT_DEBUG("qtaguid: ctrl_untag(%s): argc=%d cmd=%c sock_fd=%d\n",
2239 input, argc, cmd, sock_fd);
2240 if (argc < 2) {
2241 res = -EINVAL;
2242 goto err;
2243 }
2244 el_socket = sockfd_lookup(sock_fd, &res); /* This locks the file */
2245 if (!el_socket) {
2246 pr_info("qtaguid: ctrl_untag(%s): failed to lookup"
2247 " sock_fd=%d err=%d\n", input, sock_fd, res);
2248 goto err;
2249 }
2250 CT_DEBUG("qtaguid: ctrl_untag(%s): socket->...->f_count=%ld ->sk=%p\n",
2251 input, atomic_long_read(&el_socket->file->f_count),
2252 el_socket->sk);
2253 spin_lock_bh(&sock_tag_list_lock);
2254 sock_tag_entry = get_sock_stat_nl(el_socket->sk);
2255 if (!sock_tag_entry) {
2256 spin_unlock_bh(&sock_tag_list_lock);
2257 res = -EINVAL;
2258 goto err_put;
2259 }
2260 /*
2261 * The socket already belongs to the current process
2262 * so it can do whatever it wants to it.
2263 */
2264 rb_erase(&sock_tag_entry->sock_node, &sock_tag_tree);
2265
2266 tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag, &utd_entry);
2267 BUG_ON(!tag_ref_entry);
2268 BUG_ON(tag_ref_entry->num_sock_tags <= 0);
2269 spin_lock_bh(&uid_tag_data_tree_lock);
2270 pqd_entry = proc_qtu_data_tree_search(
2271 &proc_qtu_data_tree, current->tgid);
2272 /*
2273	 * TODO: remove this if-check and start failing.
2274	 * At first, we want to catch user-space code that is not
2275	 * opening /dev/xt_qtaguid.
2276 */
2277 if (IS_ERR_OR_NULL(pqd_entry))
2278 pr_warn_once("qtaguid: %s(): "
2279 "User space forgot to open /dev/xt_qtaguid? "
2280 "pid=%u tgid=%u uid=%u\n", __func__,
2281 current->pid, current->tgid, current_fsuid());
2282 else
2283 list_del(&sock_tag_entry->list);
2284 spin_unlock_bh(&uid_tag_data_tree_lock);
2285 /*
2286 * We don't free tag_ref from the utd_entry here,
2287 * only during a cmd_delete().
2288 */
2289 tag_ref_entry->num_sock_tags--;
2290 spin_unlock_bh(&sock_tag_list_lock);
2291 /*
2292 * Release the sock_fd that was grabbed at tag time,
2293 * and once more for the sockfd_lookup() here.
2294 */
2295 sockfd_put(sock_tag_entry->socket);
2296 CT_DEBUG("qtaguid: ctrl_untag(%s): done. st@%p ...->f_count=%ld\n",
2297 input, sock_tag_entry,
2298 atomic_long_read(&el_socket->file->f_count) - 1);
2299 sockfd_put(el_socket);
2300
2301 kfree(sock_tag_entry);
2302 atomic64_inc(&qtu_events.sockets_untagged);
2303
2304 return 0;
2305
2306err_put:
2307 CT_DEBUG("qtaguid: ctrl_untag(%s): done. socket->...->f_count=%ld\n",
2308 input, atomic_long_read(&el_socket->file->f_count) - 1);
2309 /* Release the sock_fd that was grabbed by sockfd_lookup(). */
2310 sockfd_put(el_socket);
2311 return res;
2312
2313err:
2314 CT_DEBUG("qtaguid: ctrl_untag(%s): done.\n", input);
2315 return res;
2316}
2317
2318static int qtaguid_ctrl_parse(const char *input, int count)
2319{
2320 char cmd;
2321 int res;
2322
2323 cmd = input[0];
2324 /* Collect params for commands */
2325 switch (cmd) {
2326 case 'd':
2327 res = ctrl_cmd_delete(input);
2328 break;
2329
2330 case 's':
2331 res = ctrl_cmd_counter_set(input);
2332 break;
2333
2334 case 't':
2335 res = ctrl_cmd_tag(input);
2336 break;
2337
2338 case 'u':
2339 res = ctrl_cmd_untag(input);
2340 break;
2341
2342 default:
2343 res = -EINVAL;
2344 goto err;
2345 }
2346 if (!res)
2347 res = count;
2348err:
2349 CT_DEBUG("qtaguid: ctrl(%s): res=%d\n", input, res);
2350 return res;
2351}
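
To make the ctrl protocol above concrete, here is a minimal userspace sketch that tags and then untags a socket. The "u <sock_fd>" format is taken from ctrl_cmd_untag() above; the "t <sock_fd> <acct_tag> <uid>" format and the default device/proc paths are assumptions inferred from the surrounding code (the pqd_entry warnings and qtaguid_proc_register()), not a verified userspace API.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	/* Register with the module first (see the pqd_entry checks above). */
	int qtu = open("/dev/xt_qtaguid", O_RDONLY);
	int ctrl = open("/proc/net/xt_qtaguid/ctrl", O_WRONLY);
	int sk = socket(AF_INET, SOCK_DGRAM, 0);
	char cmd[64];

	/* Tag sk with accounting value 42 on behalf of uid 10005 (assumed
	 * format); the acct_tag occupies the upper 32 bits of the tag. */
	snprintf(cmd, sizeof(cmd), "t %d %llu %u", sk, 42ULL << 32, 10005u);
	write(ctrl, cmd, strlen(cmd));

	/* ... traffic on sk is now counted against that tag ... */

	/* Untag, per ctrl_cmd_untag(): "u <sock_fd>" */
	snprintf(cmd, sizeof(cmd), "u %d", sk);
	write(ctrl, cmd, strlen(cmd));

	close(sk);
	close(ctrl);
	close(qtu);
	return 0;
}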
2352
2353#define MAX_QTAGUID_CTRL_INPUT_LEN 255
2354static int qtaguid_ctrl_proc_write(struct file *file, const char __user *buffer,
2355 unsigned long count, void *data)
2356{
2357 char input_buf[MAX_QTAGUID_CTRL_INPUT_LEN];
2358
2359 if (unlikely(module_passive))
2360 return count;
2361
2362 if (count >= MAX_QTAGUID_CTRL_INPUT_LEN)
2363 return -EINVAL;
2364
2365 if (copy_from_user(input_buf, buffer, count))
2366 return -EFAULT;
2367
2368 input_buf[count] = '\0';
2369 return qtaguid_ctrl_parse(input_buf, count);
2370}
2371
2372struct proc_print_info {
2373 char *outp;
2374 char **num_items_returned;
2375 struct iface_stat *iface_entry;
2376 struct tag_stat *ts_entry;
2377 int item_index;
2378 int items_to_skip;
2379 int char_count;
2380};
2381
2382static int pp_stats_line(struct proc_print_info *ppi, int cnt_set)
2383{
2384 int len;
2385 struct data_counters *cnts;
2386
2387 if (!ppi->item_index) {
2388 if (ppi->item_index++ < ppi->items_to_skip)
2389 return 0;
2390 len = snprintf(ppi->outp, ppi->char_count,
2391 "idx iface acct_tag_hex uid_tag_int cnt_set "
2392 "rx_bytes rx_packets "
2393 "tx_bytes tx_packets "
2394 "rx_tcp_bytes rx_tcp_packets "
2395 "rx_udp_bytes rx_udp_packets "
2396 "rx_other_bytes rx_other_packets "
2397 "tx_tcp_bytes tx_tcp_packets "
2398 "tx_udp_bytes tx_udp_packets "
2399 "tx_other_bytes tx_other_packets\n");
2400 } else {
2401 tag_t tag = ppi->ts_entry->tn.tag;
2402 uid_t stat_uid = get_uid_from_tag(tag);
2403
2404 if (!can_read_other_uid_stats(stat_uid)) {
2405 CT_DEBUG("qtaguid: stats line: "
2406 "%s 0x%llx %u: insufficient priv "
2407 "from pid=%u tgid=%u uid=%u\n",
2408 ppi->iface_entry->ifname,
2409 get_atag_from_tag(tag), stat_uid,
2410 current->pid, current->tgid, current_fsuid());
2411 return 0;
2412 }
2413 if (ppi->item_index++ < ppi->items_to_skip)
2414 return 0;
2415 cnts = &ppi->ts_entry->counters;
2416 len = snprintf(
2417 ppi->outp, ppi->char_count,
2418 "%d %s 0x%llx %u %u "
2419 "%llu %llu "
2420 "%llu %llu "
2421 "%llu %llu "
2422 "%llu %llu "
2423 "%llu %llu "
2424 "%llu %llu "
2425 "%llu %llu "
2426 "%llu %llu\n",
2427 ppi->item_index,
2428 ppi->iface_entry->ifname,
2429 get_atag_from_tag(tag),
2430 stat_uid,
2431 cnt_set,
2432 dc_sum_bytes(cnts, cnt_set, IFS_RX),
2433 dc_sum_packets(cnts, cnt_set, IFS_RX),
2434 dc_sum_bytes(cnts, cnt_set, IFS_TX),
2435 dc_sum_packets(cnts, cnt_set, IFS_TX),
2436 cnts->bpc[cnt_set][IFS_RX][IFS_TCP].bytes,
2437 cnts->bpc[cnt_set][IFS_RX][IFS_TCP].packets,
2438 cnts->bpc[cnt_set][IFS_RX][IFS_UDP].bytes,
2439 cnts->bpc[cnt_set][IFS_RX][IFS_UDP].packets,
2440 cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].bytes,
2441 cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].packets,
2442 cnts->bpc[cnt_set][IFS_TX][IFS_TCP].bytes,
2443 cnts->bpc[cnt_set][IFS_TX][IFS_TCP].packets,
2444 cnts->bpc[cnt_set][IFS_TX][IFS_UDP].bytes,
2445 cnts->bpc[cnt_set][IFS_TX][IFS_UDP].packets,
2446 cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].bytes,
2447 cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].packets);
2448 }
2449 return len;
2450}
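
For reference, the two snprintf() formats above yield a header row followed by one line per (iface, tag, cnt_set); the numbers below are purely illustrative, and the per-protocol buckets sum to the rx/tx totals:

idx iface acct_tag_hex uid_tag_int cnt_set rx_bytes rx_packets tx_bytes tx_packets ...
2 wlan0 0x0 10005 0 20842 105 12334 98 20842 105 0 0 0 0 12334 98 0 0 0 0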
2451
2452static bool pp_sets(struct proc_print_info *ppi)
2453{
2454 int len;
2455 int counter_set;
2456 for (counter_set = 0; counter_set < IFS_MAX_COUNTER_SETS;
2457 counter_set++) {
2458 len = pp_stats_line(ppi, counter_set);
2459 if (len >= ppi->char_count) {
2460 *ppi->outp = '\0';
2461 return false;
2462 }
2463 if (len) {
2464 ppi->outp += len;
2465 ppi->char_count -= len;
2466 (*ppi->num_items_returned)++;
2467 }
2468 }
2469 return true;
2470}
2471
2472/*
2473 * Procfs reader to get all tag stats using style "1)" as described in
2474 * fs/proc/generic.c.
2475 * Groups all protocols' tx/rx bytes.
2476 */
2477static int qtaguid_stats_proc_read(char *page, char **num_items_returned,
2478 off_t items_to_skip, int char_count, int *eof,
2479 void *data)
2480{
2481 struct proc_print_info ppi;
2482 int len;
2483
2484 ppi.outp = page;
2485 ppi.item_index = 0;
2486 ppi.char_count = char_count;
2487 ppi.num_items_returned = num_items_returned;
2488 ppi.items_to_skip = items_to_skip;
2489
2490 if (unlikely(module_passive)) {
2491 len = pp_stats_line(&ppi, 0);
2492 /* The header should always be shorter than the buffer. */
2493 BUG_ON(len >= ppi.char_count);
2494 (*num_items_returned)++;
2495 *eof = 1;
2496 return len;
2497 }
2498
2499 CT_DEBUG("qtaguid:proc stats page=%p *num_items_returned=%p off=%ld "
2500 "char_count=%d *eof=%d\n", page, *num_items_returned,
2501 items_to_skip, char_count, *eof);
2502
2503 if (*eof)
2504 return 0;
2505
2506 /* The idx is there to help debug when things go belly up. */
2507 len = pp_stats_line(&ppi, 0);
2508 /* Don't advance the outp unless the whole line was printed */
2509 if (len >= ppi.char_count) {
2510 *ppi.outp = '\0';
2511 return ppi.outp - page;
2512 }
2513 if (len) {
2514 ppi.outp += len;
2515 ppi.char_count -= len;
2516 (*num_items_returned)++;
2517 }
2518
2519 spin_lock_bh(&iface_stat_list_lock);
2520 list_for_each_entry(ppi.iface_entry, &iface_stat_list, list) {
2521 struct rb_node *node;
2522 spin_lock_bh(&ppi.iface_entry->tag_stat_list_lock);
2523 for (node = rb_first(&ppi.iface_entry->tag_stat_tree);
2524 node;
2525 node = rb_next(node)) {
2526 ppi.ts_entry = rb_entry(node, struct tag_stat, tn.node);
2527 if (!pp_sets(&ppi)) {
2528 spin_unlock_bh(
2529 &ppi.iface_entry->tag_stat_list_lock);
2530 spin_unlock_bh(&iface_stat_list_lock);
2531 return ppi.outp - page;
2532 }
2533 }
2534 spin_unlock_bh(&ppi.iface_entry->tag_stat_list_lock);
2535 }
2536 spin_unlock_bh(&iface_stat_list_lock);
2537
2538 *eof = 1;
2539 return ppi.outp - page;
2540}
2541
2542/*------------------------------------------*/
2543static int qtudev_open(struct inode *inode, struct file *file)
2544{
2545 struct uid_tag_data *utd_entry;
2546 struct proc_qtu_data *pqd_entry;
2547 struct proc_qtu_data *new_pqd_entry;
2548 int res;
2549 bool utd_entry_found;
2550
2551 if (unlikely(qtu_proc_handling_passive))
2552 return 0;
2553
2554 DR_DEBUG("qtaguid: qtudev_open(): pid=%u tgid=%u uid=%u\n",
2555 current->pid, current->tgid, current_fsuid());
2556
2557 spin_lock_bh(&uid_tag_data_tree_lock);
2558
2559 /* Look for existing uid data, or alloc one. */
2560 utd_entry = get_uid_data(current_fsuid(), &utd_entry_found);
2561 if (IS_ERR_OR_NULL(utd_entry)) {
2562 res = PTR_ERR(utd_entry);
2563 goto err;
2564 }
2565
2566 /* Look for existing PID based proc_data */
2567 pqd_entry = proc_qtu_data_tree_search(&proc_qtu_data_tree,
2568 current->tgid);
2569 if (pqd_entry) {
2570 pr_err("qtaguid: qtudev_open(): %u/%u %u "
2571 "%s already opened\n",
2572 current->pid, current->tgid, current_fsuid(),
2573 QTU_DEV_NAME);
2574 res = -EBUSY;
2575 goto err_unlock_free_utd;
2576 }
2577
2578 new_pqd_entry = kzalloc(sizeof(*new_pqd_entry), GFP_ATOMIC);
2579 if (!new_pqd_entry) {
2580 pr_err("qtaguid: qtudev_open(): %u/%u %u: "
2581 "proc data alloc failed\n",
2582 current->pid, current->tgid, current_fsuid());
2583 res = -ENOMEM;
2584 goto err_unlock_free_utd;
2585 }
2586 new_pqd_entry->pid = current->tgid;
2587 INIT_LIST_HEAD(&new_pqd_entry->sock_tag_list);
2588 new_pqd_entry->parent_tag_data = utd_entry;
2589 utd_entry->num_pqd++;
2590
2591 proc_qtu_data_tree_insert(new_pqd_entry,
2592 &proc_qtu_data_tree);
2593
2594 spin_unlock_bh(&uid_tag_data_tree_lock);
2595 DR_DEBUG("qtaguid: tracking data for uid=%u in pqd=%p\n",
2596 current_fsuid(), new_pqd_entry);
2597 file->private_data = new_pqd_entry;
2598 return 0;
2599
2600err_unlock_free_utd:
2601 if (!utd_entry_found) {
2602 rb_erase(&utd_entry->node, &uid_tag_data_tree);
2603 kfree(utd_entry);
2604 }
2605 spin_unlock_bh(&uid_tag_data_tree_lock);
2606err:
2607 return res;
2608}
2609
2610static int qtudev_release(struct inode *inode, struct file *file)
2611{
2612 struct proc_qtu_data *pqd_entry = file->private_data;
2613 struct uid_tag_data *utd_entry = pqd_entry->parent_tag_data;
2614 struct sock_tag *st_entry;
2615 struct rb_root st_to_free_tree = RB_ROOT;
2616 struct list_head *entry, *next;
2617 struct tag_ref *tr;
2618
2619 if (unlikely(qtu_proc_handling_passive))
2620 return 0;
2621
2622 /*
2623	 * Do not trust current->pid; it might just be a kworker cleaning
2624 * up after a dead proc.
2625 */
2626 DR_DEBUG("qtaguid: qtudev_release(): "
2627 "pid=%u tgid=%u uid=%u "
2628 "pqd_entry=%p->pid=%u utd_entry=%p->active_tags=%d\n",
2629 current->pid, current->tgid, pqd_entry->parent_tag_data->uid,
2630 pqd_entry, pqd_entry->pid, utd_entry,
2631 utd_entry->num_active_tags);
2632
2633 spin_lock_bh(&sock_tag_list_lock);
2634 spin_lock_bh(&uid_tag_data_tree_lock);
2635
2636 list_for_each_safe(entry, next, &pqd_entry->sock_tag_list) {
2637 st_entry = list_entry(entry, struct sock_tag, list);
2638 DR_DEBUG("qtaguid: %s(): "
2639 "erase sock_tag=%p->sk=%p pid=%u tgid=%u uid=%u\n",
2640 __func__,
2641 st_entry, st_entry->sk,
2642 current->pid, current->tgid,
2643 pqd_entry->parent_tag_data->uid);
2644
2645 utd_entry = uid_tag_data_tree_search(
2646 &uid_tag_data_tree,
2647 get_uid_from_tag(st_entry->tag));
2648 BUG_ON(IS_ERR_OR_NULL(utd_entry));
2649 DR_DEBUG("qtaguid: %s(): "
2650 "looking for tag=0x%llx in utd_entry=%p\n", __func__,
2651 st_entry->tag, utd_entry);
2652 tr = tag_ref_tree_search(&utd_entry->tag_ref_tree,
2653 st_entry->tag);
2654 BUG_ON(!tr);
2655 BUG_ON(tr->num_sock_tags <= 0);
2656 tr->num_sock_tags--;
2657 free_tag_ref_from_utd_entry(tr, utd_entry);
2658
2659 rb_erase(&st_entry->sock_node, &sock_tag_tree);
2660 list_del(&st_entry->list);
2661 /* Can't sockfd_put() within spinlock, do it later. */
2662 sock_tag_tree_insert(st_entry, &st_to_free_tree);
2663
2664 /*
2665 * Try to free the utd_entry if no other proc_qtu_data is
2666 * using it (num_pqd is 0) and it doesn't have active tags
2667 * (num_active_tags is 0).
2668 */
2669 put_utd_entry(utd_entry);
2670 }
2671
2672 rb_erase(&pqd_entry->node, &proc_qtu_data_tree);
2673 BUG_ON(pqd_entry->parent_tag_data->num_pqd < 1);
2674 pqd_entry->parent_tag_data->num_pqd--;
2675 put_utd_entry(pqd_entry->parent_tag_data);
2676 kfree(pqd_entry);
2677 file->private_data = NULL;
2678
2679 spin_unlock_bh(&uid_tag_data_tree_lock);
2680 spin_unlock_bh(&sock_tag_list_lock);
2681
2682
2683 sock_tag_tree_erase(&st_to_free_tree);
2684
2685 prdebug_full_state(0, "%s(): pid=%u tgid=%u", __func__,
2686 current->pid, current->tgid);
2687 return 0;
2688}
2689
2690/*------------------------------------------*/
2691static const struct file_operations qtudev_fops = {
2692 .owner = THIS_MODULE,
2693 .open = qtudev_open,
2694 .release = qtudev_release,
2695};
2696
2697static struct miscdevice qtu_device = {
2698 .minor = MISC_DYNAMIC_MINOR,
2699 .name = QTU_DEV_NAME,
2700 .fops = &qtudev_fops,
2701 /* How sad it doesn't allow for defaults: .mode = S_IRUGO | S_IWUSR */
2702};
2703
2704/*------------------------------------------*/
2705static int __init qtaguid_proc_register(struct proc_dir_entry **res_procdir)
2706{
2707 int ret;
2708 *res_procdir = proc_mkdir(module_procdirname, init_net.proc_net);
2709 if (!*res_procdir) {
2710 pr_err("qtaguid: failed to create proc/.../xt_qtaguid\n");
2711 ret = -ENOMEM;
2712 goto no_dir;
2713 }
2714
2715 xt_qtaguid_ctrl_file = create_proc_entry("ctrl", proc_ctrl_perms,
2716 *res_procdir);
2717 if (!xt_qtaguid_ctrl_file) {
2718 pr_err("qtaguid: failed to create xt_qtaguid/ctrl "
2719	       "file\n");
2720 ret = -ENOMEM;
2721 goto no_ctrl_entry;
2722 }
2723 xt_qtaguid_ctrl_file->read_proc = qtaguid_ctrl_proc_read;
2724 xt_qtaguid_ctrl_file->write_proc = qtaguid_ctrl_proc_write;
2725
2726 xt_qtaguid_stats_file = create_proc_entry("stats", proc_stats_perms,
2727 *res_procdir);
2728 if (!xt_qtaguid_stats_file) {
2729 pr_err("qtaguid: failed to create xt_qtaguid/stats "
2730 "file\n");
2731 ret = -ENOMEM;
2732 goto no_stats_entry;
2733 }
2734 xt_qtaguid_stats_file->read_proc = qtaguid_stats_proc_read;
2735 /*
2736 * TODO: add support counter hacking
2737 * xt_qtaguid_stats_file->write_proc = qtaguid_stats_proc_write;
2738 */
2739 return 0;
2740
2741no_stats_entry:
2742 remove_proc_entry("ctrl", *res_procdir);
2743no_ctrl_entry:
2744 remove_proc_entry("xt_qtaguid", NULL);
2745no_dir:
2746 return ret;
2747}
2748
2749static struct xt_match qtaguid_mt_reg __read_mostly = {
2750 /*
2751 * This module masquerades as the "owner" module so that iptables
2752 * tools can deal with it.
2753 */
2754 .name = "owner",
2755 .revision = 1,
2756 .family = NFPROTO_UNSPEC,
2757 .match = qtaguid_mt,
2758 .matchsize = sizeof(struct xt_qtaguid_match_info),
2759 .me = THIS_MODULE,
2760};
2761
2762static int __init qtaguid_mt_init(void)
2763{
2764 if (qtaguid_proc_register(&xt_qtaguid_procdir)
2765 || iface_stat_init(xt_qtaguid_procdir)
2766 || xt_register_match(&qtaguid_mt_reg)
2767 || misc_register(&qtu_device))
2768 return -1;
2769 return 0;
2770}
2771
2772/*
2773 * TODO: allow unloading of the module.
2774 * For now stats are permanent.
2775 * Kconfig forces 'y/n' and never an 'm'.
2776 */
2777
2778module_init(qtaguid_mt_init);
2779MODULE_AUTHOR("jpa <jpa@google.com>");
2780MODULE_DESCRIPTION("Xtables: socket owner+tag matching and associated stats");
2781MODULE_LICENSE("GPL");
2782MODULE_ALIAS("ipt_owner");
2783MODULE_ALIAS("ip6t_owner");
2784MODULE_ALIAS("ipt_qtaguid");
2785MODULE_ALIAS("ip6t_qtaguid");
diff --git a/net/netfilter/xt_qtaguid_internal.h b/net/netfilter/xt_qtaguid_internal.h
new file mode 100644
index 00000000000..02479d6d317
--- /dev/null
+++ b/net/netfilter/xt_qtaguid_internal.h
@@ -0,0 +1,330 @@
1/*
2 * Kernel iptables module to track stats for packets based on user tags.
3 *
4 * (C) 2011 Google, Inc
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef __XT_QTAGUID_INTERNAL_H__
11#define __XT_QTAGUID_INTERNAL_H__
12
13#include <linux/types.h>
14#include <linux/rbtree.h>
15#include <linux/spinlock_types.h>
16#include <linux/workqueue.h>
17
18/* Iface handling */
19#define IDEBUG_MASK (1<<0)
20/* Iptable Matching. Per packet. */
21#define MDEBUG_MASK (1<<1)
22/* Red-black tree handling. Per packet. */
23#define RDEBUG_MASK (1<<2)
24/* procfs ctrl/stats handling */
25#define CDEBUG_MASK (1<<3)
26/* dev and resource tracking */
27#define DDEBUG_MASK (1<<4)
28
29/* E.g. (IDEBUG_MASK | CDEBUG_MASK | DDEBUG_MASK) */
30#define DEFAULT_DEBUG_MASK 0
31
32/*
33 * (Un)Define these *DEBUG to compile out/in the pr_debug calls.
34 * All undef: text size ~ 0x3030; all def: ~ 0x4404.
35 */
36#define IDEBUG
37#define MDEBUG
38#define RDEBUG
39#define CDEBUG
40#define DDEBUG
41
42#define MSK_DEBUG(mask, ...) do { \
43 if (unlikely(qtaguid_debug_mask & (mask))) \
44 pr_debug(__VA_ARGS__); \
45 } while (0)
46#ifdef IDEBUG
47#define IF_DEBUG(...) MSK_DEBUG(IDEBUG_MASK, __VA_ARGS__)
48#else
49#define IF_DEBUG(...) no_printk(__VA_ARGS__)
50#endif
51#ifdef MDEBUG
52#define MT_DEBUG(...) MSK_DEBUG(MDEBUG_MASK, __VA_ARGS__)
53#else
54#define MT_DEBUG(...) no_printk(__VA_ARGS__)
55#endif
56#ifdef RDEBUG
57#define RB_DEBUG(...) MSK_DEBUG(RDEBUG_MASK, __VA_ARGS__)
58#else
59#define RB_DEBUG(...) no_printk(__VA_ARGS__)
60#endif
61#ifdef CDEBUG
62#define CT_DEBUG(...) MSK_DEBUG(CDEBUG_MASK, __VA_ARGS__)
63#else
64#define CT_DEBUG(...) no_printk(__VA_ARGS__)
65#endif
66#ifdef DDEBUG
67#define DR_DEBUG(...) MSK_DEBUG(DDEBUG_MASK, __VA_ARGS__)
68#else
69#define DR_DEBUG(...) no_printk(__VA_ARGS__)
70#endif
71
72extern uint qtaguid_debug_mask;
73
74/*---------------------------------------------------------------------------*/
75/*
76 * Tags:
77 *
78 * They represent what the data usage counters will be tracked against.
79 * By default a tag is just based on the UID.
80 * The UID is used as the base for policing, and cannot be ignored.
81 * So a tag will always at least represent a UID (uid_tag).
82 *
83 * A tag can be augmented with an "accounting tag" which is associated
84 * with a UID.
85 * User space can set the acct_tag portion of the tag which is then used
86 * with sockets: all data belonging to that socket will be counted against the
87 * tag. The policing is then based on the tag's uid_tag portion,
88 * and stats are collected for the acct_tag portion separately.
89 *
90 * There could be
91 * a: {acct_tag=1, uid_tag=10003}
92 * b: {acct_tag=2, uid_tag=10003}
93 * c: {acct_tag=3, uid_tag=10003}
94 * d: {acct_tag=0, uid_tag=10003}
95 * a, b, and c represent tags associated with specific sockets.
96 * d is for the totals for that uid, including all untagged traffic.
97 * Typically d is used with policing/quota rules.
98 *
99 * We want tag_t big enough to distinguish uid_t and acct_tag.
100 * It might become a struct if needed.
101 * Nothing should be using it as an int.
102 */
103typedef uint64_t tag_t; /* Only used via accessors */
104
105#define TAG_UID_MASK 0xFFFFFFFFULL
106#define TAG_ACCT_MASK (~0xFFFFFFFFULL)
107
108static inline int tag_compare(tag_t t1, tag_t t2)
109{
110 return t1 < t2 ? -1 : t1 == t2 ? 0 : 1;
111}
112
113static inline tag_t combine_atag_with_uid(tag_t acct_tag, uid_t uid)
114{
115 return acct_tag | uid;
116}
117static inline tag_t make_tag_from_uid(uid_t uid)
118{
119 return uid;
120}
121static inline uid_t get_uid_from_tag(tag_t tag)
122{
123 return tag & TAG_UID_MASK;
124}
125static inline tag_t get_utag_from_tag(tag_t tag)
126{
127 return tag & TAG_UID_MASK;
128}
129static inline tag_t get_atag_from_tag(tag_t tag)
130{
131 return tag & TAG_ACCT_MASK;
132}
133
134static inline bool valid_atag(tag_t tag)
135{
136 return !(tag & TAG_UID_MASK);
137}
138static inline tag_t make_atag_from_value(uint32_t value)
139{
140 return (uint64_t)value << 32;
141}
142/*---------------------------------------------------------------------------*/
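
A quick worked example of the accessors above (values are illustrative; the expected results follow directly from the mask definitions):

static inline void tag_accessor_example(void)
{
	tag_t atag = make_atag_from_value(42);           /* == 42ULL << 32 */
	tag_t tag  = combine_atag_with_uid(atag, 10005);

	/* get_uid_from_tag(tag)  == 10005  (low 32 bits)    */
	/* get_atag_from_tag(tag) == atag   (high 32 bits)   */
	/* valid_atag(atag)       == true   (uid bits clear) */
}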
143
144/*
145 * Maximum number of socket tags that a UID is allowed to have active.
146 * Multiple processes belonging to the same UID contribute towards this limit.
147 * Special UIDs that can impersonate a UID also contribute (e.g. download
148 * manager, ...)
149 */
150#define DEFAULT_MAX_SOCK_TAGS 1024
151
152/*
153 * For now we only track 2 sets of counters.
154 * The default set is 0.
155 * Userspace can activate another set for a given uid being tracked.
156 */
157#define IFS_MAX_COUNTER_SETS 2
158
159enum ifs_tx_rx {
160 IFS_TX,
161 IFS_RX,
162 IFS_MAX_DIRECTIONS
163};
164
165/* For now, TCP, UDP, the rest */
166enum ifs_proto {
167 IFS_TCP,
168 IFS_UDP,
169 IFS_PROTO_OTHER,
170 IFS_MAX_PROTOS
171};
172
173struct byte_packet_counters {
174 uint64_t bytes;
175 uint64_t packets;
176};
177
178struct data_counters {
179 struct byte_packet_counters bpc[IFS_MAX_COUNTER_SETS][IFS_MAX_DIRECTIONS][IFS_MAX_PROTOS];
180};
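
The dc_sum_bytes()/dc_sum_packets() helpers used by the stats printing in xt_qtaguid.c are not declared in this header; a plausible sketch, assuming they simply fold the per-protocol buckets of one set/direction:

static inline uint64_t dc_sum_bytes(struct data_counters *dc, int set,
				    enum ifs_tx_rx direction)
{
	/* Fold TCP, UDP and "other" into a single total. */
	return dc->bpc[set][direction][IFS_TCP].bytes
	       + dc->bpc[set][direction][IFS_UDP].bytes
	       + dc->bpc[set][direction][IFS_PROTO_OTHER].bytes;
}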
181
182/* Generic tag-based nodes used as a base for rb_tree ops */
183struct tag_node {
184 struct rb_node node;
185 tag_t tag;
186};
187
188struct tag_stat {
189 struct tag_node tn;
190 struct data_counters counters;
191 /*
192 * If this tag is acct_tag based, we need to count against the
193 * matching parent uid_tag.
194 */
195 struct data_counters *parent_counters;
196};
197
198struct iface_stat {
199 struct list_head list; /* in iface_stat_list */
200 char *ifname;
201 bool active;
202 /* net_dev is only valid for active iface_stat */
203 struct net_device *net_dev;
204
205 struct byte_packet_counters totals[IFS_MAX_DIRECTIONS];
206 /*
207 * We keep the last_known, because some devices reset their counters
208 * just before NETDEV_UP, while some will reset just before
209 * NETDEV_REGISTER (which is more normal).
210 * So now, if the device didn't do a NETDEV_UNREGISTER and we see
211	 * its current dev stats smaller than what was previously known, we
212 * assume an UNREGISTER and just use the last_known.
213 */
214 struct byte_packet_counters last_known[IFS_MAX_DIRECTIONS];
215 /* last_known is usable when last_known_valid is true */
216 bool last_known_valid;
217
218 struct proc_dir_entry *proc_ptr;
219
220 struct rb_root tag_stat_tree;
221 spinlock_t tag_stat_list_lock;
222};
223
224/* This is needed to create proc_dir_entries from atomic context. */
225struct iface_stat_work {
226 struct work_struct iface_work;
227 struct iface_stat *iface_entry;
228};
229
230/*
231 * Track the tag that this socket is transferring data for, which is not
232 * necessarily the uid that owns the socket.
233 * This is the tag against which tag_stat.counters will be billed.
234 * These structs need to be looked up by sock and pid.
235 */
236struct sock_tag {
237 struct rb_node sock_node;
238 struct sock *sk; /* Only used as a number, never dereferenced */
239 /* The socket is needed for sockfd_put() */
240 struct socket *socket;
241 /* Used to associate with a given pid */
242 struct list_head list; /* in proc_qtu_data.sock_tag_list */
243 pid_t pid;
244
245 tag_t tag;
246};
247
248struct qtaguid_event_counts {
249 /* Various successful events */
250 atomic64_t sockets_tagged;
251 atomic64_t sockets_untagged;
252 atomic64_t counter_set_changes;
253 atomic64_t delete_cmds;
254 atomic64_t iface_events; /* Number of NETDEV_* events handled */
255
256 atomic64_t match_calls; /* Number of times iptables called mt */
257 /*
258 * match_found_sk_*: numbers related to the netfilter matching
259 * function finding a sock for the sk_buff.
260 * Total skbs processed is sum(match_found*).
261 */
262 atomic64_t match_found_sk; /* An sk was already in the sk_buff. */
263 /* The connection tracker had or didn't have the sk. */
264 atomic64_t match_found_sk_in_ct;
265 atomic64_t match_found_no_sk_in_ct;
266 /*
267 * No sk could be found. No apparent owner. Could happen with
268 * unsolicited traffic.
269 */
270 atomic64_t match_no_sk;
271 /*
272 * The file ptr in the sk_socket wasn't there.
273 * This might happen for traffic while the socket is being closed.
274 */
275 atomic64_t match_no_sk_file;
276};
277
278/* Track the set active_set for the given tag. */
279struct tag_counter_set {
280 struct tag_node tn;
281 int active_set;
282};
283
284/*----------------------------------------------*/
285/*
286 * The qtu uid data is used to track resources that are created directly or
287 * indirectly by processes (uid tracked).
288 * It is shared by the processes with the same uid.
289 * Some of the resource will be counted to prevent further rogue allocations,
290 * some will need freeing once the owner process (uid) exits.
291 */
292struct uid_tag_data {
293 struct rb_node node;
294 uid_t uid;
295
296 /*
297 * For the uid, how many accounting tags have been set.
298 */
299 int num_active_tags;
300 /* Track the number of proc_qtu_data that reference it */
301 int num_pqd;
302 struct rb_root tag_ref_tree;
303 /* No tag_node_tree_lock; use uid_tag_data_tree_lock */
304};
305
306struct tag_ref {
307 struct tag_node tn;
308
309 /*
310 * This tracks the number of active sockets that have a tag on them
311 * which matches this tag_ref.tn.tag.
312 * A tag ref can live on after the sockets are untagged.
313 * A tag ref can only be removed during a tag delete command.
314 */
315 int num_sock_tags;
316};
317
318struct proc_qtu_data {
319 struct rb_node node;
320 pid_t pid;
321
322 struct uid_tag_data *parent_tag_data;
323
324 /* Tracks the sock_tags that need freeing upon this proc's death */
325 struct list_head sock_tag_list;
326 /* No spinlock_t sock_tag_list_lock; use the global one. */
327};
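
Taken together, the structures in this header hang off each other roughly as follows (a sketch, not normative):

uid_tag_data (per uid, in uid_tag_data_tree)
  +-- tag_ref_tree --> tag_ref (per tag; num_sock_tags counts live sockets)
  +-- referenced by proc_qtu_data (per pid, in proc_qtu_data_tree)
        +-- sock_tag_list --> sock_tag (per tagged socket; also in the
                               global sock_tag_tree)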
328
329/*----------------------------------------------*/
330#endif /* ifndef __XT_QTAGUID_INTERNAL_H__ */
diff --git a/net/netfilter/xt_qtaguid_print.c b/net/netfilter/xt_qtaguid_print.c
new file mode 100644
index 00000000000..39176785c91
--- /dev/null
+++ b/net/netfilter/xt_qtaguid_print.c
@@ -0,0 +1,556 @@
1/*
2 * Pretty printing Support for iptables xt_qtaguid module.
3 *
4 * (C) 2011 Google, Inc
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11/*
12 * Most of the functions in this file just waste time if DEBUG is not defined.
13 * The matching xt_qtaguid_print.h provides empty static inline funcs if the
14 * needed debug flags are not defined.
15 * Those funcs that fail to allocate memory will panic as there is no need to
16 * hobble along just pretending to do the requested work.
17 */
18
19#define DEBUG
20
21#include <linux/fs.h>
22#include <linux/gfp.h>
23#include <linux/net.h>
24#include <linux/rbtree.h>
25#include <linux/slab.h>
26#include <linux/spinlock_types.h>
27
28
29#include "xt_qtaguid_internal.h"
30#include "xt_qtaguid_print.h"
31
32#ifdef DDEBUG
33
34static void _bug_on_err_or_null(void *ptr)
35{
36 if (IS_ERR_OR_NULL(ptr)) {
37 pr_err("qtaguid: kmalloc failed\n");
38 BUG();
39 }
40}
41
42char *pp_tag_t(tag_t *tag)
43{
44 char *res;
45
46 if (!tag)
47 res = kasprintf(GFP_ATOMIC, "tag_t@null{}");
48 else
49 res = kasprintf(GFP_ATOMIC,
50 "tag_t@%p{tag=0x%llx, uid=%u}",
51 tag, *tag, get_uid_from_tag(*tag));
52 _bug_on_err_or_null(res);
53 return res;
54}
55
56char *pp_data_counters(struct data_counters *dc, bool showValues)
57{
58 char *res;
59
60 if (!dc)
61 res = kasprintf(GFP_ATOMIC, "data_counters@null{}");
62 else if (showValues)
63 res = kasprintf(
64 GFP_ATOMIC, "data_counters@%p{"
65 "set0{"
66 "rx{"
67 "tcp{b=%llu, p=%llu}, "
68 "udp{b=%llu, p=%llu},"
69 "other{b=%llu, p=%llu}}, "
70 "tx{"
71 "tcp{b=%llu, p=%llu}, "
72 "udp{b=%llu, p=%llu},"
73 "other{b=%llu, p=%llu}}}, "
74 "set1{"
75 "rx{"
76 "tcp{b=%llu, p=%llu}, "
77 "udp{b=%llu, p=%llu},"
78 "other{b=%llu, p=%llu}}, "
79 "tx{"
80 "tcp{b=%llu, p=%llu}, "
81 "udp{b=%llu, p=%llu},"
82 "other{b=%llu, p=%llu}}}}",
83 dc,
84 dc->bpc[0][IFS_RX][IFS_TCP].bytes,
85 dc->bpc[0][IFS_RX][IFS_TCP].packets,
86 dc->bpc[0][IFS_RX][IFS_UDP].bytes,
87 dc->bpc[0][IFS_RX][IFS_UDP].packets,
88 dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].bytes,
89 dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].packets,
90 dc->bpc[0][IFS_TX][IFS_TCP].bytes,
91 dc->bpc[0][IFS_TX][IFS_TCP].packets,
92 dc->bpc[0][IFS_TX][IFS_UDP].bytes,
93 dc->bpc[0][IFS_TX][IFS_UDP].packets,
94 dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].bytes,
95 dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].packets,
96 dc->bpc[1][IFS_RX][IFS_TCP].bytes,
97 dc->bpc[1][IFS_RX][IFS_TCP].packets,
98 dc->bpc[1][IFS_RX][IFS_UDP].bytes,
99 dc->bpc[1][IFS_RX][IFS_UDP].packets,
100 dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].bytes,
101 dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].packets,
102 dc->bpc[1][IFS_TX][IFS_TCP].bytes,
103 dc->bpc[1][IFS_TX][IFS_TCP].packets,
104 dc->bpc[1][IFS_TX][IFS_UDP].bytes,
105 dc->bpc[1][IFS_TX][IFS_UDP].packets,
106 dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].bytes,
107 dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].packets);
108 else
109 res = kasprintf(GFP_ATOMIC, "data_counters@%p{...}", dc);
110 _bug_on_err_or_null(res);
111 return res;
112}
113
114char *pp_tag_node(struct tag_node *tn)
115{
116 char *tag_str;
117 char *res;
118
119 if (!tn) {
120 res = kasprintf(GFP_ATOMIC, "tag_node@null{}");
121 _bug_on_err_or_null(res);
122 return res;
123 }
124 tag_str = pp_tag_t(&tn->tag);
125 res = kasprintf(GFP_ATOMIC,
126 "tag_node@%p{tag=%s}",
127 tn, tag_str);
128 _bug_on_err_or_null(res);
129 kfree(tag_str);
130 return res;
131}
132
133char *pp_tag_ref(struct tag_ref *tr)
134{
135 char *tn_str;
136 char *res;
137
138 if (!tr) {
139 res = kasprintf(GFP_ATOMIC, "tag_ref@null{}");
140 _bug_on_err_or_null(res);
141 return res;
142 }
143 tn_str = pp_tag_node(&tr->tn);
144 res = kasprintf(GFP_ATOMIC,
145 "tag_ref@%p{%s, num_sock_tags=%d}",
146 tr, tn_str, tr->num_sock_tags);
147 _bug_on_err_or_null(res);
148 kfree(tn_str);
149 return res;
150}
151
152char *pp_tag_stat(struct tag_stat *ts)
153{
154 char *tn_str;
155 char *counters_str;
156 char *parent_counters_str;
157 char *res;
158
159 if (!ts) {
160 res = kasprintf(GFP_ATOMIC, "tag_stat@null{}");
161 _bug_on_err_or_null(res);
162 return res;
163 }
164 tn_str = pp_tag_node(&ts->tn);
165 counters_str = pp_data_counters(&ts->counters, true);
166 parent_counters_str = pp_data_counters(ts->parent_counters, false);
167 res = kasprintf(GFP_ATOMIC,
168 "tag_stat@%p{%s, counters=%s, parent_counters=%s}",
169 ts, tn_str, counters_str, parent_counters_str);
170 _bug_on_err_or_null(res);
171 kfree(tn_str);
172 kfree(counters_str);
173 kfree(parent_counters_str);
174 return res;
175}
176
177char *pp_iface_stat(struct iface_stat *is)
178{
179 char *res;
180 if (!is)
181 res = kasprintf(GFP_ATOMIC, "iface_stat@null{}");
182 else
183 res = kasprintf(GFP_ATOMIC, "iface_stat@%p{"
184 "list=list_head{...}, "
185 "ifname=%s, "
186 "total={rx={bytes=%llu, "
187 "packets=%llu}, "
188 "tx={bytes=%llu, "
189 "packets=%llu}}, "
190 "last_known_valid=%d, "
191 "last_known={rx={bytes=%llu, "
192 "packets=%llu}, "
193 "tx={bytes=%llu, "
194 "packets=%llu}}, "
195 "active=%d, "
196 "net_dev=%p, "
197 "proc_ptr=%p, "
198 "tag_stat_tree=rb_root{...}}",
199 is,
200 is->ifname,
201 is->totals[IFS_RX].bytes,
202 is->totals[IFS_RX].packets,
203 is->totals[IFS_TX].bytes,
204 is->totals[IFS_TX].packets,
205 is->last_known_valid,
206 is->last_known[IFS_RX].bytes,
207 is->last_known[IFS_RX].packets,
208 is->last_known[IFS_TX].bytes,
209 is->last_known[IFS_TX].packets,
210 is->active,
211 is->net_dev,
212 is->proc_ptr);
213 _bug_on_err_or_null(res);
214 return res;
215}
216
217char *pp_sock_tag(struct sock_tag *st)
218{
219 char *tag_str;
220 char *res;
221
222 if (!st) {
223 res = kasprintf(GFP_ATOMIC, "sock_tag@null{}");
224 _bug_on_err_or_null(res);
225 return res;
226 }
227 tag_str = pp_tag_t(&st->tag);
228 res = kasprintf(GFP_ATOMIC, "sock_tag@%p{"
229 "sock_node=rb_node{...}, "
230 "sk=%p socket=%p (f_count=%lu), list=list_head{...}, "
231 "pid=%u, tag=%s}",
232 st, st->sk, st->socket, atomic_long_read(
233 &st->socket->file->f_count),
234 st->pid, tag_str);
235 _bug_on_err_or_null(res);
236 kfree(tag_str);
237 return res;
238}
239
240char *pp_uid_tag_data(struct uid_tag_data *utd)
241{
242 char *res;
243
244 if (!utd)
245 res = kasprintf(GFP_ATOMIC, "uid_tag_data@null{}");
246 else
247 res = kasprintf(GFP_ATOMIC, "uid_tag_data@%p{"
248 "uid=%u, num_active_acct_tags=%d, "
249 "num_pqd=%d, "
250 "tag_node_tree=rb_root{...}, "
251 "proc_qtu_data_tree=rb_root{...}}",
252 utd, utd->uid,
253 utd->num_active_tags, utd->num_pqd);
254 _bug_on_err_or_null(res);
255 return res;
256}
257
258char *pp_proc_qtu_data(struct proc_qtu_data *pqd)
259{
260 char *parent_tag_data_str;
261 char *res;
262
263 if (!pqd) {
264 res = kasprintf(GFP_ATOMIC, "proc_qtu_data@null{}");
265 _bug_on_err_or_null(res);
266 return res;
267 }
268 parent_tag_data_str = pp_uid_tag_data(pqd->parent_tag_data);
269 res = kasprintf(GFP_ATOMIC, "proc_qtu_data@%p{"
270 "node=rb_node{...}, pid=%u, "
271 "parent_tag_data=%s, "
272 "sock_tag_list=list_head{...}}",
273 pqd, pqd->pid, parent_tag_data_str
274 );
275 _bug_on_err_or_null(res);
276 kfree(parent_tag_data_str);
277 return res;
278}
279
280/*------------------------------------------*/
281void prdebug_sock_tag_tree(int indent_level,
282 struct rb_root *sock_tag_tree)
283{
284 struct rb_node *node;
285 struct sock_tag *sock_tag_entry;
286 char *str;
287
288 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
289 return;
290
291 if (RB_EMPTY_ROOT(sock_tag_tree)) {
292 str = "sock_tag_tree=rb_root{}";
293 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
294 return;
295 }
296
297 str = "sock_tag_tree=rb_root{";
298 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
299 indent_level++;
300 for (node = rb_first(sock_tag_tree);
301 node;
302 node = rb_next(node)) {
303 sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
304 str = pp_sock_tag(sock_tag_entry);
305 pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
306 kfree(str);
307 }
308 indent_level--;
309 str = "}";
310 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
311}
312
313void prdebug_sock_tag_list(int indent_level,
314 struct list_head *sock_tag_list)
315{
316 struct sock_tag *sock_tag_entry;
317 char *str;
318
319 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
320 return;
321
322 if (list_empty(sock_tag_list)) {
323 str = "sock_tag_list=list_head{}";
324 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
325 return;
326 }
327
328 str = "sock_tag_list=list_head{";
329 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
330 indent_level++;
331 list_for_each_entry(sock_tag_entry, sock_tag_list, list) {
332 str = pp_sock_tag(sock_tag_entry);
333 pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
334 kfree(str);
335 }
336 indent_level--;
337 str = "}";
338 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
339}
340
341void prdebug_proc_qtu_data_tree(int indent_level,
342 struct rb_root *proc_qtu_data_tree)
343{
344 char *str;
345 struct rb_node *node;
346 struct proc_qtu_data *proc_qtu_data_entry;
347
348 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
349 return;
350
351 if (RB_EMPTY_ROOT(proc_qtu_data_tree)) {
352 str = "proc_qtu_data_tree=rb_root{}";
353 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
354 return;
355 }
356
357 str = "proc_qtu_data_tree=rb_root{";
358 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
359 indent_level++;
360 for (node = rb_first(proc_qtu_data_tree);
361 node;
362 node = rb_next(node)) {
363 proc_qtu_data_entry = rb_entry(node,
364 struct proc_qtu_data,
365 node);
366 str = pp_proc_qtu_data(proc_qtu_data_entry);
367 pr_debug("%*d: %s,\n", indent_level*2, indent_level,
368 str);
369 kfree(str);
370 indent_level++;
371 prdebug_sock_tag_list(indent_level,
372 &proc_qtu_data_entry->sock_tag_list);
373 indent_level--;
374
375 }
376 indent_level--;
377 str = "}";
378 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
379}
380
381void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree)
382{
383 char *str;
384 struct rb_node *node;
385 struct tag_ref *tag_ref_entry;
386
387 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
388 return;
389
390 if (RB_EMPTY_ROOT(tag_ref_tree)) {
391 str = "tag_ref_tree{}";
392 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
393 return;
394 }
395
396 str = "tag_ref_tree{";
397 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
398 indent_level++;
399 for (node = rb_first(tag_ref_tree);
400 node;
401 node = rb_next(node)) {
402 tag_ref_entry = rb_entry(node,
403 struct tag_ref,
404 tn.node);
405 str = pp_tag_ref(tag_ref_entry);
406 pr_debug("%*d: %s,\n", indent_level*2, indent_level,
407 str);
408 kfree(str);
409 }
410 indent_level--;
411 str = "}";
412 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
413}
414
415void prdebug_uid_tag_data_tree(int indent_level,
416 struct rb_root *uid_tag_data_tree)
417{
418 char *str;
419 struct rb_node *node;
420 struct uid_tag_data *uid_tag_data_entry;
421
422 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
423 return;
424
425 if (RB_EMPTY_ROOT(uid_tag_data_tree)) {
426 str = "uid_tag_data_tree=rb_root{}";
427 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
428 return;
429 }
430
431 str = "uid_tag_data_tree=rb_root{";
432 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
433 indent_level++;
434 for (node = rb_first(uid_tag_data_tree);
435 node;
436 node = rb_next(node)) {
437 uid_tag_data_entry = rb_entry(node, struct uid_tag_data,
438 node);
439 str = pp_uid_tag_data(uid_tag_data_entry);
440 pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
441 kfree(str);
442 if (!RB_EMPTY_ROOT(&uid_tag_data_entry->tag_ref_tree)) {
443 indent_level++;
444 prdebug_tag_ref_tree(indent_level,
445 &uid_tag_data_entry->tag_ref_tree);
446 indent_level--;
447 }
448 }
449 indent_level--;
450 str = "}";
451 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
452}
453
454void prdebug_tag_stat_tree(int indent_level,
455 struct rb_root *tag_stat_tree)
456{
457 char *str;
458 struct rb_node *node;
459 struct tag_stat *ts_entry;
460
461 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
462 return;
463
464 if (RB_EMPTY_ROOT(tag_stat_tree)) {
465 str = "tag_stat_tree{}";
466 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
467 return;
468 }
469
470 str = "tag_stat_tree{";
471 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
472 indent_level++;
473 for (node = rb_first(tag_stat_tree);
474 node;
475 node = rb_next(node)) {
476 ts_entry = rb_entry(node, struct tag_stat, tn.node);
477 str = pp_tag_stat(ts_entry);
478 pr_debug("%*d: %s\n", indent_level*2, indent_level,
479 str);
480 kfree(str);
481 }
482 indent_level--;
483 str = "}";
484 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
485}
486
487void prdebug_iface_stat_list(int indent_level,
488 struct list_head *iface_stat_list)
489{
490 char *str;
491 struct iface_stat *iface_entry;
492
493 if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
494 return;
495
496 if (list_empty(iface_stat_list)) {
497 str = "iface_stat_list=list_head{}";
498 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
499 return;
500 }
501
502 str = "iface_stat_list=list_head{";
503 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
504 indent_level++;
505 list_for_each_entry(iface_entry, iface_stat_list, list) {
506 str = pp_iface_stat(iface_entry);
507 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
508 kfree(str);
509
510 spin_lock_bh(&iface_entry->tag_stat_list_lock);
511 if (!RB_EMPTY_ROOT(&iface_entry->tag_stat_tree)) {
512 indent_level++;
513 prdebug_tag_stat_tree(indent_level,
514 &iface_entry->tag_stat_tree);
515 indent_level--;
516 }
517 spin_unlock_bh(&iface_entry->tag_stat_list_lock);
518 }
519 indent_level--;
520 str = "}";
521 pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
522}
523
524#endif /* ifdef DDEBUG */
525/*------------------------------------------*/
526static const char * const netdev_event_strings[] = {
527 "netdev_unknown",
528 "NETDEV_UP",
529 "NETDEV_DOWN",
530 "NETDEV_REBOOT",
531 "NETDEV_CHANGE",
532 "NETDEV_REGISTER",
533 "NETDEV_UNREGISTER",
534 "NETDEV_CHANGEMTU",
535 "NETDEV_CHANGEADDR",
536 "NETDEV_GOING_DOWN",
537 "NETDEV_CHANGENAME",
538 "NETDEV_FEAT_CHANGE",
539 "NETDEV_BONDING_FAILOVER",
540 "NETDEV_PRE_UP",
541 "NETDEV_PRE_TYPE_CHANGE",
542 "NETDEV_POST_TYPE_CHANGE",
543 "NETDEV_POST_INIT",
544 "NETDEV_UNREGISTER_BATCH",
545 "NETDEV_RELEASE",
546 "NETDEV_NOTIFY_PEERS",
547 "NETDEV_JOIN",
548};
549
550const char *netdev_evt_str(int netdev_event)
551{
552 if (netdev_event < 0
553 || netdev_event >= ARRAY_SIZE(netdev_event_strings))
554 return "bad event num";
555 return netdev_event_strings[netdev_event];
556}
diff --git a/net/netfilter/xt_qtaguid_print.h b/net/netfilter/xt_qtaguid_print.h
new file mode 100644
index 00000000000..b63871a0be5
--- /dev/null
+++ b/net/netfilter/xt_qtaguid_print.h
@@ -0,0 +1,120 @@
1/*
2 * Pretty printing Support for iptables xt_qtaguid module.
3 *
4 * (C) 2011 Google, Inc
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef __XT_QTAGUID_PRINT_H__
11#define __XT_QTAGUID_PRINT_H__
12
13#include "xt_qtaguid_internal.h"
14
15#ifdef DDEBUG
16
17char *pp_tag_t(tag_t *tag);
18char *pp_data_counters(struct data_counters *dc, bool showValues);
19char *pp_tag_node(struct tag_node *tn);
20char *pp_tag_ref(struct tag_ref *tr);
21char *pp_tag_stat(struct tag_stat *ts);
22char *pp_iface_stat(struct iface_stat *is);
23char *pp_sock_tag(struct sock_tag *st);
24char *pp_uid_tag_data(struct uid_tag_data *qtd);
25char *pp_proc_qtu_data(struct proc_qtu_data *pqd);
26
27/*------------------------------------------*/
28void prdebug_sock_tag_list(int indent_level,
29 struct list_head *sock_tag_list);
30void prdebug_sock_tag_tree(int indent_level,
31 struct rb_root *sock_tag_tree);
32void prdebug_proc_qtu_data_tree(int indent_level,
33 struct rb_root *proc_qtu_data_tree);
34void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree);
35void prdebug_uid_tag_data_tree(int indent_level,
36 struct rb_root *uid_tag_data_tree);
37void prdebug_tag_stat_tree(int indent_level,
38 struct rb_root *tag_stat_tree);
39void prdebug_iface_stat_list(int indent_level,
40 struct list_head *iface_stat_list);
41
42#else
43
44/*------------------------------------------*/
45static inline char *pp_tag_t(tag_t *tag)
46{
47 return NULL;
48}
49static inline char *pp_data_counters(struct data_counters *dc, bool showValues)
50{
51 return NULL;
52}
53static inline char *pp_tag_node(struct tag_node *tn)
54{
55 return NULL;
56}
57static inline char *pp_tag_ref(struct tag_ref *tr)
58{
59 return NULL;
60}
61static inline char *pp_tag_stat(struct tag_stat *ts)
62{
63 return NULL;
64}
65static inline char *pp_iface_stat(struct iface_stat *is)
66{
67 return NULL;
68}
69static inline char *pp_sock_tag(struct sock_tag *st)
70{
71 return NULL;
72}
73static inline char *pp_uid_tag_data(struct uid_tag_data *qtd)
74{
75 return NULL;
76}
77static inline char *pp_proc_qtu_data(struct proc_qtu_data *pqd)
78{
79 return NULL;
80}
81
82/*------------------------------------------*/
83static inline
84void prdebug_sock_tag_list(int indent_level,
85 struct list_head *sock_tag_list)
86{
87}
88static inline
89void prdebug_sock_tag_tree(int indent_level,
90 struct rb_root *sock_tag_tree)
91{
92}
93static inline
94void prdebug_proc_qtu_data_tree(int indent_level,
95 struct rb_root *proc_qtu_data_tree)
96{
97}
98static inline
99void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree)
100{
101}
102static inline
103void prdebug_uid_tag_data_tree(int indent_level,
104 struct rb_root *uid_tag_data_tree)
105{
106}
107static inline
108void prdebug_tag_stat_tree(int indent_level,
109 struct rb_root *tag_stat_tree)
110{
111}
112static inline
113void prdebug_iface_stat_list(int indent_level,
114 struct list_head *iface_stat_list)
115{
116}
117#endif
118/*------------------------------------------*/
119const char *netdev_evt_str(int netdev_event);
120#endif /* ifndef __XT_QTAGUID_PRINT_H__ */
diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c
new file mode 100644
index 00000000000..3c72bea2dd6
--- /dev/null
+++ b/net/netfilter/xt_quota2.c
@@ -0,0 +1,381 @@
1/*
2 * xt_quota2 - enhanced xt_quota that can count upwards and in packets
3 * as a minimal accounting match.
4 * by Jan Engelhardt <jengelh@medozas.de>, 2008
5 *
6 * Originally based on xt_quota.c:
7 * netfilter module to enforce network quotas
8 * Sam Johnston <samj@samj.net>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License; either
12 * version 2 of the License, as published by the Free Software Foundation.
13 */
14#include <linux/list.h>
15#include <linux/proc_fs.h>
16#include <linux/skbuff.h>
17#include <linux/spinlock.h>
18#include <asm/atomic.h>
19
20#include <linux/netfilter/x_tables.h>
21#include <linux/netfilter/xt_quota2.h>
22#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
23#include <linux/netfilter_ipv4/ipt_ULOG.h>
24#endif
25
26/**
27 * @lock: lock to protect quota writers from each other
28 */
29struct xt_quota_counter {
30 u_int64_t quota;
31 spinlock_t lock;
32 struct list_head list;
33 atomic_t ref;
34 char name[sizeof(((struct xt_quota_mtinfo2 *)NULL)->name)];
35 struct proc_dir_entry *procfs_entry;
36};
37
38#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
39/* Harald's favorite number +1 :D From ipt_ULOG.c */
40static unsigned int qlog_nl_event = 112;
41module_param_named(event_num, qlog_nl_event, uint, S_IRUGO | S_IWUSR);
42MODULE_PARM_DESC(event_num,
43		 "Event number for NETLINK_NFLOG message. 0 disables log. "
44		 "111 is what ipt_ULOG uses.");
45static struct sock *nflognl;
46#endif
47
48static LIST_HEAD(counter_list);
49static DEFINE_SPINLOCK(counter_list_lock);
50
51static struct proc_dir_entry *proc_xt_quota;
52static unsigned int quota_list_perms = S_IRUGO | S_IWUSR;
53static unsigned int quota_list_uid = 0;
54static unsigned int quota_list_gid = 0;
55module_param_named(perms, quota_list_perms, uint, S_IRUGO | S_IWUSR);
56module_param_named(uid, quota_list_uid, uint, S_IRUGO | S_IWUSR);
57module_param_named(gid, quota_list_gid, uint, S_IRUGO | S_IWUSR);
58
59
60#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
61static void quota2_log(unsigned int hooknum,
62 const struct sk_buff *skb,
63 const struct net_device *in,
64 const struct net_device *out,
65 const char *prefix)
66{
67 ulog_packet_msg_t *pm;
68 struct sk_buff *log_skb;
69 size_t size;
70 struct nlmsghdr *nlh;
71
72 if (!qlog_nl_event)
73 return;
74
75 size = NLMSG_SPACE(sizeof(*pm));
76 size = max(size, (size_t)NLMSG_GOODSIZE);
77 log_skb = alloc_skb(size, GFP_ATOMIC);
78 if (!log_skb) {
79 pr_err("xt_quota2: cannot alloc skb for logging\n");
80 return;
81 }
82
83 /* NLMSG_PUT() uses "goto nlmsg_failure" */
84 nlh = NLMSG_PUT(log_skb, /*pid*/0, /*seq*/0, qlog_nl_event,
85 sizeof(*pm));
86 pm = NLMSG_DATA(nlh);
87 if (skb->tstamp.tv64 == 0)
88 __net_timestamp((struct sk_buff *)skb);
89 pm->data_len = 0;
90 pm->hook = hooknum;
91 if (prefix != NULL)
92 strlcpy(pm->prefix, prefix, sizeof(pm->prefix));
93 else
94 *(pm->prefix) = '\0';
95 if (in)
96 strlcpy(pm->indev_name, in->name, sizeof(pm->indev_name));
97 else
98 pm->indev_name[0] = '\0';
99
100 if (out)
101 strlcpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
102 else
103 pm->outdev_name[0] = '\0';
104
105 NETLINK_CB(log_skb).dst_group = 1;
106	pr_debug("throwing 1 packet to netlink group 1\n");
107	netlink_broadcast(nflognl, log_skb, 0, 1, GFP_ATOMIC);
108	return;
109nlmsg_failure:  /* Used within NLMSG_PUT(); only reached on error */
110 pr_debug("xt_quota2: error during NLMSG_PUT\n");
111}
112#else
113static void quota2_log(unsigned int hooknum,
114 const struct sk_buff *skb,
115 const struct net_device *in,
116 const struct net_device *out,
117 const char *prefix)
118{
119}
120#endif /* if+else CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG */
121
122static int quota_proc_read(char *page, char **start, off_t offset,
123 int count, int *eof, void *data)
124{
125 struct xt_quota_counter *e = data;
126 int ret;
127
128 spin_lock_bh(&e->lock);
129 ret = snprintf(page, PAGE_SIZE, "%llu\n", e->quota);
130 spin_unlock_bh(&e->lock);
131 return ret;
132}
133
134static int quota_proc_write(struct file *file, const char __user *input,
135 unsigned long size, void *data)
136{
137 struct xt_quota_counter *e = data;
138	char buf[sizeof("18446744073709551615")];
139
140	if (size > sizeof(buf) - 1)
141		size = sizeof(buf) - 1;
142	if (copy_from_user(buf, input, size) != 0)
143		return -EFAULT;
144	buf[size] = '\0';
145
146 spin_lock_bh(&e->lock);
147 e->quota = simple_strtoull(buf, NULL, 0);
148 spin_unlock_bh(&e->lock);
149 return size;
150}
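
So each named counter is a plain procfs file. A userspace sketch of reading and resetting one follows; the "xt_quota" directory name comes from quota_mt2_init() below, while the counter name "q1" is hypothetical:

#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/proc/net/xt_quota/q1", "r");

	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("remaining: %s", buf);
		fclose(f);
	}

	/* Writing a decimal value resets the counter (quota_proc_write). */
	f = fopen("/proc/net/xt_quota/q1", "w");
	if (f) {
		fputs("1000000", f);
		fclose(f);
	}
	return 0;
}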
151
152static struct xt_quota_counter *
153q2_new_counter(const struct xt_quota_mtinfo2 *q, bool anon)
154{
155 struct xt_quota_counter *e;
156 unsigned int size;
157
158 /* Do not need all the procfs things for anonymous counters. */
159 size = anon ? offsetof(typeof(*e), list) : sizeof(*e);
160 e = kmalloc(size, GFP_KERNEL);
161 if (e == NULL)
162 return NULL;
163
164 e->quota = q->quota;
165 spin_lock_init(&e->lock);
166 if (!anon) {
167 INIT_LIST_HEAD(&e->list);
168 atomic_set(&e->ref, 1);
169 strlcpy(e->name, q->name, sizeof(e->name));
170 }
171 return e;
172}
173
174/**
175 * q2_get_counter - get a ref to a named counter, creating it if needed
176 * @q: match info holding the counter name
177 */
178static struct xt_quota_counter *
179q2_get_counter(const struct xt_quota_mtinfo2 *q)
180{
181 struct proc_dir_entry *p;
182 struct xt_quota_counter *e = NULL;
183 struct xt_quota_counter *new_e;
184
185 if (*q->name == '\0')
186 return q2_new_counter(q, true);
187
188 /* No need to hold a lock while getting a new counter */
189 new_e = q2_new_counter(q, false);
190 if (new_e == NULL)
191 goto out;
192
193 spin_lock_bh(&counter_list_lock);
194 list_for_each_entry(e, &counter_list, list)
195 if (strcmp(e->name, q->name) == 0) {
196 atomic_inc(&e->ref);
197 spin_unlock_bh(&counter_list_lock);
198 kfree(new_e);
199 pr_debug("xt_quota2: old counter name=%s", e->name);
200 return e;
201 }
202 e = new_e;
203 pr_debug("xt_quota2: new_counter name=%s", e->name);
204 list_add_tail(&e->list, &counter_list);
205	/* The entry having a refcount of 1 is not directly destructible.
206	 * This func has not yet returned the new entry, so iptables holds
207	 * no reference it could use to destroy the entry.
208	 * For another rule to destroy it, this func would first have to be
209	 * re-invoked and acquire a new ref to the same named quota.
210	 * Nobody will access e->procfs_entry either.
211	 * So it is safe to release the lock. */
212 spin_unlock_bh(&counter_list_lock);
213
214 /* create_proc_entry() is not spin_lock happy */
215 p = e->procfs_entry = create_proc_entry(e->name, quota_list_perms,
216 proc_xt_quota);
217
218 if (IS_ERR_OR_NULL(p)) {
219 spin_lock_bh(&counter_list_lock);
220 list_del(&e->list);
221 spin_unlock_bh(&counter_list_lock);
222 goto out;
223 }
224 p->data = e;
225 p->read_proc = quota_proc_read;
226 p->write_proc = quota_proc_write;
227 p->uid = quota_list_uid;
228 p->gid = quota_list_gid;
229 return e;
230
231 out:
232 kfree(e);
233 return NULL;
234}
235
236static int quota_mt2_check(const struct xt_mtchk_param *par)
237{
238 struct xt_quota_mtinfo2 *q = par->matchinfo;
239
240 pr_debug("xt_quota2: check() flags=0x%04x", q->flags);
241
242 if (q->flags & ~XT_QUOTA_MASK)
243 return -EINVAL;
244
245 q->name[sizeof(q->name)-1] = '\0';
246 if (*q->name == '.' || strchr(q->name, '/') != NULL) {
247 printk(KERN_ERR "xt_quota.3: illegal name\n");
248 return -EINVAL;
249 }
250
251 q->master = q2_get_counter(q);
252 if (q->master == NULL) {
253 printk(KERN_ERR "xt_quota.3: memory alloc failure\n");
254 return -ENOMEM;
255 }
256
257 return 0;
258}
259
260static void quota_mt2_destroy(const struct xt_mtdtor_param *par)
261{
262 struct xt_quota_mtinfo2 *q = par->matchinfo;
263 struct xt_quota_counter *e = q->master;
264
265 if (*q->name == '\0') {
266 kfree(e);
267 return;
268 }
269
270 spin_lock_bh(&counter_list_lock);
271 if (!atomic_dec_and_test(&e->ref)) {
272 spin_unlock_bh(&counter_list_lock);
273 return;
274 }
275
276 list_del(&e->list);
277 remove_proc_entry(e->name, proc_xt_quota);
278 spin_unlock_bh(&counter_list_lock);
279 kfree(e);
280}
281
282static bool
283quota_mt2(const struct sk_buff *skb, struct xt_action_param *par)
284{
285 struct xt_quota_mtinfo2 *q = (void *)par->matchinfo;
286 struct xt_quota_counter *e = q->master;
287 bool ret = q->flags & XT_QUOTA_INVERT;
288
289 spin_lock_bh(&e->lock);
290 if (q->flags & XT_QUOTA_GROW) {
291 /*
292 * While no_change is pointless in "grow" mode, we will
293 * implement it here simply to have a consistent behavior.
294 */
295 if (!(q->flags & XT_QUOTA_NO_CHANGE)) {
296 e->quota += (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
297 }
298 ret = true;
299 } else {
300 if (e->quota >= skb->len) {
301 if (!(q->flags & XT_QUOTA_NO_CHANGE))
302 e->quota -= (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
303 ret = !ret;
304 } else {
305 /* We are transitioning, log that fact. */
306 if (e->quota) {
307 quota2_log(par->hooknum,
308 skb,
309 par->in,
310 par->out,
311 q->name);
312 }
313 /* we do not allow even small packets from now on */
314 e->quota = 0;
315 }
316 }
317 spin_unlock_bh(&e->lock);
318 return ret;
319}
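
In iptables terms, a rule such as "iptables -A OUTPUT -m quota2 --name q1 --quota 1000000" would decrement the shared "q1" counter per byte and stop matching once it reaches zero. The --name/--quota option names (and --grow, --packets, --no-change, mapping onto XT_QUOTA_GROW, XT_QUOTA_PACKET and XT_QUOTA_NO_CHANGE above) are assumptions about the matching userspace extension, which is not part of this patch.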
320
321static struct xt_match quota_mt2_reg[] __read_mostly = {
322 {
323 .name = "quota2",
324 .revision = 3,
325 .family = NFPROTO_IPV4,
326 .checkentry = quota_mt2_check,
327 .match = quota_mt2,
328 .destroy = quota_mt2_destroy,
329 .matchsize = sizeof(struct xt_quota_mtinfo2),
330 .me = THIS_MODULE,
331 },
332 {
333 .name = "quota2",
334 .revision = 3,
335 .family = NFPROTO_IPV6,
336 .checkentry = quota_mt2_check,
337 .match = quota_mt2,
338 .destroy = quota_mt2_destroy,
339 .matchsize = sizeof(struct xt_quota_mtinfo2),
340 .me = THIS_MODULE,
341 },
342};
343
344static int __init quota_mt2_init(void)
345{
346 int ret;
347 pr_debug("xt_quota2: init()");
348
349#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
350 nflognl = netlink_kernel_create(&init_net,
351 NETLINK_NFLOG, 1, NULL,
352 NULL, THIS_MODULE);
353 if (!nflognl)
354 return -ENOMEM;
355#endif
356
357 proc_xt_quota = proc_mkdir("xt_quota", init_net.proc_net);
358 if (proc_xt_quota == NULL)
359 return -EACCES;
360
361 ret = xt_register_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
362 if (ret < 0)
363 remove_proc_entry("xt_quota", init_net.proc_net);
364 pr_debug("xt_quota2: init() %d", ret);
365 return ret;
366}
367
368static void __exit quota_mt2_exit(void)
369{
370 xt_unregister_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
371 remove_proc_entry("xt_quota", init_net.proc_net);
372}
373
374module_init(quota_mt2_init);
375module_exit(quota_mt2_exit);
376MODULE_DESCRIPTION("Xtables: countdown quota match; up counter");
377MODULE_AUTHOR("Sam Johnston <samj@samj.net>");
378MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
379MODULE_LICENSE("GPL");
380MODULE_ALIAS("ipt_quota2");
381MODULE_ALIAS("ip6t_quota2");
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index fe39f7e913d..ddf5e0507f5 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -35,7 +35,7 @@
35#include <net/netfilter/nf_conntrack.h> 35#include <net/netfilter/nf_conntrack.h>
36#endif 36#endif
37 37
38static void 38void
39xt_socket_put_sk(struct sock *sk) 39xt_socket_put_sk(struct sock *sk)
40{ 40{
41 if (sk->sk_state == TCP_TIME_WAIT) 41 if (sk->sk_state == TCP_TIME_WAIT)
@@ -43,6 +43,7 @@ xt_socket_put_sk(struct sock *sk)
43 else 43 else
44 sock_put(sk); 44 sock_put(sk);
45} 45}
46EXPORT_SYMBOL(xt_socket_put_sk);
46 47
47static int 48static int
48extract_icmp4_fields(const struct sk_buff *skb, 49extract_icmp4_fields(const struct sk_buff *skb,
@@ -101,9 +102,8 @@ extract_icmp4_fields(const struct sk_buff *skb,
101 return 0; 102 return 0;
102} 103}
103 104
104static bool 105struct sock*
105socket_match(const struct sk_buff *skb, struct xt_action_param *par, 106xt_socket_get4_sk(const struct sk_buff *skb, struct xt_action_param *par)
106 const struct xt_socket_mtinfo1 *info)
107{ 107{
108 const struct iphdr *iph = ip_hdr(skb); 108 const struct iphdr *iph = ip_hdr(skb);
109 struct udphdr _hdr, *hp = NULL; 109 struct udphdr _hdr, *hp = NULL;
@@ -120,7 +120,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
120 hp = skb_header_pointer(skb, ip_hdrlen(skb), 120 hp = skb_header_pointer(skb, ip_hdrlen(skb),
121 sizeof(_hdr), &_hdr); 121 sizeof(_hdr), &_hdr);
122 if (hp == NULL) 122 if (hp == NULL)
123 return false; 123 return NULL;
124 124
125 protocol = iph->protocol; 125 protocol = iph->protocol;
126 saddr = iph->saddr; 126 saddr = iph->saddr;
@@ -131,9 +131,9 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
131 } else if (iph->protocol == IPPROTO_ICMP) { 131 } else if (iph->protocol == IPPROTO_ICMP) {
132 if (extract_icmp4_fields(skb, &protocol, &saddr, &daddr, 132 if (extract_icmp4_fields(skb, &protocol, &saddr, &daddr,
133 &sport, &dport)) 133 &sport, &dport))
134 return false; 134 return NULL;
135 } else { 135 } else {
136 return false; 136 return NULL;
137 } 137 }
138 138
139#ifdef XT_SOCKET_HAVE_CONNTRACK 139#ifdef XT_SOCKET_HAVE_CONNTRACK
@@ -157,6 +157,23 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
157 157
158 sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol, 158 sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol,
159 saddr, daddr, sport, dport, par->in, NFT_LOOKUP_ANY); 159 saddr, daddr, sport, dport, par->in, NFT_LOOKUP_ANY);
160
161 pr_debug("proto %hhu %pI4:%hu -> %pI4:%hu (orig %pI4:%hu) sock %p\n",
162 protocol, &saddr, ntohs(sport),
163 &daddr, ntohs(dport),
164 &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
165
166 return sk;
167}
168EXPORT_SYMBOL(xt_socket_get4_sk);
169
170static bool
171socket_match(const struct sk_buff *skb, struct xt_action_param *par,
172 const struct xt_socket_mtinfo1 *info)
173{
174 struct sock *sk;
175
176 sk = xt_socket_get4_sk(skb, par);
160 if (sk != NULL) { 177 if (sk != NULL) {
161 bool wildcard; 178 bool wildcard;
162 bool transparent = true; 179 bool transparent = true;
@@ -179,11 +196,6 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
179 sk = NULL; 196 sk = NULL;
180 } 197 }
181 198
182 pr_debug("proto %hhu %pI4:%hu -> %pI4:%hu (orig %pI4:%hu) sock %p\n",
183 protocol, &saddr, ntohs(sport),
184 &daddr, ntohs(dport),
185 &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
186
187 return (sk != NULL); 199 return (sk != NULL);
188} 200}
189 201
@@ -253,8 +265,8 @@ extract_icmp6_fields(const struct sk_buff *skb,
253 return 0; 265 return 0;
254} 266}
255 267
256static bool 268struct sock*
257socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) 269xt_socket_get6_sk(const struct sk_buff *skb, struct xt_action_param *par)
258{ 270{
259 struct ipv6hdr *iph = ipv6_hdr(skb); 271 struct ipv6hdr *iph = ipv6_hdr(skb);
260 struct udphdr _hdr, *hp = NULL; 272 struct udphdr _hdr, *hp = NULL;
@@ -262,7 +274,6 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
262 struct in6_addr *daddr, *saddr; 274 struct in6_addr *daddr, *saddr;
263 __be16 dport, sport; 275 __be16 dport, sport;
264 int thoff, tproto; 276 int thoff, tproto;
265 const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
266 277
267 tproto = ipv6_find_hdr(skb, &thoff, -1, NULL); 278 tproto = ipv6_find_hdr(skb, &thoff, -1, NULL);
268 if (tproto < 0) { 279 if (tproto < 0) {
@@ -274,7 +285,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
274 hp = skb_header_pointer(skb, thoff, 285 hp = skb_header_pointer(skb, thoff,
275 sizeof(_hdr), &_hdr); 286 sizeof(_hdr), &_hdr);
276 if (hp == NULL) 287 if (hp == NULL)
277 return false; 288 return NULL;
278 289
279 saddr = &iph->saddr; 290 saddr = &iph->saddr;
280 sport = hp->source; 291 sport = hp->source;
@@ -284,13 +295,30 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
284 } else if (tproto == IPPROTO_ICMPV6) { 295 } else if (tproto == IPPROTO_ICMPV6) {
285 if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr, 296 if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr,
286 &sport, &dport)) 297 &sport, &dport))
287 return false; 298 return NULL;
288 } else { 299 } else {
289 return false; 300 return NULL;
290 } 301 }
291 302
292 sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto, 303 sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto,
293 saddr, daddr, sport, dport, par->in, NFT_LOOKUP_ANY); 304 saddr, daddr, sport, dport, par->in, NFT_LOOKUP_ANY);
305 pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu "
306 "(orig %pI6:%hu) sock %p\n",
307 tproto, saddr, ntohs(sport),
308 daddr, ntohs(dport),
309 &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
310 return sk;
311}
312EXPORT_SYMBOL(xt_socket_get6_sk);
313
314static bool
315socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
316{
317 struct sock *sk;
318 const struct xt_socket_mtinfo1 *info;
319
320 info = (struct xt_socket_mtinfo1 *) par->matchinfo;
321 sk = xt_socket_get6_sk(skb, par);
294 if (sk != NULL) { 322 if (sk != NULL) {
295 bool wildcard; 323 bool wildcard;
296 bool transparent = true; 324 bool transparent = true;
@@ -313,12 +341,6 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
313 sk = NULL; 341 sk = NULL;
314 } 342 }
315 343
316 pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu "
317 "(orig %pI6:%hu) sock %p\n",
318 tproto, saddr, ntohs(sport),
319 daddr, ntohs(dport),
320 &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
321
322 return (sk != NULL); 344 return (sk != NULL);
323} 345}
324#endif 346#endif
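
The xt_socket changes above factor the IPv4/IPv6 lookups into exported helpers (xt_socket_get4_sk(), xt_socket_get6_sk(), xt_socket_put_sk()) so that other netfilter modules, notably the xt_qtaguid match added elsewhere in this patch, can resolve the socket that owns a packet. A rough sketch of such a caller; the function below is hypothetical and only illustrates the lookup/put pairing:

#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <net/sock.h>
#include <net/tcp_states.h>

/* Prototypes for the helpers exported above (normally from a shared header). */
struct sock *xt_socket_get4_sk(const struct sk_buff *skb,
			       struct xt_action_param *par);
void xt_socket_put_sk(struct sock *sk);

/* Hypothetical consumer: map an IPv4 skb to the uid owning its socket. */
static uid_t example_skb_owner_uid(const struct sk_buff *skb,
				   struct xt_action_param *par)
{
	uid_t uid = (uid_t)-1;
	struct sock *sk = xt_socket_get4_sk(skb, par);

	if (sk) {
		/* Time-wait minisockets carry no credentials. */
		if (sk->sk_state != TCP_TIME_WAIT)
			uid = sock_i_uid(sk);
		/* Balance the reference taken by the lookup. */
		xt_socket_put_sk(sk);
	}
	return uid;
}
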
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index 78efe895b66..8e12c8a2b82 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -10,6 +10,11 @@ menuconfig RFKILL
10 To compile this driver as a module, choose M here: the 10 To compile this driver as a module, choose M here: the
11 module will be called rfkill. 11 module will be called rfkill.
12 12
13config RFKILL_PM
14 bool "Power off on suspend"
15 depends on RFKILL && PM
16 default y
17
13# LED trigger support 18# LED trigger support
14config RFKILL_LEDS 19config RFKILL_LEDS
15 bool 20 bool
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index be90640a277..df2dae6b272 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -769,6 +769,7 @@ void rfkill_pause_polling(struct rfkill *rfkill)
769} 769}
770EXPORT_SYMBOL(rfkill_pause_polling); 770EXPORT_SYMBOL(rfkill_pause_polling);
771 771
772#ifdef CONFIG_RFKILL_PM
772void rfkill_resume_polling(struct rfkill *rfkill) 773void rfkill_resume_polling(struct rfkill *rfkill)
773{ 774{
774 BUG_ON(!rfkill); 775 BUG_ON(!rfkill);
@@ -803,14 +804,17 @@ static int rfkill_resume(struct device *dev)
803 804
804 return 0; 805 return 0;
805} 806}
807#endif
806 808
807static struct class rfkill_class = { 809static struct class rfkill_class = {
808 .name = "rfkill", 810 .name = "rfkill",
809 .dev_release = rfkill_release, 811 .dev_release = rfkill_release,
810 .dev_attrs = rfkill_dev_attrs, 812 .dev_attrs = rfkill_dev_attrs,
811 .dev_uevent = rfkill_dev_uevent, 813 .dev_uevent = rfkill_dev_uevent,
814#ifdef CONFIG_RFKILL_PM
812 .suspend = rfkill_suspend, 815 .suspend = rfkill_suspend,
813 .resume = rfkill_resume, 816 .resume = rfkill_resume,
817#endif
814}; 818};
815 819
816bool rfkill_blocked(struct rfkill *rfkill) 820bool rfkill_blocked(struct rfkill *rfkill)
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 1f1ef70f34f..8e2a668c923 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -159,3 +159,14 @@ config LIB80211_DEBUG
159 from lib80211. 159 from lib80211.
160 160
161 If unsure, say N. 161 If unsure, say N.
162
163config CFG80211_ALLOW_RECONNECT
164 bool "Allow reconnect while already connected"
165 depends on CFG80211
166 default n
167 help
168	  The cfg80211 stack does not allow connecting while already
169	  connected. This option allows making a new connection in that case.
170
171 Select this option ONLY for wlan drivers that are specifically
172 built for such purposes.
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index f310a0d90c3..379ed3a1322 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2213,6 +2213,10 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
2213 } 2213 }
2214 nla_nest_end(msg, sinfoattr); 2214 nla_nest_end(msg, sinfoattr);
2215 2215
2216 if (sinfo->assoc_req_ies)
2217 NLA_PUT(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len,
2218 sinfo->assoc_req_ies);
2219
2216 return genlmsg_end(msg, hdr); 2220 return genlmsg_end(msg, hdr);
2217 2221
2218 nla_put_failure: 2222 nla_put_failure:
@@ -2240,6 +2244,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
2240 } 2244 }
2241 2245
2242 while (1) { 2246 while (1) {
2247 memset(&sinfo, 0, sizeof(sinfo));
2243 err = dev->ops->dump_station(&dev->wiphy, netdev, sta_idx, 2248 err = dev->ops->dump_station(&dev->wiphy, netdev, sta_idx,
2244 mac_addr, &sinfo); 2249 mac_addr, &sinfo);
2245 if (err == -ENOENT) 2250 if (err == -ENOENT)
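
The memset added to the dump loop matters because the same station_info structure is reused across iterations: a driver that fills in only some fields would otherwise leave the previous station's values (for example the assoc_req_ies pointer added above) visible for the next one. A small userspace-style illustration of the failure mode, using stand-in types (all names below are illustrative):

#include <stdio.h>
#include <string.h>

/* Stand-in for struct station_info; assoc_req_ies mirrors the optional
 * member used above. */
struct sta_info_ex {
	const char *assoc_req_ies;
	int tx_bytes;
};

/* Hypothetical driver fill: always sets tx_bytes, sets IEs only for sta0. */
static void fill_station(struct sta_info_ex *si, int idx)
{
	si->tx_bytes = 1000 * idx;
	if (idx == 0)
		si->assoc_req_ies = "ies-for-sta0";
}

int main(void)
{
	struct sta_info_ex sinfo;
	int i;

	for (i = 0; i < 3; i++) {
		/* Without this memset, sta0's IEs would leak into sta1/sta2. */
		memset(&sinfo, 0, sizeof(sinfo));
		fill_station(&sinfo, i);
		printf("sta%d: tx=%d ies=%s\n", i, sinfo.tx_bytes,
		       sinfo.assoc_req_ies ? sinfo.assoc_req_ies : "(none)");
	}
	return 0;
}
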
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 90b73d1f902..092775af035 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1781,6 +1781,7 @@ static void restore_alpha2(char *alpha2, bool reset_user)
1781static void restore_regulatory_settings(bool reset_user) 1781static void restore_regulatory_settings(bool reset_user)
1782{ 1782{
1783 char alpha2[2]; 1783 char alpha2[2];
1784 char world_alpha2[2];
1784 struct reg_beacon *reg_beacon, *btmp; 1785 struct reg_beacon *reg_beacon, *btmp;
1785 struct regulatory_request *reg_request, *tmp; 1786 struct regulatory_request *reg_request, *tmp;
1786 LIST_HEAD(tmp_reg_req_list); 1787 LIST_HEAD(tmp_reg_req_list);
@@ -1831,11 +1832,13 @@ static void restore_regulatory_settings(bool reset_user)
1831 1832
1832 /* First restore to the basic regulatory settings */ 1833 /* First restore to the basic regulatory settings */
1833 cfg80211_regdomain = cfg80211_world_regdom; 1834 cfg80211_regdomain = cfg80211_world_regdom;
1835 world_alpha2[0] = cfg80211_regdomain->alpha2[0];
1836 world_alpha2[1] = cfg80211_regdomain->alpha2[1];
1834 1837
1835 mutex_unlock(&reg_mutex); 1838 mutex_unlock(&reg_mutex);
1836 mutex_unlock(&cfg80211_mutex); 1839 mutex_unlock(&cfg80211_mutex);
1837 1840
1838 regulatory_hint_core(cfg80211_regdomain->alpha2); 1841 regulatory_hint_core(world_alpha2);
1839 1842
1840 /* 1843 /*
1841 * This restores the ieee80211_regdom module parameter 1844 * This restores the ieee80211_regdom module parameter
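
The world_alpha2 copy above is taken while reg_mutex is still held because cfg80211_regdomain can be replaced (and the old regdomain freed) as soon as the locks are dropped, so regulatory_hint_core() must not chase the shared pointer afterwards. A generic userspace sketch of the same snapshot-under-lock pattern, with hypothetical names:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

/* Shared state guarded by g_lock; another thread may rewrite it. */
static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
static char g_alpha2[3] = "00";

/* Stand-in for regulatory_hint_core(): consumes an alpha2 string. */
static void hint_core(const char *alpha2)
{
	printf("hint: %.2s\n", alpha2);
}

static void restore_settings(void)
{
	char alpha2[3];

	pthread_mutex_lock(&g_lock);
	memcpy(alpha2, g_alpha2, sizeof(alpha2));  /* snapshot under the lock */
	pthread_mutex_unlock(&g_lock);

	hint_core(alpha2);  /* use the private copy, not the shared buffer */
}

int main(void)
{
	restore_settings();
	return 0;
}
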
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index ae0c2256ba3..cbbc92731ec 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -17,7 +17,7 @@
17#include "nl80211.h" 17#include "nl80211.h"
18#include "wext-compat.h" 18#include "wext-compat.h"
19 19
20#define IEEE80211_SCAN_RESULT_EXPIRE (15 * HZ) 20#define IEEE80211_SCAN_RESULT_EXPIRE (3 * HZ)
21 21
22void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) 22void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)
23{ 23{
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index b7b6ff8be55..cf4be21236b 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -659,8 +659,10 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
659 wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) 659 wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
660 return; 660 return;
661 661
662#ifndef CONFIG_CFG80211_ALLOW_RECONNECT
662 if (wdev->sme_state != CFG80211_SME_CONNECTED) 663 if (wdev->sme_state != CFG80211_SME_CONNECTED)
663 return; 664 return;
665#endif
664 666
665 if (wdev->current_bss) { 667 if (wdev->current_bss) {
666 cfg80211_unhold_bss(wdev->current_bss); 668 cfg80211_unhold_bss(wdev->current_bss);
@@ -758,10 +760,14 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev,
758 760
759 ASSERT_WDEV_LOCK(wdev); 761 ASSERT_WDEV_LOCK(wdev);
760 762
763#ifndef CONFIG_CFG80211_ALLOW_RECONNECT
761 if (wdev->sme_state != CFG80211_SME_IDLE) 764 if (wdev->sme_state != CFG80211_SME_IDLE)
762 return -EALREADY; 765 return -EALREADY;
763 766
764 if (WARN_ON(wdev->connect_keys)) { 767 if (WARN_ON(wdev->connect_keys)) {
768#else
769 if (wdev->connect_keys) {
770#endif
765 kfree(wdev->connect_keys); 771 kfree(wdev->connect_keys);
766 wdev->connect_keys = NULL; 772 wdev->connect_keys = NULL;
767 } 773 }