Diffstat (limited to 'net/bluetooth/hci_sock.c')
-rw-r--r--	net/bluetooth/hci_sock.c	| 470
1 file changed, 383 insertions(+), 87 deletions(-)
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index b2eb2b93580f..5914623f426a 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -42,14 +42,14 @@
 #include <linux/ioctl.h>
 #include <net/sock.h>
 
-#include <asm/system.h>
 #include <linux/uaccess.h>
 #include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci_mon.h>
 
-static bool enable_mgmt;
+static atomic_t monitor_promisc = ATOMIC_INIT(0);
 
 /* ----- HCI socket interface ----- */
 
@@ -85,22 +85,20 @@ static struct bt_sock_list hci_sk_list = {
 };
 
 /* Send frame to RAW socket */
-void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
-							struct sock *skip_sk)
+void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct sock *sk;
 	struct hlist_node *node;
+	struct sk_buff *skb_copy = NULL;
 
 	BT_DBG("hdev %p len %d", hdev, skb->len);
 
 	read_lock(&hci_sk_list.lock);
+
 	sk_for_each(sk, node, &hci_sk_list.head) {
 		struct hci_filter *flt;
 		struct sk_buff *nskb;
 
-		if (sk == skip_sk)
-			continue;
-
 		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
 			continue;
 
@@ -108,12 +106,9 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
 		if (skb->sk == sk)
 			continue;
 
-		if (bt_cb(skb)->channel != hci_pi(sk)->channel)
+		if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
 			continue;
 
-		if (bt_cb(skb)->channel == HCI_CHANNEL_CONTROL)
-			goto clone;
-
 		/* Apply filter */
 		flt = &hci_pi(sk)->filter;
 
@@ -137,21 +132,303 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
 				continue;
 		}
 
-clone:
+		if (!skb_copy) {
+			/* Create a private copy with headroom */
+			skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
+			if (!skb_copy)
+				continue;
+
+			/* Put type byte before the data */
+			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
+		}
+
+		nskb = skb_clone(skb_copy, GFP_ATOMIC);
+		if (!nskb)
+			continue;
+
+		if (sock_queue_rcv_skb(sk, nskb))
+			kfree_skb(nskb);
+	}
+
+	read_unlock(&hci_sk_list.lock);
+
+	kfree_skb(skb_copy);
+}
+
+/* Send frame to control socket */
+void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
+{
+	struct sock *sk;
+	struct hlist_node *node;
+
+	BT_DBG("len %d", skb->len);
+
+	read_lock(&hci_sk_list.lock);
+
+	sk_for_each(sk, node, &hci_sk_list.head) {
+		struct sk_buff *nskb;
+
+		/* Skip the original socket */
+		if (sk == skip_sk)
+			continue;
+
+		if (sk->sk_state != BT_BOUND)
+			continue;
+
+		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
+			continue;
+
 		nskb = skb_clone(skb, GFP_ATOMIC);
 		if (!nskb)
 			continue;
 
-		/* Put type byte before the data */
-		if (bt_cb(skb)->channel == HCI_CHANNEL_RAW)
-			memcpy(skb_push(nskb, 1), &bt_cb(nskb)->pkt_type, 1);
+		if (sock_queue_rcv_skb(sk, nskb))
+			kfree_skb(nskb);
+	}
+
+	read_unlock(&hci_sk_list.lock);
+}
+
+/* Send frame to monitor socket */
+void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct sock *sk;
+	struct hlist_node *node;
+	struct sk_buff *skb_copy = NULL;
+	__le16 opcode;
+
+	if (!atomic_read(&monitor_promisc))
+		return;
+
+	BT_DBG("hdev %p len %d", hdev, skb->len);
+
+	switch (bt_cb(skb)->pkt_type) {
+	case HCI_COMMAND_PKT:
+		opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
+		break;
+	case HCI_EVENT_PKT:
+		opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
+		break;
+	case HCI_ACLDATA_PKT:
+		if (bt_cb(skb)->incoming)
+			opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
+		else
+			opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
+		break;
+	case HCI_SCODATA_PKT:
+		if (bt_cb(skb)->incoming)
+			opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
+		else
+			opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
+		break;
+	default:
+		return;
+	}
+
+	read_lock(&hci_sk_list.lock);
+
+	sk_for_each(sk, node, &hci_sk_list.head) {
+		struct sk_buff *nskb;
+
+		if (sk->sk_state != BT_BOUND)
+			continue;
+
+		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
+			continue;
+
+		if (!skb_copy) {
+			struct hci_mon_hdr *hdr;
+
+			/* Create a private copy with headroom */
+			skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC);
+			if (!skb_copy)
+				continue;
+
+			/* Put header before the data */
+			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
+			hdr->opcode = opcode;
+			hdr->index = cpu_to_le16(hdev->id);
+			hdr->len = cpu_to_le16(skb->len);
+		}
+
+		nskb = skb_clone(skb_copy, GFP_ATOMIC);
+		if (!nskb)
+			continue;
+
+		if (sock_queue_rcv_skb(sk, nskb))
+			kfree_skb(nskb);
+	}
+
+	read_unlock(&hci_sk_list.lock);
+
+	kfree_skb(skb_copy);
+}
+
+static void send_monitor_event(struct sk_buff *skb)
+{
+	struct sock *sk;
+	struct hlist_node *node;
+
+	BT_DBG("len %d", skb->len);
+
+	read_lock(&hci_sk_list.lock);
+
+	sk_for_each(sk, node, &hci_sk_list.head) {
+		struct sk_buff *nskb;
+
+		if (sk->sk_state != BT_BOUND)
+			continue;
+
+		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
+			continue;
+
+		nskb = skb_clone(skb, GFP_ATOMIC);
+		if (!nskb)
+			continue;
 
 		if (sock_queue_rcv_skb(sk, nskb))
 			kfree_skb(nskb);
 	}
+
 	read_unlock(&hci_sk_list.lock);
 }
 
+static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
+{
+	struct hci_mon_hdr *hdr;
+	struct hci_mon_new_index *ni;
+	struct sk_buff *skb;
+	__le16 opcode;
+
+	switch (event) {
+	case HCI_DEV_REG:
+		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
+		if (!skb)
+			return NULL;
+
+		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
+		ni->type = hdev->dev_type;
+		ni->bus = hdev->bus;
+		bacpy(&ni->bdaddr, &hdev->bdaddr);
+		memcpy(ni->name, hdev->name, 8);
+
+		opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
+		break;
+
+	case HCI_DEV_UNREG:
+		skb = bt_skb_alloc(0, GFP_ATOMIC);
+		if (!skb)
+			return NULL;
+
+		opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
+		break;
+
+	default:
+		return NULL;
+	}
+
+	__net_timestamp(skb);
+
+	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
+	hdr->opcode = opcode;
+	hdr->index = cpu_to_le16(hdev->id);
+	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
+
+	return skb;
+}
+
+static void send_monitor_replay(struct sock *sk)
+{
+	struct hci_dev *hdev;
+
+	read_lock(&hci_dev_list_lock);
+
+	list_for_each_entry(hdev, &hci_dev_list, list) {
+		struct sk_buff *skb;
+
+		skb = create_monitor_event(hdev, HCI_DEV_REG);
+		if (!skb)
+			continue;
+
+		if (sock_queue_rcv_skb(sk, skb))
+			kfree_skb(skb);
+	}
+
+	read_unlock(&hci_dev_list_lock);
+}
+
+/* Generate internal stack event */
+static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
+{
+	struct hci_event_hdr *hdr;
+	struct hci_ev_stack_internal *ev;
+	struct sk_buff *skb;
+
+	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
+	hdr->evt = HCI_EV_STACK_INTERNAL;
+	hdr->plen = sizeof(*ev) + dlen;
+
+	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
+	ev->type = type;
+	memcpy(ev->data, data, dlen);
+
+	bt_cb(skb)->incoming = 1;
+	__net_timestamp(skb);
+
+	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
+	skb->dev = (void *) hdev;
+	hci_send_to_sock(hdev, skb);
+	kfree_skb(skb);
+}
+
+void hci_sock_dev_event(struct hci_dev *hdev, int event)
+{
+	struct hci_ev_si_device ev;
+
+	BT_DBG("hdev %s event %d", hdev->name, event);
+
+	/* Send event to monitor */
+	if (atomic_read(&monitor_promisc)) {
+		struct sk_buff *skb;
+
+		skb = create_monitor_event(hdev, event);
+		if (skb) {
+			send_monitor_event(skb);
+			kfree_skb(skb);
+		}
+	}
+
+	/* Send event to sockets */
+	ev.event = event;
+	ev.dev_id = hdev->id;
+	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
+
+	if (event == HCI_DEV_UNREG) {
+		struct sock *sk;
+		struct hlist_node *node;
+
+		/* Detach sockets from device */
+		read_lock(&hci_sk_list.lock);
+		sk_for_each(sk, node, &hci_sk_list.head) {
+			bh_lock_sock_nested(sk);
+			if (hci_pi(sk)->hdev == hdev) {
+				hci_pi(sk)->hdev = NULL;
+				sk->sk_err = EPIPE;
+				sk->sk_state = BT_OPEN;
+				sk->sk_state_change(sk);
+
+				hci_dev_put(hdev);
+			}
+			bh_unlock_sock(sk);
+		}
+		read_unlock(&hci_sk_list.lock);
+	}
+}
+
 static int hci_sock_release(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
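
Both hci_send_to_monitor() and create_monitor_event() above reserve HCI_MON_HDR_SIZE bytes of headroom and fill an opcode/index/len triple before queueing the frame. The header definition itself lives in the new <net/bluetooth/hci_mon.h> and is not part of this file's diff; judging only from how the fields are written above, it is a 6-byte little-endian record along these lines (a sketch, not the authoritative definition):

/* Sketch only: reconstructed from the hdr->opcode/index/len writes above.
 * The authoritative definition is in the companion hci_mon.h patch.
 */
struct hci_mon_hdr {
	__le16	opcode;		/* HCI_MON_* opcode, e.g. HCI_MON_EVENT_PKT */
	__le16	index;		/* controller index, hdev->id */
	__le16	len;		/* length of the payload that follows */
} __packed;

#define HCI_MON_HDR_SIZE 6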
@@ -164,6 +441,9 @@ static int hci_sock_release(struct socket *sock)
 
 	hdev = hci_pi(sk)->hdev;
 
+	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
+		atomic_dec(&monitor_promisc);
+
 	bt_sock_unlink(&hci_sk_list, sk);
 
 	if (hdev) {
@@ -190,7 +470,7 @@ static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
 
 	hci_dev_lock(hdev);
 
-	err = hci_blacklist_add(hdev, &bdaddr);
+	err = hci_blacklist_add(hdev, &bdaddr, 0);
 
 	hci_dev_unlock(hdev);
 
@@ -207,7 +487,7 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
 
 	hci_dev_lock(hdev);
 
-	err = hci_blacklist_del(hdev, &bdaddr);
+	err = hci_blacklist_del(hdev, &bdaddr, 0);
 
 	hci_dev_unlock(hdev);
 
@@ -340,34 +620,69 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
 	if (haddr.hci_family != AF_BLUETOOTH)
 		return -EINVAL;
 
-	if (haddr.hci_channel > HCI_CHANNEL_CONTROL)
-		return -EINVAL;
-
-	if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
-		if (!enable_mgmt)
-			return -EINVAL;
-		set_bit(HCI_PI_MGMT_INIT, &hci_pi(sk)->flags);
-	}
-
 	lock_sock(sk);
 
-	if (sk->sk_state == BT_BOUND || hci_pi(sk)->hdev) {
+	if (sk->sk_state == BT_BOUND) {
 		err = -EALREADY;
 		goto done;
 	}
 
-	if (haddr.hci_dev != HCI_DEV_NONE) {
-		hdev = hci_dev_get(haddr.hci_dev);
-		if (!hdev) {
-			err = -ENODEV;
+	switch (haddr.hci_channel) {
+	case HCI_CHANNEL_RAW:
+		if (hci_pi(sk)->hdev) {
+			err = -EALREADY;
 			goto done;
 		}
 
-		atomic_inc(&hdev->promisc);
+		if (haddr.hci_dev != HCI_DEV_NONE) {
+			hdev = hci_dev_get(haddr.hci_dev);
+			if (!hdev) {
+				err = -ENODEV;
+				goto done;
+			}
+
+			atomic_inc(&hdev->promisc);
+		}
+
+		hci_pi(sk)->hdev = hdev;
+		break;
+
+	case HCI_CHANNEL_CONTROL:
+		if (haddr.hci_dev != HCI_DEV_NONE) {
+			err = -EINVAL;
+			goto done;
+		}
+
+		if (!capable(CAP_NET_ADMIN)) {
+			err = -EPERM;
+			goto done;
+		}
+
+		break;
+
+	case HCI_CHANNEL_MONITOR:
+		if (haddr.hci_dev != HCI_DEV_NONE) {
+			err = -EINVAL;
+			goto done;
+		}
+
+		if (!capable(CAP_NET_RAW)) {
+			err = -EPERM;
+			goto done;
+		}
+
+		send_monitor_replay(sk);
+
+		atomic_inc(&monitor_promisc);
+		break;
+
+	default:
+		err = -EINVAL;
+		goto done;
 	}
 
+
 	hci_pi(sk)->channel = haddr.hci_channel;
-	hci_pi(sk)->hdev = hdev;
 	sk->sk_state = BT_BOUND;
 
 done:
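
To illustrate the new bind() semantics: a monitor client binds with hci_dev set to HCI_DEV_NONE and hci_channel set to HCI_CHANNEL_MONITOR, and must have CAP_NET_RAW. A minimal userspace sketch follows; it is not part of the patch, and it assumes the sockaddr_hci layout and constants exposed by the kernel's hci_sock.h (here pulled in via the BlueZ <bluetooth/hci.h> header):

/* Minimal sketch of a monitor client; assumes the usual BlueZ headers
 * provide sockaddr_hci, BTPROTO_HCI, HCI_DEV_NONE and the channel
 * constants. Requires CAP_NET_RAW, per the bind() change above.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

#ifndef HCI_CHANNEL_MONITOR
#define HCI_CHANNEL_MONITOR 2	/* value as defined in the kernel's hci_sock.h */
#endif

static int open_monitor(void)
{
	struct sockaddr_hci addr;
	int fd;

	fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.hci_family = AF_BLUETOOTH;
	addr.hci_dev = HCI_DEV_NONE;		/* monitor is not per-device */
	addr.hci_channel = HCI_CHANNEL_MONITOR;

	if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}

	return fd;
}

Right after a successful bind() the kernel replays one HCI_MON_NEW_INDEX event per registered controller (send_monitor_replay() above), so the reader learns about existing adapters before any live traffic arrives.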
@@ -462,7 +777,15 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	skb_reset_transport_header(skb);
 	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
 
-	hci_sock_cmsg(sk, msg, skb);
+	switch (hci_pi(sk)->channel) {
+	case HCI_CHANNEL_RAW:
+		hci_sock_cmsg(sk, msg, skb);
+		break;
+	case HCI_CHANNEL_CONTROL:
+	case HCI_CHANNEL_MONITOR:
+		sock_recv_timestamp(msg, sk, skb);
+		break;
+	}
 
 	skb_free_datagram(sk, skb);
 
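
On the monitor channel, recvmsg() only attaches a timestamp via sock_recv_timestamp(), and each queued skb is returned as one datagram, so a reader can consume frames with plain recv() calls, one frame at a time. A hedged sketch of such a read loop, reusing the header layout sketched earlier (illustrative only, not part of the patch):

/* Illustrative only: each recv() returns exactly one queued monitor
 * frame, i.e. a 6-byte header followed by hdr.len bytes of payload.
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>

static void monitor_loop(int fd)
{
	unsigned char buf[2048];

	for (;;) {
		ssize_t n = recv(fd, buf, sizeof(buf), 0);
		uint16_t opcode, index, len;

		if (n < 6)
			break;

		/* header fields are little endian on the wire */
		opcode = buf[0] | (buf[1] << 8);
		index  = buf[2] | (buf[3] << 8);
		len    = buf[4] | (buf[5] << 8);

		printf("opcode %u index %u len %u (got %zd bytes)\n",
		       opcode, index, len, n);
	}
}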
@@ -496,6 +819,9 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 	case HCI_CHANNEL_CONTROL:
 		err = mgmt_control(sk, msg, len);
 		goto done;
+	case HCI_CHANNEL_MONITOR:
+		err = -EOPNOTSUPP;
+		goto done;
 	default:
 		err = -EINVAL;
 		goto done;
@@ -575,6 +901,11 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char
 
 	lock_sock(sk);
 
+	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
+		err = -EINVAL;
+		goto done;
+	}
+
 	switch (optname) {
 	case HCI_DATA_DIR:
 		if (get_user(opt, (int __user *)optval)) {
@@ -637,6 +968,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char
 		break;
 	}
 
+done:
 	release_sock(sk);
 	return err;
 }
@@ -645,11 +977,20 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char
 {
 	struct hci_ufilter uf;
 	struct sock *sk = sock->sk;
-	int len, opt;
+	int len, opt, err = 0;
+
+	BT_DBG("sk %p, opt %d", sk, optname);
 
 	if (get_user(len, optlen))
 		return -EFAULT;
 
+	lock_sock(sk);
+
+	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
+		err = -EINVAL;
+		goto done;
+	}
+
 	switch (optname) {
 	case HCI_DATA_DIR:
 		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
@@ -658,7 +999,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char
 			opt = 0;
 
 		if (put_user(opt, optval))
-			return -EFAULT;
+			err = -EFAULT;
 		break;
 
 	case HCI_TIME_STAMP:
@@ -668,7 +1009,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char
 			opt = 0;
 
 		if (put_user(opt, optval))
-			return -EFAULT;
+			err = -EFAULT;
 		break;
 
 	case HCI_FILTER:
@@ -683,15 +1024,17 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char
 
 		len = min_t(unsigned int, len, sizeof(uf));
 		if (copy_to_user(optval, &uf, len))
-			return -EFAULT;
+			err = -EFAULT;
 		break;
 
 	default:
-		return -ENOPROTOOPT;
+		err = -ENOPROTOOPT;
 		break;
 	}
 
-	return 0;
+done:
+	release_sock(sk);
+	return err;
 }
 
 static const struct proto_ops hci_sock_ops = {
@@ -749,52 +1092,12 @@ static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
 	return 0;
 }
 
-static int hci_sock_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
-{
-	struct hci_dev *hdev = (struct hci_dev *) ptr;
-	struct hci_ev_si_device ev;
-
-	BT_DBG("hdev %s event %ld", hdev->name, event);
-
-	/* Send event to sockets */
-	ev.event = event;
-	ev.dev_id = hdev->id;
-	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
-
-	if (event == HCI_DEV_UNREG) {
-		struct sock *sk;
-		struct hlist_node *node;
-
-		/* Detach sockets from device */
-		read_lock(&hci_sk_list.lock);
-		sk_for_each(sk, node, &hci_sk_list.head) {
-			bh_lock_sock_nested(sk);
-			if (hci_pi(sk)->hdev == hdev) {
-				hci_pi(sk)->hdev = NULL;
-				sk->sk_err = EPIPE;
-				sk->sk_state = BT_OPEN;
-				sk->sk_state_change(sk);
-
-				hci_dev_put(hdev);
-			}
-			bh_unlock_sock(sk);
-		}
-		read_unlock(&hci_sk_list.lock);
-	}
-
-	return NOTIFY_DONE;
-}
-
 static const struct net_proto_family hci_sock_family_ops = {
 	.family	= PF_BLUETOOTH,
 	.owner	= THIS_MODULE,
 	.create	= hci_sock_create,
 };
 
-static struct notifier_block hci_sock_nblock = {
-	.notifier_call = hci_sock_dev_event
-};
-
 int __init hci_sock_init(void)
 {
 	int err;
@@ -807,8 +1110,6 @@ int __init hci_sock_init(void)
 	if (err < 0)
 		goto error;
 
-	hci_register_notifier(&hci_sock_nblock);
-
 	BT_INFO("HCI socket layer initialized");
 
 	return 0;
@@ -824,10 +1125,5 @@ void hci_sock_cleanup(void)
 	if (bt_sock_unregister(BTPROTO_HCI) < 0)
 		BT_ERR("HCI socket unregistration failed");
 
-	hci_unregister_notifier(&hci_sock_nblock);
-
 	proto_unregister(&hci_sk_proto);
 }
-
-module_param(enable_mgmt, bool, 0644);
-MODULE_PARM_DESC(enable_mgmt, "Enable Management interface");