-rw-r--r--  drivers/bluetooth/Kconfig | 35
-rw-r--r--  drivers/bluetooth/Makefile | 4
-rw-r--r--  drivers/bluetooth/bluecard_cs.c | 5
-rw-r--r--  drivers/bluetooth/bpa10x.c | 624
-rw-r--r--  drivers/bluetooth/bt3c_cs.c | 5
-rw-r--r--  drivers/bluetooth/btsdio.c | 406
-rw-r--r--  drivers/bluetooth/btuart_cs.c | 5
-rw-r--r--  drivers/bluetooth/btusb.c | 564
-rw-r--r--  drivers/bluetooth/dtl1_cs.c | 5
-rw-r--r--  drivers/bluetooth/hci_bcsp.c | 3
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 8
-rw-r--r--  drivers/bluetooth/hci_ll.c | 531
-rw-r--r--  drivers/bluetooth/hci_uart.h | 8
-rw-r--r--  drivers/net/cpmac.c | 2
-rw-r--r--  drivers/net/niu.c | 34
-rw-r--r--  drivers/net/tg3.c | 95
-rw-r--r--  drivers/net/tg3.h | 11
-rw-r--r--  include/linux/net.h | 4
-rw-r--r--  include/linux/netdevice.h | 7
-rw-r--r--  include/linux/pci_ids.h | 1
-rw-r--r--  include/linux/skbuff.h | 15
-rw-r--r--  include/linux/socket.h | 1
-rw-r--r--  include/net/bluetooth/hci.h | 604
-rw-r--r--  include/net/bluetooth/hci_core.h | 13
-rw-r--r--  include/net/bluetooth/l2cap.h | 37
-rw-r--r--  net/bluetooth/hci_conn.c | 82
-rw-r--r--  net/bluetooth/hci_core.c | 70
-rw-r--r--  net/bluetooth/hci_event.c | 1651
-rw-r--r--  net/bluetooth/hci_sock.c | 2
-rw-r--r--  net/bluetooth/hci_sysfs.c | 37
-rw-r--r--  net/bluetooth/hidp/core.c | 2
-rw-r--r--  net/bluetooth/l2cap.c | 306
-rw-r--r--  net/bluetooth/rfcomm/core.c | 60
-rw-r--r--  net/bluetooth/rfcomm/tty.c | 25
-rw-r--r--  net/bluetooth/sco.c | 12
-rw-r--r--  net/core/dev.c | 6
-rw-r--r--  net/core/neighbour.c | 3
-rw-r--r--  net/core/netpoll.c | 4
-rw-r--r--  net/core/pktgen.c | 12
-rw-r--r--  net/dccp/diag.c | 1
-rw-r--r--  net/dccp/ipv4.c | 4
-rw-r--r--  net/dccp/ipv6.c | 4
-rw-r--r--  net/ipv4/inet_diag.c | 7
-rw-r--r--  net/ipv4/tcp_diag.c | 1
-rw-r--r--  net/ipv6/ah6.c | 1
-rw-r--r--  net/ipv6/esp6.c | 1
-rw-r--r--  net/sched/sch_teql.c | 6
47 files changed, 3689 insertions(+), 1635 deletions(-)
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index b9fbe6e7f9ae..075598e1c502 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -22,6 +22,30 @@ config BT_HCIUSB_SCO
 
 	  Say Y here to compile support for SCO over HCI USB.
 
+config BT_HCIBTUSB
+	tristate "HCI USB driver (alternate version)"
+	depends on USB && EXPERIMENTAL && BT_HCIUSB=n
+	help
+	  Bluetooth HCI USB driver.
+	  This driver is required if you want to use Bluetooth devices with
+	  USB interface.
+
+	  This driver is still experimental and has no SCO support.
+
+	  Say Y here to compile support for Bluetooth USB devices into the
+	  kernel or say M to compile it as module (btusb).
+
+config BT_HCIBTSDIO
+	tristate "HCI SDIO driver"
+	depends on MMC
+	help
+	  Bluetooth HCI SDIO driver.
+	  This driver is required if you want to use Bluetooth device with
+	  SDIO interface.
+
+	  Say Y here to compile support for Bluetooth SDIO devices into the
+	  kernel or say M to compile it as module (btsdio).
+
 config BT_HCIUART
 	tristate "HCI UART driver"
 	help
@@ -55,6 +79,17 @@ config BT_HCIUART_BCSP
 
 	  Say Y here to compile support for HCI BCSP protocol.
 
+config BT_HCIUART_LL
+	bool "HCILL protocol support"
+	depends on BT_HCIUART
+	help
+	  HCILL (HCI Low Level) is a serial protocol for communication
+	  between Bluetooth device and host. This protocol is required for
+	  serial Bluetooth devices that are based on Texas Instruments'
+	  BRF chips.
+
+	  Say Y here to compile support for HCILL protocol.
+
 config BT_HCIBCM203X
 	tristate "HCI BCM203x USB driver"
 	depends on USB
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 08c10e178e02..77444afbf107 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -13,7 +13,11 @@ obj-$(CONFIG_BT_HCIBT3C) += bt3c_cs.o
 obj-$(CONFIG_BT_HCIBLUECARD)	+= bluecard_cs.o
 obj-$(CONFIG_BT_HCIBTUART)	+= btuart_cs.o
 
+obj-$(CONFIG_BT_HCIBTUSB)	+= btusb.o
+obj-$(CONFIG_BT_HCIBTSDIO)	+= btsdio.o
+
 hci_uart-y				:= hci_ldisc.o
 hci_uart-$(CONFIG_BT_HCIUART_H4)	+= hci_h4.o
 hci_uart-$(CONFIG_BT_HCIUART_BCSP)	+= hci_bcsp.o
+hci_uart-$(CONFIG_BT_HCIUART_LL)	+= hci_ll.o
 hci_uart-objs				:= $(hci_uart-y)
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 851de4d5b7de..bcf57927b7a8 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -503,10 +503,7 @@ static irqreturn_t bluecard_interrupt(int irq, void *dev_inst)
 	unsigned int iobase;
 	unsigned char reg;
 
-	if (!info || !info->hdev) {
-		BT_ERR("Call of irq %d for unknown device", irq);
-		return IRQ_NONE;
-	}
+	BUG_ON(!info->hdev);
 
 	if (!test_bit(CARD_READY, &(info->hw_state)))
 		return IRQ_HANDLED;
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index e8ebd5d3de86..1375b5345a0a 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * Digianswer Bluetooth USB driver 3 * Digianswer Bluetooth USB driver
4 * 4 *
5 * Copyright (C) 2004-2005 Marcel Holtmann <marcel@holtmann.org> 5 * Copyright (C) 2004-2007 Marcel Holtmann <marcel@holtmann.org>
6 * 6 *
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
@@ -21,13 +21,14 @@
21 * 21 *
22 */ 22 */
23 23
24#include <linux/module.h>
25
26#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/module.h>
27#include <linux/init.h> 26#include <linux/init.h>
28#include <linux/slab.h> 27#include <linux/slab.h>
29#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/sched.h>
30#include <linux/errno.h> 30#include <linux/errno.h>
31#include <linux/skbuff.h>
31 32
32#include <linux/usb.h> 33#include <linux/usb.h>
33 34
@@ -39,7 +40,7 @@
39#define BT_DBG(D...) 40#define BT_DBG(D...)
40#endif 41#endif
41 42
42#define VERSION "0.8" 43#define VERSION "0.9"
43 44
44static int ignore = 0; 45static int ignore = 0;
45 46
@@ -52,393 +53,285 @@ static struct usb_device_id bpa10x_table[] = {
52 53
53MODULE_DEVICE_TABLE(usb, bpa10x_table); 54MODULE_DEVICE_TABLE(usb, bpa10x_table);
54 55
55#define BPA10X_CMD_EP 0x00
56#define BPA10X_EVT_EP 0x81
57#define BPA10X_TX_EP 0x02
58#define BPA10X_RX_EP 0x82
59
60#define BPA10X_CMD_BUF_SIZE 252
61#define BPA10X_EVT_BUF_SIZE 16
62#define BPA10X_TX_BUF_SIZE 384
63#define BPA10X_RX_BUF_SIZE 384
64
65struct bpa10x_data { 56struct bpa10x_data {
66 struct hci_dev *hdev; 57 struct hci_dev *hdev;
67 struct usb_device *udev; 58 struct usb_device *udev;
68 59
69 rwlock_t lock; 60 struct usb_anchor tx_anchor;
61 struct usb_anchor rx_anchor;
70 62
71 struct sk_buff_head cmd_queue; 63 struct sk_buff *rx_skb[2];
72 struct urb *cmd_urb;
73 struct urb *evt_urb;
74 struct sk_buff *evt_skb;
75 unsigned int evt_len;
76
77 struct sk_buff_head tx_queue;
78 struct urb *tx_urb;
79 struct urb *rx_urb;
80}; 64};
81 65
82#define HCI_VENDOR_HDR_SIZE 5 66#define HCI_VENDOR_HDR_SIZE 5
83 67
84struct hci_vendor_hdr { 68struct hci_vendor_hdr {
85 __u8 type; 69 __u8 type;
86 __le16 snum; 70 __le16 snum;
87 __le16 dlen; 71 __le16 dlen;
88} __attribute__ ((packed)); 72} __attribute__ ((packed));
89 73
90static void bpa10x_recv_bulk(struct bpa10x_data *data, unsigned char *buf, int count) 74static int bpa10x_recv(struct hci_dev *hdev, int queue, void *buf, int count)
91{ 75{
92 struct hci_acl_hdr *ah; 76 struct bpa10x_data *data = hdev->driver_data;
93 struct hci_sco_hdr *sh; 77
94 struct hci_vendor_hdr *vh; 78 BT_DBG("%s queue %d buffer %p count %d", hdev->name,
95 struct sk_buff *skb; 79 queue, buf, count);
96 int len; 80
81 if (queue < 0 || queue > 1)
82 return -EILSEQ;
83
84 hdev->stat.byte_rx += count;
97 85
98 while (count) { 86 while (count) {
99 switch (*buf++) { 87 struct sk_buff *skb = data->rx_skb[queue];
100 case HCI_ACLDATA_PKT: 88 struct { __u8 type; int expect; } *scb;
101 ah = (struct hci_acl_hdr *) buf; 89 int type, len = 0;
102 len = HCI_ACL_HDR_SIZE + __le16_to_cpu(ah->dlen);
103 skb = bt_skb_alloc(len, GFP_ATOMIC);
104 if (skb) {
105 memcpy(skb_put(skb, len), buf, len);
106 skb->dev = (void *) data->hdev;
107 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
108 hci_recv_frame(skb);
109 }
110 break;
111 90
112 case HCI_SCODATA_PKT: 91 if (!skb) {
113 sh = (struct hci_sco_hdr *) buf; 92 /* Start of the frame */
114 len = HCI_SCO_HDR_SIZE + sh->dlen; 93
115 skb = bt_skb_alloc(len, GFP_ATOMIC); 94 type = *((__u8 *) buf);
116 if (skb) { 95 count--; buf++;
117 memcpy(skb_put(skb, len), buf, len); 96
118 skb->dev = (void *) data->hdev; 97 switch (type) {
119 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT; 98 case HCI_EVENT_PKT:
120 hci_recv_frame(skb); 99 if (count >= HCI_EVENT_HDR_SIZE) {
100 struct hci_event_hdr *h = buf;
101 len = HCI_EVENT_HDR_SIZE + h->plen;
102 } else
103 return -EILSEQ;
104 break;
105
106 case HCI_ACLDATA_PKT:
107 if (count >= HCI_ACL_HDR_SIZE) {
108 struct hci_acl_hdr *h = buf;
109 len = HCI_ACL_HDR_SIZE +
110 __le16_to_cpu(h->dlen);
111 } else
112 return -EILSEQ;
113 break;
114
115 case HCI_SCODATA_PKT:
116 if (count >= HCI_SCO_HDR_SIZE) {
117 struct hci_sco_hdr *h = buf;
118 len = HCI_SCO_HDR_SIZE + h->dlen;
119 } else
120 return -EILSEQ;
121 break;
122
123 case HCI_VENDOR_PKT:
124 if (count >= HCI_VENDOR_HDR_SIZE) {
125 struct hci_vendor_hdr *h = buf;
126 len = HCI_VENDOR_HDR_SIZE +
127 __le16_to_cpu(h->dlen);
128 } else
129 return -EILSEQ;
130 break;
121 } 131 }
122 break;
123 132
124 case HCI_VENDOR_PKT:
125 vh = (struct hci_vendor_hdr *) buf;
126 len = HCI_VENDOR_HDR_SIZE + __le16_to_cpu(vh->dlen);
127 skb = bt_skb_alloc(len, GFP_ATOMIC); 133 skb = bt_skb_alloc(len, GFP_ATOMIC);
128 if (skb) { 134 if (!skb) {
129 memcpy(skb_put(skb, len), buf, len); 135 BT_ERR("%s no memory for packet", hdev->name);
130 skb->dev = (void *) data->hdev; 136 return -ENOMEM;
131 bt_cb(skb)->pkt_type = HCI_VENDOR_PKT;
132 hci_recv_frame(skb);
133 } 137 }
134 break;
135
136 default:
137 len = count - 1;
138 break;
139 }
140 138
141 buf += len; 139 skb->dev = (void *) hdev;
142 count -= (len + 1);
143 }
144}
145
146static int bpa10x_recv_event(struct bpa10x_data *data, unsigned char *buf, int size)
147{
148 BT_DBG("data %p buf %p size %d", data, buf, size);
149 140
150 if (data->evt_skb) { 141 data->rx_skb[queue] = skb;
151 struct sk_buff *skb = data->evt_skb;
152 142
153 memcpy(skb_put(skb, size), buf, size); 143 scb = (void *) skb->cb;
144 scb->type = type;
145 scb->expect = len;
146 } else {
147 /* Continuation */
154 148
155 if (skb->len == data->evt_len) { 149 scb = (void *) skb->cb;
156 data->evt_skb = NULL; 150 len = scb->expect;
157 data->evt_len = 0;
158 hci_recv_frame(skb);
159 }
160 } else {
161 struct sk_buff *skb;
162 struct hci_event_hdr *hdr;
163 unsigned char pkt_type;
164 int pkt_len = 0;
165
166 if (size < HCI_EVENT_HDR_SIZE + 1) {
167 BT_ERR("%s event packet block with size %d is too short",
168 data->hdev->name, size);
169 return -EILSEQ;
170 } 151 }
171 152
172 pkt_type = *buf++; 153 len = min(len, count);
173 size--;
174
175 if (pkt_type != HCI_EVENT_PKT) {
176 BT_ERR("%s unexpected event packet start byte 0x%02x",
177 data->hdev->name, pkt_type);
178 return -EPROTO;
179 }
180 154
181 hdr = (struct hci_event_hdr *) buf; 155 memcpy(skb_put(skb, len), buf, len);
182 pkt_len = HCI_EVENT_HDR_SIZE + hdr->plen;
183 156
184 skb = bt_skb_alloc(pkt_len, GFP_ATOMIC); 157 scb->expect -= len;
185 if (!skb) {
186 BT_ERR("%s no memory for new event packet",
187 data->hdev->name);
188 return -ENOMEM;
189 }
190 158
191 skb->dev = (void *) data->hdev; 159 if (scb->expect == 0) {
192 bt_cb(skb)->pkt_type = pkt_type; 160 /* Complete frame */
193 161
194 memcpy(skb_put(skb, size), buf, size); 162 data->rx_skb[queue] = NULL;
195 163
196 if (pkt_len == size) { 164 bt_cb(skb)->pkt_type = scb->type;
197 hci_recv_frame(skb); 165 hci_recv_frame(skb);
198 } else {
199 data->evt_skb = skb;
200 data->evt_len = pkt_len;
201 } 166 }
167
168 count -= len; buf += len;
202 } 169 }
203 170
204 return 0; 171 return 0;
205} 172}
206 173
207static void bpa10x_wakeup(struct bpa10x_data *data) 174static void bpa10x_tx_complete(struct urb *urb)
208{ 175{
209 struct urb *urb; 176 struct sk_buff *skb = urb->context;
210 struct sk_buff *skb; 177 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
211 int err;
212 178
213 BT_DBG("data %p", data); 179 BT_DBG("%s urb %p status %d count %d", hdev->name,
180 urb, urb->status, urb->actual_length);
214 181
215 urb = data->cmd_urb; 182 if (!test_bit(HCI_RUNNING, &hdev->flags))
216 if (urb->status == -EINPROGRESS) 183 goto done;
217 skb = NULL; 184
185 if (!urb->status)
186 hdev->stat.byte_tx += urb->transfer_buffer_length;
218 else 187 else
219 skb = skb_dequeue(&data->cmd_queue); 188 hdev->stat.err_tx++;
220 189
221 if (skb) { 190done:
222 struct usb_ctrlrequest *cr; 191 kfree(urb->setup_packet);
223 192
224 if (skb->len > BPA10X_CMD_BUF_SIZE) { 193 kfree_skb(skb);
225 BT_ERR("%s command packet with size %d is too big", 194}
226 data->hdev->name, skb->len); 195
227 kfree_skb(skb); 196static void bpa10x_rx_complete(struct urb *urb)
228 return; 197{
229 } 198 struct hci_dev *hdev = urb->context;
199 struct bpa10x_data *data = hdev->driver_data;
200 int err;
230 201
231 cr = (struct usb_ctrlrequest *) urb->setup_packet; 202 BT_DBG("%s urb %p status %d count %d", hdev->name,
232 cr->wLength = __cpu_to_le16(skb->len); 203 urb, urb->status, urb->actual_length);
233 204
234 skb_copy_from_linear_data(skb, urb->transfer_buffer, skb->len); 205 if (!test_bit(HCI_RUNNING, &hdev->flags))
235 urb->transfer_buffer_length = skb->len; 206 return;
236 207
237 err = usb_submit_urb(urb, GFP_ATOMIC); 208 if (urb->status == 0) {
238 if (err < 0 && err != -ENODEV) { 209 if (bpa10x_recv(hdev, usb_pipebulk(urb->pipe),
239 BT_ERR("%s submit failed for command urb %p with error %d", 210 urb->transfer_buffer,
240 data->hdev->name, urb, err); 211 urb->actual_length) < 0) {
241 skb_queue_head(&data->cmd_queue, skb); 212 BT_ERR("%s corrupted event packet", hdev->name);
242 } else 213 hdev->stat.err_rx++;
243 kfree_skb(skb); 214 }
244 } 215 }
245 216
246 urb = data->tx_urb; 217 usb_anchor_urb(urb, &data->rx_anchor);
247 if (urb->status == -EINPROGRESS) 218
248 skb = NULL; 219 err = usb_submit_urb(urb, GFP_ATOMIC);
249 else 220 if (err < 0) {
250 skb = skb_dequeue(&data->tx_queue); 221 BT_ERR("%s urb %p failed to resubmit (%d)",
251 222 hdev->name, urb, -err);
252 if (skb) { 223 usb_unanchor_urb(urb);
253 skb_copy_from_linear_data(skb, urb->transfer_buffer, skb->len);
254 urb->transfer_buffer_length = skb->len;
255
256 err = usb_submit_urb(urb, GFP_ATOMIC);
257 if (err < 0 && err != -ENODEV) {
258 BT_ERR("%s submit failed for command urb %p with error %d",
259 data->hdev->name, urb, err);
260 skb_queue_head(&data->tx_queue, skb);
261 } else
262 kfree_skb(skb);
263 } 224 }
264} 225}
265 226
266static void bpa10x_complete(struct urb *urb) 227static inline int bpa10x_submit_intr_urb(struct hci_dev *hdev)
267{ 228{
268 struct bpa10x_data *data = urb->context; 229 struct bpa10x_data *data = hdev->driver_data;
269 unsigned char *buf = urb->transfer_buffer; 230 struct urb *urb;
270 int err, count = urb->actual_length; 231 unsigned char *buf;
232 unsigned int pipe;
233 int err, size = 16;
271 234
272 BT_DBG("data %p urb %p buf %p count %d", data, urb, buf, count); 235 BT_DBG("%s", hdev->name);
273 236
274 read_lock(&data->lock); 237 urb = usb_alloc_urb(0, GFP_KERNEL);
238 if (!urb)
239 return -ENOMEM;
275 240
276 if (!test_bit(HCI_RUNNING, &data->hdev->flags)) 241 buf = kmalloc(size, GFP_KERNEL);
277 goto unlock; 242 if (!buf) {
243 usb_free_urb(urb);
244 return -ENOMEM;
245 }
278 246
279 if (urb->status < 0 || !count) 247 pipe = usb_rcvintpipe(data->udev, 0x81);
280 goto resubmit;
281 248
282 if (usb_pipein(urb->pipe)) { 249 usb_fill_int_urb(urb, data->udev, pipe, buf, size,
283 data->hdev->stat.byte_rx += count; 250 bpa10x_rx_complete, hdev, 1);
284 251
285 if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) 252 urb->transfer_flags |= URB_FREE_BUFFER;
286 bpa10x_recv_event(data, buf, count);
287 253
288 if (usb_pipetype(urb->pipe) == PIPE_BULK) 254 usb_anchor_urb(urb, &data->rx_anchor);
289 bpa10x_recv_bulk(data, buf, count);
290 } else {
291 data->hdev->stat.byte_tx += count;
292 255
293 bpa10x_wakeup(data); 256 err = usb_submit_urb(urb, GFP_KERNEL);
257 if (err < 0) {
258 BT_ERR("%s urb %p submission failed (%d)",
259 hdev->name, urb, -err);
260 usb_unanchor_urb(urb);
261 kfree(buf);
294 } 262 }
295 263
296resubmit: 264 usb_free_urb(urb);
297 if (usb_pipein(urb->pipe)) {
298 err = usb_submit_urb(urb, GFP_ATOMIC);
299 if (err < 0 && err != -ENODEV) {
300 BT_ERR("%s urb %p type %d resubmit status %d",
301 data->hdev->name, urb, usb_pipetype(urb->pipe), err);
302 }
303 }
304 265
305unlock: 266 return err;
306 read_unlock(&data->lock);
307} 267}
308 268
309static inline struct urb *bpa10x_alloc_urb(struct usb_device *udev, unsigned int pipe, 269static inline int bpa10x_submit_bulk_urb(struct hci_dev *hdev)
310 size_t size, gfp_t flags, void *data)
311{ 270{
271 struct bpa10x_data *data = hdev->driver_data;
312 struct urb *urb; 272 struct urb *urb;
313 struct usb_ctrlrequest *cr;
314 unsigned char *buf; 273 unsigned char *buf;
274 unsigned int pipe;
275 int err, size = 64;
315 276
316 BT_DBG("udev %p data %p", udev, data); 277 BT_DBG("%s", hdev->name);
317 278
318 urb = usb_alloc_urb(0, flags); 279 urb = usb_alloc_urb(0, GFP_KERNEL);
319 if (!urb) 280 if (!urb)
320 return NULL; 281 return -ENOMEM;
321 282
322 buf = kmalloc(size, flags); 283 buf = kmalloc(size, GFP_KERNEL);
323 if (!buf) { 284 if (!buf) {
324 usb_free_urb(urb); 285 usb_free_urb(urb);
325 return NULL; 286 return -ENOMEM;
326 } 287 }
327 288
328 switch (usb_pipetype(pipe)) { 289 pipe = usb_rcvbulkpipe(data->udev, 0x82);
329 case PIPE_CONTROL:
330 cr = kmalloc(sizeof(*cr), flags);
331 if (!cr) {
332 kfree(buf);
333 usb_free_urb(urb);
334 return NULL;
335 }
336 290
337 cr->bRequestType = USB_TYPE_VENDOR; 291 usb_fill_bulk_urb(urb, data->udev, pipe,
338 cr->bRequest = 0; 292 buf, size, bpa10x_rx_complete, hdev);
339 cr->wIndex = 0;
340 cr->wValue = 0;
341 cr->wLength = __cpu_to_le16(0);
342 293
343 usb_fill_control_urb(urb, udev, pipe, (void *) cr, buf, 0, bpa10x_complete, data); 294 urb->transfer_flags |= URB_FREE_BUFFER;
344 break;
345 295
346 case PIPE_INTERRUPT: 296 usb_anchor_urb(urb, &data->rx_anchor);
347 usb_fill_int_urb(urb, udev, pipe, buf, size, bpa10x_complete, data, 1);
348 break;
349 297
350 case PIPE_BULK: 298 err = usb_submit_urb(urb, GFP_KERNEL);
351 usb_fill_bulk_urb(urb, udev, pipe, buf, size, bpa10x_complete, data); 299 if (err < 0) {
352 break; 300 BT_ERR("%s urb %p submission failed (%d)",
353 301 hdev->name, urb, -err);
354 default: 302 usb_unanchor_urb(urb);
355 kfree(buf); 303 kfree(buf);
356 usb_free_urb(urb);
357 return NULL;
358 } 304 }
359 305
360 return urb;
361}
362
363static inline void bpa10x_free_urb(struct urb *urb)
364{
365 BT_DBG("urb %p", urb);
366
367 if (!urb)
368 return;
369
370 kfree(urb->setup_packet);
371 kfree(urb->transfer_buffer);
372
373 usb_free_urb(urb); 306 usb_free_urb(urb);
307
308 return err;
374} 309}
375 310
376static int bpa10x_open(struct hci_dev *hdev) 311static int bpa10x_open(struct hci_dev *hdev)
377{ 312{
378 struct bpa10x_data *data = hdev->driver_data; 313 struct bpa10x_data *data = hdev->driver_data;
379 struct usb_device *udev = data->udev;
380 unsigned long flags;
381 int err; 314 int err;
382 315
383 BT_DBG("hdev %p data %p", hdev, data); 316 BT_DBG("%s", hdev->name);
384 317
385 if (test_and_set_bit(HCI_RUNNING, &hdev->flags)) 318 if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
386 return 0; 319 return 0;
387 320
388 data->cmd_urb = bpa10x_alloc_urb(udev, usb_sndctrlpipe(udev, BPA10X_CMD_EP), 321 err = bpa10x_submit_intr_urb(hdev);
389 BPA10X_CMD_BUF_SIZE, GFP_KERNEL, data); 322 if (err < 0)
390 if (!data->cmd_urb) { 323 goto error;
391 err = -ENOMEM;
392 goto done;
393 }
394
395 data->evt_urb = bpa10x_alloc_urb(udev, usb_rcvintpipe(udev, BPA10X_EVT_EP),
396 BPA10X_EVT_BUF_SIZE, GFP_KERNEL, data);
397 if (!data->evt_urb) {
398 bpa10x_free_urb(data->cmd_urb);
399 err = -ENOMEM;
400 goto done;
401 }
402
403 data->rx_urb = bpa10x_alloc_urb(udev, usb_rcvbulkpipe(udev, BPA10X_RX_EP),
404 BPA10X_RX_BUF_SIZE, GFP_KERNEL, data);
405 if (!data->rx_urb) {
406 bpa10x_free_urb(data->evt_urb);
407 bpa10x_free_urb(data->cmd_urb);
408 err = -ENOMEM;
409 goto done;
410 }
411
412 data->tx_urb = bpa10x_alloc_urb(udev, usb_sndbulkpipe(udev, BPA10X_TX_EP),
413 BPA10X_TX_BUF_SIZE, GFP_KERNEL, data);
414 if (!data->rx_urb) {
415 bpa10x_free_urb(data->rx_urb);
416 bpa10x_free_urb(data->evt_urb);
417 bpa10x_free_urb(data->cmd_urb);
418 err = -ENOMEM;
419 goto done;
420 }
421 324
422 write_lock_irqsave(&data->lock, flags); 325 err = bpa10x_submit_bulk_urb(hdev);
326 if (err < 0)
327 goto error;
423 328
424 err = usb_submit_urb(data->evt_urb, GFP_ATOMIC); 329 return 0;
425 if (err < 0) {
426 BT_ERR("%s submit failed for event urb %p with error %d",
427 data->hdev->name, data->evt_urb, err);
428 } else {
429 err = usb_submit_urb(data->rx_urb, GFP_ATOMIC);
430 if (err < 0) {
431 BT_ERR("%s submit failed for rx urb %p with error %d",
432 data->hdev->name, data->evt_urb, err);
433 usb_kill_urb(data->evt_urb);
434 }
435 }
436 330
437 write_unlock_irqrestore(&data->lock, flags); 331error:
332 usb_kill_anchored_urbs(&data->rx_anchor);
438 333
439done: 334 clear_bit(HCI_RUNNING, &hdev->flags);
440 if (err < 0)
441 clear_bit(HCI_RUNNING, &hdev->flags);
442 335
443 return err; 336 return err;
444} 337}
@@ -446,27 +339,13 @@ done:
446static int bpa10x_close(struct hci_dev *hdev) 339static int bpa10x_close(struct hci_dev *hdev)
447{ 340{
448 struct bpa10x_data *data = hdev->driver_data; 341 struct bpa10x_data *data = hdev->driver_data;
449 unsigned long flags;
450 342
451 BT_DBG("hdev %p data %p", hdev, data); 343 BT_DBG("%s", hdev->name);
452 344
453 if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) 345 if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
454 return 0; 346 return 0;
455 347
456 write_lock_irqsave(&data->lock, flags); 348 usb_kill_anchored_urbs(&data->rx_anchor);
457
458 skb_queue_purge(&data->cmd_queue);
459 usb_kill_urb(data->cmd_urb);
460 usb_kill_urb(data->evt_urb);
461 usb_kill_urb(data->rx_urb);
462 usb_kill_urb(data->tx_urb);
463
464 write_unlock_irqrestore(&data->lock, flags);
465
466 bpa10x_free_urb(data->cmd_urb);
467 bpa10x_free_urb(data->evt_urb);
468 bpa10x_free_urb(data->rx_urb);
469 bpa10x_free_urb(data->tx_urb);
470 349
471 return 0; 350 return 0;
472} 351}
@@ -475,9 +354,9 @@ static int bpa10x_flush(struct hci_dev *hdev)
475{ 354{
476 struct bpa10x_data *data = hdev->driver_data; 355 struct bpa10x_data *data = hdev->driver_data;
477 356
478 BT_DBG("hdev %p data %p", hdev, data); 357 BT_DBG("%s", hdev->name);
479 358
480 skb_queue_purge(&data->cmd_queue); 359 usb_kill_anchored_urbs(&data->tx_anchor);
481 360
482 return 0; 361 return 0;
483} 362}
@@ -485,45 +364,78 @@ static int bpa10x_flush(struct hci_dev *hdev)
485static int bpa10x_send_frame(struct sk_buff *skb) 364static int bpa10x_send_frame(struct sk_buff *skb)
486{ 365{
487 struct hci_dev *hdev = (struct hci_dev *) skb->dev; 366 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
488 struct bpa10x_data *data; 367 struct bpa10x_data *data = hdev->driver_data;
489 368 struct usb_ctrlrequest *dr;
490 BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, bt_cb(skb)->pkt_type, skb->len); 369 struct urb *urb;
370 unsigned int pipe;
371 int err;
491 372
492 if (!hdev) { 373 BT_DBG("%s", hdev->name);
493 BT_ERR("Frame for unknown HCI device");
494 return -ENODEV;
495 }
496 374
497 if (!test_bit(HCI_RUNNING, &hdev->flags)) 375 if (!test_bit(HCI_RUNNING, &hdev->flags))
498 return -EBUSY; 376 return -EBUSY;
499 377
500 data = hdev->driver_data; 378 urb = usb_alloc_urb(0, GFP_ATOMIC);
379 if (!urb)
380 return -ENOMEM;
501 381
502 /* Prepend skb with frame type */ 382 /* Prepend skb with frame type */
503 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); 383 *skb_push(skb, 1) = bt_cb(skb)->pkt_type;
504 384
505 switch (bt_cb(skb)->pkt_type) { 385 switch (bt_cb(skb)->pkt_type) {
506 case HCI_COMMAND_PKT: 386 case HCI_COMMAND_PKT:
387 dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
388 if (!dr) {
389 usb_free_urb(urb);
390 return -ENOMEM;
391 }
392
393 dr->bRequestType = USB_TYPE_VENDOR;
394 dr->bRequest = 0;
395 dr->wIndex = 0;
396 dr->wValue = 0;
397 dr->wLength = __cpu_to_le16(skb->len);
398
399 pipe = usb_sndctrlpipe(data->udev, 0x00);
400
401 usb_fill_control_urb(urb, data->udev, pipe, (void *) dr,
402 skb->data, skb->len, bpa10x_tx_complete, skb);
403
507 hdev->stat.cmd_tx++; 404 hdev->stat.cmd_tx++;
508 skb_queue_tail(&data->cmd_queue, skb);
509 break; 405 break;
510 406
511 case HCI_ACLDATA_PKT: 407 case HCI_ACLDATA_PKT:
408 pipe = usb_sndbulkpipe(data->udev, 0x02);
409
410 usb_fill_bulk_urb(urb, data->udev, pipe,
411 skb->data, skb->len, bpa10x_tx_complete, skb);
412
512 hdev->stat.acl_tx++; 413 hdev->stat.acl_tx++;
513 skb_queue_tail(&data->tx_queue, skb);
514 break; 414 break;
515 415
516 case HCI_SCODATA_PKT: 416 case HCI_SCODATA_PKT:
417 pipe = usb_sndbulkpipe(data->udev, 0x02);
418
419 usb_fill_bulk_urb(urb, data->udev, pipe,
420 skb->data, skb->len, bpa10x_tx_complete, skb);
421
517 hdev->stat.sco_tx++; 422 hdev->stat.sco_tx++;
518 skb_queue_tail(&data->tx_queue, skb);
519 break; 423 break;
520 };
521 424
522 read_lock(&data->lock); 425 default:
426 return -EILSEQ;
427 }
428
429 usb_anchor_urb(urb, &data->tx_anchor);
523 430
524 bpa10x_wakeup(data); 431 err = usb_submit_urb(urb, GFP_ATOMIC);
432 if (err < 0) {
433 BT_ERR("%s urb %p submission failed", hdev->name, urb);
434 kfree(urb->setup_packet);
435 usb_unanchor_urb(urb);
436 }
525 437
526 read_unlock(&data->lock); 438 usb_free_urb(urb);
527 439
528 return 0; 440 return 0;
529} 441}
@@ -532,16 +444,17 @@ static void bpa10x_destruct(struct hci_dev *hdev)
532{ 444{
533 struct bpa10x_data *data = hdev->driver_data; 445 struct bpa10x_data *data = hdev->driver_data;
534 446
535 BT_DBG("hdev %p data %p", hdev, data); 447 BT_DBG("%s", hdev->name);
536 448
449 kfree(data->rx_skb[0]);
450 kfree(data->rx_skb[1]);
537 kfree(data); 451 kfree(data);
538} 452}
539 453
540static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *id) 454static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *id)
541{ 455{
542 struct usb_device *udev = interface_to_usbdev(intf);
543 struct hci_dev *hdev;
544 struct bpa10x_data *data; 456 struct bpa10x_data *data;
457 struct hci_dev *hdev;
545 int err; 458 int err;
546 459
547 BT_DBG("intf %p id %p", intf, id); 460 BT_DBG("intf %p id %p", intf, id);
@@ -549,48 +462,43 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
549 if (ignore) 462 if (ignore)
550 return -ENODEV; 463 return -ENODEV;
551 464
552 if (intf->cur_altsetting->desc.bInterfaceNumber > 0) 465 if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
553 return -ENODEV; 466 return -ENODEV;
554 467
555 data = kzalloc(sizeof(*data), GFP_KERNEL); 468 data = kzalloc(sizeof(*data), GFP_KERNEL);
556 if (!data) { 469 if (!data)
557 BT_ERR("Can't allocate data structure");
558 return -ENOMEM; 470 return -ENOMEM;
559 }
560
561 data->udev = udev;
562 471
563 rwlock_init(&data->lock); 472 data->udev = interface_to_usbdev(intf);
564 473
565 skb_queue_head_init(&data->cmd_queue); 474 init_usb_anchor(&data->tx_anchor);
566 skb_queue_head_init(&data->tx_queue); 475 init_usb_anchor(&data->rx_anchor);
567 476
568 hdev = hci_alloc_dev(); 477 hdev = hci_alloc_dev();
569 if (!hdev) { 478 if (!hdev) {
570 BT_ERR("Can't allocate HCI device");
571 kfree(data); 479 kfree(data);
572 return -ENOMEM; 480 return -ENOMEM;
573 } 481 }
574 482
575 data->hdev = hdev;
576
577 hdev->type = HCI_USB; 483 hdev->type = HCI_USB;
578 hdev->driver_data = data; 484 hdev->driver_data = data;
485
486 data->hdev = hdev;
487
579 SET_HCIDEV_DEV(hdev, &intf->dev); 488 SET_HCIDEV_DEV(hdev, &intf->dev);
580 489
581 hdev->open = bpa10x_open; 490 hdev->open = bpa10x_open;
582 hdev->close = bpa10x_close; 491 hdev->close = bpa10x_close;
583 hdev->flush = bpa10x_flush; 492 hdev->flush = bpa10x_flush;
584 hdev->send = bpa10x_send_frame; 493 hdev->send = bpa10x_send_frame;
585 hdev->destruct = bpa10x_destruct; 494 hdev->destruct = bpa10x_destruct;
586 495
587 hdev->owner = THIS_MODULE; 496 hdev->owner = THIS_MODULE;
588 497
589 err = hci_register_dev(hdev); 498 err = hci_register_dev(hdev);
590 if (err < 0) { 499 if (err < 0) {
591 BT_ERR("Can't register HCI device");
592 kfree(data);
593 hci_free_dev(hdev); 500 hci_free_dev(hdev);
501 kfree(data);
594 return err; 502 return err;
595 } 503 }
596 504
@@ -602,19 +510,17 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
602static void bpa10x_disconnect(struct usb_interface *intf) 510static void bpa10x_disconnect(struct usb_interface *intf)
603{ 511{
604 struct bpa10x_data *data = usb_get_intfdata(intf); 512 struct bpa10x_data *data = usb_get_intfdata(intf);
605 struct hci_dev *hdev = data->hdev;
606 513
607 BT_DBG("intf %p", intf); 514 BT_DBG("intf %p", intf);
608 515
609 if (!hdev) 516 if (!data)
610 return; 517 return;
611 518
612 usb_set_intfdata(intf, NULL); 519 usb_set_intfdata(intf, NULL);
613 520
614 if (hci_unregister_dev(hdev) < 0) 521 hci_unregister_dev(data->hdev);
615 BT_ERR("Can't unregister HCI device %s", hdev->name);
616 522
617 hci_free_dev(hdev); 523 hci_free_dev(data->hdev);
618} 524}
619 525
620static struct usb_driver bpa10x_driver = { 526static struct usb_driver bpa10x_driver = {
@@ -626,15 +532,9 @@ static struct usb_driver bpa10x_driver = {
626 532
627static int __init bpa10x_init(void) 533static int __init bpa10x_init(void)
628{ 534{
629 int err;
630
631 BT_INFO("Digianswer Bluetooth USB driver ver %s", VERSION); 535 BT_INFO("Digianswer Bluetooth USB driver ver %s", VERSION);
632 536
633 err = usb_register(&bpa10x_driver); 537 return usb_register(&bpa10x_driver);
634 if (err < 0)
635 BT_ERR("Failed to register USB driver");
636
637 return err;
638} 538}
639 539
640static void __exit bpa10x_exit(void) 540static void __exit bpa10x_exit(void)
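
The rewritten bpa10x_recv() above keeps one partially assembled skb per receive queue (interrupt and bulk) and stores the number of still-missing bytes in the skb control block, so a single HCI frame can arrive split across several USB transfers. The stand-alone C sketch below mirrors that reassembly logic under simplified assumptions: a plain buffer instead of an skb, and caller-supplied frame_len()/deliver() callbacks whose names are illustrative, not part of the driver.

#include <string.h>
#include <stddef.h>
#include <stdint.h>

struct reasm {
	uint8_t buf[65543];	/* largest frame we are willing to collect */
	size_t len;		/* bytes collected so far */
	size_t expect;		/* bytes still missing, 0 = idle */
};

/*
 * Feed one USB transfer worth of data.  frame_len() must be able to tell
 * the total frame size from the first bytes of the chunk (bpa10x_recv()
 * makes the same assumption and returns -EILSEQ when the header is split);
 * deliver() consumes a completed frame.
 */
static void reasm_feed(struct reasm *r, const uint8_t *buf, size_t count,
		       size_t (*frame_len)(const uint8_t *hdr),
		       void (*deliver)(const uint8_t *frame, size_t len))
{
	while (count) {
		size_t chunk;

		if (r->expect == 0) {		/* start of a new frame */
			r->len = 0;
			r->expect = frame_len(buf);
			if (r->expect == 0 || r->expect > sizeof(r->buf))
				return;		/* corrupted header, drop the rest */
		}

		chunk = count < r->expect ? count : r->expect;
		memcpy(r->buf + r->len, buf, chunk);
		r->len += chunk;
		r->expect -= chunk;
		buf += chunk;
		count -= chunk;

		if (r->expect == 0)		/* frame complete, hand it up */
			deliver(r->buf, r->len);
	}
}
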
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 39516074636b..a18f9b8c9e12 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -344,10 +344,7 @@ static irqreturn_t bt3c_interrupt(int irq, void *dev_inst)
 	unsigned int iobase;
 	int iir;
 
-	if (!info || !info->hdev) {
-		BT_ERR("Call of irq %d for unknown device", irq);
-		return IRQ_NONE;
-	}
+	BUG_ON(!info->hdev);
 
 	iobase = info->p_dev->io.BasePort1;
 
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
new file mode 100644
index 000000000000..b786f6187902
--- /dev/null
+++ b/drivers/bluetooth/btsdio.c
@@ -0,0 +1,406 @@
1/*
2 *
3 * Generic Bluetooth SDIO driver
4 *
5 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
6 * Copyright (C) 2007 Marcel Holtmann <marcel@holtmann.org>
7 *
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 */
24
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/init.h>
28#include <linux/slab.h>
29#include <linux/types.h>
30#include <linux/sched.h>
31#include <linux/errno.h>
32#include <linux/skbuff.h>
33
34#include <linux/mmc/sdio_ids.h>
35#include <linux/mmc/sdio_func.h>
36
37#include <net/bluetooth/bluetooth.h>
38#include <net/bluetooth/hci_core.h>
39
40#ifndef CONFIG_BT_HCIBTSDIO_DEBUG
41#undef BT_DBG
42#define BT_DBG(D...)
43#endif
44
45#define VERSION "0.1"
46
47static const struct sdio_device_id btsdio_table[] = {
48 /* Generic Bluetooth Type-A SDIO device */
49 { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_A) },
50
51 /* Generic Bluetooth Type-B SDIO device */
52 { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_B) },
53
54 { } /* Terminating entry */
55};
56
57MODULE_DEVICE_TABLE(sdio, btsdio_table);
58
59struct btsdio_data {
60 struct hci_dev *hdev;
61 struct sdio_func *func;
62
63 struct work_struct work;
64
65 struct sk_buff_head txq;
66};
67
68#define REG_RDAT 0x00 /* Receiver Data */
69#define REG_TDAT 0x00 /* Transmitter Data */
70#define REG_PC_RRT 0x10 /* Read Packet Control */
71#define REG_PC_WRT 0x11 /* Write Packet Control */
72#define REG_RTC_STAT 0x12 /* Retry Control Status */
73#define REG_RTC_SET 0x12 /* Retry Control Set */
74#define REG_INTRD 0x13 /* Interrupt Indication */
75#define REG_CL_INTRD 0x13 /* Interrupt Clear */
76#define REG_EN_INTRD 0x14 /* Interrupt Enable */
77#define REG_MD_STAT 0x20 /* Bluetooth Mode Status */
78
79static int btsdio_tx_packet(struct btsdio_data *data, struct sk_buff *skb)
80{
81 int err;
82
83 BT_DBG("%s", data->hdev->name);
84
85 /* Prepend Type-A header */
86 skb_push(skb, 4);
87 skb->data[0] = (skb->len & 0x0000ff);
88 skb->data[1] = (skb->len & 0x00ff00) >> 8;
89 skb->data[2] = (skb->len & 0xff0000) >> 16;
90 skb->data[3] = bt_cb(skb)->pkt_type;
91
92 err = sdio_writesb(data->func, REG_TDAT, skb->data, skb->len);
93 if (err < 0) {
94 sdio_writeb(data->func, 0x01, REG_PC_WRT, NULL);
95 return err;
96 }
97
98 data->hdev->stat.byte_tx += skb->len;
99
100 kfree_skb(skb);
101
102 return 0;
103}
104
105static void btsdio_work(struct work_struct *work)
106{
107 struct btsdio_data *data = container_of(work, struct btsdio_data, work);
108 struct sk_buff *skb;
109 int err;
110
111 BT_DBG("%s", data->hdev->name);
112
113 sdio_claim_host(data->func);
114
115 while ((skb = skb_dequeue(&data->txq))) {
116 err = btsdio_tx_packet(data, skb);
117 if (err < 0) {
118 data->hdev->stat.err_tx++;
119 skb_queue_head(&data->txq, skb);
120 break;
121 }
122 }
123
124 sdio_release_host(data->func);
125}
126
127static int btsdio_rx_packet(struct btsdio_data *data)
128{
129 u8 hdr[4] __attribute__ ((aligned(4)));
130 struct sk_buff *skb;
131 int err, len;
132
133 BT_DBG("%s", data->hdev->name);
134
135 err = sdio_readsb(data->func, hdr, REG_RDAT, 4);
136 if (err < 0)
137 return err;
138
139 len = hdr[0] | (hdr[1] << 8) | (hdr[2] << 16);
140 if (len < 4 || len > 65543)
141 return -EILSEQ;
142
143 skb = bt_skb_alloc(len - 4, GFP_KERNEL);
144 if (!skb) {
145 /* Out of memory. Prepare a read retry and just
146 * return with the expectation that the next time
147 * we're called we'll have more memory. */
148 return -ENOMEM;
149 }
150
151 skb_put(skb, len - 4);
152
153 err = sdio_readsb(data->func, skb->data, REG_RDAT, len - 4);
154 if (err < 0) {
155 kfree(skb);
156 return err;
157 }
158
159 data->hdev->stat.byte_rx += len;
160
161 skb->dev = (void *) data->hdev;
162 bt_cb(skb)->pkt_type = hdr[3];
163
164 err = hci_recv_frame(skb);
165 if (err < 0) {
166 kfree(skb);
167 return err;
168 }
169
170 sdio_writeb(data->func, 0x00, REG_PC_RRT, NULL);
171
172 return 0;
173}
174
175static void btsdio_interrupt(struct sdio_func *func)
176{
177 struct btsdio_data *data = sdio_get_drvdata(func);
178 int intrd;
179
180 BT_DBG("%s", data->hdev->name);
181
182 intrd = sdio_readb(func, REG_INTRD, NULL);
183 if (intrd & 0x01) {
184 sdio_writeb(func, 0x01, REG_CL_INTRD, NULL);
185
186 if (btsdio_rx_packet(data) < 0) {
187 data->hdev->stat.err_rx++;
188 sdio_writeb(data->func, 0x01, REG_PC_RRT, NULL);
189 }
190 }
191}
192
193static int btsdio_open(struct hci_dev *hdev)
194{
195 struct btsdio_data *data = hdev->driver_data;
196 int err;
197
198 BT_DBG("%s", hdev->name);
199
200 if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
201 return 0;
202
203 sdio_claim_host(data->func);
204
205 err = sdio_enable_func(data->func);
206 if (err < 0) {
207 clear_bit(HCI_RUNNING, &hdev->flags);
208 goto release;
209 }
210
211 err = sdio_claim_irq(data->func, btsdio_interrupt);
212 if (err < 0) {
213 sdio_disable_func(data->func);
214 clear_bit(HCI_RUNNING, &hdev->flags);
215 goto release;
216 }
217
218 if (data->func->class == SDIO_CLASS_BT_B)
219 sdio_writeb(data->func, 0x00, REG_MD_STAT, NULL);
220
221 sdio_writeb(data->func, 0x01, REG_EN_INTRD, NULL);
222
223release:
224 sdio_release_host(data->func);
225
226 return err;
227}
228
229static int btsdio_close(struct hci_dev *hdev)
230{
231 struct btsdio_data *data = hdev->driver_data;
232
233 BT_DBG("%s", hdev->name);
234
235 if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
236 return 0;
237
238 sdio_claim_host(data->func);
239
240 sdio_writeb(data->func, 0x00, REG_EN_INTRD, NULL);
241
242 sdio_release_irq(data->func);
243 sdio_disable_func(data->func);
244
245 sdio_release_host(data->func);
246
247 return 0;
248}
249
250static int btsdio_flush(struct hci_dev *hdev)
251{
252 struct btsdio_data *data = hdev->driver_data;
253
254 BT_DBG("%s", hdev->name);
255
256 skb_queue_purge(&data->txq);
257
258 return 0;
259}
260
261static int btsdio_send_frame(struct sk_buff *skb)
262{
263 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
264 struct btsdio_data *data = hdev->driver_data;
265
266 BT_DBG("%s", hdev->name);
267
268 if (!test_bit(HCI_RUNNING, &hdev->flags))
269 return -EBUSY;
270
271 switch (bt_cb(skb)->pkt_type) {
272 case HCI_COMMAND_PKT:
273 hdev->stat.cmd_tx++;
274 break;
275
276 case HCI_ACLDATA_PKT:
277 hdev->stat.acl_tx++;
278 break;
279
280 case HCI_SCODATA_PKT:
281 hdev->stat.sco_tx++;
282 break;
283
284 default:
285 return -EILSEQ;
286 }
287
288 skb_queue_tail(&data->txq, skb);
289
290 schedule_work(&data->work);
291
292 return 0;
293}
294
295static void btsdio_destruct(struct hci_dev *hdev)
296{
297 struct btsdio_data *data = hdev->driver_data;
298
299 BT_DBG("%s", hdev->name);
300
301 kfree(data);
302}
303
304static int btsdio_probe(struct sdio_func *func,
305 const struct sdio_device_id *id)
306{
307 struct btsdio_data *data;
308 struct hci_dev *hdev;
309 struct sdio_func_tuple *tuple = func->tuples;
310 int err;
311
312 BT_DBG("func %p id %p class 0x%04x", func, id, func->class);
313
314 while (tuple) {
315 BT_DBG("code 0x%x size %d", tuple->code, tuple->size);
316 tuple = tuple->next;
317 }
318
319 data = kzalloc(sizeof(*data), GFP_KERNEL);
320 if (!data)
321 return -ENOMEM;
322
323 data->func = func;
324
325 INIT_WORK(&data->work, btsdio_work);
326
327 skb_queue_head_init(&data->txq);
328
329 hdev = hci_alloc_dev();
330 if (!hdev) {
331 kfree(data);
332 return -ENOMEM;
333 }
334
335 hdev->type = HCI_SDIO;
336 hdev->driver_data = data;
337
338 data->hdev = hdev;
339
340 SET_HCIDEV_DEV(hdev, &func->dev);
341
342 hdev->open = btsdio_open;
343 hdev->close = btsdio_close;
344 hdev->flush = btsdio_flush;
345 hdev->send = btsdio_send_frame;
346 hdev->destruct = btsdio_destruct;
347
348 hdev->owner = THIS_MODULE;
349
350 err = hci_register_dev(hdev);
351 if (err < 0) {
352 hci_free_dev(hdev);
353 kfree(data);
354 return err;
355 }
356
357 sdio_set_drvdata(func, data);
358
359 return 0;
360}
361
362static void btsdio_remove(struct sdio_func *func)
363{
364 struct btsdio_data *data = sdio_get_drvdata(func);
365 struct hci_dev *hdev;
366
367 BT_DBG("func %p", func);
368
369 if (!data)
370 return;
371
372 hdev = data->hdev;
373
374 sdio_set_drvdata(func, NULL);
375
376 hci_unregister_dev(hdev);
377
378 hci_free_dev(hdev);
379}
380
381static struct sdio_driver btsdio_driver = {
382 .name = "btsdio",
383 .probe = btsdio_probe,
384 .remove = btsdio_remove,
385 .id_table = btsdio_table,
386};
387
388static int __init btsdio_init(void)
389{
390 BT_INFO("Generic Bluetooth SDIO driver ver %s", VERSION);
391
392 return sdio_register_driver(&btsdio_driver);
393}
394
395static void __exit btsdio_exit(void)
396{
397 sdio_unregister_driver(&btsdio_driver);
398}
399
400module_init(btsdio_init);
401module_exit(btsdio_exit);
402
403MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
404MODULE_DESCRIPTION("Generic Bluetooth SDIO driver ver " VERSION);
405MODULE_VERSION(VERSION);
406MODULE_LICENSE("GPL");
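
The btsdio driver above wraps every HCI packet in a 4-byte SDIO Type-A header: a 3-byte little-endian length that includes the header itself, followed by the HCI packet type byte. btsdio_tx_packet() builds the header in place and btsdio_rx_packet() parses and bounds-checks it. A minimal stand-alone sketch of that framing, with illustrative helper names only:

#include <stddef.h>
#include <stdint.h>

/* Encode a Type-A header in front of a payload of payload_len bytes. */
static void typea_encode_hdr(uint8_t hdr[4], size_t payload_len, uint8_t pkt_type)
{
	size_t total = payload_len + 4;		/* the length counts the header too */

	hdr[0] = total & 0xff;
	hdr[1] = (total >> 8) & 0xff;
	hdr[2] = (total >> 16) & 0xff;
	hdr[3] = pkt_type;
}

/* Decode a Type-A header; rejects the same bounds that btsdio_rx_packet() rejects. */
static int typea_decode_hdr(const uint8_t hdr[4], size_t *payload_len, uint8_t *pkt_type)
{
	size_t total = hdr[0] | (hdr[1] << 8) | ((size_t) hdr[2] << 16);

	if (total < 4 || total > 65543)
		return -1;

	*payload_len = total - 4;
	*pkt_type = hdr[3];
	return 0;
}
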
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index d7d2ea0d86a1..08f48d577aba 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -294,10 +294,7 @@ static irqreturn_t btuart_interrupt(int irq, void *dev_inst)
 	int boguscount = 0;
 	int iir, lsr;
 
-	if (!info || !info->hdev) {
-		BT_ERR("Call of irq %d for unknown device", irq);
-		return IRQ_NONE;
-	}
+	BUG_ON(!info->hdev);
 
 	iobase = info->p_dev->io.BasePort1;
 
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
new file mode 100644
index 000000000000..12e108914f19
--- /dev/null
+++ b/drivers/bluetooth/btusb.c
@@ -0,0 +1,564 @@
1/*
2 *
3 * Generic Bluetooth USB driver
4 *
5 * Copyright (C) 2005-2007 Marcel Holtmann <marcel@holtmann.org>
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/slab.h>
28#include <linux/types.h>
29#include <linux/sched.h>
30#include <linux/errno.h>
31#include <linux/skbuff.h>
32
33#include <linux/usb.h>
34
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
37
38//#define CONFIG_BT_HCIBTUSB_DEBUG
39#ifndef CONFIG_BT_HCIBTUSB_DEBUG
40#undef BT_DBG
41#define BT_DBG(D...)
42#endif
43
44#define VERSION "0.1"
45
46static struct usb_device_id btusb_table[] = {
47 /* Generic Bluetooth USB device */
48 { USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
49
50 { } /* Terminating entry */
51};
52
53MODULE_DEVICE_TABLE(usb, btusb_table);
54
55static struct usb_device_id blacklist_table[] = {
56 { } /* Terminating entry */
57};
58
59#define BTUSB_INTR_RUNNING 0
60#define BTUSB_BULK_RUNNING 1
61
62struct btusb_data {
63 struct hci_dev *hdev;
64 struct usb_device *udev;
65
66 spinlock_t lock;
67
68 unsigned long flags;
69
70 struct work_struct work;
71
72 struct usb_anchor tx_anchor;
73 struct usb_anchor intr_anchor;
74 struct usb_anchor bulk_anchor;
75
76 struct usb_endpoint_descriptor *intr_ep;
77 struct usb_endpoint_descriptor *bulk_tx_ep;
78 struct usb_endpoint_descriptor *bulk_rx_ep;
79};
80
81static void btusb_intr_complete(struct urb *urb)
82{
83 struct hci_dev *hdev = urb->context;
84 struct btusb_data *data = hdev->driver_data;
85 int err;
86
87 BT_DBG("%s urb %p status %d count %d", hdev->name,
88 urb, urb->status, urb->actual_length);
89
90 if (!test_bit(HCI_RUNNING, &hdev->flags))
91 return;
92
93 if (urb->status == 0) {
94 if (hci_recv_fragment(hdev, HCI_EVENT_PKT,
95 urb->transfer_buffer,
96 urb->actual_length) < 0) {
97 BT_ERR("%s corrupted event packet", hdev->name);
98 hdev->stat.err_rx++;
99 }
100 }
101
102 if (!test_bit(BTUSB_INTR_RUNNING, &data->flags))
103 return;
104
105 usb_anchor_urb(urb, &data->intr_anchor);
106
107 err = usb_submit_urb(urb, GFP_ATOMIC);
108 if (err < 0) {
109 BT_ERR("%s urb %p failed to resubmit (%d)",
110 hdev->name, urb, -err);
111 usb_unanchor_urb(urb);
112 }
113}
114
115static inline int btusb_submit_intr_urb(struct hci_dev *hdev)
116{
117 struct btusb_data *data = hdev->driver_data;
118 struct urb *urb;
119 unsigned char *buf;
120 unsigned int pipe;
121 int err, size;
122
123 BT_DBG("%s", hdev->name);
124
125 urb = usb_alloc_urb(0, GFP_ATOMIC);
126 if (!urb)
127 return -ENOMEM;
128
129 size = le16_to_cpu(data->intr_ep->wMaxPacketSize);
130
131 buf = kmalloc(size, GFP_ATOMIC);
132 if (!buf) {
133 usb_free_urb(urb);
134 return -ENOMEM;
135 }
136
137 pipe = usb_rcvintpipe(data->udev, data->intr_ep->bEndpointAddress);
138
139 usb_fill_int_urb(urb, data->udev, pipe, buf, size,
140 btusb_intr_complete, hdev,
141 data->intr_ep->bInterval);
142
143 urb->transfer_flags |= URB_FREE_BUFFER;
144
145 usb_anchor_urb(urb, &data->intr_anchor);
146
147 err = usb_submit_urb(urb, GFP_ATOMIC);
148 if (err < 0) {
149 BT_ERR("%s urb %p submission failed (%d)",
150 hdev->name, urb, -err);
151 usb_unanchor_urb(urb);
152 kfree(buf);
153 }
154
155 usb_free_urb(urb);
156
157 return err;
158}
159
160static void btusb_bulk_complete(struct urb *urb)
161{
162 struct hci_dev *hdev = urb->context;
163 struct btusb_data *data = hdev->driver_data;
164 int err;
165
166 BT_DBG("%s urb %p status %d count %d", hdev->name,
167 urb, urb->status, urb->actual_length);
168
169 if (!test_bit(HCI_RUNNING, &hdev->flags))
170 return;
171
172 if (urb->status == 0) {
173 if (hci_recv_fragment(hdev, HCI_ACLDATA_PKT,
174 urb->transfer_buffer,
175 urb->actual_length) < 0) {
176 BT_ERR("%s corrupted ACL packet", hdev->name);
177 hdev->stat.err_rx++;
178 }
179 }
180
181 if (!test_bit(BTUSB_BULK_RUNNING, &data->flags))
182 return;
183
184 usb_anchor_urb(urb, &data->bulk_anchor);
185
186 err = usb_submit_urb(urb, GFP_ATOMIC);
187 if (err < 0) {
188 BT_ERR("%s urb %p failed to resubmit (%d)",
189 hdev->name, urb, -err);
190 usb_unanchor_urb(urb);
191 }
192}
193
194static inline int btusb_submit_bulk_urb(struct hci_dev *hdev)
195{
196 struct btusb_data *data = hdev->driver_data;
197 struct urb *urb;
198 unsigned char *buf;
199 unsigned int pipe;
200 int err, size;
201
202 BT_DBG("%s", hdev->name);
203
204 urb = usb_alloc_urb(0, GFP_KERNEL);
205 if (!urb)
206 return -ENOMEM;
207
208 size = le16_to_cpu(data->bulk_rx_ep->wMaxPacketSize);
209
210 buf = kmalloc(size, GFP_KERNEL);
211 if (!buf) {
212 usb_free_urb(urb);
213 return -ENOMEM;
214 }
215
216 pipe = usb_rcvbulkpipe(data->udev, data->bulk_rx_ep->bEndpointAddress);
217
218 usb_fill_bulk_urb(urb, data->udev, pipe,
219 buf, size, btusb_bulk_complete, hdev);
220
221 urb->transfer_flags |= URB_FREE_BUFFER;
222
223 usb_anchor_urb(urb, &data->bulk_anchor);
224
225 err = usb_submit_urb(urb, GFP_KERNEL);
226 if (err < 0) {
227 BT_ERR("%s urb %p submission failed (%d)",
228 hdev->name, urb, -err);
229 usb_unanchor_urb(urb);
230 kfree(buf);
231 }
232
233 usb_free_urb(urb);
234
235 return err;
236}
237
238static void btusb_tx_complete(struct urb *urb)
239{
240 struct sk_buff *skb = urb->context;
241 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
242
243 BT_DBG("%s urb %p status %d count %d", hdev->name,
244 urb, urb->status, urb->actual_length);
245
246 if (!test_bit(HCI_RUNNING, &hdev->flags))
247 goto done;
248
249 if (!urb->status)
250 hdev->stat.byte_tx += urb->transfer_buffer_length;
251 else
252 hdev->stat.err_tx++;
253
254done:
255 kfree(urb->setup_packet);
256
257 kfree_skb(skb);
258}
259
260static int btusb_open(struct hci_dev *hdev)
261{
262 struct btusb_data *data = hdev->driver_data;
263 int err;
264
265 BT_DBG("%s", hdev->name);
266
267 if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
268 return 0;
269
270 if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
271 return 0;
272
273 err = btusb_submit_intr_urb(hdev);
274 if (err < 0) {
275 clear_bit(BTUSB_INTR_RUNNING, &hdev->flags);
276 clear_bit(HCI_RUNNING, &hdev->flags);
277 }
278
279 return err;
280}
281
282static int btusb_close(struct hci_dev *hdev)
283{
284 struct btusb_data *data = hdev->driver_data;
285
286 BT_DBG("%s", hdev->name);
287
288 if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
289 return 0;
290
291 clear_bit(BTUSB_BULK_RUNNING, &data->flags);
292 usb_kill_anchored_urbs(&data->bulk_anchor);
293
294 clear_bit(BTUSB_INTR_RUNNING, &data->flags);
295 usb_kill_anchored_urbs(&data->intr_anchor);
296
297 return 0;
298}
299
300static int btusb_flush(struct hci_dev *hdev)
301{
302 struct btusb_data *data = hdev->driver_data;
303
304 BT_DBG("%s", hdev->name);
305
306 usb_kill_anchored_urbs(&data->tx_anchor);
307
308 return 0;
309}
310
311static int btusb_send_frame(struct sk_buff *skb)
312{
313 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
314 struct btusb_data *data = hdev->driver_data;
315 struct usb_ctrlrequest *dr;
316 struct urb *urb;
317 unsigned int pipe;
318 int err;
319
320 BT_DBG("%s", hdev->name);
321
322 if (!test_bit(HCI_RUNNING, &hdev->flags))
323 return -EBUSY;
324
325 switch (bt_cb(skb)->pkt_type) {
326 case HCI_COMMAND_PKT:
327 urb = usb_alloc_urb(0, GFP_ATOMIC);
328 if (!urb)
329 return -ENOMEM;
330
331 dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
332 if (!dr) {
333 usb_free_urb(urb);
334 return -ENOMEM;
335 }
336
337 dr->bRequestType = USB_TYPE_CLASS;
338 dr->bRequest = 0;
339 dr->wIndex = 0;
340 dr->wValue = 0;
341 dr->wLength = __cpu_to_le16(skb->len);
342
343 pipe = usb_sndctrlpipe(data->udev, 0x00);
344
345 usb_fill_control_urb(urb, data->udev, pipe, (void *) dr,
346 skb->data, skb->len, btusb_tx_complete, skb);
347
348 hdev->stat.cmd_tx++;
349 break;
350
351 case HCI_ACLDATA_PKT:
352 urb = usb_alloc_urb(0, GFP_ATOMIC);
353 if (!urb)
354 return -ENOMEM;
355
356 pipe = usb_sndbulkpipe(data->udev,
357 data->bulk_tx_ep->bEndpointAddress);
358
359 usb_fill_bulk_urb(urb, data->udev, pipe,
360 skb->data, skb->len, btusb_tx_complete, skb);
361
362 hdev->stat.acl_tx++;
363 break;
364
365 case HCI_SCODATA_PKT:
366 hdev->stat.sco_tx++;
367 kfree_skb(skb);
368 return 0;
369
370 default:
371 return -EILSEQ;
372 }
373
374 usb_anchor_urb(urb, &data->tx_anchor);
375
376 err = usb_submit_urb(urb, GFP_ATOMIC);
377 if (err < 0) {
378 BT_ERR("%s urb %p submission failed", hdev->name, urb);
379 kfree(urb->setup_packet);
380 usb_unanchor_urb(urb);
381 }
382
383 usb_free_urb(urb);
384
385 return err;
386}
387
388static void btusb_destruct(struct hci_dev *hdev)
389{
390 struct btusb_data *data = hdev->driver_data;
391
392 BT_DBG("%s", hdev->name);
393
394 kfree(data);
395}
396
397static void btusb_notify(struct hci_dev *hdev, unsigned int evt)
398{
399 struct btusb_data *data = hdev->driver_data;
400
401 BT_DBG("%s evt %d", hdev->name, evt);
402
403 if (evt == HCI_NOTIFY_CONN_ADD || evt == HCI_NOTIFY_CONN_DEL)
404 schedule_work(&data->work);
405}
406
407static void btusb_work(struct work_struct *work)
408{
409 struct btusb_data *data = container_of(work, struct btusb_data, work);
410 struct hci_dev *hdev = data->hdev;
411
412 if (hdev->conn_hash.acl_num == 0) {
413 clear_bit(BTUSB_BULK_RUNNING, &data->flags);
414 usb_kill_anchored_urbs(&data->bulk_anchor);
415 return;
416 }
417
418 if (!test_and_set_bit(BTUSB_BULK_RUNNING, &data->flags)) {
419 if (btusb_submit_bulk_urb(hdev) < 0)
420 clear_bit(BTUSB_BULK_RUNNING, &data->flags);
421 else
422 btusb_submit_bulk_urb(hdev);
423 }
424}
425
426static int btusb_probe(struct usb_interface *intf,
427 const struct usb_device_id *id)
428{
429 struct usb_endpoint_descriptor *ep_desc;
430 struct btusb_data *data;
431 struct hci_dev *hdev;
432 int i, err;
433
434 BT_DBG("intf %p id %p", intf, id);
435
436 if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
437 return -ENODEV;
438
439 if (!id->driver_info) {
440 const struct usb_device_id *match;
441 match = usb_match_id(intf, blacklist_table);
442 if (match)
443 id = match;
444 }
445
446 data = kzalloc(sizeof(*data), GFP_KERNEL);
447 if (!data)
448 return -ENOMEM;
449
450 for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
451 ep_desc = &intf->cur_altsetting->endpoint[i].desc;
452
453 if (!data->intr_ep && usb_endpoint_is_int_in(ep_desc)) {
454 data->intr_ep = ep_desc;
455 continue;
456 }
457
458 if (!data->bulk_tx_ep && usb_endpoint_is_bulk_out(ep_desc)) {
459 data->bulk_tx_ep = ep_desc;
460 continue;
461 }
462
463 if (!data->bulk_rx_ep && usb_endpoint_is_bulk_in(ep_desc)) {
464 data->bulk_rx_ep = ep_desc;
465 continue;
466 }
467 }
468
469 if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) {
470 kfree(data);
471 return -ENODEV;
472 }
473
474 data->udev = interface_to_usbdev(intf);
475
476 spin_lock_init(&data->lock);
477
478 INIT_WORK(&data->work, btusb_work);
479
480 init_usb_anchor(&data->tx_anchor);
481 init_usb_anchor(&data->intr_anchor);
482 init_usb_anchor(&data->bulk_anchor);
483
484 hdev = hci_alloc_dev();
485 if (!hdev) {
486 kfree(data);
487 return -ENOMEM;
488 }
489
490 hdev->type = HCI_USB;
491 hdev->driver_data = data;
492
493 data->hdev = hdev;
494
495 SET_HCIDEV_DEV(hdev, &intf->dev);
496
497 hdev->open = btusb_open;
498 hdev->close = btusb_close;
499 hdev->flush = btusb_flush;
500 hdev->send = btusb_send_frame;
501 hdev->destruct = btusb_destruct;
502 hdev->notify = btusb_notify;
503
504 hdev->owner = THIS_MODULE;
505
506 set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks);
507
508 err = hci_register_dev(hdev);
509 if (err < 0) {
510 hci_free_dev(hdev);
511 kfree(data);
512 return err;
513 }
514
515 usb_set_intfdata(intf, data);
516
517 return 0;
518}
519
520static void btusb_disconnect(struct usb_interface *intf)
521{
522 struct btusb_data *data = usb_get_intfdata(intf);
523 struct hci_dev *hdev;
524
525 BT_DBG("intf %p", intf);
526
527 if (!data)
528 return;
529
530 hdev = data->hdev;
531
532 usb_set_intfdata(intf, NULL);
533
534 hci_unregister_dev(hdev);
535
536 hci_free_dev(hdev);
537}
538
539static struct usb_driver btusb_driver = {
540 .name = "btusb",
541 .probe = btusb_probe,
542 .disconnect = btusb_disconnect,
543 .id_table = btusb_table,
544};
545
546static int __init btusb_init(void)
547{
548 BT_INFO("Generic Bluetooth USB driver ver %s", VERSION);
549
550 return usb_register(&btusb_driver);
551}
552
553static void __exit btusb_exit(void)
554{
555 usb_deregister(&btusb_driver);
556}
557
558module_init(btusb_init);
559module_exit(btusb_exit);
560
561MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
562MODULE_DESCRIPTION("Generic Bluetooth USB driver ver " VERSION);
563MODULE_VERSION(VERSION);
564MODULE_LICENSE("GPL");
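
btusb_send_frame() above maps each HCI packet type onto the standard Bluetooth USB transport: commands become a class-specific control request on the default endpoint, ACL data goes out on the bulk-out endpoint, and SCO frames are counted but dropped since isochronous transfers are not supported yet; anything else is rejected with -EILSEQ. A compact model of that routing decision, with illustrative names (the real driver fills in a struct urb instead):

#include <stdint.h>

enum hci_pkt_type { HCI_CMD_PKT = 0x01, HCI_ACL_PKT = 0x02, HCI_SCO_PKT = 0x03 };

enum btusb_route {
	ROUTE_CTRL_EP0,		/* control request, bRequestType USB_TYPE_CLASS */
	ROUTE_BULK_OUT,		/* bulk transfer on the bulk-out endpoint */
	ROUTE_DROP,		/* accepted but discarded (SCO, no isoc support yet) */
	ROUTE_REJECT		/* unknown type, -EILSEQ in the driver */
};

static enum btusb_route btusb_route_for(enum hci_pkt_type type)
{
	switch (type) {
	case HCI_CMD_PKT:
		return ROUTE_CTRL_EP0;
	case HCI_ACL_PKT:
		return ROUTE_BULK_OUT;
	case HCI_SCO_PKT:
		return ROUTE_DROP;
	default:
		return ROUTE_REJECT;
	}
}
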
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 7f9c54b9964a..dae45cdf02b2 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -298,10 +298,7 @@ static irqreturn_t dtl1_interrupt(int irq, void *dev_inst)
 	int boguscount = 0;
 	int iir, lsr;
 
-	if (!info || !info->hdev) {
-		BT_ERR("Call of irq %d for unknown device", irq);
-		return IRQ_NONE;
-	}
+	BUG_ON(!info->hdev);
 
 	iobase = info->p_dev->io.BasePort1;
 
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index d66064ccb31c..696f7528f022 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -237,7 +237,8 @@ static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
 	if (hciextn && chan == 5) {
 		struct hci_command_hdr *hdr = (struct hci_command_hdr *) data;
 
-		if (hci_opcode_ogf(__le16_to_cpu(hdr->opcode)) == OGF_VENDOR_CMD) {
+		/* Vendor specific commands */
+		if (hci_opcode_ogf(__le16_to_cpu(hdr->opcode)) == 0x3f) {
 			u8 desc = *(data + HCI_COMMAND_HDR_SIZE);
 			if ((desc & 0xf0) == 0xc0) {
 				data += HCI_COMMAND_HDR_SIZE + 1;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 6055b9c0ac0f..e68821d074b0 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -549,7 +549,10 @@ static int __init hci_uart_init(void)
 #ifdef CONFIG_BT_HCIUART_BCSP
 	bcsp_init();
 #endif
-
+#ifdef CONFIG_BT_HCIUART_LL
+	ll_init();
+#endif
+
 	return 0;
 }
 
@@ -563,6 +566,9 @@ static void __exit hci_uart_exit(void)
563#ifdef CONFIG_BT_HCIUART_BCSP 566#ifdef CONFIG_BT_HCIUART_BCSP
564 bcsp_deinit(); 567 bcsp_deinit();
565#endif 568#endif
569#ifdef CONFIG_BT_HCIUART_LL
570 ll_deinit();
571#endif
566 572
567 /* Release tty registration of line discipline */ 573 /* Release tty registration of line discipline */
568 if ((err = tty_unregister_ldisc(N_HCI))) 574 if ((err = tty_unregister_ldisc(N_HCI)))
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
new file mode 100644
index 000000000000..8c3e62a17b4a
--- /dev/null
+++ b/drivers/bluetooth/hci_ll.c
@@ -0,0 +1,531 @@
1/*
2 * Texas Instruments' Bluetooth HCILL UART protocol
3 *
4 * HCILL (HCI Low Level) is a Texas Instruments power management
5 * protocol extension to H4.
6 *
7 * Copyright (C) 2007 Texas Instruments, Inc.
8 *
9 * Written by Ohad Ben-Cohen <ohad@bencohen.org>
10 *
11 * Acknowledgements:
12 * This file is based on hci_h4.c, which was written
13 * by Maxim Krasnyansky and Marcel Holtmann.
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License version 2
17 * as published by the Free Software Foundation
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 *
28 */
29
30#include <linux/module.h>
31#include <linux/kernel.h>
32
33#include <linux/init.h>
34#include <linux/sched.h>
35#include <linux/types.h>
36#include <linux/fcntl.h>
37#include <linux/interrupt.h>
38#include <linux/ptrace.h>
39#include <linux/poll.h>
40
41#include <linux/slab.h>
42#include <linux/tty.h>
43#include <linux/errno.h>
44#include <linux/string.h>
45#include <linux/signal.h>
46#include <linux/ioctl.h>
47#include <linux/skbuff.h>
48
49#include <net/bluetooth/bluetooth.h>
50#include <net/bluetooth/hci_core.h>
51
52#include "hci_uart.h"
53
54/* HCILL commands */
55#define HCILL_GO_TO_SLEEP_IND 0x30
56#define HCILL_GO_TO_SLEEP_ACK 0x31
57#define HCILL_WAKE_UP_IND 0x32
58#define HCILL_WAKE_UP_ACK 0x33
59
60/* HCILL receiver States */
61#define HCILL_W4_PACKET_TYPE 0
62#define HCILL_W4_EVENT_HDR 1
63#define HCILL_W4_ACL_HDR 2
64#define HCILL_W4_SCO_HDR 3
65#define HCILL_W4_DATA 4
66
67/* HCILL states */
68enum hcill_states_e {
69 HCILL_ASLEEP,
70 HCILL_ASLEEP_TO_AWAKE,
71 HCILL_AWAKE,
72 HCILL_AWAKE_TO_ASLEEP
73};
74
75struct hcill_cmd {
76 u8 cmd;
77} __attribute__((packed));
78
79struct ll_struct {
80 unsigned long rx_state;
81 unsigned long rx_count;
82 struct sk_buff *rx_skb;
83 struct sk_buff_head txq;
84 spinlock_t hcill_lock; /* HCILL state lock */
85 unsigned long hcill_state; /* HCILL power state */
86 struct sk_buff_head tx_wait_q; /* HCILL wait queue */
87};
88
89/*
90 * Builds and sends an HCILL command packet.
91 * These are very simple packets with only 1 cmd byte
92 */
93static int send_hcill_cmd(u8 cmd, struct hci_uart *hu)
94{
95 int err = 0;
96 struct sk_buff *skb = NULL;
97 struct ll_struct *ll = hu->priv;
98 struct hcill_cmd *hcill_packet;
99
100 BT_DBG("hu %p cmd 0x%x", hu, cmd);
101
102 /* allocate packet */
103 skb = bt_skb_alloc(1, GFP_ATOMIC);
104 if (!skb) {
105 BT_ERR("cannot allocate memory for HCILL packet");
106 err = -ENOMEM;
107 goto out;
108 }
109
110 /* prepare packet */
111 hcill_packet = (struct hcill_cmd *) skb_put(skb, 1);
112 hcill_packet->cmd = cmd;
113 skb->dev = (void *) hu->hdev;
114
115 /* send packet */
116 skb_queue_tail(&ll->txq, skb);
117out:
118 return err;
119}
120
121/* Initialize protocol */
122static int ll_open(struct hci_uart *hu)
123{
124 struct ll_struct *ll;
125
126 BT_DBG("hu %p", hu);
127
128 ll = kzalloc(sizeof(*ll), GFP_ATOMIC);
129 if (!ll)
130 return -ENOMEM;
131
132 skb_queue_head_init(&ll->txq);
133 skb_queue_head_init(&ll->tx_wait_q);
134 spin_lock_init(&ll->hcill_lock);
135
136 ll->hcill_state = HCILL_AWAKE;
137
138 hu->priv = ll;
139
140 return 0;
141}
142
143/* Flush protocol data */
144static int ll_flush(struct hci_uart *hu)
145{
146 struct ll_struct *ll = hu->priv;
147
148 BT_DBG("hu %p", hu);
149
150 skb_queue_purge(&ll->tx_wait_q);
151 skb_queue_purge(&ll->txq);
152
153 return 0;
154}
155
156/* Close protocol */
157static int ll_close(struct hci_uart *hu)
158{
159 struct ll_struct *ll = hu->priv;
160
161 BT_DBG("hu %p", hu);
162
163 skb_queue_purge(&ll->tx_wait_q);
164 skb_queue_purge(&ll->txq);
165
166 if (ll->rx_skb)
167 kfree_skb(ll->rx_skb);
168
169 hu->priv = NULL;
170
171 kfree(ll);
172
173 return 0;
174}
175
176/*
177 * Internal function that does the common work of the device wake-up process:
178 * 1. moves all pending packets (waiting in the tx_wait_q list) to the txq list.
179 * 2. changes the internal state to HCILL_AWAKE.
180 * Note: assumes the hcill_lock spinlock is held;
181 * must not be called otherwise!
182 */
183static void __ll_do_awake(struct ll_struct *ll)
184{
185 struct sk_buff *skb = NULL;
186
187 while ((skb = skb_dequeue(&ll->tx_wait_q)))
188 skb_queue_tail(&ll->txq, skb);
189
190 ll->hcill_state = HCILL_AWAKE;
191}
192
193/*
194 * Called upon a wake-up-indication from the device
195 */
196static void ll_device_want_to_wakeup(struct hci_uart *hu)
197{
198 unsigned long flags;
199 struct ll_struct *ll = hu->priv;
200
201 BT_DBG("hu %p", hu);
202
203 /* lock hcill state */
204 spin_lock_irqsave(&ll->hcill_lock, flags);
205
206 switch (ll->hcill_state) {
207 case HCILL_ASLEEP:
208 /* acknowledge device wake up */
209 if (send_hcill_cmd(HCILL_WAKE_UP_ACK, hu) < 0) {
210 BT_ERR("cannot acknowledge device wake up");
211 goto out;
212 }
213 break;
214 case HCILL_ASLEEP_TO_AWAKE:
215 /*
216 * this state means that a wake-up-indication
217 * is already on its way to the device,
218 * and will serve as the required wake-up-ack
219 */
220 BT_DBG("dual wake-up-indication");
221 break;
222 default:
223 /* any other state is illegal */
224 BT_ERR("received HCILL_WAKE_UP_IND in state %ld", ll->hcill_state);
225 break;
226 }
227
228 /* send pending packets and change state to HCILL_AWAKE */
229 __ll_do_awake(ll);
230
231out:
232 spin_unlock_irqrestore(&ll->hcill_lock, flags);
233
234 /* actually send the packets */
235 hci_uart_tx_wakeup(hu);
236}
237
238/*
239 * Called upon a sleep-indication from the device
240 */
241static void ll_device_want_to_sleep(struct hci_uart *hu)
242{
243 unsigned long flags;
244 struct ll_struct *ll = hu->priv;
245
246 BT_DBG("hu %p", hu);
247
248 /* lock hcill state */
249 spin_lock_irqsave(&ll->hcill_lock, flags);
250
251 /* sanity check */
252 if (ll->hcill_state != HCILL_AWAKE)
253 BT_ERR("ERR: HCILL_GO_TO_SLEEP_IND in state %ld", ll->hcill_state);
254
255 /* acknowledge device sleep */
256 if (send_hcill_cmd(HCILL_GO_TO_SLEEP_ACK, hu) < 0) {
257 BT_ERR("cannot acknowledge device sleep");
258 goto out;
259 }
260
261 /* update state */
262 ll->hcill_state = HCILL_ASLEEP;
263
264out:
265 spin_unlock_irqrestore(&ll->hcill_lock, flags);
266
267 /* actually send the sleep ack packet */
268 hci_uart_tx_wakeup(hu);
269}
270
271/*
272 * Called upon wake-up-acknowledgement from the device
273 */
274static void ll_device_woke_up(struct hci_uart *hu)
275{
276 unsigned long flags;
277 struct ll_struct *ll = hu->priv;
278
279 BT_DBG("hu %p", hu);
280
281 /* lock hcill state */
282 spin_lock_irqsave(&ll->hcill_lock, flags);
283
284 /* sanity check */
285 if (ll->hcill_state != HCILL_ASLEEP_TO_AWAKE)
286 BT_ERR("received HCILL_WAKE_UP_ACK in state %ld", ll->hcill_state);
287
288 /* send pending packets and change state to HCILL_AWAKE */
289 __ll_do_awake(ll);
290
291 spin_unlock_irqrestore(&ll->hcill_lock, flags);
292
293 /* actually send the packets */
294 hci_uart_tx_wakeup(hu);
295}
296
297/* Enqueue frame for transmission (padding, crc, etc.) */
298/* may be called from two simultaneous tasklets */
299static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb)
300{
301 unsigned long flags = 0;
302 struct ll_struct *ll = hu->priv;
303
304 BT_DBG("hu %p skb %p", hu, skb);
305
306 /* Prepend skb with frame type */
307 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
308
309 /* lock hcill state */
310 spin_lock_irqsave(&ll->hcill_lock, flags);
311
312 /* act according to current state */
313 switch (ll->hcill_state) {
314 case HCILL_AWAKE:
315 BT_DBG("device awake, sending normally");
316 skb_queue_tail(&ll->txq, skb);
317 break;
318 case HCILL_ASLEEP:
319 BT_DBG("device asleep, waking up and queueing packet");
320 /* save packet for later */
321 skb_queue_tail(&ll->tx_wait_q, skb);
322 /* awake device */
323 if (send_hcill_cmd(HCILL_WAKE_UP_IND, hu) < 0) {
324 BT_ERR("cannot wake up device");
325 break;
326 }
327 ll->hcill_state = HCILL_ASLEEP_TO_AWAKE;
328 break;
329 case HCILL_ASLEEP_TO_AWAKE:
330 BT_DBG("device waking up, queueing packet");
331 /* transient state; just keep packet for later */
332 skb_queue_tail(&ll->tx_wait_q, skb);
333 break;
334 default:
335 BT_ERR("illegal hcill state: %ld (losing packet)", ll->hcill_state);
336 kfree_skb(skb);
337 break;
338 }
339
340 spin_unlock_irqrestore(&ll->hcill_lock, flags);
341
342 return 0;
343}
344
345static inline int ll_check_data_len(struct ll_struct *ll, int len)
346{
347 register int room = skb_tailroom(ll->rx_skb);
348
349 BT_DBG("len %d room %d", len, room);
350
351 if (!len) {
352 hci_recv_frame(ll->rx_skb);
353 } else if (len > room) {
354 BT_ERR("Data length is too large");
355 kfree_skb(ll->rx_skb);
356 } else {
357 ll->rx_state = HCILL_W4_DATA;
358 ll->rx_count = len;
359 return len;
360 }
361
362 ll->rx_state = HCILL_W4_PACKET_TYPE;
363 ll->rx_skb = NULL;
364 ll->rx_count = 0;
365
366 return 0;
367}
368
369/* Recv data */
370static int ll_recv(struct hci_uart *hu, void *data, int count)
371{
372 struct ll_struct *ll = hu->priv;
373 register char *ptr;
374 struct hci_event_hdr *eh;
375 struct hci_acl_hdr *ah;
376 struct hci_sco_hdr *sh;
377 register int len, type, dlen;
378
379 BT_DBG("hu %p count %d rx_state %ld rx_count %ld", hu, count, ll->rx_state, ll->rx_count);
380
381 ptr = data;
382 while (count) {
383 if (ll->rx_count) {
384 len = min_t(unsigned int, ll->rx_count, count);
385 memcpy(skb_put(ll->rx_skb, len), ptr, len);
386 ll->rx_count -= len; count -= len; ptr += len;
387
388 if (ll->rx_count)
389 continue;
390
391 switch (ll->rx_state) {
392 case HCILL_W4_DATA:
393 BT_DBG("Complete data");
394 hci_recv_frame(ll->rx_skb);
395
396 ll->rx_state = HCILL_W4_PACKET_TYPE;
397 ll->rx_skb = NULL;
398 continue;
399
400 case HCILL_W4_EVENT_HDR:
401 eh = (struct hci_event_hdr *) ll->rx_skb->data;
402
403 BT_DBG("Event header: evt 0x%2.2x plen %d", eh->evt, eh->plen);
404
405 ll_check_data_len(ll, eh->plen);
406 continue;
407
408 case HCILL_W4_ACL_HDR:
409 ah = (struct hci_acl_hdr *) ll->rx_skb->data;
410 dlen = __le16_to_cpu(ah->dlen);
411
412 BT_DBG("ACL header: dlen %d", dlen);
413
414 ll_check_data_len(ll, dlen);
415 continue;
416
417 case HCILL_W4_SCO_HDR:
418 sh = (struct hci_sco_hdr *) ll->rx_skb->data;
419
420 BT_DBG("SCO header: dlen %d", sh->dlen);
421
422 ll_check_data_len(ll, sh->dlen);
423 continue;
424 }
425 }
426
427 /* HCILL_W4_PACKET_TYPE */
428 switch (*ptr) {
429 case HCI_EVENT_PKT:
430 BT_DBG("Event packet");
431 ll->rx_state = HCILL_W4_EVENT_HDR;
432 ll->rx_count = HCI_EVENT_HDR_SIZE;
433 type = HCI_EVENT_PKT;
434 break;
435
436 case HCI_ACLDATA_PKT:
437 BT_DBG("ACL packet");
438 ll->rx_state = HCILL_W4_ACL_HDR;
439 ll->rx_count = HCI_ACL_HDR_SIZE;
440 type = HCI_ACLDATA_PKT;
441 break;
442
443 case HCI_SCODATA_PKT:
444 BT_DBG("SCO packet");
445 ll->rx_state = HCILL_W4_SCO_HDR;
446 ll->rx_count = HCI_SCO_HDR_SIZE;
447 type = HCI_SCODATA_PKT;
448 break;
449
450 /* HCILL signals */
451 case HCILL_GO_TO_SLEEP_IND:
452 BT_DBG("HCILL_GO_TO_SLEEP_IND packet");
453 ll_device_want_to_sleep(hu);
454 ptr++; count--;
455 continue;
456
457 case HCILL_GO_TO_SLEEP_ACK:
458 /* shouldn't happen */
459 BT_ERR("received HCILL_GO_TO_SLEEP_ACK (in state %ld)", ll->hcill_state);
460 ptr++; count--;
461 continue;
462
463 case HCILL_WAKE_UP_IND:
464 BT_DBG("HCILL_WAKE_UP_IND packet");
465 ll_device_want_to_wakeup(hu);
466 ptr++; count--;
467 continue;
468
469 case HCILL_WAKE_UP_ACK:
470 BT_DBG("HCILL_WAKE_UP_ACK packet");
471 ll_device_woke_up(hu);
472 ptr++; count--;
473 continue;
474
475 default:
476 BT_ERR("Unknown HCI packet type %2.2x", (__u8)*ptr);
477 hu->hdev->stat.err_rx++;
478 ptr++; count--;
479 continue;
480 };
481
482 ptr++; count--;
483
484 /* Allocate packet */
485 ll->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
486 if (!ll->rx_skb) {
487 BT_ERR("Can't allocate mem for new packet");
488 ll->rx_state = HCILL_W4_PACKET_TYPE;
489 ll->rx_count = 0;
490 return 0;
491 }
492
493 ll->rx_skb->dev = (void *) hu->hdev;
494 bt_cb(ll->rx_skb)->pkt_type = type;
495 }
496
497 return count;
498}
499
500static struct sk_buff *ll_dequeue(struct hci_uart *hu)
501{
502 struct ll_struct *ll = hu->priv;
503 return skb_dequeue(&ll->txq);
504}
505
506static struct hci_uart_proto llp = {
507 .id = HCI_UART_LL,
508 .open = ll_open,
509 .close = ll_close,
510 .recv = ll_recv,
511 .enqueue = ll_enqueue,
512 .dequeue = ll_dequeue,
513 .flush = ll_flush,
514};
515
516int ll_init(void)
517{
518 int err = hci_uart_register_proto(&llp);
519
520 if (!err)
521 BT_INFO("HCILL protocol initialized");
522 else
523 BT_ERR("HCILL protocol registration failed");
524
525 return err;
526}
527
528int ll_deinit(void)
529{
530 return hci_uart_unregister_proto(&llp);
531}
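
A quick summary of the power handshake the new hci_ll.c implements, drawn from the hunks above and given here only for orientation:

/* HCILL power handshake (illustrative summary of the code above)                          */
/* host has data while HCILL_ASLEEP:  queue skb on tx_wait_q, send HCILL_WAKE_UP_IND,      */
/*                                    state -> HCILL_ASLEEP_TO_AWAKE                       */
/* receive HCILL_WAKE_UP_ACK:         move tx_wait_q to txq, state -> HCILL_AWAKE          */
/* receive HCILL_WAKE_UP_IND:         reply HCILL_WAKE_UP_ACK, move tx_wait_q to txq,      */
/*                                    state -> HCILL_AWAKE                                 */
/* receive HCILL_GO_TO_SLEEP_IND:     reply HCILL_GO_TO_SLEEP_ACK, state -> HCILL_ASLEEP   */
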
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 1097ce72393f..50113db06b9f 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -33,12 +33,13 @@
33#define HCIUARTGETDEVICE _IOR('U', 202, int) 33#define HCIUARTGETDEVICE _IOR('U', 202, int)
34 34
35/* UART protocols */ 35/* UART protocols */
36#define HCI_UART_MAX_PROTO 4 36#define HCI_UART_MAX_PROTO 5
37 37
38#define HCI_UART_H4 0 38#define HCI_UART_H4 0
39#define HCI_UART_BCSP 1 39#define HCI_UART_BCSP 1
40#define HCI_UART_3WIRE 2 40#define HCI_UART_3WIRE 2
41#define HCI_UART_H4DS 3 41#define HCI_UART_H4DS 3
42#define HCI_UART_LL 4
42 43
43struct hci_uart; 44struct hci_uart;
44 45
@@ -85,3 +86,8 @@ int h4_deinit(void);
85int bcsp_init(void); 86int bcsp_init(void);
86int bcsp_deinit(void); 87int bcsp_deinit(void);
87#endif 88#endif
89
90#ifdef CONFIG_BT_HCIUART_LL
91int ll_init(void);
92int ll_deinit(void);
93#endif
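
With HCI_UART_LL assigned protocol id 4, a user-space helper can bind an HCILL controller the same way hciattach binds the other UART protocols: put the tty into the N_HCI line discipline, then select the protocol with the HCIUARTSETPROTO ioctl. A minimal sketch, assuming the serial port is already configured (speed, flow control) for the BRF part; the device path is an assumption and the ioctl values are hard-coded as they appear in drivers/bluetooth/hci_uart.h rather than pulled from the header:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#define N_HCI            15                    /* from drivers/bluetooth/hci_uart.h */
#define HCIUARTSETPROTO  _IOW('U', 200, int)   /* ditto */
#define HCI_UART_LL      4                     /* added by this patch */

int main(void)
{
	int ldisc = N_HCI;
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);   /* assumed port */

	if (fd < 0)
		return 1;
	if (ioctl(fd, TIOCSETD, &ldisc) < 0)               /* attach hci_ldisc */
		return 1;
	if (ioctl(fd, HCIUARTSETPROTO, HCI_UART_LL) < 0)   /* ll_open() runs here */
		return 1;
	pause();                                           /* keep the hciX device alive */
	return 0;
}

The real hciattach tool additionally performs vendor-specific firmware and baud-rate setup, which the sketch omits.
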
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index ed53aaab4c02..ae419736158e 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -471,7 +471,7 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
471 } 471 }
472 472
473 len = max(skb->len, ETH_ZLEN); 473 len = max(skb->len, ETH_ZLEN);
474 queue = skb->queue_mapping; 474 queue = skb_get_queue_mapping(skb);
475#ifdef CONFIG_NETDEVICES_MULTIQUEUE 475#ifdef CONFIG_NETDEVICES_MULTIQUEUE
476 netif_stop_subqueue(dev, queue); 476 netif_stop_subqueue(dev, queue);
477#else 477#else
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index ed1f9bbb2a32..112ab079ce7d 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3103,31 +3103,12 @@ static int niu_alloc_tx_ring_info(struct niu *np,
3103 3103
3104static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp) 3104static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
3105{ 3105{
3106 u16 bs; 3106 u16 bss;
3107 3107
3108 switch (PAGE_SIZE) { 3108 bss = min(PAGE_SHIFT, 15);
3109 case 4 * 1024:
3110 case 8 * 1024:
3111 case 16 * 1024:
3112 case 32 * 1024:
3113 rp->rbr_block_size = PAGE_SIZE;
3114 rp->rbr_blocks_per_page = 1;
3115 break;
3116 3109
3117 default: 3110 rp->rbr_block_size = 1 << bss;
3118 if (PAGE_SIZE % (32 * 1024) == 0) 3111 rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
3119 bs = 32 * 1024;
3120 else if (PAGE_SIZE % (16 * 1024) == 0)
3121 bs = 16 * 1024;
3122 else if (PAGE_SIZE % (8 * 1024) == 0)
3123 bs = 8 * 1024;
3124 else if (PAGE_SIZE % (4 * 1024) == 0)
3125 bs = 4 * 1024;
3126 else
3127 BUG();
3128 rp->rbr_block_size = bs;
3129 rp->rbr_blocks_per_page = PAGE_SIZE / bs;
3130 }
3131 3112
3132 rp->rbr_sizes[0] = 256; 3113 rp->rbr_sizes[0] = 256;
3133 rp->rbr_sizes[1] = 1024; 3114 rp->rbr_sizes[1] = 1024;
@@ -7902,12 +7883,7 @@ static int __init niu_init(void)
7902{ 7883{
7903 int err = 0; 7884 int err = 0;
7904 7885
7905 BUILD_BUG_ON((PAGE_SIZE < 4 * 1024) || 7886 BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);
7906 ((PAGE_SIZE > 32 * 1024) &&
7907 ((PAGE_SIZE % (32 * 1024)) != 0 &&
7908 (PAGE_SIZE % (16 * 1024)) != 0 &&
7909 (PAGE_SIZE % (8 * 1024)) != 0 &&
7910 (PAGE_SIZE % (4 * 1024)) != 0)));
7911 7887
7912 niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT); 7888 niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
7913 7889
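
The niu_size_rbr() rewrite above replaces the page-size switch with shift arithmetic: bss is PAGE_SHIFT capped at 15, so the RBR block is min(PAGE_SIZE, 32 KB) and each page holds 2^(PAGE_SHIFT - bss) blocks. Two worked values, for illustration only:

/* PAGE_SHIFT = 12 (4 KB pages):  bss = min(12, 15) = 12 -> rbr_block_size = 4 KB,  rbr_blocks_per_page = 1 */
/* PAGE_SHIFT = 16 (64 KB pages): bss = min(16, 15) = 15 -> rbr_block_size = 32 KB, rbr_blocks_per_page = 2 */

This is also why the BUILD_BUG_ON is relaxed to PAGE_SIZE >= 4 KB: the arithmetic handles any larger power-of-two page without the old modulus checks.
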
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 014dc2cfe4d6..09440d783e65 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -64,8 +64,8 @@
64 64
65#define DRV_MODULE_NAME "tg3" 65#define DRV_MODULE_NAME "tg3"
66#define PFX DRV_MODULE_NAME ": " 66#define PFX DRV_MODULE_NAME ": "
67#define DRV_MODULE_VERSION "3.84" 67#define DRV_MODULE_VERSION "3.85"
68#define DRV_MODULE_RELDATE "October 12, 2007" 68#define DRV_MODULE_RELDATE "October 18, 2007"
69 69
70#define TG3_DEF_MAC_MODE 0 70#define TG3_DEF_MAC_MODE 0
71#define TG3_DEF_RX_MODE 0 71#define TG3_DEF_RX_MODE 0
@@ -200,6 +200,7 @@ static struct pci_device_id tg3_pci_tbl[] = {
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)}, 200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)}, 201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)}, 202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)}, 204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)}, 205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
205 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, 206 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
@@ -5028,10 +5029,7 @@ static int tg3_poll_fw(struct tg3 *tp)
5028/* Save PCI command register before chip reset */ 5029/* Save PCI command register before chip reset */
5029static void tg3_save_pci_state(struct tg3 *tp) 5030static void tg3_save_pci_state(struct tg3 *tp)
5030{ 5031{
5031 u32 val; 5032 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5032
5033 pci_read_config_dword(tp->pdev, TG3PCI_COMMAND, &val);
5034 tp->pci_cmd = val;
5035} 5033}
5036 5034
5037/* Restore PCI state after chip reset */ 5035/* Restore PCI state after chip reset */
@@ -5054,7 +5052,7 @@ static void tg3_restore_pci_state(struct tg3 *tp)
5054 PCISTATE_ALLOW_APE_SHMEM_WR; 5052 PCISTATE_ALLOW_APE_SHMEM_WR;
5055 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); 5053 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5056 5054
5057 pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd); 5055 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5058 5056
5059 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) { 5057 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5060 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, 5058 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
@@ -10820,9 +10818,24 @@ out_not_found:
10820 strcpy(tp->board_part_number, "none"); 10818 strcpy(tp->board_part_number, "none");
10821} 10819}
10822 10820
10821static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
10822{
10823 u32 val;
10824
10825 if (tg3_nvram_read_swab(tp, offset, &val) ||
10826 (val & 0xfc000000) != 0x0c000000 ||
10827 tg3_nvram_read_swab(tp, offset + 4, &val) ||
10828 val != 0)
10829 return 0;
10830
10831 return 1;
10832}
10833
10823static void __devinit tg3_read_fw_ver(struct tg3 *tp) 10834static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10824{ 10835{
10825 u32 val, offset, start; 10836 u32 val, offset, start;
10837 u32 ver_offset;
10838 int i, bcnt;
10826 10839
10827 if (tg3_nvram_read_swab(tp, 0, &val)) 10840 if (tg3_nvram_read_swab(tp, 0, &val))
10828 return; 10841 return;
@@ -10835,29 +10848,71 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10835 return; 10848 return;
10836 10849
10837 offset = tg3_nvram_logical_addr(tp, offset); 10850 offset = tg3_nvram_logical_addr(tp, offset);
10838 if (tg3_nvram_read_swab(tp, offset, &val)) 10851
10852 if (!tg3_fw_img_is_valid(tp, offset) ||
10853 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10839 return; 10854 return;
10840 10855
10841 if ((val & 0xfc000000) == 0x0c000000) { 10856 offset = offset + ver_offset - start;
10842 u32 ver_offset, addr; 10857 for (i = 0; i < 16; i += 4) {
10843 int i; 10858 if (tg3_nvram_read(tp, offset + i, &val))
10859 return;
10844 10860
10845 if (tg3_nvram_read_swab(tp, offset + 4, &val) || 10861 val = le32_to_cpu(val);
10846 tg3_nvram_read_swab(tp, offset + 8, &ver_offset)) 10862 memcpy(tp->fw_ver + i, &val, 4);
10863 }
10864
10865 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
10866 (tp->tg3_flags & TG3_FLG3_ENABLE_APE))
10867 return;
10868
10869 for (offset = TG3_NVM_DIR_START;
10870 offset < TG3_NVM_DIR_END;
10871 offset += TG3_NVM_DIRENT_SIZE) {
10872 if (tg3_nvram_read_swab(tp, offset, &val))
10847 return; 10873 return;
10848 10874
10849 if (val != 0) 10875 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
10876 break;
10877 }
10878
10879 if (offset == TG3_NVM_DIR_END)
10880 return;
10881
10882 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10883 start = 0x08000000;
10884 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
10885 return;
10886
10887 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
10888 !tg3_fw_img_is_valid(tp, offset) ||
10889 tg3_nvram_read_swab(tp, offset + 8, &val))
10890 return;
10891
10892 offset += val - start;
10893
10894 bcnt = strlen(tp->fw_ver);
10895
10896 tp->fw_ver[bcnt++] = ',';
10897 tp->fw_ver[bcnt++] = ' ';
10898
10899 for (i = 0; i < 4; i++) {
10900 if (tg3_nvram_read(tp, offset, &val))
10850 return; 10901 return;
10851 10902
10852 addr = offset + ver_offset - start; 10903 val = le32_to_cpu(val);
10853 for (i = 0; i < 16; i += 4) { 10904 offset += sizeof(val);
10854 if (tg3_nvram_read(tp, addr + i, &val))
10855 return;
10856 10905
10857 val = cpu_to_le32(val); 10906 if (bcnt > TG3_VER_SIZE - sizeof(val)) {
10858 memcpy(tp->fw_ver + i, &val, 4); 10907 memcpy(&tp->fw_ver[bcnt], &val, TG3_VER_SIZE - bcnt);
10908 break;
10859 } 10909 }
10910
10911 memcpy(&tp->fw_ver[bcnt], &val, sizeof(val));
10912 bcnt += sizeof(val);
10860 } 10913 }
10914
10915 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
10861} 10916}
10862 10917
10863static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); 10918static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 6dbdad2b8f88..1d5b2a3dd29d 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1540,6 +1540,12 @@
1540#define TG3_EEPROM_MAGIC_HW 0xabcd 1540#define TG3_EEPROM_MAGIC_HW 0xabcd
1541#define TG3_EEPROM_MAGIC_HW_MSK 0xffff 1541#define TG3_EEPROM_MAGIC_HW_MSK 0xffff
1542 1542
1543#define TG3_NVM_DIR_START 0x18
1544#define TG3_NVM_DIR_END 0x78
1545#define TG3_NVM_DIRENT_SIZE 0xc
1546#define TG3_NVM_DIRTYPE_SHIFT 24
1547#define TG3_NVM_DIRTYPE_ASFINI 1
1548
1543/* 32K Window into NIC internal memory */ 1549/* 32K Window into NIC internal memory */
1544#define NIC_SRAM_WIN_BASE 0x00008000 1550#define NIC_SRAM_WIN_BASE 0x00008000
1545 1551
@@ -2415,10 +2421,11 @@ struct tg3 {
2415#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */ 2421#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */
2416 2422
2417 u32 led_ctrl; 2423 u32 led_ctrl;
2418 u32 pci_cmd; 2424 u16 pci_cmd;
2419 2425
2420 char board_part_number[24]; 2426 char board_part_number[24];
2421 char fw_ver[16]; 2427#define TG3_VER_SIZE 32
2428 char fw_ver[TG3_VER_SIZE];
2422 u32 nic_sram_data_cfg; 2429 u32 nic_sram_data_cfg;
2423 u32 pci_clock_ctrl; 2430 u32 pci_clock_ctrl;
2424 struct pci_dev *pdev_peer; 2431 struct pci_dev *pdev_peer;
diff --git a/include/linux/net.h b/include/linux/net.h
index c136abce7ef6..dd79cdb8c4cf 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -313,6 +313,10 @@ static const struct proto_ops name##_ops = { \
313#define MODULE_ALIAS_NET_PF_PROTO(pf, proto) \ 313#define MODULE_ALIAS_NET_PF_PROTO(pf, proto) \
314 MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto)) 314 MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto))
315 315
316#define MODULE_ALIAS_NET_PF_PROTO_TYPE(pf, proto, type) \
317 MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \
318 "-type-" __stringify(type))
319
316#ifdef CONFIG_SYSCTL 320#ifdef CONFIG_SYSCTL
317#include <linux/sysctl.h> 321#include <linux/sysctl.h>
318extern ctl_table net_table[]; 322extern ctl_table net_table[];
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 6f85db3535e2..4a3f54e358e5 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -996,7 +996,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
996 * 996 *
997 * Check individual transmit queue of a device with multiple transmit queues. 997 * Check individual transmit queue of a device with multiple transmit queues.
998 */ 998 */
999static inline int netif_subqueue_stopped(const struct net_device *dev, 999static inline int __netif_subqueue_stopped(const struct net_device *dev,
1000 u16 queue_index) 1000 u16 queue_index)
1001{ 1001{
1002#ifdef CONFIG_NETDEVICES_MULTIQUEUE 1002#ifdef CONFIG_NETDEVICES_MULTIQUEUE
@@ -1007,6 +1007,11 @@ static inline int netif_subqueue_stopped(const struct net_device *dev,
1007#endif 1007#endif
1008} 1008}
1009 1009
1010static inline int netif_subqueue_stopped(const struct net_device *dev,
1011 struct sk_buff *skb)
1012{
1013 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
1014}
1010 1015
1011/** 1016/**
1012 * netif_wake_subqueue - allow sending packets on subqueue 1017 * netif_wake_subqueue - allow sending packets on subqueue
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index df948b44edad..4e10a074ca56 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1943,6 +1943,7 @@
1943#define PCI_DEVICE_ID_TIGON3_5720 0x1658 1943#define PCI_DEVICE_ID_TIGON3_5720 0x1658
1944#define PCI_DEVICE_ID_TIGON3_5721 0x1659 1944#define PCI_DEVICE_ID_TIGON3_5721 0x1659
1945#define PCI_DEVICE_ID_TIGON3_5722 0x165a 1945#define PCI_DEVICE_ID_TIGON3_5722 0x165a
1946#define PCI_DEVICE_ID_TIGON3_5723 0x165b
1946#define PCI_DEVICE_ID_TIGON3_5705M 0x165d 1947#define PCI_DEVICE_ID_TIGON3_5705M 0x165d
1947#define PCI_DEVICE_ID_TIGON3_5705M_2 0x165e 1948#define PCI_DEVICE_ID_TIGON3_5705M_2 0x165e
1948#define PCI_DEVICE_ID_TIGON3_5714 0x1668 1949#define PCI_DEVICE_ID_TIGON3_5714 0x1668
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f93f22b3d2ff..fd4e12f24270 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -41,8 +41,7 @@
41#define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES - 1)) & \ 41#define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES - 1)) & \
42 ~(SMP_CACHE_BYTES - 1)) 42 ~(SMP_CACHE_BYTES - 1))
43#define SKB_WITH_OVERHEAD(X) \ 43#define SKB_WITH_OVERHEAD(X) \
44 (((X) - sizeof(struct skb_shared_info)) & \ 44 ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
45 ~(SMP_CACHE_BYTES - 1))
46#define SKB_MAX_ORDER(X, ORDER) \ 45#define SKB_MAX_ORDER(X, ORDER) \
47 SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X)) 46 SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
48#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0)) 47#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
@@ -301,8 +300,9 @@ struct sk_buff {
301#endif 300#endif
302 301
303 int iif; 302 int iif;
303#ifdef CONFIG_NETDEVICES_MULTIQUEUE
304 __u16 queue_mapping; 304 __u16 queue_mapping;
305 305#endif
306#ifdef CONFIG_NET_SCHED 306#ifdef CONFIG_NET_SCHED
307 __u16 tc_index; /* traffic control index */ 307 __u16 tc_index; /* traffic control index */
308#ifdef CONFIG_NET_CLS_ACT 308#ifdef CONFIG_NET_CLS_ACT
@@ -1770,6 +1770,15 @@ static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
1770#endif 1770#endif
1771} 1771}
1772 1772
1773static inline u16 skb_get_queue_mapping(struct sk_buff *skb)
1774{
1775#ifdef CONFIG_NETDEVICES_MULTIQUEUE
1776 return skb->queue_mapping;
1777#else
1778 return 0;
1779#endif
1780}
1781
1773static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from) 1782static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
1774{ 1783{
1775#ifdef CONFIG_NETDEVICES_MULTIQUEUE 1784#ifdef CONFIG_NETDEVICES_MULTIQUEUE
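
Together with the __netif_subqueue_stopped()/netif_subqueue_stopped(dev, skb) split in netdevice.h, the new skb_get_queue_mapping() helper lets multiqueue-aware transmit paths compile away cleanly when CONFIG_NETDEVICES_MULTIQUEUE is off (it simply returns 0). A hypothetical driver xmit path in the style of the cpmac.c hunk, shown only to illustrate how the two helpers pair up; it is not part of this patch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 queue = skb_get_queue_mapping(skb);    /* 0 without CONFIG_NETDEVICES_MULTIQUEUE */

	if (__netif_subqueue_stopped(dev, queue))
		return NETDEV_TX_BUSY;             /* core requeues the skb */

	/* hand the skb to the hardware ring for 'queue' ... */
	dev_kfree_skb(skb);                        /* placeholder for the real DMA hand-off */
	return NETDEV_TX_OK;
}
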
diff --git a/include/linux/socket.h b/include/linux/socket.h
index f852e1afd65a..c22ef1c1afb8 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -291,6 +291,7 @@ struct ucred {
291#define SOL_TIPC 271 291#define SOL_TIPC 271
292#define SOL_RXRPC 272 292#define SOL_RXRPC 272
293#define SOL_PPPOL2TP 273 293#define SOL_PPPOL2TP 273
294#define SOL_BLUETOOTH 274
294 295
295/* IPX options */ 296/* IPX options */
296#define IPX_TYPE 1 297#define IPX_TYPE 1
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index ebfb96b41106..a8a9eb6af966 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -200,119 +200,18 @@ enum {
200#define HCI_LM_SECURE 0x0020 200#define HCI_LM_SECURE 0x0020
201 201
202/* ----- HCI Commands ---- */ 202/* ----- HCI Commands ---- */
203/* OGF & OCF values */ 203#define HCI_OP_INQUIRY 0x0401
204 204struct hci_cp_inquiry {
205/* Informational Parameters */ 205 __u8 lap[3];
206#define OGF_INFO_PARAM 0x04 206 __u8 length;
207 207 __u8 num_rsp;
208#define OCF_READ_LOCAL_VERSION 0x0001
209struct hci_rp_read_loc_version {
210 __u8 status;
211 __u8 hci_ver;
212 __le16 hci_rev;
213 __u8 lmp_ver;
214 __le16 manufacturer;
215 __le16 lmp_subver;
216} __attribute__ ((packed));
217
218#define OCF_READ_LOCAL_FEATURES 0x0003
219struct hci_rp_read_local_features {
220 __u8 status;
221 __u8 features[8];
222} __attribute__ ((packed));
223
224#define OCF_READ_BUFFER_SIZE 0x0005
225struct hci_rp_read_buffer_size {
226 __u8 status;
227 __le16 acl_mtu;
228 __u8 sco_mtu;
229 __le16 acl_max_pkt;
230 __le16 sco_max_pkt;
231} __attribute__ ((packed));
232
233#define OCF_READ_BD_ADDR 0x0009
234struct hci_rp_read_bd_addr {
235 __u8 status;
236 bdaddr_t bdaddr;
237} __attribute__ ((packed));
238
239/* Host Controller and Baseband */
240#define OGF_HOST_CTL 0x03
241#define OCF_RESET 0x0003
242#define OCF_READ_AUTH_ENABLE 0x001F
243#define OCF_WRITE_AUTH_ENABLE 0x0020
244 #define AUTH_DISABLED 0x00
245 #define AUTH_ENABLED 0x01
246
247#define OCF_READ_ENCRYPT_MODE 0x0021
248#define OCF_WRITE_ENCRYPT_MODE 0x0022
249 #define ENCRYPT_DISABLED 0x00
250 #define ENCRYPT_P2P 0x01
251 #define ENCRYPT_BOTH 0x02
252
253#define OCF_WRITE_CA_TIMEOUT 0x0016
254#define OCF_WRITE_PG_TIMEOUT 0x0018
255
256#define OCF_WRITE_SCAN_ENABLE 0x001A
257 #define SCAN_DISABLED 0x00
258 #define SCAN_INQUIRY 0x01
259 #define SCAN_PAGE 0x02
260
261#define OCF_SET_EVENT_FLT 0x0005
262struct hci_cp_set_event_flt {
263 __u8 flt_type;
264 __u8 cond_type;
265 __u8 condition[0];
266} __attribute__ ((packed));
267
268/* Filter types */
269#define HCI_FLT_CLEAR_ALL 0x00
270#define HCI_FLT_INQ_RESULT 0x01
271#define HCI_FLT_CONN_SETUP 0x02
272
273/* CONN_SETUP Condition types */
274#define HCI_CONN_SETUP_ALLOW_ALL 0x00
275#define HCI_CONN_SETUP_ALLOW_CLASS 0x01
276#define HCI_CONN_SETUP_ALLOW_BDADDR 0x02
277
278/* CONN_SETUP Conditions */
279#define HCI_CONN_SETUP_AUTO_OFF 0x01
280#define HCI_CONN_SETUP_AUTO_ON 0x02
281
282#define OCF_READ_CLASS_OF_DEV 0x0023
283struct hci_rp_read_dev_class {
284 __u8 status;
285 __u8 dev_class[3];
286} __attribute__ ((packed));
287
288#define OCF_WRITE_CLASS_OF_DEV 0x0024
289struct hci_cp_write_dev_class {
290 __u8 dev_class[3];
291} __attribute__ ((packed));
292
293#define OCF_READ_VOICE_SETTING 0x0025
294struct hci_rp_read_voice_setting {
295 __u8 status;
296 __le16 voice_setting;
297} __attribute__ ((packed)); 208} __attribute__ ((packed));
298 209
299#define OCF_WRITE_VOICE_SETTING 0x0026 210#define HCI_OP_INQUIRY_CANCEL 0x0402
300struct hci_cp_write_voice_setting {
301 __le16 voice_setting;
302} __attribute__ ((packed));
303 211
304#define OCF_HOST_BUFFER_SIZE 0x0033 212#define HCI_OP_EXIT_PERIODIC_INQ 0x0404
305struct hci_cp_host_buffer_size {
306 __le16 acl_mtu;
307 __u8 sco_mtu;
308 __le16 acl_max_pkt;
309 __le16 sco_max_pkt;
310} __attribute__ ((packed));
311
312/* Link Control */
313#define OGF_LINK_CTL 0x01
314 213
315#define OCF_CREATE_CONN 0x0005 214#define HCI_OP_CREATE_CONN 0x0405
316struct hci_cp_create_conn { 215struct hci_cp_create_conn {
317 bdaddr_t bdaddr; 216 bdaddr_t bdaddr;
318 __le16 pkt_type; 217 __le16 pkt_type;
@@ -322,105 +221,138 @@ struct hci_cp_create_conn {
322 __u8 role_switch; 221 __u8 role_switch;
323} __attribute__ ((packed)); 222} __attribute__ ((packed));
324 223
325#define OCF_CREATE_CONN_CANCEL 0x0008 224#define HCI_OP_DISCONNECT 0x0406
326struct hci_cp_create_conn_cancel {
327 bdaddr_t bdaddr;
328} __attribute__ ((packed));
329
330#define OCF_ACCEPT_CONN_REQ 0x0009
331struct hci_cp_accept_conn_req {
332 bdaddr_t bdaddr;
333 __u8 role;
334} __attribute__ ((packed));
335
336#define OCF_REJECT_CONN_REQ 0x000a
337struct hci_cp_reject_conn_req {
338 bdaddr_t bdaddr;
339 __u8 reason;
340} __attribute__ ((packed));
341
342#define OCF_DISCONNECT 0x0006
343struct hci_cp_disconnect { 225struct hci_cp_disconnect {
344 __le16 handle; 226 __le16 handle;
345 __u8 reason; 227 __u8 reason;
346} __attribute__ ((packed)); 228} __attribute__ ((packed));
347 229
348#define OCF_ADD_SCO 0x0007 230#define HCI_OP_ADD_SCO 0x0407
349struct hci_cp_add_sco { 231struct hci_cp_add_sco {
350 __le16 handle; 232 __le16 handle;
351 __le16 pkt_type; 233 __le16 pkt_type;
352} __attribute__ ((packed)); 234} __attribute__ ((packed));
353 235
354#define OCF_INQUIRY 0x0001 236#define HCI_OP_CREATE_CONN_CANCEL 0x0408
355struct hci_cp_inquiry { 237struct hci_cp_create_conn_cancel {
356 __u8 lap[3]; 238 bdaddr_t bdaddr;
357 __u8 length;
358 __u8 num_rsp;
359} __attribute__ ((packed)); 239} __attribute__ ((packed));
360 240
361#define OCF_INQUIRY_CANCEL 0x0002 241#define HCI_OP_ACCEPT_CONN_REQ 0x0409
242struct hci_cp_accept_conn_req {
243 bdaddr_t bdaddr;
244 __u8 role;
245} __attribute__ ((packed));
362 246
363#define OCF_EXIT_PERIODIC_INQ 0x0004 247#define HCI_OP_REJECT_CONN_REQ 0x040a
248struct hci_cp_reject_conn_req {
249 bdaddr_t bdaddr;
250 __u8 reason;
251} __attribute__ ((packed));
364 252
365#define OCF_LINK_KEY_REPLY 0x000B 253#define HCI_OP_LINK_KEY_REPLY 0x040b
366struct hci_cp_link_key_reply { 254struct hci_cp_link_key_reply {
367 bdaddr_t bdaddr; 255 bdaddr_t bdaddr;
368 __u8 link_key[16]; 256 __u8 link_key[16];
369} __attribute__ ((packed)); 257} __attribute__ ((packed));
370 258
371#define OCF_LINK_KEY_NEG_REPLY 0x000C 259#define HCI_OP_LINK_KEY_NEG_REPLY 0x040c
372struct hci_cp_link_key_neg_reply { 260struct hci_cp_link_key_neg_reply {
373 bdaddr_t bdaddr; 261 bdaddr_t bdaddr;
374} __attribute__ ((packed)); 262} __attribute__ ((packed));
375 263
376#define OCF_PIN_CODE_REPLY 0x000D 264#define HCI_OP_PIN_CODE_REPLY 0x040d
377struct hci_cp_pin_code_reply { 265struct hci_cp_pin_code_reply {
378 bdaddr_t bdaddr; 266 bdaddr_t bdaddr;
379 __u8 pin_len; 267 __u8 pin_len;
380 __u8 pin_code[16]; 268 __u8 pin_code[16];
381} __attribute__ ((packed)); 269} __attribute__ ((packed));
382 270
383#define OCF_PIN_CODE_NEG_REPLY 0x000E 271#define HCI_OP_PIN_CODE_NEG_REPLY 0x040e
384struct hci_cp_pin_code_neg_reply { 272struct hci_cp_pin_code_neg_reply {
385 bdaddr_t bdaddr; 273 bdaddr_t bdaddr;
386} __attribute__ ((packed)); 274} __attribute__ ((packed));
387 275
388#define OCF_CHANGE_CONN_PTYPE 0x000F 276#define HCI_OP_CHANGE_CONN_PTYPE 0x040f
389struct hci_cp_change_conn_ptype { 277struct hci_cp_change_conn_ptype {
390 __le16 handle; 278 __le16 handle;
391 __le16 pkt_type; 279 __le16 pkt_type;
392} __attribute__ ((packed)); 280} __attribute__ ((packed));
393 281
394#define OCF_AUTH_REQUESTED 0x0011 282#define HCI_OP_AUTH_REQUESTED 0x0411
395struct hci_cp_auth_requested { 283struct hci_cp_auth_requested {
396 __le16 handle; 284 __le16 handle;
397} __attribute__ ((packed)); 285} __attribute__ ((packed));
398 286
399#define OCF_SET_CONN_ENCRYPT 0x0013 287#define HCI_OP_SET_CONN_ENCRYPT 0x0413
400struct hci_cp_set_conn_encrypt { 288struct hci_cp_set_conn_encrypt {
401 __le16 handle; 289 __le16 handle;
402 __u8 encrypt; 290 __u8 encrypt;
403} __attribute__ ((packed)); 291} __attribute__ ((packed));
404 292
405#define OCF_CHANGE_CONN_LINK_KEY 0x0015 293#define HCI_OP_CHANGE_CONN_LINK_KEY 0x0415
406struct hci_cp_change_conn_link_key { 294struct hci_cp_change_conn_link_key {
407 __le16 handle; 295 __le16 handle;
408} __attribute__ ((packed)); 296} __attribute__ ((packed));
409 297
410#define OCF_READ_REMOTE_FEATURES 0x001B 298#define HCI_OP_REMOTE_NAME_REQ 0x0419
299struct hci_cp_remote_name_req {
300 bdaddr_t bdaddr;
301 __u8 pscan_rep_mode;
302 __u8 pscan_mode;
303 __le16 clock_offset;
304} __attribute__ ((packed));
305
306#define HCI_OP_REMOTE_NAME_REQ_CANCEL 0x041a
307struct hci_cp_remote_name_req_cancel {
308 bdaddr_t bdaddr;
309} __attribute__ ((packed));
310
311#define HCI_OP_READ_REMOTE_FEATURES 0x041b
411struct hci_cp_read_remote_features { 312struct hci_cp_read_remote_features {
412 __le16 handle; 313 __le16 handle;
413} __attribute__ ((packed)); 314} __attribute__ ((packed));
414 315
415#define OCF_READ_REMOTE_VERSION 0x001D 316#define HCI_OP_READ_REMOTE_EXT_FEATURES 0x041c
317struct hci_cp_read_remote_ext_features {
318 __le16 handle;
319 __u8 page;
320} __attribute__ ((packed));
321
322#define HCI_OP_READ_REMOTE_VERSION 0x041d
416struct hci_cp_read_remote_version { 323struct hci_cp_read_remote_version {
417 __le16 handle; 324 __le16 handle;
418} __attribute__ ((packed)); 325} __attribute__ ((packed));
419 326
420/* Link Policy */ 327#define HCI_OP_SETUP_SYNC_CONN 0x0428
421#define OGF_LINK_POLICY 0x02 328struct hci_cp_setup_sync_conn {
329 __le16 handle;
330 __le32 tx_bandwidth;
331 __le32 rx_bandwidth;
332 __le16 max_latency;
333 __le16 voice_setting;
334 __u8 retrans_effort;
335 __le16 pkt_type;
336} __attribute__ ((packed));
422 337
423#define OCF_SNIFF_MODE 0x0003 338#define HCI_OP_ACCEPT_SYNC_CONN_REQ 0x0429
339struct hci_cp_accept_sync_conn_req {
340 bdaddr_t bdaddr;
341 __le32 tx_bandwidth;
342 __le32 rx_bandwidth;
343 __le16 max_latency;
344 __le16 content_format;
345 __u8 retrans_effort;
346 __le16 pkt_type;
347} __attribute__ ((packed));
348
349#define HCI_OP_REJECT_SYNC_CONN_REQ 0x042a
350struct hci_cp_reject_sync_conn_req {
351 bdaddr_t bdaddr;
352 __u8 reason;
353} __attribute__ ((packed));
354
355#define HCI_OP_SNIFF_MODE 0x0803
424struct hci_cp_sniff_mode { 356struct hci_cp_sniff_mode {
425 __le16 handle; 357 __le16 handle;
426 __le16 max_interval; 358 __le16 max_interval;
@@ -429,12 +361,12 @@ struct hci_cp_sniff_mode {
429 __le16 timeout; 361 __le16 timeout;
430} __attribute__ ((packed)); 362} __attribute__ ((packed));
431 363
432#define OCF_EXIT_SNIFF_MODE 0x0004 364#define HCI_OP_EXIT_SNIFF_MODE 0x0804
433struct hci_cp_exit_sniff_mode { 365struct hci_cp_exit_sniff_mode {
434 __le16 handle; 366 __le16 handle;
435} __attribute__ ((packed)); 367} __attribute__ ((packed));
436 368
437#define OCF_ROLE_DISCOVERY 0x0009 369#define HCI_OP_ROLE_DISCOVERY 0x0809
438struct hci_cp_role_discovery { 370struct hci_cp_role_discovery {
439 __le16 handle; 371 __le16 handle;
440} __attribute__ ((packed)); 372} __attribute__ ((packed));
@@ -444,7 +376,13 @@ struct hci_rp_role_discovery {
444 __u8 role; 376 __u8 role;
445} __attribute__ ((packed)); 377} __attribute__ ((packed));
446 378
447#define OCF_READ_LINK_POLICY 0x000C 379#define HCI_OP_SWITCH_ROLE 0x080b
380struct hci_cp_switch_role {
381 bdaddr_t bdaddr;
382 __u8 role;
383} __attribute__ ((packed));
384
385#define HCI_OP_READ_LINK_POLICY 0x080c
448struct hci_cp_read_link_policy { 386struct hci_cp_read_link_policy {
449 __le16 handle; 387 __le16 handle;
450} __attribute__ ((packed)); 388} __attribute__ ((packed));
@@ -454,13 +392,7 @@ struct hci_rp_read_link_policy {
454 __le16 policy; 392 __le16 policy;
455} __attribute__ ((packed)); 393} __attribute__ ((packed));
456 394
457#define OCF_SWITCH_ROLE 0x000B 395#define HCI_OP_WRITE_LINK_POLICY 0x080d
458struct hci_cp_switch_role {
459 bdaddr_t bdaddr;
460 __u8 role;
461} __attribute__ ((packed));
462
463#define OCF_WRITE_LINK_POLICY 0x000D
464struct hci_cp_write_link_policy { 396struct hci_cp_write_link_policy {
465 __le16 handle; 397 __le16 handle;
466 __le16 policy; 398 __le16 policy;
@@ -470,7 +402,7 @@ struct hci_rp_write_link_policy {
470 __le16 handle; 402 __le16 handle;
471} __attribute__ ((packed)); 403} __attribute__ ((packed));
472 404
473#define OCF_SNIFF_SUBRATE 0x0011 405#define HCI_OP_SNIFF_SUBRATE 0x0811
474struct hci_cp_sniff_subrate { 406struct hci_cp_sniff_subrate {
475 __le16 handle; 407 __le16 handle;
476 __le16 max_latency; 408 __le16 max_latency;
@@ -478,59 +410,156 @@ struct hci_cp_sniff_subrate {
478 __le16 min_local_timeout; 410 __le16 min_local_timeout;
479} __attribute__ ((packed)); 411} __attribute__ ((packed));
480 412
481/* Status params */ 413#define HCI_OP_SET_EVENT_MASK 0x0c01
482#define OGF_STATUS_PARAM 0x05 414struct hci_cp_set_event_mask {
415 __u8 mask[8];
416} __attribute__ ((packed));
483 417
484/* Testing commands */ 418#define HCI_OP_RESET 0x0c03
485#define OGF_TESTING_CMD 0x3E
486 419
487/* Vendor specific commands */ 420#define HCI_OP_SET_EVENT_FLT 0x0c05
488#define OGF_VENDOR_CMD 0x3F 421struct hci_cp_set_event_flt {
422 __u8 flt_type;
423 __u8 cond_type;
424 __u8 condition[0];
425} __attribute__ ((packed));
489 426
490/* ---- HCI Events ---- */ 427/* Filter types */
491#define HCI_EV_INQUIRY_COMPLETE 0x01 428#define HCI_FLT_CLEAR_ALL 0x00
429#define HCI_FLT_INQ_RESULT 0x01
430#define HCI_FLT_CONN_SETUP 0x02
492 431
493#define HCI_EV_INQUIRY_RESULT 0x02 432/* CONN_SETUP Condition types */
494struct inquiry_info { 433#define HCI_CONN_SETUP_ALLOW_ALL 0x00
495 bdaddr_t bdaddr; 434#define HCI_CONN_SETUP_ALLOW_CLASS 0x01
496 __u8 pscan_rep_mode; 435#define HCI_CONN_SETUP_ALLOW_BDADDR 0x02
497 __u8 pscan_period_mode; 436
498 __u8 pscan_mode; 437/* CONN_SETUP Conditions */
438#define HCI_CONN_SETUP_AUTO_OFF 0x01
439#define HCI_CONN_SETUP_AUTO_ON 0x02
440
441#define HCI_OP_WRITE_LOCAL_NAME 0x0c13
442struct hci_cp_write_local_name {
443 __u8 name[248];
444} __attribute__ ((packed));
445
446#define HCI_OP_READ_LOCAL_NAME 0x0c14
447struct hci_rp_read_local_name {
448 __u8 status;
449 __u8 name[248];
450} __attribute__ ((packed));
451
452#define HCI_OP_WRITE_CA_TIMEOUT 0x0c16
453
454#define HCI_OP_WRITE_PG_TIMEOUT 0x0c18
455
456#define HCI_OP_WRITE_SCAN_ENABLE 0x0c1a
457 #define SCAN_DISABLED 0x00
458 #define SCAN_INQUIRY 0x01
459 #define SCAN_PAGE 0x02
460
461#define HCI_OP_READ_AUTH_ENABLE 0x0c1f
462
463#define HCI_OP_WRITE_AUTH_ENABLE 0x0c20
464 #define AUTH_DISABLED 0x00
465 #define AUTH_ENABLED 0x01
466
467#define HCI_OP_READ_ENCRYPT_MODE 0x0c21
468
469#define HCI_OP_WRITE_ENCRYPT_MODE 0x0c22
470 #define ENCRYPT_DISABLED 0x00
471 #define ENCRYPT_P2P 0x01
472 #define ENCRYPT_BOTH 0x02
473
474#define HCI_OP_READ_CLASS_OF_DEV 0x0c23
475struct hci_rp_read_class_of_dev {
476 __u8 status;
499 __u8 dev_class[3]; 477 __u8 dev_class[3];
500 __le16 clock_offset;
501} __attribute__ ((packed)); 478} __attribute__ ((packed));
502 479
503#define HCI_EV_INQUIRY_RESULT_WITH_RSSI 0x22 480#define HCI_OP_WRITE_CLASS_OF_DEV 0x0c24
504struct inquiry_info_with_rssi { 481struct hci_cp_write_class_of_dev {
505 bdaddr_t bdaddr;
506 __u8 pscan_rep_mode;
507 __u8 pscan_period_mode;
508 __u8 dev_class[3]; 482 __u8 dev_class[3];
509 __le16 clock_offset;
510 __s8 rssi;
511} __attribute__ ((packed)); 483} __attribute__ ((packed));
512struct inquiry_info_with_rssi_and_pscan_mode { 484
485#define HCI_OP_READ_VOICE_SETTING 0x0c25
486struct hci_rp_read_voice_setting {
487 __u8 status;
488 __le16 voice_setting;
489} __attribute__ ((packed));
490
491#define HCI_OP_WRITE_VOICE_SETTING 0x0c26
492struct hci_cp_write_voice_setting {
493 __le16 voice_setting;
494} __attribute__ ((packed));
495
496#define HCI_OP_HOST_BUFFER_SIZE 0x0c33
497struct hci_cp_host_buffer_size {
498 __le16 acl_mtu;
499 __u8 sco_mtu;
500 __le16 acl_max_pkt;
501 __le16 sco_max_pkt;
502} __attribute__ ((packed));
503
504#define HCI_OP_READ_LOCAL_VERSION 0x1001
505struct hci_rp_read_local_version {
506 __u8 status;
507 __u8 hci_ver;
508 __le16 hci_rev;
509 __u8 lmp_ver;
510 __le16 manufacturer;
511 __le16 lmp_subver;
512} __attribute__ ((packed));
513
514#define HCI_OP_READ_LOCAL_COMMANDS 0x1002
515struct hci_rp_read_local_commands {
516 __u8 status;
517 __u8 commands[64];
518} __attribute__ ((packed));
519
520#define HCI_OP_READ_LOCAL_FEATURES 0x1003
521struct hci_rp_read_local_features {
522 __u8 status;
523 __u8 features[8];
524} __attribute__ ((packed));
525
526#define HCI_OP_READ_LOCAL_EXT_FEATURES 0x1004
527struct hci_rp_read_local_ext_features {
528 __u8 status;
529 __u8 page;
530 __u8 max_page;
531 __u8 features[8];
532} __attribute__ ((packed));
533
534#define HCI_OP_READ_BUFFER_SIZE 0x1005
535struct hci_rp_read_buffer_size {
536 __u8 status;
537 __le16 acl_mtu;
538 __u8 sco_mtu;
539 __le16 acl_max_pkt;
540 __le16 sco_max_pkt;
541} __attribute__ ((packed));
542
543#define HCI_OP_READ_BD_ADDR 0x1009
544struct hci_rp_read_bd_addr {
545 __u8 status;
513 bdaddr_t bdaddr; 546 bdaddr_t bdaddr;
514 __u8 pscan_rep_mode;
515 __u8 pscan_period_mode;
516 __u8 pscan_mode;
517 __u8 dev_class[3];
518 __le16 clock_offset;
519 __s8 rssi;
520} __attribute__ ((packed)); 547} __attribute__ ((packed));
521 548
522#define HCI_EV_EXTENDED_INQUIRY_RESULT 0x2F 549/* ---- HCI Events ---- */
523struct extended_inquiry_info { 550#define HCI_EV_INQUIRY_COMPLETE 0x01
551
552#define HCI_EV_INQUIRY_RESULT 0x02
553struct inquiry_info {
524 bdaddr_t bdaddr; 554 bdaddr_t bdaddr;
525 __u8 pscan_rep_mode; 555 __u8 pscan_rep_mode;
526 __u8 pscan_period_mode; 556 __u8 pscan_period_mode;
557 __u8 pscan_mode;
527 __u8 dev_class[3]; 558 __u8 dev_class[3];
528 __le16 clock_offset; 559 __le16 clock_offset;
529 __s8 rssi;
530 __u8 data[240];
531} __attribute__ ((packed)); 560} __attribute__ ((packed));
532 561
533#define HCI_EV_CONN_COMPLETE 0x03 562#define HCI_EV_CONN_COMPLETE 0x03
534struct hci_ev_conn_complete { 563struct hci_ev_conn_complete {
535 __u8 status; 564 __u8 status;
536 __le16 handle; 565 __le16 handle;
@@ -539,40 +568,63 @@ struct hci_ev_conn_complete {
539 __u8 encr_mode; 568 __u8 encr_mode;
540} __attribute__ ((packed)); 569} __attribute__ ((packed));
541 570
542#define HCI_EV_CONN_REQUEST 0x04 571#define HCI_EV_CONN_REQUEST 0x04
543struct hci_ev_conn_request { 572struct hci_ev_conn_request {
544 bdaddr_t bdaddr; 573 bdaddr_t bdaddr;
545 __u8 dev_class[3]; 574 __u8 dev_class[3];
546 __u8 link_type; 575 __u8 link_type;
547} __attribute__ ((packed)); 576} __attribute__ ((packed));
548 577
549#define HCI_EV_DISCONN_COMPLETE 0x05 578#define HCI_EV_DISCONN_COMPLETE 0x05
550struct hci_ev_disconn_complete { 579struct hci_ev_disconn_complete {
551 __u8 status; 580 __u8 status;
552 __le16 handle; 581 __le16 handle;
553 __u8 reason; 582 __u8 reason;
554} __attribute__ ((packed)); 583} __attribute__ ((packed));
555 584
556#define HCI_EV_AUTH_COMPLETE 0x06 585#define HCI_EV_AUTH_COMPLETE 0x06
557struct hci_ev_auth_complete { 586struct hci_ev_auth_complete {
558 __u8 status; 587 __u8 status;
559 __le16 handle; 588 __le16 handle;
560} __attribute__ ((packed)); 589} __attribute__ ((packed));
561 590
562#define HCI_EV_ENCRYPT_CHANGE 0x08 591#define HCI_EV_REMOTE_NAME 0x07
592struct hci_ev_remote_name {
593 __u8 status;
594 bdaddr_t bdaddr;
595 __u8 name[248];
596} __attribute__ ((packed));
597
598#define HCI_EV_ENCRYPT_CHANGE 0x08
563struct hci_ev_encrypt_change { 599struct hci_ev_encrypt_change {
564 __u8 status; 600 __u8 status;
565 __le16 handle; 601 __le16 handle;
566 __u8 encrypt; 602 __u8 encrypt;
567} __attribute__ ((packed)); 603} __attribute__ ((packed));
568 604
569#define HCI_EV_CHANGE_CONN_LINK_KEY_COMPLETE 0x09 605#define HCI_EV_CHANGE_LINK_KEY_COMPLETE 0x09
570struct hci_ev_change_conn_link_key_complete { 606struct hci_ev_change_link_key_complete {
607 __u8 status;
608 __le16 handle;
609} __attribute__ ((packed));
610
611#define HCI_EV_REMOTE_FEATURES 0x0b
612struct hci_ev_remote_features {
613 __u8 status;
614 __le16 handle;
615 __u8 features[8];
616} __attribute__ ((packed));
617
618#define HCI_EV_REMOTE_VERSION 0x0c
619struct hci_ev_remote_version {
571 __u8 status; 620 __u8 status;
572 __le16 handle; 621 __le16 handle;
622 __u8 lmp_ver;
623 __le16 manufacturer;
624 __le16 lmp_subver;
573} __attribute__ ((packed)); 625} __attribute__ ((packed));
574 626
575#define HCI_EV_QOS_SETUP_COMPLETE 0x0D 627#define HCI_EV_QOS_SETUP_COMPLETE 0x0d
576struct hci_qos { 628struct hci_qos {
577 __u8 service_type; 629 __u8 service_type;
578 __u32 token_rate; 630 __u32 token_rate;
@@ -586,33 +638,33 @@ struct hci_ev_qos_setup_complete {
586 struct hci_qos qos; 638 struct hci_qos qos;
587} __attribute__ ((packed)); 639} __attribute__ ((packed));
588 640
589#define HCI_EV_CMD_COMPLETE 0x0E 641#define HCI_EV_CMD_COMPLETE 0x0e
590struct hci_ev_cmd_complete { 642struct hci_ev_cmd_complete {
591 __u8 ncmd; 643 __u8 ncmd;
592 __le16 opcode; 644 __le16 opcode;
593} __attribute__ ((packed)); 645} __attribute__ ((packed));
594 646
595#define HCI_EV_CMD_STATUS 0x0F 647#define HCI_EV_CMD_STATUS 0x0f
596struct hci_ev_cmd_status { 648struct hci_ev_cmd_status {
597 __u8 status; 649 __u8 status;
598 __u8 ncmd; 650 __u8 ncmd;
599 __le16 opcode; 651 __le16 opcode;
600} __attribute__ ((packed)); 652} __attribute__ ((packed));
601 653
602#define HCI_EV_NUM_COMP_PKTS 0x13 654#define HCI_EV_ROLE_CHANGE 0x12
603struct hci_ev_num_comp_pkts {
604 __u8 num_hndl;
605 /* variable length part */
606} __attribute__ ((packed));
607
608#define HCI_EV_ROLE_CHANGE 0x12
609struct hci_ev_role_change { 655struct hci_ev_role_change {
610 __u8 status; 656 __u8 status;
611 bdaddr_t bdaddr; 657 bdaddr_t bdaddr;
612 __u8 role; 658 __u8 role;
613} __attribute__ ((packed)); 659} __attribute__ ((packed));
614 660
615#define HCI_EV_MODE_CHANGE 0x14 661#define HCI_EV_NUM_COMP_PKTS 0x13
662struct hci_ev_num_comp_pkts {
663 __u8 num_hndl;
664 /* variable length part */
665} __attribute__ ((packed));
666
667#define HCI_EV_MODE_CHANGE 0x14
616struct hci_ev_mode_change { 668struct hci_ev_mode_change {
617 __u8 status; 669 __u8 status;
618 __le16 handle; 670 __le16 handle;
@@ -620,53 +672,88 @@ struct hci_ev_mode_change {
620 __le16 interval; 672 __le16 interval;
621} __attribute__ ((packed)); 673} __attribute__ ((packed));
622 674
623#define HCI_EV_PIN_CODE_REQ 0x16 675#define HCI_EV_PIN_CODE_REQ 0x16
624struct hci_ev_pin_code_req { 676struct hci_ev_pin_code_req {
625 bdaddr_t bdaddr; 677 bdaddr_t bdaddr;
626} __attribute__ ((packed)); 678} __attribute__ ((packed));
627 679
628#define HCI_EV_LINK_KEY_REQ 0x17 680#define HCI_EV_LINK_KEY_REQ 0x17
629struct hci_ev_link_key_req { 681struct hci_ev_link_key_req {
630 bdaddr_t bdaddr; 682 bdaddr_t bdaddr;
631} __attribute__ ((packed)); 683} __attribute__ ((packed));
632 684
633#define HCI_EV_LINK_KEY_NOTIFY 0x18 685#define HCI_EV_LINK_KEY_NOTIFY 0x18
634struct hci_ev_link_key_notify { 686struct hci_ev_link_key_notify {
635 bdaddr_t bdaddr; 687 bdaddr_t bdaddr;
636 __u8 link_key[16]; 688 __u8 link_key[16];
637 __u8 key_type; 689 __u8 key_type;
638} __attribute__ ((packed)); 690} __attribute__ ((packed));
639 691
640#define HCI_EV_REMOTE_FEATURES 0x0B 692#define HCI_EV_CLOCK_OFFSET 0x1c
641struct hci_ev_remote_features { 693struct hci_ev_clock_offset {
642 __u8 status; 694 __u8 status;
643 __le16 handle; 695 __le16 handle;
644 __u8 features[8]; 696 __le16 clock_offset;
645} __attribute__ ((packed)); 697} __attribute__ ((packed));
646 698
647#define HCI_EV_REMOTE_VERSION 0x0C 699#define HCI_EV_PSCAN_REP_MODE 0x20
648struct hci_ev_remote_version { 700struct hci_ev_pscan_rep_mode {
701 bdaddr_t bdaddr;
702 __u8 pscan_rep_mode;
703} __attribute__ ((packed));
704
705#define HCI_EV_INQUIRY_RESULT_WITH_RSSI 0x22
706struct inquiry_info_with_rssi {
707 bdaddr_t bdaddr;
708 __u8 pscan_rep_mode;
709 __u8 pscan_period_mode;
710 __u8 dev_class[3];
711 __le16 clock_offset;
712 __s8 rssi;
713} __attribute__ ((packed));
714struct inquiry_info_with_rssi_and_pscan_mode {
715 bdaddr_t bdaddr;
716 __u8 pscan_rep_mode;
717 __u8 pscan_period_mode;
718 __u8 pscan_mode;
719 __u8 dev_class[3];
720 __le16 clock_offset;
721 __s8 rssi;
722} __attribute__ ((packed));
723
724#define HCI_EV_REMOTE_EXT_FEATURES 0x23
725struct hci_ev_remote_ext_features {
649 __u8 status; 726 __u8 status;
650 __le16 handle; 727 __le16 handle;
651 __u8 lmp_ver; 728 __u8 page;
652 __le16 manufacturer; 729 __u8 max_page;
653 __le16 lmp_subver; 730 __u8 features[8];
654} __attribute__ ((packed)); 731} __attribute__ ((packed));
655 732
656#define HCI_EV_CLOCK_OFFSET 0x01C 733#define HCI_EV_SYNC_CONN_COMPLETE 0x2c
657struct hci_ev_clock_offset { 734struct hci_ev_sync_conn_complete {
658 __u8 status; 735 __u8 status;
659 __le16 handle; 736 __le16 handle;
660 __le16 clock_offset; 737 bdaddr_t bdaddr;
738 __u8 link_type;
739 __u8 tx_interval;
740 __u8 retrans_window;
741 __le16 rx_pkt_len;
742 __le16 tx_pkt_len;
743 __u8 air_mode;
661} __attribute__ ((packed)); 744} __attribute__ ((packed));
662 745
663#define HCI_EV_PSCAN_REP_MODE 0x20 746#define HCI_EV_SYNC_CONN_CHANGED 0x2d
664struct hci_ev_pscan_rep_mode { 747struct hci_ev_sync_conn_changed {
665 bdaddr_t bdaddr; 748 __u8 status;
666 __u8 pscan_rep_mode; 749 __le16 handle;
750 __u8 tx_interval;
751 __u8 retrans_window;
752 __le16 rx_pkt_len;
753 __le16 tx_pkt_len;
667} __attribute__ ((packed)); 754} __attribute__ ((packed));
668 755
669#define HCI_EV_SNIFF_SUBRATE 0x2E 756#define HCI_EV_SNIFF_SUBRATE 0x2e
670struct hci_ev_sniff_subrate { 757struct hci_ev_sniff_subrate {
671 __u8 status; 758 __u8 status;
672 __le16 handle; 759 __le16 handle;
@@ -676,14 +763,25 @@ struct hci_ev_sniff_subrate {
676 __le16 max_local_timeout; 763 __le16 max_local_timeout;
677} __attribute__ ((packed)); 764} __attribute__ ((packed));
678 765
766#define HCI_EV_EXTENDED_INQUIRY_RESULT 0x2f
767struct extended_inquiry_info {
768 bdaddr_t bdaddr;
769 __u8 pscan_rep_mode;
770 __u8 pscan_period_mode;
771 __u8 dev_class[3];
772 __le16 clock_offset;
773 __s8 rssi;
774 __u8 data[240];
775} __attribute__ ((packed));
776
679/* Internal events generated by Bluetooth stack */ 777/* Internal events generated by Bluetooth stack */
680#define HCI_EV_STACK_INTERNAL 0xFD 778#define HCI_EV_STACK_INTERNAL 0xfd
681struct hci_ev_stack_internal { 779struct hci_ev_stack_internal {
682 __u16 type; 780 __u16 type;
683 __u8 data[0]; 781 __u8 data[0];
684} __attribute__ ((packed)); 782} __attribute__ ((packed));
685 783
686#define HCI_EV_SI_DEVICE 0x01 784#define HCI_EV_SI_DEVICE 0x01
687struct hci_ev_si_device { 785struct hci_ev_si_device {
688 __u16 event; 786 __u16 event;
689 __u16 dev_id; 787 __u16 dev_id;
@@ -704,40 +802,40 @@ struct hci_ev_si_security {
704#define HCI_SCO_HDR_SIZE 3 802#define HCI_SCO_HDR_SIZE 3
705 803
706struct hci_command_hdr { 804struct hci_command_hdr {
707 __le16 opcode; /* OCF & OGF */ 805 __le16 opcode; /* OCF & OGF */
708 __u8 plen; 806 __u8 plen;
709} __attribute__ ((packed)); 807} __attribute__ ((packed));
710 808
711struct hci_event_hdr { 809struct hci_event_hdr {
712 __u8 evt; 810 __u8 evt;
713 __u8 plen; 811 __u8 plen;
714} __attribute__ ((packed)); 812} __attribute__ ((packed));
715 813
716struct hci_acl_hdr { 814struct hci_acl_hdr {
717 __le16 handle; /* Handle & Flags(PB, BC) */ 815 __le16 handle; /* Handle & Flags(PB, BC) */
718 __le16 dlen; 816 __le16 dlen;
719} __attribute__ ((packed)); 817} __attribute__ ((packed));
720 818
721struct hci_sco_hdr { 819struct hci_sco_hdr {
722 __le16 handle; 820 __le16 handle;
723 __u8 dlen; 821 __u8 dlen;
724} __attribute__ ((packed)); 822} __attribute__ ((packed));
725 823
726#ifdef __KERNEL__ 824#ifdef __KERNEL__
727#include <linux/skbuff.h> 825#include <linux/skbuff.h>
728static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb) 826static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb)
729{ 827{
730 return (struct hci_event_hdr *)skb->data; 828 return (struct hci_event_hdr *) skb->data;
731} 829}
732 830
733static inline struct hci_acl_hdr *hci_acl_hdr(const struct sk_buff *skb) 831static inline struct hci_acl_hdr *hci_acl_hdr(const struct sk_buff *skb)
734{ 832{
735 return (struct hci_acl_hdr *)skb->data; 833 return (struct hci_acl_hdr *) skb->data;
736} 834}
737 835
738static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb) 836static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
739{ 837{
740 return (struct hci_sco_hdr *)skb->data; 838 return (struct hci_sco_hdr *) skb->data;
741} 839}
742#endif 840#endif
743 841
@@ -771,13 +869,13 @@ struct sockaddr_hci {
771struct hci_filter { 869struct hci_filter {
772 unsigned long type_mask; 870 unsigned long type_mask;
773 unsigned long event_mask[2]; 871 unsigned long event_mask[2];
774 __le16 opcode; 872 __le16 opcode;
775}; 873};
776 874
777struct hci_ufilter { 875struct hci_ufilter {
778 __u32 type_mask; 876 __u32 type_mask;
779 __u32 event_mask[2]; 877 __u32 event_mask[2];
780 __le16 opcode; 878 __le16 opcode;
781}; 879};
782 880
783#define HCI_FLT_TYPE_BITS 31 881#define HCI_FLT_TYPE_BITS 31
@@ -825,15 +923,15 @@ struct hci_dev_info {
825struct hci_conn_info { 923struct hci_conn_info {
826 __u16 handle; 924 __u16 handle;
827 bdaddr_t bdaddr; 925 bdaddr_t bdaddr;
828 __u8 type; 926 __u8 type;
829 __u8 out; 927 __u8 out;
830 __u16 state; 928 __u16 state;
831 __u32 link_mode; 929 __u32 link_mode;
832}; 930};
833 931
834struct hci_dev_req { 932struct hci_dev_req {
835 __u16 dev_id; 933 __u16 dev_id;
836 __u32 dev_opt; 934 __u32 dev_opt;
837}; 935};
838 936
839struct hci_dev_list_req { 937struct hci_dev_list_req {
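
The hci.h hunks above add event structures for the new synchronous
connection events and the Extended Inquiry Result. A minimal sketch of
how an event handler could lift one of them out of a received event skb
with the hci_event_hdr() helper declared in the same header; the
function is illustrative only (not part of this patch) and error
handling is omitted:

static void sketch_sync_conn_complete(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_event_hdr *hdr = hci_event_hdr(skb);
        struct hci_ev_sync_conn_complete *ev;

        if (hdr->evt != HCI_EV_SYNC_CONN_COMPLETE)
                return;

        /* skip the two byte event header, then overlay the event body */
        skb_pull(skb, HCI_EVENT_HDR_SIZE);
        ev = (struct hci_ev_sync_conn_complete *) skb->data;

        BT_DBG("%s status 0x%x handle %d air mode 0x%x", hdev->name,
                        ev->status, __le16_to_cpu(ev->handle), ev->air_mode);
}
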
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 8f67c8a7169b..ea13baa3851b 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -71,7 +71,10 @@ struct hci_dev {
71 __u16 id; 71 __u16 id;
72 __u8 type; 72 __u8 type;
73 bdaddr_t bdaddr; 73 bdaddr_t bdaddr;
74 __u8 dev_name[248];
75 __u8 dev_class[3];
74 __u8 features[8]; 76 __u8 features[8];
77 __u8 commands[64];
75 __u8 hci_ver; 78 __u8 hci_ver;
76 __u16 hci_rev; 79 __u16 hci_rev;
77 __u16 manufacturer; 80 __u16 manufacturer;
@@ -310,10 +313,12 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
310void hci_acl_connect(struct hci_conn *conn); 313void hci_acl_connect(struct hci_conn *conn);
311void hci_acl_disconn(struct hci_conn *conn, __u8 reason); 314void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
312void hci_add_sco(struct hci_conn *conn, __u16 handle); 315void hci_add_sco(struct hci_conn *conn, __u16 handle);
316void hci_setup_sync(struct hci_conn *conn, __u16 handle);
313 317
314struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst); 318struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
315int hci_conn_del(struct hci_conn *conn); 319int hci_conn_del(struct hci_conn *conn);
316void hci_conn_hash_flush(struct hci_dev *hdev); 320void hci_conn_hash_flush(struct hci_dev *hdev);
321void hci_conn_check_pending(struct hci_dev *hdev);
317 322
318struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *src); 323struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *src);
319int hci_conn_auth(struct hci_conn *conn); 324int hci_conn_auth(struct hci_conn *conn);
@@ -617,11 +622,11 @@ int hci_unregister_cb(struct hci_cb *hcb);
617int hci_register_notifier(struct notifier_block *nb); 622int hci_register_notifier(struct notifier_block *nb);
618int hci_unregister_notifier(struct notifier_block *nb); 623int hci_unregister_notifier(struct notifier_block *nb);
619 624
620int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param); 625int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
621int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags); 626int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
622int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb); 627int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
623 628
624void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf); 629void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
625 630
626void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data); 631void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
627 632
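
The prototype changes above are the core of the command API cleanup:
hci_send_cmd() and hci_sent_cmd_data() now take a single pre-packed
16-bit opcode instead of a separate (ogf, ocf) pair. Assuming the usual
packing the old hci_opcode_pack() macro produced (OCF in the low 10
bits, OGF in the upper 6, which makes HCI_OP_RESET the familiar
0x0c03), a caller changes roughly as in this sketch; the function name
is made up and only stands in for the real call sites:

static void sketch_enable_page_scan(struct hci_dev *hdev)
{
        __u8 scan = SCAN_PAGE;

        /* before this patch the same request read:
         *   hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
         * and the command complete handler later recovers the parameter
         * with hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE). */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
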
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 70e70f5d3dd6..73e115bc12dd 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -29,7 +29,8 @@
29#define L2CAP_DEFAULT_MTU 672 29#define L2CAP_DEFAULT_MTU 672
30#define L2CAP_DEFAULT_FLUSH_TO 0xFFFF 30#define L2CAP_DEFAULT_FLUSH_TO 0xFFFF
31 31
32#define L2CAP_CONN_TIMEOUT (HZ * 40) 32#define L2CAP_CONN_TIMEOUT (40000) /* 40 seconds */
33#define L2CAP_INFO_TIMEOUT (4000) /* 4 seconds */
33 34
34/* L2CAP socket address */ 35/* L2CAP socket address */
35struct sockaddr_l2 { 36struct sockaddr_l2 {
@@ -148,6 +149,19 @@ struct l2cap_conf_opt {
148 149
149#define L2CAP_CONF_MAX_SIZE 22 150#define L2CAP_CONF_MAX_SIZE 22
150 151
152struct l2cap_conf_rfc {
153 __u8 mode;
154 __u8 txwin_size;
155 __u8 max_transmit;
156 __le16 retrans_timeout;
157 __le16 monitor_timeout;
158 __le16 max_pdu_size;
159} __attribute__ ((packed));
160
161#define L2CAP_MODE_BASIC 0x00
162#define L2CAP_MODE_RETRANS 0x01
163#define L2CAP_MODE_FLOWCTL 0x02
164
151struct l2cap_disconn_req { 165struct l2cap_disconn_req {
152 __le16 dcid; 166 __le16 dcid;
153 __le16 scid; 167 __le16 scid;
@@ -160,7 +174,6 @@ struct l2cap_disconn_rsp {
160 174
161struct l2cap_info_req { 175struct l2cap_info_req {
162 __le16 type; 176 __le16 type;
163 __u8 data[0];
164} __attribute__ ((packed)); 177} __attribute__ ((packed));
165 178
166struct l2cap_info_rsp { 179struct l2cap_info_rsp {
@@ -192,6 +205,13 @@ struct l2cap_conn {
192 205
193 unsigned int mtu; 206 unsigned int mtu;
194 207
208 __u32 feat_mask;
209
210 __u8 info_state;
211 __u8 info_ident;
212
213 struct timer_list info_timer;
214
195 spinlock_t lock; 215 spinlock_t lock;
196 216
197 struct sk_buff *rx_skb; 217 struct sk_buff *rx_skb;
@@ -202,6 +222,9 @@ struct l2cap_conn {
202 struct l2cap_chan_list chan_list; 222 struct l2cap_chan_list chan_list;
203}; 223};
204 224
225#define L2CAP_INFO_CL_MTU_REQ_SENT 0x01
226#define L2CAP_INFO_FEAT_MASK_REQ_SENT 0x02
227
205/* ----- L2CAP channel and socket info ----- */ 228/* ----- L2CAP channel and socket info ----- */
206#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk) 229#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
207 230
@@ -221,7 +244,6 @@ struct l2cap_pinfo {
221 __u8 conf_len; 244 __u8 conf_len;
222 __u8 conf_state; 245 __u8 conf_state;
223 __u8 conf_retry; 246 __u8 conf_retry;
224 __u16 conf_mtu;
225 247
226 __u8 ident; 248 __u8 ident;
227 249
@@ -232,10 +254,11 @@ struct l2cap_pinfo {
232 struct sock *prev_c; 254 struct sock *prev_c;
233}; 255};
234 256
235#define L2CAP_CONF_REQ_SENT 0x01 257#define L2CAP_CONF_REQ_SENT 0x01
236#define L2CAP_CONF_INPUT_DONE 0x02 258#define L2CAP_CONF_INPUT_DONE 0x02
237#define L2CAP_CONF_OUTPUT_DONE 0x04 259#define L2CAP_CONF_OUTPUT_DONE 0x04
238#define L2CAP_CONF_MAX_RETRIES 2 260
261#define L2CAP_CONF_MAX_RETRIES 2
239 262
240void l2cap_load(void); 263void l2cap_load(void);
241 264
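
The l2cap.h additions above introduce the Retransmission and Flow
Control configuration option (l2cap_conf_rfc) together with the
connection state used for the new information request exchange. A
minimal sketch of preparing that option for plain basic mode;
illustrative only, wrapping it in an l2cap_conf_opt and sending it is
left to the configuration code in l2cap.c:

static void sketch_basic_mode_option(struct l2cap_conf_rfc *rfc)
{
        /* with basic mode requested the remaining option fields are
         * ignored, so zeroing them is sufficient */
        memset(rfc, 0, sizeof(*rfc));
        rfc->mode = L2CAP_MODE_BASIC;
}
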
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 5fdfc9a67d39..9483320f6dad 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -78,11 +78,11 @@ void hci_acl_connect(struct hci_conn *conn)
78 78
79 cp.pkt_type = cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK); 79 cp.pkt_type = cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK);
80 if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER)) 80 if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
81 cp.role_switch = 0x01; 81 cp.role_switch = 0x01;
82 else 82 else
83 cp.role_switch = 0x00; 83 cp.role_switch = 0x00;
84 84
85 hci_send_cmd(hdev, OGF_LINK_CTL, OCF_CREATE_CONN, sizeof(cp), &cp); 85 hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
86} 86}
87 87
88static void hci_acl_connect_cancel(struct hci_conn *conn) 88static void hci_acl_connect_cancel(struct hci_conn *conn)
@@ -95,8 +95,7 @@ static void hci_acl_connect_cancel(struct hci_conn *conn)
95 return; 95 return;
96 96
97 bacpy(&cp.bdaddr, &conn->dst); 97 bacpy(&cp.bdaddr, &conn->dst);
98 hci_send_cmd(conn->hdev, OGF_LINK_CTL, 98 hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
99 OCF_CREATE_CONN_CANCEL, sizeof(cp), &cp);
100} 99}
101 100
102void hci_acl_disconn(struct hci_conn *conn, __u8 reason) 101void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
@@ -109,8 +108,7 @@ void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
109 108
110 cp.handle = cpu_to_le16(conn->handle); 109 cp.handle = cpu_to_le16(conn->handle);
111 cp.reason = reason; 110 cp.reason = reason;
112 hci_send_cmd(conn->hdev, OGF_LINK_CTL, 111 hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
113 OCF_DISCONNECT, sizeof(cp), &cp);
114} 112}
115 113
116void hci_add_sco(struct hci_conn *conn, __u16 handle) 114void hci_add_sco(struct hci_conn *conn, __u16 handle)
@@ -126,7 +124,29 @@ void hci_add_sco(struct hci_conn *conn, __u16 handle)
126 cp.handle = cpu_to_le16(handle); 124 cp.handle = cpu_to_le16(handle);
127 cp.pkt_type = cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK); 125 cp.pkt_type = cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK);
128 126
129 hci_send_cmd(hdev, OGF_LINK_CTL, OCF_ADD_SCO, sizeof(cp), &cp); 127 hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
128}
129
130void hci_setup_sync(struct hci_conn *conn, __u16 handle)
131{
132 struct hci_dev *hdev = conn->hdev;
133 struct hci_cp_setup_sync_conn cp;
134
135 BT_DBG("%p", conn);
136
137 conn->state = BT_CONNECT;
138 conn->out = 1;
139
140 cp.handle = cpu_to_le16(handle);
141 cp.pkt_type = cpu_to_le16(hdev->esco_type);
142
143 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
144 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
145 cp.max_latency = cpu_to_le16(0xffff);
146 cp.voice_setting = cpu_to_le16(hdev->voice_setting);
147 cp.retrans_effort = 0xff;
148
149 hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
130} 150}
131 151
132static void hci_conn_timeout(unsigned long arg) 152static void hci_conn_timeout(unsigned long arg)
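
The fixed values in hci_setup_sync() above map onto the Setup
Synchronous Connection command parameters as follows; this reading
follows the usual HCI definitions and is not spelled out by the patch
itself:

        /*
         * tx/rx bandwidth  0x00001f40 -> 8000 octets/s = 8000 * 8 = 64 kbit/s
         * max_latency      0xffff     -> "don't care"
         * voice_setting    taken from the controller's hdev->voice_setting
         * retrans_effort   0xff       -> "don't care"
         */
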
@@ -143,7 +163,10 @@ static void hci_conn_timeout(unsigned long arg)
143 163
144 switch (conn->state) { 164 switch (conn->state) {
145 case BT_CONNECT: 165 case BT_CONNECT:
146 hci_acl_connect_cancel(conn); 166 if (conn->type == ACL_LINK)
167 hci_acl_connect_cancel(conn);
168 else
169 hci_acl_disconn(conn, 0x13);
147 break; 170 break;
148 case BT_CONNECTED: 171 case BT_CONNECTED:
149 hci_acl_disconn(conn, 0x13); 172 hci_acl_disconn(conn, 0x13);
@@ -330,8 +353,12 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst)
330 hci_conn_hold(sco); 353 hci_conn_hold(sco);
331 354
332 if (acl->state == BT_CONNECTED && 355 if (acl->state == BT_CONNECTED &&
333 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) 356 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
334 hci_add_sco(sco, acl->handle); 357 if (lmp_esco_capable(hdev))
358 hci_setup_sync(sco, acl->handle);
359 else
360 hci_add_sco(sco, acl->handle);
361 }
335 362
336 return sco; 363 return sco;
337} 364}
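
With the hci_connect() change above, a caller asking for a voice
channel no longer has to pick between SCO and eSCO itself. A rough
usage sketch; the helper name is made up, and the device lock is taken
around the call as the existing socket-level callers do:

static struct hci_conn *sketch_voice_link(struct hci_dev *hdev, bdaddr_t *dst)
{
        struct hci_conn *conn;

        hci_dev_lock(hdev);

        /* the core brings up (or reuses) the ACL first; once that link
         * is connected it issues Setup Synchronous Connection on
         * eSCO-capable controllers and falls back to Add SCO otherwise */
        conn = hci_connect(hdev, SCO_LINK, dst);

        hci_dev_unlock(hdev);

        return conn;
}
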
@@ -348,7 +375,7 @@ int hci_conn_auth(struct hci_conn *conn)
348 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { 375 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
349 struct hci_cp_auth_requested cp; 376 struct hci_cp_auth_requested cp;
350 cp.handle = cpu_to_le16(conn->handle); 377 cp.handle = cpu_to_le16(conn->handle);
351 hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_AUTH_REQUESTED, sizeof(cp), &cp); 378 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
352 } 379 }
353 return 0; 380 return 0;
354} 381}
@@ -369,7 +396,7 @@ int hci_conn_encrypt(struct hci_conn *conn)
369 struct hci_cp_set_conn_encrypt cp; 396 struct hci_cp_set_conn_encrypt cp;
370 cp.handle = cpu_to_le16(conn->handle); 397 cp.handle = cpu_to_le16(conn->handle);
371 cp.encrypt = 1; 398 cp.encrypt = 1;
372 hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_SET_CONN_ENCRYPT, sizeof(cp), &cp); 399 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), &cp);
373 } 400 }
374 return 0; 401 return 0;
375} 402}
@@ -383,7 +410,7 @@ int hci_conn_change_link_key(struct hci_conn *conn)
383 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { 410 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
384 struct hci_cp_change_conn_link_key cp; 411 struct hci_cp_change_conn_link_key cp;
385 cp.handle = cpu_to_le16(conn->handle); 412 cp.handle = cpu_to_le16(conn->handle);
386 hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_CHANGE_CONN_LINK_KEY, sizeof(cp), &cp); 413 hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY, sizeof(cp), &cp);
387 } 414 }
388 return 0; 415 return 0;
389} 416}
@@ -401,7 +428,7 @@ int hci_conn_switch_role(struct hci_conn *conn, uint8_t role)
401 struct hci_cp_switch_role cp; 428 struct hci_cp_switch_role cp;
402 bacpy(&cp.bdaddr, &conn->dst); 429 bacpy(&cp.bdaddr, &conn->dst);
403 cp.role = role; 430 cp.role = role;
404 hci_send_cmd(conn->hdev, OGF_LINK_POLICY, OCF_SWITCH_ROLE, sizeof(cp), &cp); 431 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
405 } 432 }
406 return 0; 433 return 0;
407} 434}
@@ -423,8 +450,7 @@ void hci_conn_enter_active_mode(struct hci_conn *conn)
423 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { 450 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
424 struct hci_cp_exit_sniff_mode cp; 451 struct hci_cp_exit_sniff_mode cp;
425 cp.handle = cpu_to_le16(conn->handle); 452 cp.handle = cpu_to_le16(conn->handle);
426 hci_send_cmd(hdev, OGF_LINK_POLICY, 453 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
427 OCF_EXIT_SNIFF_MODE, sizeof(cp), &cp);
428 } 454 }
429 455
430timer: 456timer:
@@ -455,8 +481,7 @@ void hci_conn_enter_sniff_mode(struct hci_conn *conn)
455 cp.max_latency = cpu_to_le16(0); 481 cp.max_latency = cpu_to_le16(0);
456 cp.min_remote_timeout = cpu_to_le16(0); 482 cp.min_remote_timeout = cpu_to_le16(0);
457 cp.min_local_timeout = cpu_to_le16(0); 483 cp.min_local_timeout = cpu_to_le16(0);
458 hci_send_cmd(hdev, OGF_LINK_POLICY, 484 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
459 OCF_SNIFF_SUBRATE, sizeof(cp), &cp);
460 } 485 }
461 486
462 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { 487 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
@@ -466,8 +491,7 @@ void hci_conn_enter_sniff_mode(struct hci_conn *conn)
466 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval); 491 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
467 cp.attempt = cpu_to_le16(4); 492 cp.attempt = cpu_to_le16(4);
468 cp.timeout = cpu_to_le16(1); 493 cp.timeout = cpu_to_le16(1);
469 hci_send_cmd(hdev, OGF_LINK_POLICY, 494 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
470 OCF_SNIFF_MODE, sizeof(cp), &cp);
471 } 495 }
472} 496}
473 497
@@ -493,6 +517,22 @@ void hci_conn_hash_flush(struct hci_dev *hdev)
493 } 517 }
494} 518}
495 519
520/* Check pending connect attempts */
521void hci_conn_check_pending(struct hci_dev *hdev)
522{
523 struct hci_conn *conn;
524
525 BT_DBG("hdev %s", hdev->name);
526
527 hci_dev_lock(hdev);
528
529 conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
530 if (conn)
531 hci_acl_connect(conn);
532
533 hci_dev_unlock(hdev);
534}
535
496int hci_get_conn_list(void __user *arg) 536int hci_get_conn_list(void __user *arg)
497{ 537{
498 struct hci_conn_list_req req, *cl; 538 struct hci_conn_list_req req, *cl;
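
The new hci_conn_check_pending() above gives the event code one place
to resume an ACL connect that had to be parked. The sequence it is
meant for, sketched from the hci_cs_create_conn() logic that is only
partly visible further down in this patch, runs roughly:

        /*
         * hci_acl_connect()               -> Create Connection issued
         * command status "disallowed"     -> connection parked in BT_CONNECT2
         *   (typically the controller is still inquiring)
         * inquiry completed or cancelled  -> hci_conn_check_pending()
         *                                 -> hci_acl_connect() retried
         */
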
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 18e3afc964df..372b0d3b75a8 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -176,7 +176,7 @@ static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
176 BT_DBG("%s %ld", hdev->name, opt); 176 BT_DBG("%s %ld", hdev->name, opt);
177 177
178 /* Reset device */ 178 /* Reset device */
179 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL); 179 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
180} 180}
181 181
182static void hci_init_req(struct hci_dev *hdev, unsigned long opt) 182static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
@@ -202,16 +202,16 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
202 202
203 /* Reset */ 203 /* Reset */
204 if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks)) 204 if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
205 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL); 205 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
206 206
207 /* Read Local Supported Features */ 207 /* Read Local Supported Features */
208 hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL); 208 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
209 209
210 /* Read Local Version */ 210 /* Read Local Version */
211 hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_VERSION, 0, NULL); 211 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
212 212
213 /* Read Buffer Size (ACL mtu, max pkt, etc.) */ 213 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
214 hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL); 214 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
215 215
216#if 0 216#if 0
217 /* Host buffer size */ 217 /* Host buffer size */
@@ -221,29 +221,35 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
221 cp.sco_mtu = HCI_MAX_SCO_SIZE; 221 cp.sco_mtu = HCI_MAX_SCO_SIZE;
222 cp.acl_max_pkt = cpu_to_le16(0xffff); 222 cp.acl_max_pkt = cpu_to_le16(0xffff);
223 cp.sco_max_pkt = cpu_to_le16(0xffff); 223 cp.sco_max_pkt = cpu_to_le16(0xffff);
224 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp); 224 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
225 } 225 }
226#endif 226#endif
227 227
228 /* Read BD Address */ 228 /* Read BD Address */
229 hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL); 229 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
230
231 /* Read Class of Device */
232 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
233
234 /* Read Local Name */
235 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
230 236
231 /* Read Voice Setting */ 237 /* Read Voice Setting */
232 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL); 238 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
233 239
234 /* Optional initialization */ 240 /* Optional initialization */
235 241
236 /* Clear Event Filters */ 242 /* Clear Event Filters */
237 flt_type = HCI_FLT_CLEAR_ALL; 243 flt_type = HCI_FLT_CLEAR_ALL;
238 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, 1, &flt_type); 244 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
239 245
240 /* Page timeout ~20 secs */ 246 /* Page timeout ~20 secs */
241 param = cpu_to_le16(0x8000); 247 param = cpu_to_le16(0x8000);
242 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param); 248 hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
243 249
244 /* Connection accept timeout ~20 secs */ 250 /* Connection accept timeout ~20 secs */
245 param = cpu_to_le16(0x7d00); 251 param = cpu_to_le16(0x7d00);
246 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param); 252 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
247} 253}
248 254
249static void hci_scan_req(struct hci_dev *hdev, unsigned long opt) 255static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
@@ -253,7 +259,7 @@ static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
253 BT_DBG("%s %x", hdev->name, scan); 259 BT_DBG("%s %x", hdev->name, scan);
254 260
255 /* Inquiry and Page scans */ 261 /* Inquiry and Page scans */
256 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan); 262 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
257} 263}
258 264
259static void hci_auth_req(struct hci_dev *hdev, unsigned long opt) 265static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
@@ -263,7 +269,7 @@ static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
263 BT_DBG("%s %x", hdev->name, auth); 269 BT_DBG("%s %x", hdev->name, auth);
264 270
265 /* Authentication */ 271 /* Authentication */
266 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth); 272 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
267} 273}
268 274
269static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt) 275static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
@@ -273,7 +279,7 @@ static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
273 BT_DBG("%s %x", hdev->name, encrypt); 279 BT_DBG("%s %x", hdev->name, encrypt);
274 280
275 /* Authentication */ 281 /* Authentication */
276 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt); 282 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
277} 283}
278 284
279/* Get HCI device by index. 285/* Get HCI device by index.
@@ -384,7 +390,7 @@ static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
384 memcpy(&cp.lap, &ir->lap, 3); 390 memcpy(&cp.lap, &ir->lap, 3);
385 cp.length = ir->length; 391 cp.length = ir->length;
386 cp.num_rsp = ir->num_rsp; 392 cp.num_rsp = ir->num_rsp;
387 hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp); 393 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
388} 394}
389 395
390int hci_inquiry(void __user *arg) 396int hci_inquiry(void __user *arg)
@@ -1111,13 +1117,13 @@ static int hci_send_frame(struct sk_buff *skb)
1111} 1117}
1112 1118
1113/* Send HCI command */ 1119/* Send HCI command */
1114int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param) 1120int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1115{ 1121{
1116 int len = HCI_COMMAND_HDR_SIZE + plen; 1122 int len = HCI_COMMAND_HDR_SIZE + plen;
1117 struct hci_command_hdr *hdr; 1123 struct hci_command_hdr *hdr;
1118 struct sk_buff *skb; 1124 struct sk_buff *skb;
1119 1125
1120 BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen); 1126 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1121 1127
1122 skb = bt_skb_alloc(len, GFP_ATOMIC); 1128 skb = bt_skb_alloc(len, GFP_ATOMIC);
1123 if (!skb) { 1129 if (!skb) {
@@ -1126,7 +1132,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *p
1126 } 1132 }
1127 1133
1128 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE); 1134 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1129 hdr->opcode = cpu_to_le16(hci_opcode_pack(ogf, ocf)); 1135 hdr->opcode = cpu_to_le16(opcode);
1130 hdr->plen = plen; 1136 hdr->plen = plen;
1131 1137
1132 if (plen) 1138 if (plen)
@@ -1143,7 +1149,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *p
1143} 1149}
1144 1150
1145/* Get data from the previously sent command */ 1151/* Get data from the previously sent command */
1146void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf) 1152void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1147{ 1153{
1148 struct hci_command_hdr *hdr; 1154 struct hci_command_hdr *hdr;
1149 1155
@@ -1152,10 +1158,10 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
1152 1158
1153 hdr = (void *) hdev->sent_cmd->data; 1159 hdr = (void *) hdev->sent_cmd->data;
1154 1160
1155 if (hdr->opcode != cpu_to_le16(hci_opcode_pack(ogf, ocf))) 1161 if (hdr->opcode != cpu_to_le16(opcode))
1156 return NULL; 1162 return NULL;
1157 1163
1158 BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf); 1164 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1159 1165
1160 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; 1166 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1161} 1167}
@@ -1355,6 +1361,26 @@ static inline void hci_sched_sco(struct hci_dev *hdev)
1355 } 1361 }
1356} 1362}
1357 1363
1364static inline void hci_sched_esco(struct hci_dev *hdev)
1365{
1366 struct hci_conn *conn;
1367 struct sk_buff *skb;
1368 int quote;
1369
1370 BT_DBG("%s", hdev->name);
1371
1372 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1373 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1374 BT_DBG("skb %p len %d", skb, skb->len);
1375 hci_send_frame(skb);
1376
1377 conn->sent++;
1378 if (conn->sent == ~0)
1379 conn->sent = 0;
1380 }
1381 }
1382}
1383
1358static void hci_tx_task(unsigned long arg) 1384static void hci_tx_task(unsigned long arg)
1359{ 1385{
1360 struct hci_dev *hdev = (struct hci_dev *) arg; 1386 struct hci_dev *hdev = (struct hci_dev *) arg;
@@ -1370,6 +1396,8 @@ static void hci_tx_task(unsigned long arg)
1370 1396
1371 hci_sched_sco(hdev); 1397 hci_sched_sco(hdev);
1372 1398
1399 hci_sched_esco(hdev);
1400
1373 /* Send next queued raw (unknown type) packet */ 1401 /* Send next queued raw (unknown type) packet */
1374 while ((skb = skb_dequeue(&hdev->raw_q))) 1402 while ((skb = skb_dequeue(&hdev->raw_q)))
1375 hci_send_frame(skb); 1403 hci_send_frame(skb);
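
hci_sched_esco() above mirrors hci_sched_sco(): hci_low_sent() picks a
connection of the given link type and returns a fair-share quote, and
frames are drained against the shared synchronous packet budget in
hdev->sco_cnt. The hci_event.c rework that follows replaces the old
per-OGF switch statements with one small handler per opcode; a
condensed, hypothetical sketch of how such handlers end up being
dispatched from a Command Complete event (the actual dispatch sits in
parts of hci_event.c beyond this excerpt):

static void sketch_cmd_complete(struct hci_dev *hdev, __u16 opcode,
                                                struct sk_buff *skb)
{
        switch (opcode) {
        case HCI_OP_INQUIRY_CANCEL:
                hci_cc_inquiry_cancel(hdev, skb);
                break;

        case HCI_OP_RESET:
                hci_cc_reset(hdev, skb);
                break;

        case HCI_OP_READ_LOCAL_FEATURES:
                hci_cc_read_local_features(hdev, skb);
                break;

        default:
                BT_DBG("%s opcode 0x%x", hdev->name, opcode);
                break;
        }
}
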
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 4baea1e38652..46df2e403df8 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -52,234 +52,273 @@
52 52
53/* Handle HCI Event packets */ 53/* Handle HCI Event packets */
54 54
55/* Command Complete OGF LINK_CTL */ 55static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
56static void hci_cc_link_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb)
57{ 56{
58 __u8 status; 57 __u8 status = *((__u8 *) skb->data);
59 struct hci_conn *pend;
60 58
61 BT_DBG("%s ocf 0x%x", hdev->name, ocf); 59 BT_DBG("%s status 0x%x", hdev->name, status);
62 60
63 switch (ocf) { 61 if (status)
64 case OCF_INQUIRY_CANCEL: 62 return;
65 case OCF_EXIT_PERIODIC_INQ:
66 status = *((__u8 *) skb->data);
67 63
68 if (status) { 64 clear_bit(HCI_INQUIRY, &hdev->flags);
69 BT_DBG("%s Inquiry cancel error: status 0x%x", hdev->name, status);
70 } else {
71 clear_bit(HCI_INQUIRY, &hdev->flags);
72 hci_req_complete(hdev, status);
73 }
74 65
75 hci_dev_lock(hdev); 66 hci_req_complete(hdev, status);
76 67
77 pend = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2); 68 hci_conn_check_pending(hdev);
78 if (pend) 69}
79 hci_acl_connect(pend);
80 70
81 hci_dev_unlock(hdev); 71static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72{
73 __u8 status = *((__u8 *) skb->data);
82 74
83 break; 75 BT_DBG("%s status 0x%x", hdev->name, status);
84 76
85 default: 77 if (status)
86 BT_DBG("%s Command complete: ogf LINK_CTL ocf %x", hdev->name, ocf); 78 return;
87 break; 79
80 clear_bit(HCI_INQUIRY, &hdev->flags);
81
82 hci_conn_check_pending(hdev);
83}
84
85static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
86{
87 BT_DBG("%s", hdev->name);
88}
89
90static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
91{
92 struct hci_rp_role_discovery *rp = (void *) skb->data;
93 struct hci_conn *conn;
94
95 BT_DBG("%s status 0x%x", hdev->name, rp->status);
96
97 if (rp->status)
98 return;
99
100 hci_dev_lock(hdev);
101
102 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
103 if (conn) {
104 if (rp->role)
105 conn->link_mode &= ~HCI_LM_MASTER;
106 else
107 conn->link_mode |= HCI_LM_MASTER;
88 } 108 }
109
110 hci_dev_unlock(hdev);
89} 111}
90 112
91/* Command Complete OGF LINK_POLICY */ 113static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
92static void hci_cc_link_policy(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb)
93{ 114{
115 struct hci_rp_write_link_policy *rp = (void *) skb->data;
94 struct hci_conn *conn; 116 struct hci_conn *conn;
95 struct hci_rp_role_discovery *rd;
96 struct hci_rp_write_link_policy *lp;
97 void *sent; 117 void *sent;
98 118
99 BT_DBG("%s ocf 0x%x", hdev->name, ocf); 119 BT_DBG("%s status 0x%x", hdev->name, rp->status);
100 120
101 switch (ocf) { 121 if (rp->status)
102 case OCF_ROLE_DISCOVERY: 122 return;
103 rd = (void *) skb->data;
104 123
105 if (rd->status) 124 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
106 break; 125 if (!sent)
126 return;
107 127
108 hci_dev_lock(hdev); 128 hci_dev_lock(hdev);
109 129
110 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rd->handle)); 130 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
111 if (conn) { 131 if (conn) {
112 if (rd->role) 132 __le16 policy = get_unaligned((__le16 *) (sent + 2));
113 conn->link_mode &= ~HCI_LM_MASTER; 133 conn->link_policy = __le16_to_cpu(policy);
114 else 134 }
115 conn->link_mode |= HCI_LM_MASTER;
116 }
117 135
118 hci_dev_unlock(hdev); 136 hci_dev_unlock(hdev);
119 break; 137}
120 138
121 case OCF_WRITE_LINK_POLICY: 139static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
122 sent = hci_sent_cmd_data(hdev, OGF_LINK_POLICY, OCF_WRITE_LINK_POLICY); 140{
123 if (!sent) 141 __u8 status = *((__u8 *) skb->data);
124 break;
125 142
126 lp = (struct hci_rp_write_link_policy *) skb->data; 143 BT_DBG("%s status 0x%x", hdev->name, status);
127 144
128 if (lp->status) 145 hci_req_complete(hdev, status);
129 break; 146}
130 147
131 hci_dev_lock(hdev); 148static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
149{
150 __u8 status = *((__u8 *) skb->data);
151 void *sent;
132 152
133 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(lp->handle)); 153 BT_DBG("%s status 0x%x", hdev->name, status);
134 if (conn) {
135 __le16 policy = get_unaligned((__le16 *) (sent + 2));
136 conn->link_policy = __le16_to_cpu(policy);
137 }
138 154
139 hci_dev_unlock(hdev); 155 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
140 break; 156 if (!sent)
157 return;
141 158
142 default: 159 if (!status)
143 BT_DBG("%s: Command complete: ogf LINK_POLICY ocf %x", 160 memcpy(hdev->dev_name, sent, 248);
144 hdev->name, ocf); 161}
145 break; 162
163static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
164{
165 struct hci_rp_read_local_name *rp = (void *) skb->data;
166
167 BT_DBG("%s status 0x%x", hdev->name, rp->status);
168
169 if (rp->status)
170 return;
171
172 memcpy(hdev->dev_name, rp->name, 248);
173}
174
175static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
176{
177 __u8 status = *((__u8 *) skb->data);
178 void *sent;
179
180 BT_DBG("%s status 0x%x", hdev->name, status);
181
182 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
183 if (!sent)
184 return;
185
186 if (!status) {
187 __u8 param = *((__u8 *) sent);
188
189 if (param == AUTH_ENABLED)
190 set_bit(HCI_AUTH, &hdev->flags);
191 else
192 clear_bit(HCI_AUTH, &hdev->flags);
146 } 193 }
194
195 hci_req_complete(hdev, status);
147} 196}
148 197
149/* Command Complete OGF HOST_CTL */ 198static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
150static void hci_cc_host_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb)
151{ 199{
152 __u8 status, param; 200 __u8 status = *((__u8 *) skb->data);
153 __u16 setting;
154 struct hci_rp_read_voice_setting *vs;
155 void *sent; 201 void *sent;
156 202
157 BT_DBG("%s ocf 0x%x", hdev->name, ocf); 203 BT_DBG("%s status 0x%x", hdev->name, status);
158 204
159 switch (ocf) { 205 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
160 case OCF_RESET: 206 if (!sent)
161 status = *((__u8 *) skb->data); 207 return;
162 hci_req_complete(hdev, status);
163 break;
164 208
165 case OCF_SET_EVENT_FLT: 209 if (!status) {
166 status = *((__u8 *) skb->data); 210 __u8 param = *((__u8 *) sent);
167 if (status) {
168 BT_DBG("%s SET_EVENT_FLT failed %d", hdev->name, status);
169 } else {
170 BT_DBG("%s SET_EVENT_FLT succeseful", hdev->name);
171 }
172 break;
173 211
174 case OCF_WRITE_AUTH_ENABLE: 212 if (param)
175 sent = hci_sent_cmd_data(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE); 213 set_bit(HCI_ENCRYPT, &hdev->flags);
176 if (!sent) 214 else
177 break; 215 clear_bit(HCI_ENCRYPT, &hdev->flags);
216 }
178 217
179 status = *((__u8 *) skb->data); 218 hci_req_complete(hdev, status);
180 param = *((__u8 *) sent); 219}
181 220
182 if (!status) { 221static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
183 if (param == AUTH_ENABLED) 222{
184 set_bit(HCI_AUTH, &hdev->flags); 223 __u8 status = *((__u8 *) skb->data);
185 else 224 void *sent;
186 clear_bit(HCI_AUTH, &hdev->flags);
187 }
188 hci_req_complete(hdev, status);
189 break;
190 225
191 case OCF_WRITE_ENCRYPT_MODE: 226 BT_DBG("%s status 0x%x", hdev->name, status);
192 sent = hci_sent_cmd_data(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE);
193 if (!sent)
194 break;
195 227
196 status = *((__u8 *) skb->data); 228 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
197 param = *((__u8 *) sent); 229 if (!sent)
230 return;
198 231
199 if (!status) { 232 if (!status) {
200 if (param) 233 __u8 param = *((__u8 *) sent);
201 set_bit(HCI_ENCRYPT, &hdev->flags);
202 else
203 clear_bit(HCI_ENCRYPT, &hdev->flags);
204 }
205 hci_req_complete(hdev, status);
206 break;
207 234
208 case OCF_WRITE_CA_TIMEOUT: 235 clear_bit(HCI_PSCAN, &hdev->flags);
209 status = *((__u8 *) skb->data); 236 clear_bit(HCI_ISCAN, &hdev->flags);
210 if (status) {
211 BT_DBG("%s OCF_WRITE_CA_TIMEOUT failed %d", hdev->name, status);
212 } else {
213 BT_DBG("%s OCF_WRITE_CA_TIMEOUT succeseful", hdev->name);
214 }
215 break;
216 237
217 case OCF_WRITE_PG_TIMEOUT: 238 if (param & SCAN_INQUIRY)
218 status = *((__u8 *) skb->data); 239 set_bit(HCI_ISCAN, &hdev->flags);
219 if (status) {
220 BT_DBG("%s OCF_WRITE_PG_TIMEOUT failed %d", hdev->name, status);
221 } else {
222 BT_DBG("%s: OCF_WRITE_PG_TIMEOUT succeseful", hdev->name);
223 }
224 break;
225 240
226 case OCF_WRITE_SCAN_ENABLE: 241 if (param & SCAN_PAGE)
227 sent = hci_sent_cmd_data(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE); 242 set_bit(HCI_PSCAN, &hdev->flags);
228 if (!sent) 243 }
229 break;
230 244
231 status = *((__u8 *) skb->data); 245 hci_req_complete(hdev, status);
232 param = *((__u8 *) sent); 246}
233 247
234 BT_DBG("param 0x%x", param); 248static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
249{
250 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
235 251
236 if (!status) { 252 BT_DBG("%s status 0x%x", hdev->name, rp->status);
237 clear_bit(HCI_PSCAN, &hdev->flags);
238 clear_bit(HCI_ISCAN, &hdev->flags);
239 if (param & SCAN_INQUIRY)
240 set_bit(HCI_ISCAN, &hdev->flags);
241 253
242 if (param & SCAN_PAGE) 254 if (rp->status)
243 set_bit(HCI_PSCAN, &hdev->flags); 255 return;
244 }
245 hci_req_complete(hdev, status);
246 break;
247 256
248 case OCF_READ_VOICE_SETTING: 257 memcpy(hdev->dev_class, rp->dev_class, 3);
249 vs = (struct hci_rp_read_voice_setting *) skb->data;
250 258
251 if (vs->status) { 259 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
252 BT_DBG("%s READ_VOICE_SETTING failed %d", hdev->name, vs->status); 260 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
253 break; 261}
254 }
255 262
256 setting = __le16_to_cpu(vs->voice_setting); 263static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
264{
265 __u8 status = *((__u8 *) skb->data);
266 void *sent;
257 267
258 if (hdev->voice_setting != setting ) { 268 BT_DBG("%s status 0x%x", hdev->name, status);
259 hdev->voice_setting = setting;
260 269
261 BT_DBG("%s: voice setting 0x%04x", hdev->name, setting); 270 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
271 if (!sent)
272 return;
262 273
263 if (hdev->notify) { 274 if (!status)
264 tasklet_disable(&hdev->tx_task); 275 memcpy(hdev->dev_class, sent, 3);
265 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 276}
266 tasklet_enable(&hdev->tx_task);
267 }
268 }
269 break;
270 277
271 case OCF_WRITE_VOICE_SETTING: 278static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
272 sent = hci_sent_cmd_data(hdev, OGF_HOST_CTL, OCF_WRITE_VOICE_SETTING); 279{
273 if (!sent) 280 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
274 break; 281 __u16 setting;
282
283 BT_DBG("%s status 0x%x", hdev->name, rp->status);
284
285 if (rp->status)
286 return;
287
288 setting = __le16_to_cpu(rp->voice_setting);
289
290 if (hdev->voice_setting == setting )
291 return;
292
293 hdev->voice_setting = setting;
275 294
276 status = *((__u8 *) skb->data); 295 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
277 setting = __le16_to_cpu(get_unaligned((__le16 *) sent));
278 296
279 if (!status && hdev->voice_setting != setting) { 297 if (hdev->notify) {
298 tasklet_disable(&hdev->tx_task);
299 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
300 tasklet_enable(&hdev->tx_task);
301 }
302}
303
304static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
305{
306 __u8 status = *((__u8 *) skb->data);
307 void *sent;
308
309 BT_DBG("%s status 0x%x", hdev->name, status);
310
311 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
312 if (!sent)
313 return;
314
315 if (!status) {
316 __u16 setting = __le16_to_cpu(get_unaligned((__le16 *) sent));
317
318 if (hdev->voice_setting != setting) {
280 hdev->voice_setting = setting; 319 hdev->voice_setting = setting;
281 320
282 BT_DBG("%s: voice setting 0x%04x", hdev->name, setting); 321 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
283 322
284 if (hdev->notify) { 323 if (hdev->notify) {
285 tasklet_disable(&hdev->tx_task); 324 tasklet_disable(&hdev->tx_task);
@@ -287,143 +326,153 @@ static void hci_cc_host_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb
287 tasklet_enable(&hdev->tx_task); 326 tasklet_enable(&hdev->tx_task);
288 } 327 }
289 } 328 }
290 hci_req_complete(hdev, status);
291 break;
292
293 case OCF_HOST_BUFFER_SIZE:
294 status = *((__u8 *) skb->data);
295 if (status) {
296 BT_DBG("%s OCF_BUFFER_SIZE failed %d", hdev->name, status);
297 hci_req_complete(hdev, status);
298 }
299 break;
300
301 default:
302 BT_DBG("%s Command complete: ogf HOST_CTL ocf %x", hdev->name, ocf);
303 break;
304 } 329 }
305} 330}
306 331
307/* Command Complete OGF INFO_PARAM */ 332static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
308static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb)
309{ 333{
310 struct hci_rp_read_loc_version *lv; 334 __u8 status = *((__u8 *) skb->data);
311 struct hci_rp_read_local_features *lf;
312 struct hci_rp_read_buffer_size *bs;
313 struct hci_rp_read_bd_addr *ba;
314 335
315 BT_DBG("%s ocf 0x%x", hdev->name, ocf); 336 BT_DBG("%s status 0x%x", hdev->name, status);
316 337
317 switch (ocf) { 338 hci_req_complete(hdev, status);
318 case OCF_READ_LOCAL_VERSION: 339}
319 lv = (struct hci_rp_read_loc_version *) skb->data;
320 340
321 if (lv->status) { 341static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
322 BT_DBG("%s READ_LOCAL_VERSION failed %d", hdev->name, lf->status); 342{
323 break; 343 struct hci_rp_read_local_version *rp = (void *) skb->data;
324 }
325 344
326 hdev->hci_ver = lv->hci_ver; 345 BT_DBG("%s status 0x%x", hdev->name, rp->status);
327 hdev->hci_rev = btohs(lv->hci_rev);
328 hdev->manufacturer = btohs(lv->manufacturer);
329 346
330 BT_DBG("%s: manufacturer %d hci_ver %d hci_rev %d", hdev->name, 347 if (rp->status)
331 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev); 348 return;
332 349
333 break; 350 hdev->hci_ver = rp->hci_ver;
351 hdev->hci_rev = btohs(rp->hci_rev);
352 hdev->manufacturer = btohs(rp->manufacturer);
334 353
335 case OCF_READ_LOCAL_FEATURES: 354 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
336 lf = (struct hci_rp_read_local_features *) skb->data; 355 hdev->manufacturer,
356 hdev->hci_ver, hdev->hci_rev);
357}
337 358
338 if (lf->status) { 359static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
339 BT_DBG("%s READ_LOCAL_FEATURES failed %d", hdev->name, lf->status); 360{
340 break; 361 struct hci_rp_read_local_commands *rp = (void *) skb->data;
341 }
342 362
343 memcpy(hdev->features, lf->features, sizeof(hdev->features)); 363 BT_DBG("%s status 0x%x", hdev->name, rp->status);
344 364
345 /* Adjust default settings according to features 365 if (rp->status)
346 * supported by device. */ 366 return;
347 if (hdev->features[0] & LMP_3SLOT)
348 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
349 367
350 if (hdev->features[0] & LMP_5SLOT) 368 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
351 hdev->pkt_type |= (HCI_DM5 | HCI_DH5); 369}
352 370
353 if (hdev->features[1] & LMP_HV2) { 371static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
354 hdev->pkt_type |= (HCI_HV2); 372{
355 hdev->esco_type |= (ESCO_HV2); 373 struct hci_rp_read_local_features *rp = (void *) skb->data;
356 }
357 374
358 if (hdev->features[1] & LMP_HV3) { 375 BT_DBG("%s status 0x%x", hdev->name, rp->status);
359 hdev->pkt_type |= (HCI_HV3);
360 hdev->esco_type |= (ESCO_HV3);
361 }
362 376
363 if (hdev->features[3] & LMP_ESCO) 377 if (rp->status)
364 hdev->esco_type |= (ESCO_EV3); 378 return;
365 379
366 if (hdev->features[4] & LMP_EV4) 380 memcpy(hdev->features, rp->features, 8);
367 hdev->esco_type |= (ESCO_EV4);
368 381
369 if (hdev->features[4] & LMP_EV5) 382 /* Adjust default settings according to features
370 hdev->esco_type |= (ESCO_EV5); 383 * supported by device. */
371 384
372 BT_DBG("%s: features 0x%x 0x%x 0x%x", hdev->name, 385 if (hdev->features[0] & LMP_3SLOT)
373 lf->features[0], lf->features[1], lf->features[2]); 386 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
374 387
375 break; 388 if (hdev->features[0] & LMP_5SLOT)
389 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
376 390
377 case OCF_READ_BUFFER_SIZE: 391 if (hdev->features[1] & LMP_HV2) {
378 bs = (struct hci_rp_read_buffer_size *) skb->data; 392 hdev->pkt_type |= (HCI_HV2);
393 hdev->esco_type |= (ESCO_HV2);
394 }
379 395
380 if (bs->status) { 396 if (hdev->features[1] & LMP_HV3) {
381 BT_DBG("%s READ_BUFFER_SIZE failed %d", hdev->name, bs->status); 397 hdev->pkt_type |= (HCI_HV3);
382 hci_req_complete(hdev, bs->status); 398 hdev->esco_type |= (ESCO_HV3);
383 break; 399 }
384 }
385 400
386 hdev->acl_mtu = __le16_to_cpu(bs->acl_mtu); 401 if (hdev->features[3] & LMP_ESCO)
387 hdev->sco_mtu = bs->sco_mtu; 402 hdev->esco_type |= (ESCO_EV3);
388 hdev->acl_pkts = __le16_to_cpu(bs->acl_max_pkt);
389 hdev->sco_pkts = __le16_to_cpu(bs->sco_max_pkt);
390 403
391 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) { 404 if (hdev->features[4] & LMP_EV4)
392 hdev->sco_mtu = 64; 405 hdev->esco_type |= (ESCO_EV4);
393 hdev->sco_pkts = 8;
394 }
395 406
396 hdev->acl_cnt = hdev->acl_pkts; 407 if (hdev->features[4] & LMP_EV5)
397 hdev->sco_cnt = hdev->sco_pkts; 408 hdev->esco_type |= (ESCO_EV5);
398 409
399 BT_DBG("%s mtu: acl %d, sco %d max_pkt: acl %d, sco %d", hdev->name, 410 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
400 hdev->acl_mtu, hdev->sco_mtu, hdev->acl_pkts, hdev->sco_pkts); 411 hdev->features[0], hdev->features[1],
401 break; 412 hdev->features[2], hdev->features[3],
413 hdev->features[4], hdev->features[5],
414 hdev->features[6], hdev->features[7]);
415}
402 416
403 case OCF_READ_BD_ADDR: 417static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
404 ba = (struct hci_rp_read_bd_addr *) skb->data; 418{
419 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
405 420
406 if (!ba->status) { 421 BT_DBG("%s status 0x%x", hdev->name, rp->status);
407 bacpy(&hdev->bdaddr, &ba->bdaddr);
408 } else {
409 BT_DBG("%s: READ_BD_ADDR failed %d", hdev->name, ba->status);
410 }
411 422
412 hci_req_complete(hdev, ba->status); 423 if (rp->status)
413 break; 424 return;
414 425
415 default: 426 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
416 BT_DBG("%s Command complete: ogf INFO_PARAM ocf %x", hdev->name, ocf); 427 hdev->sco_mtu = rp->sco_mtu;
417 break; 428 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
429 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
430
431 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
432 hdev->sco_mtu = 64;
433 hdev->sco_pkts = 8;
418 } 434 }
435
436 hdev->acl_cnt = hdev->acl_pkts;
437 hdev->sco_cnt = hdev->sco_pkts;
438
439 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
440 hdev->acl_mtu, hdev->acl_pkts,
441 hdev->sco_mtu, hdev->sco_pkts);
442}
443
444static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
445{
446 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
447
448 BT_DBG("%s status 0x%x", hdev->name, rp->status);
449
450 if (!rp->status)
451 bacpy(&hdev->bdaddr, &rp->bdaddr);
452
453 hci_req_complete(hdev, rp->status);
454}
455
456static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
457{
458 BT_DBG("%s status 0x%x", hdev->name, status);
459
460 if (status) {
461 hci_req_complete(hdev, status);
462
463 hci_conn_check_pending(hdev);
464 } else
465 set_bit(HCI_INQUIRY, &hdev->flags);
419} 466}
420 467
421/* Command Status OGF LINK_CTL */
422static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) 468static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
423{ 469{
470 struct hci_cp_create_conn *cp;
424 struct hci_conn *conn; 471 struct hci_conn *conn;
425 struct hci_cp_create_conn *cp = hci_sent_cmd_data(hdev, OGF_LINK_CTL, OCF_CREATE_CONN);
426 472
473 BT_DBG("%s status 0x%x", hdev->name, status);
474
475 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
427 if (!cp) 476 if (!cp)
428 return; 477 return;
429 478
@@ -431,8 +480,7 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
431 480
432 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 481 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
433 482
434 BT_DBG("%s status 0x%x bdaddr %s conn %p", hdev->name, 483 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
435 status, batostr(&cp->bdaddr), conn);
436 484
437 if (status) { 485 if (status) {
438 if (conn && conn->state == BT_CONNECT) { 486 if (conn && conn->state == BT_CONNECT) {
@@ -457,234 +505,138 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
457 hci_dev_unlock(hdev); 505 hci_dev_unlock(hdev);
458} 506}
459 507
460static void hci_cs_link_ctl(struct hci_dev *hdev, __u16 ocf, __u8 status) 508static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
461{ 509{
462 BT_DBG("%s ocf 0x%x", hdev->name, ocf); 510 struct hci_cp_add_sco *cp;
511 struct hci_conn *acl, *sco;
512 __u16 handle;
463 513
464 switch (ocf) { 514 BT_DBG("%s status 0x%x", hdev->name, status);
465 case OCF_CREATE_CONN:
466 hci_cs_create_conn(hdev, status);
467 break;
468
469 case OCF_ADD_SCO:
470 if (status) {
471 struct hci_conn *acl, *sco;
472 struct hci_cp_add_sco *cp = hci_sent_cmd_data(hdev, OGF_LINK_CTL, OCF_ADD_SCO);
473 __u16 handle;
474
475 if (!cp)
476 break;
477 515
478 handle = __le16_to_cpu(cp->handle); 516 if (!status)
479 517 return;
480 BT_DBG("%s Add SCO error: handle %d status 0x%x", hdev->name, handle, status);
481 518
482 hci_dev_lock(hdev); 519 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
520 if (!cp)
521 return;
483 522
484 acl = hci_conn_hash_lookup_handle(hdev, handle); 523 handle = __le16_to_cpu(cp->handle);
485 if (acl && (sco = acl->link)) {
486 sco->state = BT_CLOSED;
487 524
488 hci_proto_connect_cfm(sco, status); 525 BT_DBG("%s handle %d", hdev->name, handle);
489 hci_conn_del(sco);
490 }
491 526
492 hci_dev_unlock(hdev); 527 hci_dev_lock(hdev);
493 }
494 break;
495 528
496 case OCF_INQUIRY: 529 acl = hci_conn_hash_lookup_handle(hdev, handle);
497 if (status) { 530 if (acl && (sco = acl->link)) {
498 BT_DBG("%s Inquiry error: status 0x%x", hdev->name, status); 531 sco->state = BT_CLOSED;
499 hci_req_complete(hdev, status);
500 } else {
501 set_bit(HCI_INQUIRY, &hdev->flags);
502 }
503 break;
504 532
505 default: 533 hci_proto_connect_cfm(sco, status);
506 BT_DBG("%s Command status: ogf LINK_CTL ocf %x status %d", 534 hci_conn_del(sco);
507 hdev->name, ocf, status);
508 break;
509 } 535 }
536
537 hci_dev_unlock(hdev);
510} 538}
511 539
512/* Command Status OGF LINK_POLICY */ 540static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
513static void hci_cs_link_policy(struct hci_dev *hdev, __u16 ocf, __u8 status)
514{ 541{
515 BT_DBG("%s ocf 0x%x", hdev->name, ocf); 542 BT_DBG("%s status 0x%x", hdev->name, status);
516 543}
517 switch (ocf) {
518 case OCF_SNIFF_MODE:
519 if (status) {
520 struct hci_conn *conn;
521 struct hci_cp_sniff_mode *cp = hci_sent_cmd_data(hdev, OGF_LINK_POLICY, OCF_SNIFF_MODE);
522 544
523 if (!cp) 545static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
524 break; 546{
547 struct hci_cp_setup_sync_conn *cp;
548 struct hci_conn *acl, *sco;
549 __u16 handle;
525 550
526 hci_dev_lock(hdev); 551 BT_DBG("%s status 0x%x", hdev->name, status);
527 552
528 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 553 if (!status)
529 if (conn) { 554 return;
530 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
531 }
532
533 hci_dev_unlock(hdev);
534 }
535 break;
536 555
537 case OCF_EXIT_SNIFF_MODE: 556 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
538 if (status) { 557 if (!cp)
539 struct hci_conn *conn; 558 return;
540 struct hci_cp_exit_sniff_mode *cp = hci_sent_cmd_data(hdev, OGF_LINK_POLICY, OCF_EXIT_SNIFF_MODE);
541 559
542 if (!cp) 560 handle = __le16_to_cpu(cp->handle);
543 break;
544 561
545 hci_dev_lock(hdev); 562 BT_DBG("%s handle %d", hdev->name, handle);
546 563
547 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 564 hci_dev_lock(hdev);
548 if (conn) {
549 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
550 }
551 565
552 hci_dev_unlock(hdev); 566 acl = hci_conn_hash_lookup_handle(hdev, handle);
553 } 567 if (acl && (sco = acl->link)) {
554 break; 568 sco->state = BT_CLOSED;
555 569
556 default: 570 hci_proto_connect_cfm(sco, status);
557 BT_DBG("%s Command status: ogf LINK_POLICY ocf %x", hdev->name, ocf); 571 hci_conn_del(sco);
558 break;
559 } 572 }
560}
561 573
562/* Command Status OGF HOST_CTL */ 574 hci_dev_unlock(hdev);
563static void hci_cs_host_ctl(struct hci_dev *hdev, __u16 ocf, __u8 status)
564{
565 BT_DBG("%s ocf 0x%x", hdev->name, ocf);
566
567 switch (ocf) {
568 default:
569 BT_DBG("%s Command status: ogf HOST_CTL ocf %x", hdev->name, ocf);
570 break;
571 }
572} 575}
573 576
574/* Command Status OGF INFO_PARAM */ 577static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
575static void hci_cs_info_param(struct hci_dev *hdev, __u16 ocf, __u8 status)
576{ 578{
577 BT_DBG("%s: hci_cs_info_param: ocf 0x%x", hdev->name, ocf); 579 struct hci_cp_sniff_mode *cp;
578 580 struct hci_conn *conn;
579 switch (ocf) {
580 default:
581 BT_DBG("%s Command status: ogf INFO_PARAM ocf %x", hdev->name, ocf);
582 break;
583 }
584}
585 581
586/* Inquiry Complete */ 582 BT_DBG("%s status 0x%x", hdev->name, status);
587static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
588{
589 __u8 status = *((__u8 *) skb->data);
590 struct hci_conn *pend;
591 583
592 BT_DBG("%s status %d", hdev->name, status); 584 if (!status)
585 return;
593 586
594 clear_bit(HCI_INQUIRY, &hdev->flags); 587 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
595 hci_req_complete(hdev, status); 588 if (!cp)
589 return;
596 590
597 hci_dev_lock(hdev); 591 hci_dev_lock(hdev);
598 592
599 pend = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2); 593 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
600 if (pend) 594 if (conn)
601 hci_acl_connect(pend); 595 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
602 596
603 hci_dev_unlock(hdev); 597 hci_dev_unlock(hdev);
604} 598}
605 599
606/* Inquiry Result */ 600static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
607static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
608{ 601{
609 struct inquiry_data data; 602 struct hci_cp_exit_sniff_mode *cp;
610 struct inquiry_info *info = (struct inquiry_info *) (skb->data + 1); 603 struct hci_conn *conn;
611 int num_rsp = *((__u8 *) skb->data);
612 604
613 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 605 BT_DBG("%s status 0x%x", hdev->name, status);
614 606
615 if (!num_rsp) 607 if (!status)
608 return;
609
610 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
611 if (!cp)
616 return; 612 return;
617 613
618 hci_dev_lock(hdev); 614 hci_dev_lock(hdev);
619 615
620 for (; num_rsp; num_rsp--) { 616 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
621 bacpy(&data.bdaddr, &info->bdaddr); 617 if (conn)
622 data.pscan_rep_mode = info->pscan_rep_mode; 618 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
623 data.pscan_period_mode = info->pscan_period_mode;
624 data.pscan_mode = info->pscan_mode;
625 memcpy(data.dev_class, info->dev_class, 3);
626 data.clock_offset = info->clock_offset;
627 data.rssi = 0x00;
628 info++;
629 hci_inquiry_cache_update(hdev, &data);
630 }
631 619
632 hci_dev_unlock(hdev); 620 hci_dev_unlock(hdev);
633} 621}
634 622
635/* Inquiry Result With RSSI */ 623static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
636static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
637{ 624{
638 struct inquiry_data data; 625 __u8 status = *((__u8 *) skb->data);
639 int num_rsp = *((__u8 *) skb->data);
640
641 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
642
643 if (!num_rsp)
644 return;
645
646 hci_dev_lock(hdev);
647 626
648 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { 627 BT_DBG("%s status %d", hdev->name, status);
649 struct inquiry_info_with_rssi_and_pscan_mode *info =
650 (struct inquiry_info_with_rssi_and_pscan_mode *) (skb->data + 1);
651 628
652 for (; num_rsp; num_rsp--) { 629 clear_bit(HCI_INQUIRY, &hdev->flags);
653 bacpy(&data.bdaddr, &info->bdaddr);
654 data.pscan_rep_mode = info->pscan_rep_mode;
655 data.pscan_period_mode = info->pscan_period_mode;
656 data.pscan_mode = info->pscan_mode;
657 memcpy(data.dev_class, info->dev_class, 3);
658 data.clock_offset = info->clock_offset;
659 data.rssi = info->rssi;
660 info++;
661 hci_inquiry_cache_update(hdev, &data);
662 }
663 } else {
664 struct inquiry_info_with_rssi *info =
665 (struct inquiry_info_with_rssi *) (skb->data + 1);
666 630
667 for (; num_rsp; num_rsp--) { 631 hci_req_complete(hdev, status);
668 bacpy(&data.bdaddr, &info->bdaddr);
669 data.pscan_rep_mode = info->pscan_rep_mode;
670 data.pscan_period_mode = info->pscan_period_mode;
671 data.pscan_mode = 0x00;
672 memcpy(data.dev_class, info->dev_class, 3);
673 data.clock_offset = info->clock_offset;
674 data.rssi = info->rssi;
675 info++;
676 hci_inquiry_cache_update(hdev, &data);
677 }
678 }
679 632
680 hci_dev_unlock(hdev); 633 hci_conn_check_pending(hdev);
681} 634}
682 635
683/* Extended Inquiry Result */ 636static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
684static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
685{ 637{
686 struct inquiry_data data; 638 struct inquiry_data data;
687 struct extended_inquiry_info *info = (struct extended_inquiry_info *) (skb->data + 1); 639 struct inquiry_info *info = (void *) (skb->data + 1);
688 int num_rsp = *((__u8 *) skb->data); 640 int num_rsp = *((__u8 *) skb->data);
689 641
690 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 642 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
@@ -696,12 +648,12 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
696 648
697 for (; num_rsp; num_rsp--) { 649 for (; num_rsp; num_rsp--) {
698 bacpy(&data.bdaddr, &info->bdaddr); 650 bacpy(&data.bdaddr, &info->bdaddr);
699 data.pscan_rep_mode = info->pscan_rep_mode; 651 data.pscan_rep_mode = info->pscan_rep_mode;
700 data.pscan_period_mode = info->pscan_period_mode; 652 data.pscan_period_mode = info->pscan_period_mode;
701 data.pscan_mode = 0x00; 653 data.pscan_mode = info->pscan_mode;
702 memcpy(data.dev_class, info->dev_class, 3); 654 memcpy(data.dev_class, info->dev_class, 3);
703 data.clock_offset = info->clock_offset; 655 data.clock_offset = info->clock_offset;
704 data.rssi = info->rssi; 656 data.rssi = 0x00;
705 info++; 657 info++;
706 hci_inquiry_cache_update(hdev, &data); 658 hci_inquiry_cache_update(hdev, &data);
707 } 659 }
@@ -709,70 +661,18 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
709 hci_dev_unlock(hdev); 661 hci_dev_unlock(hdev);
710} 662}
711 663
712/* Connect Request */
713static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
714{
715 struct hci_ev_conn_request *ev = (struct hci_ev_conn_request *) skb->data;
716 int mask = hdev->link_mode;
717
718 BT_DBG("%s Connection request: %s type 0x%x", hdev->name,
719 batostr(&ev->bdaddr), ev->link_type);
720
721 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
722
723 if (mask & HCI_LM_ACCEPT) {
724 /* Connection accepted */
725 struct hci_conn *conn;
726 struct hci_cp_accept_conn_req cp;
727
728 hci_dev_lock(hdev);
729 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
730 if (!conn) {
731 if (!(conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr))) {
732 BT_ERR("No memory for new connection");
733 hci_dev_unlock(hdev);
734 return;
735 }
736 }
737 memcpy(conn->dev_class, ev->dev_class, 3);
738 conn->state = BT_CONNECT;
739 hci_dev_unlock(hdev);
740
741 bacpy(&cp.bdaddr, &ev->bdaddr);
742
743 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
744 cp.role = 0x00; /* Become master */
745 else
746 cp.role = 0x01; /* Remain slave */
747
748 hci_send_cmd(hdev, OGF_LINK_CTL,
749 OCF_ACCEPT_CONN_REQ, sizeof(cp), &cp);
750 } else {
751 /* Connection rejected */
752 struct hci_cp_reject_conn_req cp;
753
754 bacpy(&cp.bdaddr, &ev->bdaddr);
755 cp.reason = 0x0f;
756 hci_send_cmd(hdev, OGF_LINK_CTL,
757 OCF_REJECT_CONN_REQ, sizeof(cp), &cp);
758 }
759}
760
761/* Connect Complete */
762static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 664static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
763{ 665{
764 struct hci_ev_conn_complete *ev = (struct hci_ev_conn_complete *) skb->data; 666 struct hci_ev_conn_complete *ev = (void *) skb->data;
765 struct hci_conn *conn, *pend; 667 struct hci_conn *conn;
766 668
767 BT_DBG("%s", hdev->name); 669 BT_DBG("%s", hdev->name);
768 670
769 hci_dev_lock(hdev); 671 hci_dev_lock(hdev);
770 672
771 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 673 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
772 if (!conn) { 674 if (!conn)
773 hci_dev_unlock(hdev); 675 goto unlock;
774 return;
775 }
776 676
777 if (!ev->status) { 677 if (!ev->status) {
778 conn->handle = __le16_to_cpu(ev->handle); 678 conn->handle = __le16_to_cpu(ev->handle);
@@ -788,8 +688,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
788 if (conn->type == ACL_LINK) { 688 if (conn->type == ACL_LINK) {
789 struct hci_cp_read_remote_features cp; 689 struct hci_cp_read_remote_features cp;
790 cp.handle = ev->handle; 690 cp.handle = ev->handle;
791 hci_send_cmd(hdev, OGF_LINK_CTL, 691 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, sizeof(cp), &cp);
792 OCF_READ_REMOTE_FEATURES, sizeof(cp), &cp);
793 } 692 }
794 693
795 /* Set link policy */ 694 /* Set link policy */
@@ -797,8 +696,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
797 struct hci_cp_write_link_policy cp; 696 struct hci_cp_write_link_policy cp;
798 cp.handle = ev->handle; 697 cp.handle = ev->handle;
799 cp.policy = cpu_to_le16(hdev->link_policy); 698 cp.policy = cpu_to_le16(hdev->link_policy);
800 hci_send_cmd(hdev, OGF_LINK_POLICY, 699 hci_send_cmd(hdev, HCI_OP_WRITE_LINK_POLICY, sizeof(cp), &cp);
801 OCF_WRITE_LINK_POLICY, sizeof(cp), &cp);
802 } 700 }
803 701
804 /* Set packet type for incoming connection */ 702 /* Set packet type for incoming connection */
@@ -809,8 +707,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
809 cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK): 707 cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK):
810 cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK); 708 cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK);
811 709
812 hci_send_cmd(hdev, OGF_LINK_CTL, 710 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), &cp);
813 OCF_CHANGE_CONN_PTYPE, sizeof(cp), &cp);
814 } else { 711 } else {
815 /* Update disconnect timer */ 712 /* Update disconnect timer */
816 hci_conn_hold(conn); 713 hci_conn_hold(conn);
@@ -822,9 +719,12 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
822 if (conn->type == ACL_LINK) { 719 if (conn->type == ACL_LINK) {
823 struct hci_conn *sco = conn->link; 720 struct hci_conn *sco = conn->link;
824 if (sco) { 721 if (sco) {
825 if (!ev->status) 722 if (!ev->status) {
826 hci_add_sco(sco, conn->handle); 723 if (lmp_esco_capable(hdev))
827 else { 724 hci_setup_sync(sco, conn->handle);
725 else
726 hci_add_sco(sco, conn->handle);
727 } else {
828 hci_proto_connect_cfm(sco, ev->status); 728 hci_proto_connect_cfm(sco, ev->status);
829 hci_conn_del(sco); 729 hci_conn_del(sco);
830 } 730 }
@@ -835,136 +735,104 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
835 if (ev->status) 735 if (ev->status)
836 hci_conn_del(conn); 736 hci_conn_del(conn);
837 737
838 pend = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2); 738unlock:
839 if (pend)
840 hci_acl_connect(pend);
841
842 hci_dev_unlock(hdev); 739 hci_dev_unlock(hdev);
843}
844
845/* Disconnect Complete */
846static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
847{
848 struct hci_ev_disconn_complete *ev = (struct hci_ev_disconn_complete *) skb->data;
849 struct hci_conn *conn;
850
851 BT_DBG("%s status %d", hdev->name, ev->status);
852
853 if (ev->status)
854 return;
855 740
856 hci_dev_lock(hdev); 741 hci_conn_check_pending(hdev);
857
858 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
859 if (conn) {
860 conn->state = BT_CLOSED;
861 hci_proto_disconn_ind(conn, ev->reason);
862 hci_conn_del(conn);
863 }
864
865 hci_dev_unlock(hdev);
866} 742}
867 743
868/* Number of completed packets */ 744static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
869static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
870{ 745{
871 struct hci_ev_num_comp_pkts *ev = (struct hci_ev_num_comp_pkts *) skb->data; 746 struct hci_ev_conn_request *ev = (void *) skb->data;
872 __le16 *ptr; 747 int mask = hdev->link_mode;
873 int i;
874
875 skb_pull(skb, sizeof(*ev));
876
877 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
878 748
879 if (skb->len < ev->num_hndl * 4) { 749 BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
880 BT_DBG("%s bad parameters", hdev->name); 750 batostr(&ev->bdaddr), ev->link_type);
881 return;
882 }
883 751
884 tasklet_disable(&hdev->tx_task); 752 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
885 753
886 for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) { 754 if (mask & HCI_LM_ACCEPT) {
755 /* Connection accepted */
887 struct hci_conn *conn; 756 struct hci_conn *conn;
888 __u16 handle, count;
889
890 handle = __le16_to_cpu(get_unaligned(ptr++));
891 count = __le16_to_cpu(get_unaligned(ptr++));
892 757
893 conn = hci_conn_hash_lookup_handle(hdev, handle); 758 hci_dev_lock(hdev);
894 if (conn) {
895 conn->sent -= count;
896 759
897 if (conn->type == ACL_LINK) { 760 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
898 if ((hdev->acl_cnt += count) > hdev->acl_pkts) 761 if (!conn) {
899 hdev->acl_cnt = hdev->acl_pkts; 762 if (!(conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr))) {
900 } else { 763 BT_ERR("No memory for new connection");
901 if ((hdev->sco_cnt += count) > hdev->sco_pkts) 764 hci_dev_unlock(hdev);
902 hdev->sco_cnt = hdev->sco_pkts; 765 return;
903 } 766 }
904 } 767 }
905 }
906 hci_sched_tx(hdev);
907 768
908 tasklet_enable(&hdev->tx_task); 769 memcpy(conn->dev_class, ev->dev_class, 3);
909} 770 conn->state = BT_CONNECT;
910 771
911/* Role Change */ 772 hci_dev_unlock(hdev);
912static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
913{
914 struct hci_ev_role_change *ev = (struct hci_ev_role_change *) skb->data;
915 struct hci_conn *conn;
916 773
917 BT_DBG("%s status %d", hdev->name, ev->status); 774 if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
775 struct hci_cp_accept_conn_req cp;
918 776
919 hci_dev_lock(hdev); 777 bacpy(&cp.bdaddr, &ev->bdaddr);
920 778
921 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 779 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
922 if (conn) { 780 cp.role = 0x00; /* Become master */
923 if (!ev->status) {
924 if (ev->role)
925 conn->link_mode &= ~HCI_LM_MASTER;
926 else 781 else
927 conn->link_mode |= HCI_LM_MASTER; 782 cp.role = 0x01; /* Remain slave */
928 }
929 783
930 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend); 784 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
785 sizeof(cp), &cp);
786 } else {
787 struct hci_cp_accept_sync_conn_req cp;
931 788
932 hci_role_switch_cfm(conn, ev->status, ev->role); 789 bacpy(&cp.bdaddr, &ev->bdaddr);
933 } 790 cp.pkt_type = cpu_to_le16(hdev->esco_type);
934 791
935 hci_dev_unlock(hdev); 792 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
793 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
794 cp.max_latency = cpu_to_le16(0xffff);
795 cp.content_format = cpu_to_le16(hdev->voice_setting);
796 cp.retrans_effort = 0xff;
797
798 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
799 sizeof(cp), &cp);
800 }
801 } else {
802 /* Connection rejected */
803 struct hci_cp_reject_conn_req cp;
804
805 bacpy(&cp.bdaddr, &ev->bdaddr);
806 cp.reason = 0x0f;
807 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
808 }
936} 809}
937 810
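Note: for an incoming eSCO request the rewritten hci_conn_request_evt() answers with Accept Synchronous Connection parameters of 0x00001f40 bytes/s in each direction (64 kbit/s voice), 0xffff max latency and 0xff retransmission effort, the last two meaning "don't care" in HCI terms. A standalone sketch of that parameter block; the struct here mirrors the HCI command parameters and is illustrative, not the kernel's hci_cp_accept_sync_conn_req:

#include <stdint.h>
#include <stdio.h>

struct accept_sync_conn {
	uint8_t  bdaddr[6];
	uint32_t tx_bandwidth;   /* bytes per second */
	uint32_t rx_bandwidth;
	uint16_t max_latency;    /* ms, 0xffff = don't care */
	uint16_t content_format; /* voice setting */
	uint8_t  retrans_effort; /* 0xff = don't care */
	uint16_t pkt_type;       /* allowed (e)SCO packet types */
} __attribute__((packed));

int main(void)
{
	struct accept_sync_conn cp = {
		.tx_bandwidth   = 0x00001f40,
		.rx_bandwidth   = 0x00001f40,
		.max_latency    = 0xffff,
		.retrans_effort = 0xff,
	};

	printf("bandwidth = %u bytes/s (%u kbit/s)\n",
	       (unsigned) cp.tx_bandwidth,
	       (unsigned) (cp.tx_bandwidth * 8 / 1000));
	return 0;
}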
938/* Mode Change */ 811static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
939static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
940{ 812{
941 struct hci_ev_mode_change *ev = (struct hci_ev_mode_change *) skb->data; 813 struct hci_ev_disconn_complete *ev = (void *) skb->data;
942 struct hci_conn *conn; 814 struct hci_conn *conn;
943 815
944 BT_DBG("%s status %d", hdev->name, ev->status); 816 BT_DBG("%s status %d", hdev->name, ev->status);
945 817
818 if (ev->status)
819 return;
820
946 hci_dev_lock(hdev); 821 hci_dev_lock(hdev);
947 822
948 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 823 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
949 if (conn) { 824 if (conn) {
950 conn->mode = ev->mode; 825 conn->state = BT_CLOSED;
951 conn->interval = __le16_to_cpu(ev->interval); 826 hci_proto_disconn_ind(conn, ev->reason);
952 827 hci_conn_del(conn);
953 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
954 if (conn->mode == HCI_CM_ACTIVE)
955 conn->power_save = 1;
956 else
957 conn->power_save = 0;
958 }
959 } 828 }
960 829
961 hci_dev_unlock(hdev); 830 hci_dev_unlock(hdev);
962} 831}
963 832
964/* Authentication Complete */
965static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 833static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
966{ 834{
967 struct hci_ev_auth_complete *ev = (struct hci_ev_auth_complete *) skb->data; 835 struct hci_ev_auth_complete *ev = (void *) skb->data;
968 struct hci_conn *conn; 836 struct hci_conn *conn;
969 837
970 BT_DBG("%s status %d", hdev->name, ev->status); 838 BT_DBG("%s status %d", hdev->name, ev->status);
@@ -985,8 +853,8 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
985 struct hci_cp_set_conn_encrypt cp; 853 struct hci_cp_set_conn_encrypt cp;
986 cp.handle = cpu_to_le16(conn->handle); 854 cp.handle = cpu_to_le16(conn->handle);
987 cp.encrypt = 1; 855 cp.encrypt = 1;
988 hci_send_cmd(conn->hdev, OGF_LINK_CTL, 856 hci_send_cmd(conn->hdev,
989 OCF_SET_CONN_ENCRYPT, sizeof(cp), &cp); 857 HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), &cp);
990 } else { 858 } else {
991 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend); 859 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
992 hci_encrypt_cfm(conn, ev->status, 0x00); 860 hci_encrypt_cfm(conn, ev->status, 0x00);
@@ -997,10 +865,16 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
997 hci_dev_unlock(hdev); 865 hci_dev_unlock(hdev);
998} 866}
999 867
1000/* Encryption Change */ 868static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
869{
870 BT_DBG("%s", hdev->name);
871
872 hci_conn_check_pending(hdev);
873}
874
1001static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 875static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1002{ 876{
1003 struct hci_ev_encrypt_change *ev = (struct hci_ev_encrypt_change *) skb->data; 877 struct hci_ev_encrypt_change *ev = (void *) skb->data;
1004 struct hci_conn *conn; 878 struct hci_conn *conn;
1005 879
1006 BT_DBG("%s status %d", hdev->name, ev->status); 880 BT_DBG("%s status %d", hdev->name, ev->status);
@@ -1024,10 +898,9 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *
1024 hci_dev_unlock(hdev); 898 hci_dev_unlock(hdev);
1025} 899}
1026 900
1027/* Change Connection Link Key Complete */ 901static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1028static inline void hci_change_conn_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1029{ 902{
1030 struct hci_ev_change_conn_link_key_complete *ev = (struct hci_ev_change_conn_link_key_complete *) skb->data; 903 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
1031 struct hci_conn *conn; 904 struct hci_conn *conn;
1032 905
1033 BT_DBG("%s status %d", hdev->name, ev->status); 906 BT_DBG("%s status %d", hdev->name, ev->status);
@@ -1047,25 +920,263 @@ static inline void hci_change_conn_link_key_complete_evt(struct hci_dev *hdev, s
1047 hci_dev_unlock(hdev); 920 hci_dev_unlock(hdev);
1048} 921}
1049 922
1050/* Pin Code Request*/ 923static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
1051static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1052{ 924{
925 struct hci_ev_remote_features *ev = (void *) skb->data;
926 struct hci_conn *conn;
927
928 BT_DBG("%s status %d", hdev->name, ev->status);
929
930 if (ev->status)
931 return;
932
933 hci_dev_lock(hdev);
934
935 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
936 if (conn)
937 memcpy(conn->features, ev->features, 8);
938
939 hci_dev_unlock(hdev);
1053} 940}
1054 941
1055/* Link Key Request */ 942static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
1056static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1057{ 943{
944 BT_DBG("%s", hdev->name);
1058} 945}
1059 946
1060/* Link Key Notification */ 947static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1061static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
1062{ 948{
949 BT_DBG("%s", hdev->name);
1063} 950}
1064 951
1065/* Remote Features */ 952static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1066static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
1067{ 953{
1068 struct hci_ev_remote_features *ev = (struct hci_ev_remote_features *) skb->data; 954 struct hci_ev_cmd_complete *ev = (void *) skb->data;
955 __u16 opcode;
956
957 skb_pull(skb, sizeof(*ev));
958
959 opcode = __le16_to_cpu(ev->opcode);
960
961 switch (opcode) {
962 case HCI_OP_INQUIRY_CANCEL:
963 hci_cc_inquiry_cancel(hdev, skb);
964 break;
965
966 case HCI_OP_EXIT_PERIODIC_INQ:
967 hci_cc_exit_periodic_inq(hdev, skb);
968 break;
969
970 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
971 hci_cc_remote_name_req_cancel(hdev, skb);
972 break;
973
974 case HCI_OP_ROLE_DISCOVERY:
975 hci_cc_role_discovery(hdev, skb);
976 break;
977
978 case HCI_OP_WRITE_LINK_POLICY:
979 hci_cc_write_link_policy(hdev, skb);
980 break;
981
982 case HCI_OP_RESET:
983 hci_cc_reset(hdev, skb);
984 break;
985
986 case HCI_OP_WRITE_LOCAL_NAME:
987 hci_cc_write_local_name(hdev, skb);
988 break;
989
990 case HCI_OP_READ_LOCAL_NAME:
991 hci_cc_read_local_name(hdev, skb);
992 break;
993
994 case HCI_OP_WRITE_AUTH_ENABLE:
995 hci_cc_write_auth_enable(hdev, skb);
996 break;
997
998 case HCI_OP_WRITE_ENCRYPT_MODE:
999 hci_cc_write_encrypt_mode(hdev, skb);
1000 break;
1001
1002 case HCI_OP_WRITE_SCAN_ENABLE:
1003 hci_cc_write_scan_enable(hdev, skb);
1004 break;
1005
1006 case HCI_OP_READ_CLASS_OF_DEV:
1007 hci_cc_read_class_of_dev(hdev, skb);
1008 break;
1009
1010 case HCI_OP_WRITE_CLASS_OF_DEV:
1011 hci_cc_write_class_of_dev(hdev, skb);
1012 break;
1013
1014 case HCI_OP_READ_VOICE_SETTING:
1015 hci_cc_read_voice_setting(hdev, skb);
1016 break;
1017
1018 case HCI_OP_WRITE_VOICE_SETTING:
1019 hci_cc_write_voice_setting(hdev, skb);
1020 break;
1021
1022 case HCI_OP_HOST_BUFFER_SIZE:
1023 hci_cc_host_buffer_size(hdev, skb);
1024 break;
1025
1026 case HCI_OP_READ_LOCAL_VERSION:
1027 hci_cc_read_local_version(hdev, skb);
1028 break;
1029
1030 case HCI_OP_READ_LOCAL_COMMANDS:
1031 hci_cc_read_local_commands(hdev, skb);
1032 break;
1033
1034 case HCI_OP_READ_LOCAL_FEATURES:
1035 hci_cc_read_local_features(hdev, skb);
1036 break;
1037
1038 case HCI_OP_READ_BUFFER_SIZE:
1039 hci_cc_read_buffer_size(hdev, skb);
1040 break;
1041
1042 case HCI_OP_READ_BD_ADDR:
1043 hci_cc_read_bd_addr(hdev, skb);
1044 break;
1045
1046 default:
1047 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1048 break;
1049 }
1050
1051 if (ev->ncmd) {
1052 atomic_set(&hdev->cmd_cnt, 1);
1053 if (!skb_queue_empty(&hdev->cmd_q))
1054 hci_sched_cmd(hdev);
1055 }
1056}
1057
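Note: the rewritten dispatch switches on the full 16-bit opcode instead of first splitting it into OGF/OCF groups; an opcode is just the 6-bit OGF packed above the 10-bit OCF. A minimal sketch of that packing (macro names are illustrative; the opcode values follow the HCI spec):

#include <stdint.h>
#include <stdio.h>

#define HCI_OPCODE(ogf, ocf)  ((uint16_t)(((ogf) << 10) | ((ocf) & 0x03ff)))
#define HCI_OGF(opcode)       ((opcode) >> 10)
#define HCI_OCF(opcode)       ((opcode) & 0x03ff)

int main(void)
{
	uint16_t reset   = HCI_OPCODE(0x03, 0x0003); /* 0x0c03 */
	uint16_t inquiry = HCI_OPCODE(0x01, 0x0001); /* 0x0401 */

	printf("reset   = 0x%04x (ogf %u, ocf 0x%04x)\n", (unsigned) reset,
	       (unsigned) HCI_OGF(reset), (unsigned) HCI_OCF(reset));
	printf("inquiry = 0x%04x (ogf %u, ocf 0x%04x)\n", (unsigned) inquiry,
	       (unsigned) HCI_OGF(inquiry), (unsigned) HCI_OCF(inquiry));
	return 0;
}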
1058static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
1059{
1060 struct hci_ev_cmd_status *ev = (void *) skb->data;
1061 __u16 opcode;
1062
1063 skb_pull(skb, sizeof(*ev));
1064
1065 opcode = __le16_to_cpu(ev->opcode);
1066
1067 switch (opcode) {
1068 case HCI_OP_INQUIRY:
1069 hci_cs_inquiry(hdev, ev->status);
1070 break;
1071
1072 case HCI_OP_CREATE_CONN:
1073 hci_cs_create_conn(hdev, ev->status);
1074 break;
1075
1076 case HCI_OP_ADD_SCO:
1077 hci_cs_add_sco(hdev, ev->status);
1078 break;
1079
1080 case HCI_OP_REMOTE_NAME_REQ:
1081 hci_cs_remote_name_req(hdev, ev->status);
1082 break;
1083
1084 case HCI_OP_SETUP_SYNC_CONN:
1085 hci_cs_setup_sync_conn(hdev, ev->status);
1086 break;
1087
1088 case HCI_OP_SNIFF_MODE:
1089 hci_cs_sniff_mode(hdev, ev->status);
1090 break;
1091
1092 case HCI_OP_EXIT_SNIFF_MODE:
1093 hci_cs_exit_sniff_mode(hdev, ev->status);
1094 break;
1095
1096 default:
1097 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1098 break;
1099 }
1100
1101 if (ev->ncmd) {
1102 atomic_set(&hdev->cmd_cnt, 1);
1103 if (!skb_queue_empty(&hdev->cmd_q))
1104 hci_sched_cmd(hdev);
1105 }
1106}
1107
1108static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1109{
1110 struct hci_ev_role_change *ev = (void *) skb->data;
1111 struct hci_conn *conn;
1112
1113 BT_DBG("%s status %d", hdev->name, ev->status);
1114
1115 hci_dev_lock(hdev);
1116
1117 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1118 if (conn) {
1119 if (!ev->status) {
1120 if (ev->role)
1121 conn->link_mode &= ~HCI_LM_MASTER;
1122 else
1123 conn->link_mode |= HCI_LM_MASTER;
1124 }
1125
1126 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend);
1127
1128 hci_role_switch_cfm(conn, ev->status, ev->role);
1129 }
1130
1131 hci_dev_unlock(hdev);
1132}
1133
1134static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
1135{
1136 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
1137 __le16 *ptr;
1138 int i;
1139
1140 skb_pull(skb, sizeof(*ev));
1141
1142 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
1143
1144 if (skb->len < ev->num_hndl * 4) {
1145 BT_DBG("%s bad parameters", hdev->name);
1146 return;
1147 }
1148
1149 tasklet_disable(&hdev->tx_task);
1150
1151 for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
1152 struct hci_conn *conn;
1153 __u16 handle, count;
1154
1155 handle = __le16_to_cpu(get_unaligned(ptr++));
1156 count = __le16_to_cpu(get_unaligned(ptr++));
1157
1158 conn = hci_conn_hash_lookup_handle(hdev, handle);
1159 if (conn) {
1160 conn->sent -= count;
1161
1162 if (conn->type == ACL_LINK) {
1163 if ((hdev->acl_cnt += count) > hdev->acl_pkts)
1164 hdev->acl_cnt = hdev->acl_pkts;
1165 } else {
1166 if ((hdev->sco_cnt += count) > hdev->sco_pkts)
1167 hdev->sco_cnt = hdev->sco_pkts;
1168 }
1169 }
1170 }
1171
1172 hci_sched_tx(hdev);
1173
1174 tasklet_enable(&hdev->tx_task);
1175}
1176
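Note: hci_num_comp_pkts_evt() walks num_hndl pairs of little-endian 16-bit (handle, count) values, which is why it checks for num_hndl * 4 remaining bytes and reads them unaligned. A standalone sketch of that payload walk:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)((uint16_t)p[0] | ((uint16_t)p[1] << 8));
}

static void parse_num_comp_pkts(const uint8_t *data, size_t len)
{
	uint8_t num_hndl = data[0];
	const uint8_t *p = data + 1;
	int i;

	if (len < 1 + (size_t)num_hndl * 4)
		return;                        /* bad parameters */

	for (i = 0; i < num_hndl; i++, p += 4)
		printf("handle 0x%04x completed %u packets\n",
		       (unsigned) get_le16(p), (unsigned) get_le16(p + 2));
}

int main(void)
{
	/* one entry: handle 0x0001, 3 packets completed */
	uint8_t buf[] = { 0x01, 0x01, 0x00, 0x03, 0x00 };

	parse_num_comp_pkts(buf, sizeof(buf));
	return 0;
}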
1177static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1178{
1179 struct hci_ev_mode_change *ev = (void *) skb->data;
1069 struct hci_conn *conn; 1180 struct hci_conn *conn;
1070 1181
1071 BT_DBG("%s status %d", hdev->name, ev->status); 1182 BT_DBG("%s status %d", hdev->name, ev->status);
@@ -1073,17 +1184,39 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff
1073 hci_dev_lock(hdev); 1184 hci_dev_lock(hdev);
1074 1185
1075 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 1186 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1076 if (conn && !ev->status) { 1187 if (conn) {
1077 memcpy(conn->features, ev->features, sizeof(conn->features)); 1188 conn->mode = ev->mode;
1189 conn->interval = __le16_to_cpu(ev->interval);
1190
1191 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
1192 if (conn->mode == HCI_CM_ACTIVE)
1193 conn->power_save = 1;
1194 else
1195 conn->power_save = 0;
1196 }
1078 } 1197 }
1079 1198
1080 hci_dev_unlock(hdev); 1199 hci_dev_unlock(hdev);
1081} 1200}
1082 1201
1083/* Clock Offset */ 1202static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1203{
1204 BT_DBG("%s", hdev->name);
1205}
1206
1207static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1208{
1209 BT_DBG("%s", hdev->name);
1210}
1211
1212static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
1213{
1214 BT_DBG("%s", hdev->name);
1215}
1216
1084static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) 1217static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
1085{ 1218{
1086 struct hci_ev_clock_offset *ev = (struct hci_ev_clock_offset *) skb->data; 1219 struct hci_ev_clock_offset *ev = (void *) skb->data;
1087 struct hci_conn *conn; 1220 struct hci_conn *conn;
1088 1221
1089 BT_DBG("%s status %d", hdev->name, ev->status); 1222 BT_DBG("%s status %d", hdev->name, ev->status);
@@ -1103,10 +1236,9 @@ static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *sk
1103 hci_dev_unlock(hdev); 1236 hci_dev_unlock(hdev);
1104} 1237}
1105 1238
1106/* Page Scan Repetition Mode */
1107static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) 1239static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
1108{ 1240{
1109 struct hci_ev_pscan_rep_mode *ev = (struct hci_ev_pscan_rep_mode *) skb->data; 1241 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
1110 struct inquiry_entry *ie; 1242 struct inquiry_entry *ie;
1111 1243
1112 BT_DBG("%s", hdev->name); 1244 BT_DBG("%s", hdev->name);
@@ -1121,10 +1253,91 @@ static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *
1121 hci_dev_unlock(hdev); 1253 hci_dev_unlock(hdev);
1122} 1254}
1123 1255
1124/* Sniff Subrate */ 1256static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
1257{
1258 struct inquiry_data data;
1259 int num_rsp = *((__u8 *) skb->data);
1260
1261 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1262
1263 if (!num_rsp)
1264 return;
1265
1266 hci_dev_lock(hdev);
1267
1268 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
1269 struct inquiry_info_with_rssi_and_pscan_mode *info = (void *) (skb->data + 1);
1270
1271 for (; num_rsp; num_rsp--) {
1272 bacpy(&data.bdaddr, &info->bdaddr);
1273 data.pscan_rep_mode = info->pscan_rep_mode;
1274 data.pscan_period_mode = info->pscan_period_mode;
1275 data.pscan_mode = info->pscan_mode;
1276 memcpy(data.dev_class, info->dev_class, 3);
1277 data.clock_offset = info->clock_offset;
1278 data.rssi = info->rssi;
1279 info++;
1280 hci_inquiry_cache_update(hdev, &data);
1281 }
1282 } else {
1283 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
1284
1285 for (; num_rsp; num_rsp--) {
1286 bacpy(&data.bdaddr, &info->bdaddr);
1287 data.pscan_rep_mode = info->pscan_rep_mode;
1288 data.pscan_period_mode = info->pscan_period_mode;
1289 data.pscan_mode = 0x00;
1290 memcpy(data.dev_class, info->dev_class, 3);
1291 data.clock_offset = info->clock_offset;
1292 data.rssi = info->rssi;
1293 info++;
1294 hci_inquiry_cache_update(hdev, &data);
1295 }
1296 }
1297
1298 hci_dev_unlock(hdev);
1299}
1300
1301static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
1302{
1303 BT_DBG("%s", hdev->name);
1304}
1305
1306static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1307{
1308 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
1309 struct hci_conn *conn;
1310
1311 BT_DBG("%s status %d", hdev->name, ev->status);
1312
1313 hci_dev_lock(hdev);
1314
1315 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1316 if (!conn)
1317 goto unlock;
1318
1319 if (!ev->status) {
1320 conn->handle = __le16_to_cpu(ev->handle);
1321 conn->state = BT_CONNECTED;
1322 } else
1323 conn->state = BT_CLOSED;
1324
1325 hci_proto_connect_cfm(conn, ev->status);
1326 if (ev->status)
1327 hci_conn_del(conn);
1328
1329unlock:
1330 hci_dev_unlock(hdev);
1331}
1332
1333static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
1334{
1335 BT_DBG("%s", hdev->name);
1336}
1337
1125static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb) 1338static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
1126{ 1339{
1127 struct hci_ev_sniff_subrate *ev = (struct hci_ev_sniff_subrate *) skb->data; 1340 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
1128 struct hci_conn *conn; 1341 struct hci_conn *conn;
1129 1342
1130 BT_DBG("%s status %d", hdev->name, ev->status); 1343 BT_DBG("%s status %d", hdev->name, ev->status);
@@ -1138,22 +1351,42 @@ static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *s
1138 hci_dev_unlock(hdev); 1351 hci_dev_unlock(hdev);
1139} 1352}
1140 1353
1141void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) 1354static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1142{ 1355{
1143 struct hci_event_hdr *hdr = (struct hci_event_hdr *) skb->data; 1356 struct inquiry_data data;
1144 struct hci_ev_cmd_complete *ec; 1357 struct extended_inquiry_info *info = (void *) (skb->data + 1);
1145 struct hci_ev_cmd_status *cs; 1358 int num_rsp = *((__u8 *) skb->data);
1146 u16 opcode, ocf, ogf;
1147 1359
1148 skb_pull(skb, HCI_EVENT_HDR_SIZE); 1360 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1149 1361
1150 BT_DBG("%s evt 0x%x", hdev->name, hdr->evt); 1362 if (!num_rsp)
1363 return;
1151 1364
1152 switch (hdr->evt) { 1365 hci_dev_lock(hdev);
1153 case HCI_EV_NUM_COMP_PKTS: 1366
1154 hci_num_comp_pkts_evt(hdev, skb); 1367 for (; num_rsp; num_rsp--) {
1155 break; 1368 bacpy(&data.bdaddr, &info->bdaddr);
1369 data.pscan_rep_mode = info->pscan_rep_mode;
1370 data.pscan_period_mode = info->pscan_period_mode;
1371 data.pscan_mode = 0x00;
1372 memcpy(data.dev_class, info->dev_class, 3);
1373 data.clock_offset = info->clock_offset;
1374 data.rssi = info->rssi;
1375 info++;
1376 hci_inquiry_cache_update(hdev, &data);
1377 }
1156 1378
1379 hci_dev_unlock(hdev);
1380}
1381
1382void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
1383{
1384 struct hci_event_hdr *hdr = (void *) skb->data;
1385 __u8 event = hdr->evt;
1386
1387 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1388
1389 switch (event) {
1157 case HCI_EV_INQUIRY_COMPLETE: 1390 case HCI_EV_INQUIRY_COMPLETE:
1158 hci_inquiry_complete_evt(hdev, skb); 1391 hci_inquiry_complete_evt(hdev, skb);
1159 break; 1392 break;
@@ -1162,44 +1395,64 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
1162 hci_inquiry_result_evt(hdev, skb); 1395 hci_inquiry_result_evt(hdev, skb);
1163 break; 1396 break;
1164 1397
1165 case HCI_EV_INQUIRY_RESULT_WITH_RSSI: 1398 case HCI_EV_CONN_COMPLETE:
1166 hci_inquiry_result_with_rssi_evt(hdev, skb); 1399 hci_conn_complete_evt(hdev, skb);
1167 break;
1168
1169 case HCI_EV_EXTENDED_INQUIRY_RESULT:
1170 hci_extended_inquiry_result_evt(hdev, skb);
1171 break; 1400 break;
1172 1401
1173 case HCI_EV_CONN_REQUEST: 1402 case HCI_EV_CONN_REQUEST:
1174 hci_conn_request_evt(hdev, skb); 1403 hci_conn_request_evt(hdev, skb);
1175 break; 1404 break;
1176 1405
1177 case HCI_EV_CONN_COMPLETE:
1178 hci_conn_complete_evt(hdev, skb);
1179 break;
1180
1181 case HCI_EV_DISCONN_COMPLETE: 1406 case HCI_EV_DISCONN_COMPLETE:
1182 hci_disconn_complete_evt(hdev, skb); 1407 hci_disconn_complete_evt(hdev, skb);
1183 break; 1408 break;
1184 1409
1185 case HCI_EV_ROLE_CHANGE:
1186 hci_role_change_evt(hdev, skb);
1187 break;
1188
1189 case HCI_EV_MODE_CHANGE:
1190 hci_mode_change_evt(hdev, skb);
1191 break;
1192
1193 case HCI_EV_AUTH_COMPLETE: 1410 case HCI_EV_AUTH_COMPLETE:
1194 hci_auth_complete_evt(hdev, skb); 1411 hci_auth_complete_evt(hdev, skb);
1195 break; 1412 break;
1196 1413
1414 case HCI_EV_REMOTE_NAME:
1415 hci_remote_name_evt(hdev, skb);
1416 break;
1417
1197 case HCI_EV_ENCRYPT_CHANGE: 1418 case HCI_EV_ENCRYPT_CHANGE:
1198 hci_encrypt_change_evt(hdev, skb); 1419 hci_encrypt_change_evt(hdev, skb);
1199 break; 1420 break;
1200 1421
1201 case HCI_EV_CHANGE_CONN_LINK_KEY_COMPLETE: 1422 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
1202 hci_change_conn_link_key_complete_evt(hdev, skb); 1423 hci_change_link_key_complete_evt(hdev, skb);
1424 break;
1425
1426 case HCI_EV_REMOTE_FEATURES:
1427 hci_remote_features_evt(hdev, skb);
1428 break;
1429
1430 case HCI_EV_REMOTE_VERSION:
1431 hci_remote_version_evt(hdev, skb);
1432 break;
1433
1434 case HCI_EV_QOS_SETUP_COMPLETE:
1435 hci_qos_setup_complete_evt(hdev, skb);
1436 break;
1437
1438 case HCI_EV_CMD_COMPLETE:
1439 hci_cmd_complete_evt(hdev, skb);
1440 break;
1441
1442 case HCI_EV_CMD_STATUS:
1443 hci_cmd_status_evt(hdev, skb);
1444 break;
1445
1446 case HCI_EV_ROLE_CHANGE:
1447 hci_role_change_evt(hdev, skb);
1448 break;
1449
1450 case HCI_EV_NUM_COMP_PKTS:
1451 hci_num_comp_pkts_evt(hdev, skb);
1452 break;
1453
1454 case HCI_EV_MODE_CHANGE:
1455 hci_mode_change_evt(hdev, skb);
1203 break; 1456 break;
1204 1457
1205 case HCI_EV_PIN_CODE_REQ: 1458 case HCI_EV_PIN_CODE_REQ:
@@ -1214,10 +1467,6 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
1214 hci_link_key_notify_evt(hdev, skb); 1467 hci_link_key_notify_evt(hdev, skb);
1215 break; 1468 break;
1216 1469
1217 case HCI_EV_REMOTE_FEATURES:
1218 hci_remote_features_evt(hdev, skb);
1219 break;
1220
1221 case HCI_EV_CLOCK_OFFSET: 1470 case HCI_EV_CLOCK_OFFSET:
1222 hci_clock_offset_evt(hdev, skb); 1471 hci_clock_offset_evt(hdev, skb);
1223 break; 1472 break;
@@ -1226,82 +1475,32 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
1226 hci_pscan_rep_mode_evt(hdev, skb); 1475 hci_pscan_rep_mode_evt(hdev, skb);
1227 break; 1476 break;
1228 1477
1229 case HCI_EV_SNIFF_SUBRATE: 1478 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
1230 hci_sniff_subrate_evt(hdev, skb); 1479 hci_inquiry_result_with_rssi_evt(hdev, skb);
1231 break; 1480 break;
1232 1481
1233 case HCI_EV_CMD_STATUS: 1482 case HCI_EV_REMOTE_EXT_FEATURES:
1234 cs = (struct hci_ev_cmd_status *) skb->data; 1483 hci_remote_ext_features_evt(hdev, skb);
1235 skb_pull(skb, sizeof(cs));
1236
1237 opcode = __le16_to_cpu(cs->opcode);
1238 ogf = hci_opcode_ogf(opcode);
1239 ocf = hci_opcode_ocf(opcode);
1240
1241 switch (ogf) {
1242 case OGF_INFO_PARAM:
1243 hci_cs_info_param(hdev, ocf, cs->status);
1244 break;
1245
1246 case OGF_HOST_CTL:
1247 hci_cs_host_ctl(hdev, ocf, cs->status);
1248 break;
1249
1250 case OGF_LINK_CTL:
1251 hci_cs_link_ctl(hdev, ocf, cs->status);
1252 break;
1253
1254 case OGF_LINK_POLICY:
1255 hci_cs_link_policy(hdev, ocf, cs->status);
1256 break;
1257
1258 default:
1259 BT_DBG("%s Command Status OGF %x", hdev->name, ogf);
1260 break;
1261 }
1262
1263 if (cs->ncmd) {
1264 atomic_set(&hdev->cmd_cnt, 1);
1265 if (!skb_queue_empty(&hdev->cmd_q))
1266 hci_sched_cmd(hdev);
1267 }
1268 break; 1484 break;
1269 1485
1270 case HCI_EV_CMD_COMPLETE: 1486 case HCI_EV_SYNC_CONN_COMPLETE:
1271 ec = (struct hci_ev_cmd_complete *) skb->data; 1487 hci_sync_conn_complete_evt(hdev, skb);
1272 skb_pull(skb, sizeof(*ec)); 1488 break;
1273
1274 opcode = __le16_to_cpu(ec->opcode);
1275 ogf = hci_opcode_ogf(opcode);
1276 ocf = hci_opcode_ocf(opcode);
1277
1278 switch (ogf) {
1279 case OGF_INFO_PARAM:
1280 hci_cc_info_param(hdev, ocf, skb);
1281 break;
1282
1283 case OGF_HOST_CTL:
1284 hci_cc_host_ctl(hdev, ocf, skb);
1285 break;
1286 1489
1287 case OGF_LINK_CTL: 1490 case HCI_EV_SYNC_CONN_CHANGED:
1288 hci_cc_link_ctl(hdev, ocf, skb); 1491 hci_sync_conn_changed_evt(hdev, skb);
1289 break; 1492 break;
1290 1493
1291 case OGF_LINK_POLICY: 1494 case HCI_EV_SNIFF_SUBRATE:
1292 hci_cc_link_policy(hdev, ocf, skb); 1495 hci_sniff_subrate_evt(hdev, skb);
1293 break; 1496 break;
1294 1497
1295 default: 1498 case HCI_EV_EXTENDED_INQUIRY_RESULT:
1296 BT_DBG("%s Command Completed OGF %x", hdev->name, ogf); 1499 hci_extended_inquiry_result_evt(hdev, skb);
1297 break; 1500 break;
1298 }
1299 1501
1300 if (ec->ncmd) { 1502 default:
1301 atomic_set(&hdev->cmd_cnt, 1); 1503 BT_DBG("%s event 0x%x", hdev->name, event);
1302 if (!skb_queue_empty(&hdev->cmd_q))
1303 hci_sched_cmd(hdev);
1304 }
1305 break; 1504 break;
1306 } 1505 }
1307 1506
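Note: hci_event_packet() now pulls the 2-byte event header once and dispatches on the event code alone. A standalone sketch of the framing it assumes — one event code byte plus a parameter length byte — with an example Command Complete packet:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct event_hdr {
	uint8_t evt;
	uint8_t plen;
} __attribute__((packed));

static void dispatch_event(const uint8_t *pkt, size_t len)
{
	struct event_hdr hdr;

	if (len < sizeof(hdr))
		return;
	hdr.evt  = pkt[0];
	hdr.plen = pkt[1];
	if (len < sizeof(hdr) + hdr.plen)
		return;                  /* truncated event */

	printf("event 0x%02x with %u parameter bytes\n",
	       hdr.evt, hdr.plen);
}

int main(void)
{
	/* Command Complete (0x0e): ncmd 1, opcode 0x0c03 (LE), status 0 */
	uint8_t cmd_complete[] = { 0x0e, 0x04, 0x01, 0x03, 0x0c, 0x00 };

	dispatch_event(cmd_complete, sizeof(cmd_complete));
	return 0;
}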
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 43dd6373bff9..8825102c517c 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -451,7 +451,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
451 goto drop; 451 goto drop;
452 } 452 }
453 453
454 if (test_bit(HCI_RAW, &hdev->flags) || (ogf == OGF_VENDOR_CMD)) { 454 if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
455 skb_queue_tail(&hdev->raw_q, skb); 455 skb_queue_tail(&hdev->raw_q, skb);
456 hci_sched_tx(hdev); 456 hci_sched_tx(hdev);
457 } else { 457 } else {
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 25835403d659..cef1e3e1881c 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -41,6 +41,26 @@ static ssize_t show_type(struct device *dev, struct device_attribute *attr, char
41 return sprintf(buf, "%s\n", typetostr(hdev->type)); 41 return sprintf(buf, "%s\n", typetostr(hdev->type));
42} 42}
43 43
44static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
45{
46 struct hci_dev *hdev = dev_get_drvdata(dev);
47 char name[249];
48 int i;
49
50 for (i = 0; i < 248; i++)
51 name[i] = hdev->dev_name[i];
52
53 name[248] = '\0';
54 return sprintf(buf, "%s\n", name);
55}
56
57static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf)
58{
59 struct hci_dev *hdev = dev_get_drvdata(dev);
60 return sprintf(buf, "0x%.2x%.2x%.2x\n",
61 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
62}
63
44static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf) 64static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf)
45{ 65{
46 struct hci_dev *hdev = dev_get_drvdata(dev); 66 struct hci_dev *hdev = dev_get_drvdata(dev);
@@ -49,6 +69,17 @@ static ssize_t show_address(struct device *dev, struct device_attribute *attr, c
49 return sprintf(buf, "%s\n", batostr(&bdaddr)); 69 return sprintf(buf, "%s\n", batostr(&bdaddr));
50} 70}
51 71
72static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf)
73{
74 struct hci_dev *hdev = dev_get_drvdata(dev);
75
76 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
77 hdev->features[0], hdev->features[1],
78 hdev->features[2], hdev->features[3],
79 hdev->features[4], hdev->features[5],
80 hdev->features[6], hdev->features[7]);
81}
82
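Note: the new sysfs attributes print the 3-byte class of device most-significant byte first and the 8-byte LMP feature mask as one hex string. A small standalone sketch of the same formatting (the values are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t dev_class[3] = { 0x0c, 0x02, 0x5a };   /* example value */
	uint8_t features[8]  = { 0xff, 0xfe, 0x0d, 0x00,
				 0x08, 0x08, 0x00, 0x00 };
	int i;

	printf("class 0x%.2x%.2x%.2x\n",
	       dev_class[2], dev_class[1], dev_class[0]);

	printf("features 0x");
	for (i = 0; i < 8; i++)
		printf("%02x", features[i]);
	printf("\n");
	return 0;
}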
52static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf) 83static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf)
53{ 84{
54 struct hci_dev *hdev = dev_get_drvdata(dev); 85 struct hci_dev *hdev = dev_get_drvdata(dev);
@@ -170,7 +201,10 @@ static ssize_t store_sniff_min_interval(struct device *dev, struct device_attrib
170} 201}
171 202
172static DEVICE_ATTR(type, S_IRUGO, show_type, NULL); 203static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
204static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
205static DEVICE_ATTR(class, S_IRUGO, show_class, NULL);
173static DEVICE_ATTR(address, S_IRUGO, show_address, NULL); 206static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
207static DEVICE_ATTR(features, S_IRUGO, show_features, NULL);
174static DEVICE_ATTR(manufacturer, S_IRUGO, show_manufacturer, NULL); 208static DEVICE_ATTR(manufacturer, S_IRUGO, show_manufacturer, NULL);
175static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL); 209static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL);
176static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL); 210static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL);
@@ -185,7 +219,10 @@ static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
185 219
186static struct device_attribute *bt_attrs[] = { 220static struct device_attribute *bt_attrs[] = {
187 &dev_attr_type, 221 &dev_attr_type,
222 &dev_attr_name,
223 &dev_attr_class,
188 &dev_attr_address, 224 &dev_attr_address,
225 &dev_attr_features,
189 &dev_attr_manufacturer, 226 &dev_attr_manufacturer,
190 &dev_attr_hci_version, 227 &dev_attr_hci_version,
191 &dev_attr_hci_revision, 228 &dev_attr_hci_revision,
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 66c736953cfe..4bbacddeb49d 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -247,7 +247,7 @@ static inline int hidp_queue_report(struct hidp_session *session, unsigned char
247{ 247{
248 struct sk_buff *skb; 248 struct sk_buff *skb;
249 249
250 BT_DBG("session %p hid %p data %p size %d", session, device, data, size); 250 BT_DBG("session %p hid %p data %p size %d", session, session->hid, data, size);
251 251
252 if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) { 252 if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) {
253 BT_ERR("Can't allocate memory for new frame"); 253 BT_ERR("Can't allocate memory for new frame");
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 36ef27b625db..6fbbae78b304 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -55,7 +55,9 @@
55#define BT_DBG(D...) 55#define BT_DBG(D...)
56#endif 56#endif
57 57
58#define VERSION "2.8" 58#define VERSION "2.9"
59
60static u32 l2cap_feat_mask = 0x0000;
59 61
60static const struct proto_ops l2cap_sock_ops; 62static const struct proto_ops l2cap_sock_ops;
61 63
@@ -258,7 +260,119 @@ static void l2cap_chan_del(struct sock *sk, int err)
258 sk->sk_state_change(sk); 260 sk->sk_state_change(sk);
259} 261}
260 262
263static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
264{
265 u8 id;
266
267 /* Get next available identifier.
268 * 1 - 128 are used by kernel.
269 * 129 - 199 are reserved.
270 * 200 - 254 are used by utilities like l2ping, etc.
271 */
272
273 spin_lock_bh(&conn->lock);
274
275 if (++conn->tx_ident > 128)
276 conn->tx_ident = 1;
277
278 id = conn->tx_ident;
279
280 spin_unlock_bh(&conn->lock);
281
282 return id;
283}
284
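Note: l2cap_get_ident() keeps kernel-originated signalling identifiers inside 1..128, wrapping instead of overflowing, so they never collide with the reserved and user-space ranges. A tiny standalone illustration of the wrap:

#include <stdio.h>

static unsigned char tx_ident;

static unsigned char next_ident(void)
{
	if (++tx_ident > 128)
		tx_ident = 1;
	return tx_ident;
}

int main(void)
{
	int i;

	tx_ident = 126;
	for (i = 0; i < 5; i++)
		printf("%u ", next_ident());   /* prints: 127 128 1 2 3 */
	printf("\n");
	return 0;
}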
285static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
286{
287 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
288
289 BT_DBG("code 0x%2.2x", code);
290
291 if (!skb)
292 return -ENOMEM;
293
294 return hci_send_acl(conn->hcon, skb, 0);
295}
296
261/* ---- L2CAP connections ---- */ 297/* ---- L2CAP connections ---- */
298static void l2cap_conn_start(struct l2cap_conn *conn)
299{
300 struct l2cap_chan_list *l = &conn->chan_list;
301 struct sock *sk;
302
303 BT_DBG("conn %p", conn);
304
305 read_lock(&l->lock);
306
307 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
308 bh_lock_sock(sk);
309
310 if (sk->sk_type != SOCK_SEQPACKET) {
311 l2cap_sock_clear_timer(sk);
312 sk->sk_state = BT_CONNECTED;
313 sk->sk_state_change(sk);
314 } else if (sk->sk_state == BT_CONNECT) {
315 struct l2cap_conn_req req;
316 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
317 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
318 req.psm = l2cap_pi(sk)->psm;
319 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
320 L2CAP_CONN_REQ, sizeof(req), &req);
321 }
322
323 bh_unlock_sock(sk);
324 }
325
326 read_unlock(&l->lock);
327}
328
329static void l2cap_conn_ready(struct l2cap_conn *conn)
330{
331 BT_DBG("conn %p", conn);
332
333 if (conn->chan_list.head || !hlist_empty(&l2cap_sk_list.head)) {
334 struct l2cap_info_req req;
335
336 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
337
338 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
339 conn->info_ident = l2cap_get_ident(conn);
340
341 mod_timer(&conn->info_timer,
342 jiffies + msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
343
344 l2cap_send_cmd(conn, conn->info_ident,
345 L2CAP_INFO_REQ, sizeof(req), &req);
346 }
347}
348
349/* Notify sockets that we cannot guarantee reliability anymore */
350static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
351{
352 struct l2cap_chan_list *l = &conn->chan_list;
353 struct sock *sk;
354
355 BT_DBG("conn %p", conn);
356
357 read_lock(&l->lock);
358
359 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
360 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
361 sk->sk_err = err;
362 }
363
364 read_unlock(&l->lock);
365}
366
367static void l2cap_info_timeout(unsigned long arg)
368{
369 struct l2cap_conn *conn = (void *) arg;
370
371 conn->info_ident = 0;
372
373 l2cap_conn_start(conn);
374}
375
262static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) 376static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
263{ 377{
264 struct l2cap_conn *conn = hcon->l2cap_data; 378 struct l2cap_conn *conn = hcon->l2cap_data;
@@ -279,6 +393,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
279 conn->src = &hcon->hdev->bdaddr; 393 conn->src = &hcon->hdev->bdaddr;
280 conn->dst = &hcon->dst; 394 conn->dst = &hcon->dst;
281 395
396 conn->feat_mask = 0;
397
398 init_timer(&conn->info_timer);
399 conn->info_timer.function = l2cap_info_timeout;
400 conn->info_timer.data = (unsigned long) conn;
401
282 spin_lock_init(&conn->lock); 402 spin_lock_init(&conn->lock);
283 rwlock_init(&conn->chan_list.lock); 403 rwlock_init(&conn->chan_list.lock);
284 404
@@ -318,40 +438,6 @@ static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, stru
318 write_unlock_bh(&l->lock); 438 write_unlock_bh(&l->lock);
319} 439}
320 440
321static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
322{
323 u8 id;
324
325 /* Get next available identifier.
326 * 1 - 128 are used by kernel.
327 * 129 - 199 are reserved.
328 * 200 - 254 are used by utilities like l2ping, etc.
329 */
330
331 spin_lock_bh(&conn->lock);
332
333 if (++conn->tx_ident > 128)
334 conn->tx_ident = 1;
335
336 id = conn->tx_ident;
337
338 spin_unlock_bh(&conn->lock);
339
340 return id;
341}
342
343static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
344{
345 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
346
347 BT_DBG("code 0x%2.2x", code);
348
349 if (!skb)
350 return -ENOMEM;
351
352 return hci_send_acl(conn->hcon, skb, 0);
353}
354
355/* ---- Socket interface ---- */ 441/* ---- Socket interface ---- */
356static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src) 442static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
357{ 443{
@@ -508,7 +594,6 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
508 594
509 /* Default config options */ 595 /* Default config options */
510 pi->conf_len = 0; 596 pi->conf_len = 0;
511 pi->conf_mtu = L2CAP_DEFAULT_MTU;
512 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO; 597 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
513} 598}
514 599
@@ -530,7 +615,7 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p
530 INIT_LIST_HEAD(&bt_sk(sk)->accept_q); 615 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
531 616
532 sk->sk_destruct = l2cap_sock_destruct; 617 sk->sk_destruct = l2cap_sock_destruct;
533 sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT; 618 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
534 619
535 sock_reset_flag(sk, SOCK_ZAPPED); 620 sock_reset_flag(sk, SOCK_ZAPPED);
536 621
@@ -650,6 +735,11 @@ static int l2cap_do_connect(struct sock *sk)
650 l2cap_sock_set_timer(sk, sk->sk_sndtimeo); 735 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
651 736
652 if (hcon->state == BT_CONNECTED) { 737 if (hcon->state == BT_CONNECTED) {
738 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
739 l2cap_conn_ready(conn);
740 goto done;
741 }
742
653 if (sk->sk_type == SOCK_SEQPACKET) { 743 if (sk->sk_type == SOCK_SEQPACKET) {
654 struct l2cap_conn_req req; 744 struct l2cap_conn_req req;
655 l2cap_pi(sk)->ident = l2cap_get_ident(conn); 745 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
@@ -958,7 +1048,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
958 opts.imtu = l2cap_pi(sk)->imtu; 1048 opts.imtu = l2cap_pi(sk)->imtu;
959 opts.omtu = l2cap_pi(sk)->omtu; 1049 opts.omtu = l2cap_pi(sk)->omtu;
960 opts.flush_to = l2cap_pi(sk)->flush_to; 1050 opts.flush_to = l2cap_pi(sk)->flush_to;
961 opts.mode = 0x00; 1051 opts.mode = L2CAP_MODE_BASIC;
962 1052
963 len = min_t(unsigned int, sizeof(opts), optlen); 1053 len = min_t(unsigned int, sizeof(opts), optlen);
964 if (copy_from_user((char *) &opts, optval, len)) { 1054 if (copy_from_user((char *) &opts, optval, len)) {
@@ -1007,7 +1097,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
1007 opts.imtu = l2cap_pi(sk)->imtu; 1097 opts.imtu = l2cap_pi(sk)->imtu;
1008 opts.omtu = l2cap_pi(sk)->omtu; 1098 opts.omtu = l2cap_pi(sk)->omtu;
1009 opts.flush_to = l2cap_pi(sk)->flush_to; 1099 opts.flush_to = l2cap_pi(sk)->flush_to;
1010 opts.mode = 0x00; 1100 opts.mode = L2CAP_MODE_BASIC;
1011 1101
1012 len = min_t(unsigned int, len, sizeof(opts)); 1102 len = min_t(unsigned int, len, sizeof(opts));
1013 if (copy_to_user(optval, (char *) &opts, len)) 1103 if (copy_to_user(optval, (char *) &opts, len))
@@ -1084,52 +1174,6 @@ static int l2cap_sock_release(struct socket *sock)
1084 return err; 1174 return err;
1085} 1175}
1086 1176
1087static void l2cap_conn_ready(struct l2cap_conn *conn)
1088{
1089 struct l2cap_chan_list *l = &conn->chan_list;
1090 struct sock *sk;
1091
1092 BT_DBG("conn %p", conn);
1093
1094 read_lock(&l->lock);
1095
1096 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1097 bh_lock_sock(sk);
1098
1099 if (sk->sk_type != SOCK_SEQPACKET) {
1100 l2cap_sock_clear_timer(sk);
1101 sk->sk_state = BT_CONNECTED;
1102 sk->sk_state_change(sk);
1103 } else if (sk->sk_state == BT_CONNECT) {
1104 struct l2cap_conn_req req;
1105 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
1106 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
1107 req.psm = l2cap_pi(sk)->psm;
1108 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1109 }
1110
1111 bh_unlock_sock(sk);
1112 }
1113
1114 read_unlock(&l->lock);
1115}
1116
1117/* Notify sockets that we cannot guarantee reliability anymore */
1118static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1119{
1120 struct l2cap_chan_list *l = &conn->chan_list;
1121 struct sock *sk;
1122
1123 BT_DBG("conn %p", conn);
1124
1125 read_lock(&l->lock);
1126 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1127 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
1128 sk->sk_err = err;
1129 }
1130 read_unlock(&l->lock);
1131}
1132
1133static void l2cap_chan_ready(struct sock *sk) 1177static void l2cap_chan_ready(struct sock *sk)
1134{ 1178{
1135 struct sock *parent = bt_sk(sk)->parent; 1179 struct sock *parent = bt_sk(sk)->parent;
@@ -1256,11 +1300,11 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned
1256 break; 1300 break;
1257 1301
1258 case 2: 1302 case 2:
1259 *val = __le16_to_cpu(*((__le16 *)opt->val)); 1303 *val = __le16_to_cpu(*((__le16 *) opt->val));
1260 break; 1304 break;
1261 1305
1262 case 4: 1306 case 4:
1263 *val = __le32_to_cpu(*((__le32 *)opt->val)); 1307 *val = __le32_to_cpu(*((__le32 *) opt->val));
1264 break; 1308 break;
1265 1309
1266 default: 1310 default:
@@ -1332,6 +1376,8 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
1332 int len = pi->conf_len; 1376 int len = pi->conf_len;
1333 int type, hint, olen; 1377 int type, hint, olen;
1334 unsigned long val; 1378 unsigned long val;
1379 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1380 u16 mtu = L2CAP_DEFAULT_MTU;
1335 u16 result = L2CAP_CONF_SUCCESS; 1381 u16 result = L2CAP_CONF_SUCCESS;
1336 1382
1337 BT_DBG("sk %p", sk); 1383 BT_DBG("sk %p", sk);
@@ -1344,7 +1390,7 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
1344 1390
1345 switch (type) { 1391 switch (type) {
1346 case L2CAP_CONF_MTU: 1392 case L2CAP_CONF_MTU:
1347 pi->conf_mtu = val; 1393 mtu = val;
1348 break; 1394 break;
1349 1395
1350 case L2CAP_CONF_FLUSH_TO: 1396 case L2CAP_CONF_FLUSH_TO:
@@ -1354,6 +1400,11 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
1354 case L2CAP_CONF_QOS: 1400 case L2CAP_CONF_QOS:
1355 break; 1401 break;
1356 1402
1403 case L2CAP_CONF_RFC:
1404 if (olen == sizeof(rfc))
1405 memcpy(&rfc, (void *) val, olen);
1406 break;
1407
1357 default: 1408 default:
1358 if (hint) 1409 if (hint)
1359 break; 1410 break;
@@ -1368,12 +1419,24 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
1368 /* Configure output options and let the other side know 1419 /* Configure output options and let the other side know
1369 * which ones we don't like. */ 1420 * which ones we don't like. */
1370 1421
1371 if (pi->conf_mtu < pi->omtu) 1422 if (rfc.mode == L2CAP_MODE_BASIC) {
1423 if (mtu < pi->omtu)
1424 result = L2CAP_CONF_UNACCEPT;
1425 else {
1426 pi->omtu = mtu;
1427 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1428 }
1429
1430 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1431 } else {
1372 result = L2CAP_CONF_UNACCEPT; 1432 result = L2CAP_CONF_UNACCEPT;
1373 else
1374 pi->omtu = pi->conf_mtu;
1375 1433
1376 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu); 1434 memset(&rfc, 0, sizeof(rfc));
1435 rfc.mode = L2CAP_MODE_BASIC;
1436
1437 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1438 sizeof(rfc), (unsigned long) &rfc);
1439 }
1377 } 1440 }
1378 1441
1379 rsp->scid = cpu_to_le16(pi->dcid); 1442 rsp->scid = cpu_to_le16(pi->dcid);
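Note: the configuration path above exchanges (type, length, value) options — MTU, flush timeout, QoS and now RFC — with 16/32-bit values little-endian. A standalone sketch of that option encoding (type code 0x01 = MTU and the default MTU of 672 follow the L2CAP spec; the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define CONF_MTU 0x01

static size_t add_opt_u16(uint8_t *p, uint8_t type, uint16_t val)
{
	p[0] = type;
	p[1] = 2;               /* option length */
	p[2] = val & 0xff;
	p[3] = val >> 8;
	return 4;
}

int main(void)
{
	uint8_t buf[16];
	size_t len = 0, i;

	len += add_opt_u16(buf + len, CONF_MTU, 672);

	for (i = 0; i < len; i++)
		printf("%02x ", buf[i]);   /* prints: 01 02 a0 02 */
	printf("\n");
	return 0;
}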
@@ -1397,6 +1460,23 @@ static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 fla
1397 return ptr - data; 1460 return ptr - data;
1398} 1461}
1399 1462
1463static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1464{
1465 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1466
1467 if (rej->reason != 0x0000)
1468 return 0;
1469
1470 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1471 cmd->ident == conn->info_ident) {
1472 conn->info_ident = 0;
1473 del_timer(&conn->info_timer);
1474 l2cap_conn_start(conn);
1475 }
1476
1477 return 0;
1478}
1479
1400static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 1480static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1401{ 1481{
1402 struct l2cap_chan_list *list = &conn->chan_list; 1482 struct l2cap_chan_list *list = &conn->chan_list;
@@ -1577,16 +1657,19 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
1577 1657
1578 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp); 1658 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
1579 1659
1580 /* Output config done. */
1581 l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1582
1583 /* Reset config buffer. */ 1660 /* Reset config buffer. */
1584 l2cap_pi(sk)->conf_len = 0; 1661 l2cap_pi(sk)->conf_len = 0;
1585 1662
1663 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
1664 goto unlock;
1665
1586 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) { 1666 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1587 sk->sk_state = BT_CONNECTED; 1667 sk->sk_state = BT_CONNECTED;
1588 l2cap_chan_ready(sk); 1668 l2cap_chan_ready(sk);
1589 } else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) { 1669 goto unlock;
1670 }
1671
1672 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1590 u8 req[64]; 1673 u8 req[64];
1591 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 1674 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1592 l2cap_build_conf_req(sk, req), req); 1675 l2cap_build_conf_req(sk, req), req);
@@ -1646,7 +1729,6 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
1646 if (flags & 0x01) 1729 if (flags & 0x01)
1647 goto done; 1730 goto done;
1648 1731
1649 /* Input config done */
1650 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE; 1732 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1651 1733
1652 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) { 1734 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
@@ -1711,16 +1793,27 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
1711static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 1793static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1712{ 1794{
1713 struct l2cap_info_req *req = (struct l2cap_info_req *) data; 1795 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1714 struct l2cap_info_rsp rsp;
1715 u16 type; 1796 u16 type;
1716 1797
1717 type = __le16_to_cpu(req->type); 1798 type = __le16_to_cpu(req->type);
1718 1799
1719 BT_DBG("type 0x%4.4x", type); 1800 BT_DBG("type 0x%4.4x", type);
1720 1801
1721 rsp.type = cpu_to_le16(type); 1802 if (type == L2CAP_IT_FEAT_MASK) {
1722 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP); 1803 u8 buf[8];
1723 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp); 1804 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
1805 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1806 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
1807 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
1808 l2cap_send_cmd(conn, cmd->ident,
1809 L2CAP_INFO_RSP, sizeof(buf), buf);
1810 } else {
1811 struct l2cap_info_rsp rsp;
1812 rsp.type = cpu_to_le16(type);
1813 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
1814 l2cap_send_cmd(conn, cmd->ident,
1815 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
1816 }
1724 1817
1725 return 0; 1818 return 0;
1726} 1819}
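Note: for L2CAP_IT_FEAT_MASK the handler above builds an 8-byte response — 16-bit type, 16-bit result, then the 32-bit feature mask, all little-endian. A standalone sketch of that buffer layout (the type and result constants follow the L2CAP spec):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define IT_FEAT_MASK  0x0002
#define IR_SUCCESS    0x0000

static void put_le16(uint8_t *p, uint16_t v) { p[0] = v & 0xff; p[1] = v >> 8; }
static void put_le32(uint8_t *p, uint32_t v)
{
	put_le16(p, v & 0xffff);
	put_le16(p + 2, v >> 16);
}

int main(void)
{
	uint8_t buf[8];
	uint32_t feat_mask = 0x00000000;   /* no optional features yet */
	size_t i;

	put_le16(buf,     IT_FEAT_MASK);
	put_le16(buf + 2, IR_SUCCESS);
	put_le32(buf + 4, feat_mask);

	for (i = 0; i < sizeof(buf); i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}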
@@ -1735,6 +1828,15 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
1735 1828
1736 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result); 1829 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
1737 1830
1831 conn->info_ident = 0;
1832
1833 del_timer(&conn->info_timer);
1834
1835 if (type == L2CAP_IT_FEAT_MASK)
1836 conn->feat_mask = __le32_to_cpu(get_unaligned((__le32 *) rsp->data));
1837
1838 l2cap_conn_start(conn);
1839
1738 return 0; 1840 return 0;
1739} 1841}
1740 1842
@@ -1764,7 +1866,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *sk
1764 1866
1765 switch (cmd.code) { 1867 switch (cmd.code) {
1766 case L2CAP_COMMAND_REJ: 1868 case L2CAP_COMMAND_REJ:
1767 /* FIXME: We should process this */ 1869 l2cap_command_rej(conn, &cmd, data);
1768 break; 1870 break;
1769 1871
1770 case L2CAP_CONN_REQ: 1872 case L2CAP_CONN_REQ:
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index bb7220770f2c..e7ac6ba7ecab 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -33,11 +33,11 @@
33#include <linux/sched.h> 33#include <linux/sched.h>
34#include <linux/signal.h> 34#include <linux/signal.h>
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/freezer.h>
37#include <linux/wait.h> 36#include <linux/wait.h>
38#include <linux/device.h> 37#include <linux/device.h>
39#include <linux/net.h> 38#include <linux/net.h>
40#include <linux/mutex.h> 39#include <linux/mutex.h>
40#include <linux/kthread.h>
41 41
42#include <net/sock.h> 42#include <net/sock.h>
43#include <asm/uaccess.h> 43#include <asm/uaccess.h>
@@ -68,7 +68,6 @@ static DEFINE_MUTEX(rfcomm_mutex);
68static unsigned long rfcomm_event; 68static unsigned long rfcomm_event;
69 69
70static LIST_HEAD(session_list); 70static LIST_HEAD(session_list);
71static atomic_t terminate, running;
72 71
73static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len); 72static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len);
74static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci); 73static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci);
@@ -1850,26 +1849,6 @@ static inline void rfcomm_process_sessions(void)
1850 rfcomm_unlock(); 1849 rfcomm_unlock();
1851} 1850}
1852 1851
1853static void rfcomm_worker(void)
1854{
1855 BT_DBG("");
1856
1857 while (!atomic_read(&terminate)) {
1858 set_current_state(TASK_INTERRUPTIBLE);
1859 if (!test_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event)) {
1860 /* No pending events. Let's sleep.
1861 * Incoming connections and data will wake us up. */
1862 schedule();
1863 }
1864 set_current_state(TASK_RUNNING);
1865
1866 /* Process stuff */
1867 clear_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
1868 rfcomm_process_sessions();
1869 }
1870 return;
1871}
1872
1873static int rfcomm_add_listener(bdaddr_t *ba) 1852static int rfcomm_add_listener(bdaddr_t *ba)
1874{ 1853{
1875 struct sockaddr_l2 addr; 1854 struct sockaddr_l2 addr;
@@ -1935,22 +1914,28 @@ static void rfcomm_kill_listener(void)
1935 1914
1936static int rfcomm_run(void *unused) 1915static int rfcomm_run(void *unused)
1937{ 1916{
1938 rfcomm_thread = current; 1917 BT_DBG("");
1939
1940 atomic_inc(&running);
1941 1918
1942 daemonize("krfcommd");
1943 set_user_nice(current, -10); 1919 set_user_nice(current, -10);
1944 1920
1945 BT_DBG("");
1946
1947 rfcomm_add_listener(BDADDR_ANY); 1921 rfcomm_add_listener(BDADDR_ANY);
1948 1922
1949 rfcomm_worker(); 1923 while (!kthread_should_stop()) {
1924 set_current_state(TASK_INTERRUPTIBLE);
1925 if (!test_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event)) {
1926 /* No pending events. Let's sleep.
1927 * Incoming connections and data will wake us up. */
1928 schedule();
1929 }
1930 set_current_state(TASK_RUNNING);
1931
1932 /* Process stuff */
1933 clear_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
1934 rfcomm_process_sessions();
1935 }
1950 1936
1951 rfcomm_kill_listener(); 1937 rfcomm_kill_listener();
1952 1938
1953 atomic_dec(&running);
1954 return 0; 1939 return 0;
1955} 1940}
1956 1941
@@ -2059,7 +2044,11 @@ static int __init rfcomm_init(void)
2059 2044
2060 hci_register_cb(&rfcomm_cb); 2045 hci_register_cb(&rfcomm_cb);
2061 2046
2062 kernel_thread(rfcomm_run, NULL, CLONE_KERNEL); 2047 rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd");
2048 if (IS_ERR(rfcomm_thread)) {
2049 hci_unregister_cb(&rfcomm_cb);
2050 return PTR_ERR(rfcomm_thread);
2051 }
2063 2052
2064 if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0) 2053 if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0)
2065 BT_ERR("Failed to create RFCOMM info file"); 2054 BT_ERR("Failed to create RFCOMM info file");
@@ -2081,14 +2070,7 @@ static void __exit rfcomm_exit(void)
2081 2070
2082 hci_unregister_cb(&rfcomm_cb); 2071 hci_unregister_cb(&rfcomm_cb);
2083 2072
2084 /* Terminate working thread. 2073 kthread_stop(rfcomm_thread);
2085 * ie. Set terminate flag and wake it up */
2086 atomic_inc(&terminate);
2087 rfcomm_schedule(RFCOMM_SCHED_STATE);
2088
2089 /* Wait until thread is running */
2090 while (atomic_read(&running))
2091 schedule();
2092 2074
2093#ifdef CONFIG_BT_RFCOMM_TTY 2075#ifdef CONFIG_BT_RFCOMM_TTY
2094 rfcomm_cleanup_ttys(); 2076 rfcomm_cleanup_ttys();
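The rfcomm/core.c hunks convert krfcommd from a hand-rolled kernel_thread()/daemonize() worker, with its terminate and running atomics and the busy-wait in rfcomm_exit(), to the kthread API: kthread_run() creates and starts the thread, the main loop polls kthread_should_stop(), and kthread_stop() both signals termination and waits for the thread to exit, so the extra bookkeeping disappears. A minimal sketch of the same pattern in isolation (worker_fn, have_work() and do_work() are placeholders, not names from the patch):

    #include <linux/kthread.h>
    #include <linux/sched.h>

    static struct task_struct *worker_task;

    static int worker_fn(void *unused)
    {
            while (!kthread_should_stop()) {
                    set_current_state(TASK_INTERRUPTIBLE);
                    if (!have_work())       /* placeholder condition */
                            schedule();     /* sleep until woken up */
                    set_current_state(TASK_RUNNING);
                    do_work();              /* placeholder processing */
            }
            return 0;
    }

    /* init path */
    worker_task = kthread_run(worker_fn, NULL, "kworkerd");
    if (IS_ERR(worker_task))
            return PTR_ERR(worker_task);

    /* exit path: sets the stop flag, wakes the thread, waits for it */
    kthread_stop(worker_task);

One subtlety preserved from the original loop: the wakeup test happens after setting TASK_INTERRUPTIBLE, so a wakeup that races with the check is not lost.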
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 22a832098d44..e447651a2dbe 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -189,6 +189,23 @@ static struct device *rfcomm_get_device(struct rfcomm_dev *dev)
189 return conn ? &conn->dev : NULL; 189 return conn ? &conn->dev : NULL;
190} 190}
191 191
192static ssize_t show_address(struct device *tty_dev, struct device_attribute *attr, char *buf)
193{
194 struct rfcomm_dev *dev = dev_get_drvdata(tty_dev);
195 bdaddr_t bdaddr;
196 baswap(&bdaddr, &dev->dst);
197 return sprintf(buf, "%s\n", batostr(&bdaddr));
198}
199
200static ssize_t show_channel(struct device *tty_dev, struct device_attribute *attr, char *buf)
201{
202 struct rfcomm_dev *dev = dev_get_drvdata(tty_dev);
203 return sprintf(buf, "%d\n", dev->channel);
204}
205
206static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
207static DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
208
192static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) 209static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
193{ 210{
194 struct rfcomm_dev *dev; 211 struct rfcomm_dev *dev;
@@ -281,6 +298,14 @@ out:
281 return err; 298 return err;
282 } 299 }
283 300
301 dev_set_drvdata(dev->tty_dev, dev);
302
303 if (device_create_file(dev->tty_dev, &dev_attr_address) < 0)
304 BT_ERR("Failed to create address attribute");
305
306 if (device_create_file(dev->tty_dev, &dev_attr_channel) < 0)
307 BT_ERR("Failed to create channel attribute");
308
284 return dev->id; 309 return dev->id;
285} 310}
286 311
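The rfcomm/tty.c addition exposes each emulated port's remote Bluetooth address and RFCOMM channel as read-only sysfs attributes on the tty device, using the standard idiom: stash the private object with dev_set_drvdata(), declare the attributes with DEVICE_ATTR(), and create the files once the device exists. A stripped-down sketch of that idiom for a single attribute (my_dev and its value field are illustrative, not from the patch):

    /* Sketch of the generic read-only device-attribute pattern. */
    static ssize_t show_value(struct device *d,
                            struct device_attribute *attr, char *buf)
    {
            struct my_dev *mdev = dev_get_drvdata(d);
            return sprintf(buf, "%d\n", mdev->value);
    }
    static DEVICE_ATTR(value, S_IRUGO, show_value, NULL);

    /* after the device has been registered */
    dev_set_drvdata(dev, mdev);
    if (device_create_file(dev, &dev_attr_value) < 0)
            BT_ERR("Failed to create value attribute");

With the two attributes above in place, a bound port presumably shows up with address and channel files under its tty device node in sysfs (e.g. the rfcomm0 entry under /sys/class/tty).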
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 65b6fb1c4154..82d0dfdfa7e2 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -189,7 +189,7 @@ static int sco_connect(struct sock *sk)
189 struct sco_conn *conn; 189 struct sco_conn *conn;
190 struct hci_conn *hcon; 190 struct hci_conn *hcon;
191 struct hci_dev *hdev; 191 struct hci_dev *hdev;
192 int err = 0; 192 int err, type;
193 193
194 BT_DBG("%s -> %s", batostr(src), batostr(dst)); 194 BT_DBG("%s -> %s", batostr(src), batostr(dst));
195 195
@@ -200,7 +200,9 @@ static int sco_connect(struct sock *sk)
200 200
201 err = -ENOMEM; 201 err = -ENOMEM;
202 202
203 hcon = hci_connect(hdev, SCO_LINK, dst); 203 type = lmp_esco_capable(hdev) ? ESCO_LINK : SCO_LINK;
204
205 hcon = hci_connect(hdev, type, dst);
204 if (!hcon) 206 if (!hcon)
205 goto done; 207 goto done;
206 208
@@ -224,6 +226,7 @@ static int sco_connect(struct sock *sk)
224 sk->sk_state = BT_CONNECT; 226 sk->sk_state = BT_CONNECT;
225 sco_sock_set_timer(sk, sk->sk_sndtimeo); 227 sco_sock_set_timer(sk, sk->sk_sndtimeo);
226 } 228 }
229
227done: 230done:
228 hci_dev_unlock_bh(hdev); 231 hci_dev_unlock_bh(hdev);
229 hci_dev_put(hdev); 232 hci_dev_put(hdev);
@@ -846,7 +849,7 @@ static int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
846{ 849{
847 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); 850 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
848 851
849 if (hcon->type != SCO_LINK) 852 if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
850 return 0; 853 return 0;
851 854
852 if (!status) { 855 if (!status) {
@@ -865,10 +868,11 @@ static int sco_disconn_ind(struct hci_conn *hcon, __u8 reason)
865{ 868{
866 BT_DBG("hcon %p reason %d", hcon, reason); 869 BT_DBG("hcon %p reason %d", hcon, reason);
867 870
868 if (hcon->type != SCO_LINK) 871 if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
869 return 0; 872 return 0;
870 873
871 sco_conn_del(hcon, bt_err(reason)); 874 sco_conn_del(hcon, bt_err(reason));
875
872 return 0; 876 return 0;
873} 877}
874 878
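In sco.c, sco_connect() now requests an eSCO link whenever the local adapter advertises eSCO support (lmp_esco_capable() is presumably a feature-bit test on hdev->features like the other lmp_*_capable() helpers) and falls back to a plain SCO link otherwise. Because the resulting hci_conn can then be of either type, the connect-confirm and disconnect-indication callbacks must accept both, which is what the two relaxed type checks do. A hypothetical refactoring of that check, just to make the reasoning explicit (sco_link() is not in the patch):

    /* Hypothetical helper: both link types carry synchronous audio,
     * so every SCO-layer HCI callback filters on the same pair. */
    static inline int sco_link(__u8 type)
    {
            return type == SCO_LINK || type == ESCO_LINK;
    }

    static int sco_disconn_ind(struct hci_conn *hcon, __u8 reason)
    {
            if (!sco_link(hcon->type))
                    return 0;

            sco_conn_del(hcon, bt_err(reason));
            return 0;
    }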
diff --git a/net/core/dev.c b/net/core/dev.c
index 38b03da5c1ca..872658927e47 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1553,7 +1553,7 @@ gso:
1553 return rc; 1553 return rc;
1554 } 1554 }
1555 if (unlikely((netif_queue_stopped(dev) || 1555 if (unlikely((netif_queue_stopped(dev) ||
1556 netif_subqueue_stopped(dev, skb->queue_mapping)) && 1556 netif_subqueue_stopped(dev, skb)) &&
1557 skb->next)) 1557 skb->next))
1558 return NETDEV_TX_BUSY; 1558 return NETDEV_TX_BUSY;
1559 } while (skb->next); 1559 } while (skb->next);
@@ -1661,7 +1661,7 @@ gso:
1661 q = dev->qdisc; 1661 q = dev->qdisc;
1662 if (q->enqueue) { 1662 if (q->enqueue) {
1663 /* reset queue_mapping to zero */ 1663 /* reset queue_mapping to zero */
1664 skb->queue_mapping = 0; 1664 skb_set_queue_mapping(skb, 0);
1665 rc = q->enqueue(skb, q); 1665 rc = q->enqueue(skb, q);
1666 qdisc_run(dev); 1666 qdisc_run(dev);
1667 spin_unlock(&dev->queue_lock); 1667 spin_unlock(&dev->queue_lock);
@@ -1692,7 +1692,7 @@ gso:
1692 HARD_TX_LOCK(dev, cpu); 1692 HARD_TX_LOCK(dev, cpu);
1693 1693
1694 if (!netif_queue_stopped(dev) && 1694 if (!netif_queue_stopped(dev) &&
1695 !netif_subqueue_stopped(dev, skb->queue_mapping)) { 1695 !netif_subqueue_stopped(dev, skb)) {
1696 rc = 0; 1696 rc = 0;
1697 if (!dev_hard_start_xmit(skb, dev)) { 1697 if (!dev_hard_start_xmit(skb, dev)) {
1698 HARD_TX_UNLOCK(dev); 1698 HARD_TX_UNLOCK(dev);
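The net/core/dev.c hunks (and the netpoll.c and pktgen.c ones below) track an interface change in the multiqueue helpers: netif_subqueue_stopped() now takes the skb itself rather than a raw skb->queue_mapping value, and direct stores to skb->queue_mapping go through skb_set_queue_mapping(); a raw-index form survives as __netif_subqueue_stopped() for callers that already hold the index (see the sch_teql.c hunk at the end). A small sketch of a transmit-path check written against the new accessors (tx_blocked() itself is illustrative, not a kernel function):

    /* Sketch only: the post-change accessor forms in one place. */
    static int tx_blocked(struct net_device *dev, struct sk_buff *skb)
    {
            /* was: netif_subqueue_stopped(dev, skb->queue_mapping) */
            return netif_queue_stopped(dev) ||
                   netif_subqueue_stopped(dev, skb);
    }

    /* was: skb->queue_mapping = 0;  (force the single-queue path) */
    skb_set_queue_mapping(skb, 0);

Keeping queue_mapping behind accessors presumably lets the field be hidden behind a config option or re-packed later without touching every driver and qdisc, which is exactly the kind of churn these call-site conversions remove.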
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 67ba9914e52e..05979e356963 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1438,6 +1438,9 @@ int neigh_table_clear(struct neigh_table *tbl)
1438 free_percpu(tbl->stats); 1438 free_percpu(tbl->stats);
1439 tbl->stats = NULL; 1439 tbl->stats = NULL;
1440 1440
1441 kmem_cache_destroy(tbl->kmem_cachep);
1442 tbl->kmem_cachep = NULL;
1443
1441 return 0; 1444 return 0;
1442} 1445}
1443 1446
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 95daba624967..bf8d18f1b013 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -67,7 +67,7 @@ static void queue_process(struct work_struct *work)
67 local_irq_save(flags); 67 local_irq_save(flags);
68 netif_tx_lock(dev); 68 netif_tx_lock(dev);
69 if ((netif_queue_stopped(dev) || 69 if ((netif_queue_stopped(dev) ||
70 netif_subqueue_stopped(dev, skb->queue_mapping)) || 70 netif_subqueue_stopped(dev, skb)) ||
71 dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) { 71 dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
72 skb_queue_head(&npinfo->txq, skb); 72 skb_queue_head(&npinfo->txq, skb);
73 netif_tx_unlock(dev); 73 netif_tx_unlock(dev);
@@ -269,7 +269,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
269 tries > 0; --tries) { 269 tries > 0; --tries) {
270 if (netif_tx_trylock(dev)) { 270 if (netif_tx_trylock(dev)) {
271 if (!netif_queue_stopped(dev) && 271 if (!netif_queue_stopped(dev) &&
272 !netif_subqueue_stopped(dev, skb->queue_mapping)) 272 !netif_subqueue_stopped(dev, skb))
273 status = dev->hard_start_xmit(skb, dev); 273 status = dev->hard_start_xmit(skb, dev);
274 netif_tx_unlock(dev); 274 netif_tx_unlock(dev);
275 275
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index c4719edb55c0..de33f36947e9 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2603,8 +2603,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2603 skb->network_header = skb->tail; 2603 skb->network_header = skb->tail;
2604 skb->transport_header = skb->network_header + sizeof(struct iphdr); 2604 skb->transport_header = skb->network_header + sizeof(struct iphdr);
2605 skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr)); 2605 skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr));
2606 skb->queue_mapping = pkt_dev->cur_queue_map; 2606 skb_set_queue_mapping(skb, pkt_dev->cur_queue_map);
2607
2608 iph = ip_hdr(skb); 2607 iph = ip_hdr(skb);
2609 udph = udp_hdr(skb); 2608 udph = udp_hdr(skb);
2610 2609
@@ -2941,8 +2940,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2941 skb->network_header = skb->tail; 2940 skb->network_header = skb->tail;
2942 skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); 2941 skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
2943 skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr)); 2942 skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr));
2944 skb->queue_mapping = pkt_dev->cur_queue_map; 2943 skb_set_queue_mapping(skb, pkt_dev->cur_queue_map);
2945
2946 iph = ipv6_hdr(skb); 2944 iph = ipv6_hdr(skb);
2947 udph = udp_hdr(skb); 2945 udph = udp_hdr(skb);
2948 2946
@@ -3385,7 +3383,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3385 3383
3386 if ((netif_queue_stopped(odev) || 3384 if ((netif_queue_stopped(odev) ||
3387 (pkt_dev->skb && 3385 (pkt_dev->skb &&
3388 netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping))) || 3386 netif_subqueue_stopped(odev, pkt_dev->skb))) ||
3389 need_resched()) { 3387 need_resched()) {
3390 idle_start = getCurUs(); 3388 idle_start = getCurUs();
3391 3389
@@ -3402,7 +3400,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3402 pkt_dev->idle_acc += getCurUs() - idle_start; 3400 pkt_dev->idle_acc += getCurUs() - idle_start;
3403 3401
3404 if (netif_queue_stopped(odev) || 3402 if (netif_queue_stopped(odev) ||
3405 netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) { 3403 netif_subqueue_stopped(odev, pkt_dev->skb)) {
3406 pkt_dev->next_tx_us = getCurUs(); /* TODO */ 3404 pkt_dev->next_tx_us = getCurUs(); /* TODO */
3407 pkt_dev->next_tx_ns = 0; 3405 pkt_dev->next_tx_ns = 0;
3408 goto out; /* Try the next interface */ 3406 goto out; /* Try the next interface */
@@ -3431,7 +3429,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3431 3429
3432 netif_tx_lock_bh(odev); 3430 netif_tx_lock_bh(odev);
3433 if (!netif_queue_stopped(odev) && 3431 if (!netif_queue_stopped(odev) &&
3434 !netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) { 3432 !netif_subqueue_stopped(odev, pkt_dev->skb)) {
3435 3433
3436 atomic_inc(&(pkt_dev->skb->users)); 3434 atomic_inc(&(pkt_dev->skb->users));
3437 retry_now: 3435 retry_now:
diff --git a/net/dccp/diag.c b/net/dccp/diag.c
index 0f3745585a94..d8a3509b26f6 100644
--- a/net/dccp/diag.c
+++ b/net/dccp/diag.c
@@ -68,3 +68,4 @@ module_exit(dccp_diag_fini);
68MODULE_LICENSE("GPL"); 68MODULE_LICENSE("GPL");
69MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>"); 69MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
70MODULE_DESCRIPTION("DCCP inet_diag handler"); 70MODULE_DESCRIPTION("DCCP inet_diag handler");
71MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_INET_DIAG, DCCPDIAG_GETSOCK);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 44f6e17e105f..222549ab274a 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -1037,8 +1037,8 @@ module_exit(dccp_v4_exit);
1037 * values directly, Also cover the case where the protocol is not specified, 1037 * values directly, Also cover the case where the protocol is not specified,
1038 * i.e. net-pf-PF_INET-proto-0-type-SOCK_DCCP 1038 * i.e. net-pf-PF_INET-proto-0-type-SOCK_DCCP
1039 */ 1039 */
1040MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-33-type-6"); 1040MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 33, 6);
1041MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-0-type-6"); 1041MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 0, 6);
1042MODULE_LICENSE("GPL"); 1042MODULE_LICENSE("GPL");
1043MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>"); 1043MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
1044MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol"); 1044MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");
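The module-alias hunks in dccp and the diag modules replace open-coded "net-pf-…-proto-…-type-…" strings with the new MODULE_ALIAS_NET_PF_PROTO_TYPE() (and, in inet_diag, MODULE_ALIAS_NET_PF_PROTO()) helpers from include/linux/net.h. The before/after lines in dccp/ipv4.c pin down the mapping exactly; from them the macros are presumably defined along these lines (reconstructed, not quoted from the patch):

    #define MODULE_ALIAS_NET_PF_PROTO(pf, proto)                \
            MODULE_ALIAS("net-pf-" __stringify(pf)              \
                         "-proto-" __stringify(proto))

    #define MODULE_ALIAS_NET_PF_PROTO_TYPE(pf, proto, type)     \
            MODULE_ALIAS("net-pf-" __stringify(pf)              \
                         "-proto-" __stringify(proto)           \
                         "-type-" __stringify(type))

    /* e.g. MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 33, 6) keeps
     * producing "net-pf-2-proto-33-type-6" for SOCK_DCCP. */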
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index cac53548c2d8..bbadd6681b83 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -1219,8 +1219,8 @@ module_exit(dccp_v6_exit);
1219 * values directly, Also cover the case where the protocol is not specified, 1219 * values directly, Also cover the case where the protocol is not specified,
1220 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP 1220 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
1221 */ 1221 */
1222MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-33-type-6"); 1222MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6);
1223MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-0-type-6"); 1223MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6);
1224MODULE_LICENSE("GPL"); 1224MODULE_LICENSE("GPL");
1225MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>"); 1225MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
1226MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol"); 1226MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 7eb83ebed2ec..dc429b6b0ba6 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -815,6 +815,12 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
815 nlmsg_len(nlh) < hdrlen) 815 nlmsg_len(nlh) < hdrlen)
816 return -EINVAL; 816 return -EINVAL;
817 817
818#ifdef CONFIG_KMOD
819 if (inet_diag_table[nlh->nlmsg_type] == NULL)
820 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
821 NETLINK_INET_DIAG, nlh->nlmsg_type);
822#endif
823
818 if (inet_diag_table[nlh->nlmsg_type] == NULL) 824 if (inet_diag_table[nlh->nlmsg_type] == NULL)
819 return -ENOENT; 825 return -ENOENT;
820 826
@@ -914,3 +920,4 @@ static void __exit inet_diag_exit(void)
914module_init(inet_diag_init); 920module_init(inet_diag_init);
915module_exit(inet_diag_exit); 921module_exit(inet_diag_exit);
916MODULE_LICENSE("GPL"); 922MODULE_LICENSE("GPL");
923MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_INET_DIAG);
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 3904d2158a92..2fbcc7d1b1a0 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -56,3 +56,4 @@ static void __exit tcp_diag_exit(void)
56module_init(tcp_diag_init); 56module_init(tcp_diag_init);
57module_exit(tcp_diag_exit); 57module_exit(tcp_diag_exit);
58MODULE_LICENSE("GPL"); 58MODULE_LICENSE("GPL");
59MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_INET_DIAG, TCPDIAG_GETSOCK);
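Taken together, the inet_diag, tcp_diag and dccp/diag changes make the diag handlers demand-loadable: when no handler is registered for a request type, inet_diag_rcv_msg() (under CONFIG_KMOD) asks for a module whose alias is built from the same family/protocol/type triple that the handler modules now export via MODULE_ALIAS_NET_PF_PROTO_TYPE(). A sketch of the round trip for a TCP socket dump, assuming the usual values PF_NETLINK=16, NETLINK_INET_DIAG=4 and TCPDIAG_GETSOCK=18:

    /* Sketch: how the request and the alias line up at run time. */
    if (inet_diag_table[nlh->nlmsg_type] == NULL)
            request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
                           NETLINK_INET_DIAG, nlh->nlmsg_type);
    /* -> userspace modprobe is asked for "net-pf-16-proto-4-type-18",
     *    which matches the MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK,
     *    NETLINK_INET_DIAG, TCPDIAG_GETSOCK) alias in tcp_diag.c; the
     *    module loads, registers its handler, and the table lookup
     *    that follows in inet_diag_rcv_msg() then succeeds. */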
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 67cd06613a25..66a9139d46e9 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -483,6 +483,7 @@ static int ah6_init_state(struct xfrm_state *x)
483 break; 483 break;
484 case XFRM_MODE_TUNNEL: 484 case XFRM_MODE_TUNNEL:
485 x->props.header_len += sizeof(struct ipv6hdr); 485 x->props.header_len += sizeof(struct ipv6hdr);
486 break;
486 default: 487 default:
487 goto error; 488 goto error;
488 } 489 }
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index b0715432e454..72a659806cad 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -360,6 +360,7 @@ static int esp6_init_state(struct xfrm_state *x)
360 break; 360 break;
361 case XFRM_MODE_TUNNEL: 361 case XFRM_MODE_TUNNEL:
362 x->props.header_len += sizeof(struct ipv6hdr); 362 x->props.header_len += sizeof(struct ipv6hdr);
363 break;
363 default: 364 default:
364 goto error; 365 goto error;
365 } 366 }
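The ah6.c and esp6.c one-liners are the same bug fix: in the init-state switch, the XFRM_MODE_TUNNEL case added the IPv6 header length but had no break, so control fell through into the default case and took the error path, which would have made tunnel-mode AH/ESP state setup fail. A minimal illustration of the pattern, with the case list trimmed to the relevant entries:

    /* Sketch of the fallthrough fixed above. */
    switch (x->props.mode) {
    case XFRM_MODE_TRANSPORT:
            break;
    case XFRM_MODE_TUNNEL:
            x->props.header_len += sizeof(struct ipv6hdr);
            break;          /* without this, control falls into default */
    default:
            goto error;
    }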
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index be57cf317a7f..421281d9dd1d 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -266,7 +266,7 @@ static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
266 int busy; 266 int busy;
267 int nores; 267 int nores;
268 int len = skb->len; 268 int len = skb->len;
269 int subq = skb->queue_mapping; 269 int subq = skb_get_queue_mapping(skb);
270 struct sk_buff *skb_res = NULL; 270 struct sk_buff *skb_res = NULL;
271 271
272 start = master->slaves; 272 start = master->slaves;
@@ -284,7 +284,7 @@ restart:
284 if (slave->qdisc_sleeping != q) 284 if (slave->qdisc_sleeping != q)
285 continue; 285 continue;
286 if (netif_queue_stopped(slave) || 286 if (netif_queue_stopped(slave) ||
287 netif_subqueue_stopped(slave, subq) || 287 __netif_subqueue_stopped(slave, subq) ||
288 !netif_running(slave)) { 288 !netif_running(slave)) {
289 busy = 1; 289 busy = 1;
290 continue; 290 continue;
@@ -294,7 +294,7 @@ restart:
294 case 0: 294 case 0:
295 if (netif_tx_trylock(slave)) { 295 if (netif_tx_trylock(slave)) {
296 if (!netif_queue_stopped(slave) && 296 if (!netif_queue_stopped(slave) &&
297 !netif_subqueue_stopped(slave, subq) && 297 !__netif_subqueue_stopped(slave, subq) &&
298 slave->hard_start_xmit(skb, slave) == 0) { 298 slave->hard_start_xmit(skb, slave) == 0) {
299 netif_tx_unlock(slave); 299 netif_tx_unlock(slave);
300 master->slaves = NEXT_SLAVE(q); 300 master->slaves = NEXT_SLAVE(q);
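sch_teql.c is the one converted call site that keeps a raw queue index around: it reads the index once with skb_get_queue_mapping() and then uses the double-underscore __netif_subqueue_stopped(), which takes that index directly, while probing each slave device, instead of re-deriving it from the skb every time. The two forms side by side (slave and skb as in the hunk above):

    /* Sketch: index-based vs. skb-based subqueue check. */
    u16 subq = skb_get_queue_mapping(skb);  /* read once per packet */

    if (netif_queue_stopped(slave) ||
        __netif_subqueue_stopped(slave, subq) ||    /* index already known */
        !netif_running(slave))
            busy = 1;

    /* equivalent when no cached index is at hand:
     *   netif_subqueue_stopped(slave, skb)  */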