diff options
author | Johan Hedberg <johan.hedberg@intel.com> | 2012-07-16 09:12:04 -0400 |
---|---|---|
committer | Gustavo Padovan <gustavo.padovan@collabora.co.uk> | 2012-07-17 13:48:13 -0400 |
commit | 3f27e95b83d08a58aadef42f332b1d1d50101cb6 (patch) | |
tree | 26d8e20839164bb75497a0caf761725f4f3583c3 /drivers/bluetooth | |
parent | 7d664fbafaf992e501159c013b4264a03ee1efac (diff) |
Bluetooth: Add initial reliable packet support for Three-wire UART
This patch adds initial support for reliable packets along with the
necessary retransmission timer for the Three-wire UART HCI driver.
Signed-off-by: Johan Hedberg <johan.hedberg@intel.com>
Signed-off-by: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
Diffstat (limited to 'drivers/bluetooth')
-rw-r--r-- | drivers/bluetooth/hci_h5.c | 56 |
1 file changed, 56 insertions(+), 0 deletions(-)
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 6b7ec643f3da..ae1bd32d8ef9 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -30,6 +30,10 @@ | |||
30 | 30 | ||
31 | #include "hci_uart.h" | 31 | #include "hci_uart.h" |
32 | 32 | ||
33 | #define H5_TXWINSIZE 4 | ||
34 | |||
35 | #define H5_ACK_TIMEOUT msecs_to_jiffies(250) | ||
36 | |||
33 | struct h5 { | 37 | struct h5 { |
34 | struct sk_buff_head unack; /* Unack'ed packets queue */ | 38 | struct sk_buff_head unack; /* Unack'ed packets queue */ |
35 | struct sk_buff_head rel; /* Reliable packets queue */ | 39 | struct sk_buff_head rel; /* Reliable packets queue */ |
@@ -37,11 +41,34 @@ struct h5 { | |||
37 | 41 | ||
38 | struct sk_buff *rx_skb; | 42 | struct sk_buff *rx_skb; |
39 | 43 | ||
44 | struct timer_list timer; /* Retransmission timer */ | ||
45 | |||
40 | bool txack_req; | 46 | bool txack_req; |
41 | 47 | ||
42 | u8 msgq_txseq; | 48 | u8 msgq_txseq; |
43 | }; | 49 | }; |
44 | 50 | ||
51 | static void h5_timed_event(unsigned long arg) | ||
52 | { | ||
53 | struct hci_uart *hu = (struct hci_uart *) arg; | ||
54 | struct h5 *h5 = hu->priv; | ||
55 | struct sk_buff *skb; | ||
56 | unsigned long flags; | ||
57 | |||
58 | BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen); | ||
59 | |||
60 | spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING); | ||
61 | |||
62 | while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) { | ||
63 | h5->msgq_txseq = (h5->msgq_txseq - 1) & 0x07; | ||
64 | skb_queue_head(&h5->rel, skb); | ||
65 | } | ||
66 | |||
67 | spin_unlock_irqrestore(&h5->unack.lock, flags); | ||
68 | |||
69 | hci_uart_tx_wakeup(hu); | ||
70 | } | ||
71 | |||
45 | static int h5_open(struct hci_uart *hu) | 72 | static int h5_open(struct hci_uart *hu) |
46 | { | 73 | { |
47 | struct h5 *h5; | 74 | struct h5 *h5; |
@@ -58,6 +85,10 @@ static int h5_open(struct hci_uart *hu) | |||
58 | skb_queue_head_init(&h5->rel); | 85 | skb_queue_head_init(&h5->rel); |
59 | skb_queue_head_init(&h5->unrel); | 86 | skb_queue_head_init(&h5->unrel); |
60 | 87 | ||
88 | init_timer(&h5->timer); | ||
89 | h5->timer.function = h5_timed_event; | ||
90 | h5->timer.data = (unsigned long) hu; | ||
91 | |||
61 | return 0; | 92 | return 0; |
62 | } | 93 | } |
63 | 94 | ||
@@ -69,6 +100,8 @@ static int h5_close(struct hci_uart *hu) | |||
69 | skb_queue_purge(&h5->rel); | 100 | skb_queue_purge(&h5->rel); |
70 | skb_queue_purge(&h5->unrel); | 101 | skb_queue_purge(&h5->unrel); |
71 | 102 | ||
103 | del_timer(&h5->timer); | ||
104 | |||
72 | kfree(h5); | 105 | kfree(h5); |
73 | 106 | ||
74 | return 0; | 107 | return 0; |
@@ -123,6 +156,7 @@ static struct sk_buff *h5_prepare_ack(struct h5 *h5) | |||
123 | static struct sk_buff *h5_dequeue(struct hci_uart *hu) | 156 | static struct sk_buff *h5_dequeue(struct hci_uart *hu) |
124 | { | 157 | { |
125 | struct h5 *h5 = hu->priv; | 158 | struct h5 *h5 = hu->priv; |
159 | unsigned long flags; | ||
126 | struct sk_buff *skb, *nskb; | 160 | struct sk_buff *skb, *nskb; |
127 | 161 | ||
128 | if ((skb = skb_dequeue(&h5->unrel)) != NULL) { | 162 | if ((skb = skb_dequeue(&h5->unrel)) != NULL) { |
@@ -136,6 +170,28 @@ static struct sk_buff *h5_dequeue(struct hci_uart *hu) | |||
136 | BT_ERR("Could not dequeue pkt because alloc_skb failed"); | 170 | BT_ERR("Could not dequeue pkt because alloc_skb failed"); |
137 | } | 171 | } |
138 | 172 | ||
173 | spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING); | ||
174 | |||
175 | if (h5->unack.qlen >= H5_TXWINSIZE) | ||
176 | goto unlock; | ||
177 | |||
178 | if ((skb = skb_dequeue(&h5->rel)) != NULL) { | ||
179 | nskb = h5_prepare_pkt(h5, skb); | ||
180 | |||
181 | if (nskb) { | ||
182 | __skb_queue_tail(&h5->unack, skb); | ||
183 | mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT); | ||
184 | spin_unlock_irqrestore(&h5->unack.lock, flags); | ||
185 | return nskb; | ||
186 | } | ||
187 | |||
188 | skb_queue_head(&h5->rel, skb); | ||
189 | BT_ERR("Could not dequeue pkt because alloc_skb failed"); | ||
190 | } | ||
191 | |||
192 | unlock: | ||
193 | spin_unlock_irqrestore(&h5->unack.lock, flags); | ||
194 | |||
139 | if (h5->txack_req) | 195 | if (h5->txack_req) |
140 | return h5_prepare_ack(h5); | 196 | return h5_prepare_ack(h5); |
141 | 197 | ||