diff options
author | Johan Hedberg <johan.hedberg@intel.com> | 2015-11-10 02:44:54 -0500 |
---|---|---|
committer | Marcel Holtmann <marcel@holtmann.org> | 2015-11-19 11:50:28 -0500 |
commit | be91cd05704d5a547de086d0e61c249ee62d2e13 (patch) | |
tree | 2f6331d2e90708233041085adac56fc4a985efa9 /net/bluetooth/hci_request.c | |
parent | 030e7f8141a262e32dc064d7cf12377d769d45c2 (diff) |
Bluetooth: Move synchronous request handling into hci_request.c
hci_request.c is a more natural place for the synchronous request
handling. Furthermore, we will soon need access to some of the
previously private-to-hci_core.c functions from hci_request.c.
Signed-off-by: Johan Hedberg <johan.hedberg@intel.com>
Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
Diffstat (limited to 'net/bluetooth/hci_request.c')
-rw-r--r-- | net/bluetooth/hci_request.c | 184 |
1 file changed, 184 insertions, 0 deletions
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 5ba27c30e8f2..aa868f6f5a90 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c | |||
@@ -27,6 +27,10 @@ | |||
27 | #include "smp.h" | 27 | #include "smp.h" |
28 | #include "hci_request.h" | 28 | #include "hci_request.h" |
29 | 29 | ||
/* States of the synchronous-request state machine kept in hdev->req_status;
 * set to PEND before a request is queued, moved to DONE or CANCELED by
 * hci_req_sync_complete() / hci_req_cancel() which then wake req_wait_q.
 */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

30 | void hci_req_init(struct hci_request *req, struct hci_dev *hdev) | 34 | void hci_req_init(struct hci_request *req, struct hci_dev *hdev) |
31 | { | 35 | { |
32 | skb_queue_head_init(&req->cmd_q); | 36 | skb_queue_head_init(&req->cmd_q); |
@@ -82,6 +86,186 @@ int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete) | |||
82 | return req_run(req, NULL, complete); | 86 | return req_run(req, NULL, complete); |
83 | } | 87 | } |
84 | 88 | ||
89 | static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, | ||
90 | struct sk_buff *skb) | ||
91 | { | ||
92 | BT_DBG("%s result 0x%2.2x", hdev->name, result); | ||
93 | |||
94 | if (hdev->req_status == HCI_REQ_PEND) { | ||
95 | hdev->req_result = result; | ||
96 | hdev->req_status = HCI_REQ_DONE; | ||
97 | if (skb) | ||
98 | hdev->req_skb = skb_get(skb); | ||
99 | wake_up_interruptible(&hdev->req_wait_q); | ||
100 | } | ||
101 | } | ||
102 | |||
103 | void hci_req_cancel(struct hci_dev *hdev, int err) | ||
104 | { | ||
105 | BT_DBG("%s err 0x%2.2x", hdev->name, err); | ||
106 | |||
107 | if (hdev->req_status == HCI_REQ_PEND) { | ||
108 | hdev->req_result = err; | ||
109 | hdev->req_status = HCI_REQ_CANCELED; | ||
110 | wake_up_interruptible(&hdev->req_wait_q); | ||
111 | } | ||
112 | } | ||
113 | |||
/* Send a single HCI command synchronously and wait up to @timeout
 * (jiffies) for the event @event (0 = default command complete/status)
 * that finishes it.
 *
 * Returns the response skb on success (caller owns the reference and
 * must kfree_skb() it), or an ERR_PTR: the negative translated HCI
 * error, -EINTR on signal, -ETIMEDOUT on timeout, or -ENODATA when the
 * request completed without producing a response skb.
 *
 * NOTE(review): caller is expected to hold the request lock so that
 * hdev->req_* is not used concurrently — confirm against callers.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	/* Must be PEND before the request can run, so a fast completion
	 * from hci_req_sync_complete() is not lost. */
	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue and mark ourselves interruptible
	 * BEFORE queueing the request; the wake-up in the completion
	 * handler then cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		/* Request never queued: undo the wait setup. */
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	/* Sleep until completion, cancellation, signal or timeout. */
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);
	/* NOTE(review): on the -EINTR path req_status stays HCI_REQ_PEND
	 * and an skb stored by a racing completion is not released here —
	 * verify later paths clean this up (upstream later replaced this
	 * open-coded wait with wait_event_interruptible_timeout()). */

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Translate the HCI status byte into a negative errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno from
		 * hci_req_cancel(). */
		err = -hdev->req_result;
		break;

	default:
		/* Woken by timeout: status never left PEND. */
		err = -ETIMEDOUT;
		break;
	}

	/* Reset the per-device request state and take ownership of the
	 * response skb (reference taken in hci_req_sync_complete()). */
	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
178 | |||
179 | struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, | ||
180 | const void *param, u32 timeout) | ||
181 | { | ||
182 | return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout); | ||
183 | } | ||
184 | EXPORT_SYMBOL(__hci_cmd_sync); | ||
185 | |||
/* Execute request and wait for completion. */
/* Build a request via @func(&req, @opt), run it, and sleep up to
 * @timeout (jiffies) until it completes, is cancelled, times out or a
 * signal arrives.
 *
 * Returns 0 on success (including the case where @func queued no
 * commands at all), the negative translated HCI error on failure,
 * -EINTR on signal or -ETIMEDOUT on timeout.
 *
 * NOTE(review): expected to run with the request lock held (see
 * hci_req_sync()) so hdev->req_* is not shared — confirm for any
 * direct callers.
 */
int __hci_req_sync(struct hci_dev *hdev, void (*func)(struct hci_request *req,
						      unsigned long opt),
		   unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	/* PEND must be set before the request can possibly complete. */
	hdev->req_status = HCI_REQ_PEND;

	/* Let the caller populate the request. */
	func(&req, opt);

	/* Queue ourselves on the wait queue before running the request
	 * so the completion wake-up cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	/* Sleep until completion, cancellation, signal or timeout. */
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;
	/* NOTE(review): the -EINTR return leaves req_status at
	 * HCI_REQ_PEND — verify a later path resets it (upstream later
	 * switched this to wait_event_interruptible_timeout()). */

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Translate the HCI status byte into a negative errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds a positive errno set by
		 * hci_req_cancel(). */
		err = -hdev->req_result;
		break;

	default:
		/* Status never left PEND: the sleep timed out. */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
251 | |||
252 | int hci_req_sync(struct hci_dev *hdev, void (*req)(struct hci_request *req, | ||
253 | unsigned long opt), | ||
254 | unsigned long opt, __u32 timeout) | ||
255 | { | ||
256 | int ret; | ||
257 | |||
258 | if (!test_bit(HCI_UP, &hdev->flags)) | ||
259 | return -ENETDOWN; | ||
260 | |||
261 | /* Serialize all requests */ | ||
262 | hci_req_lock(hdev); | ||
263 | ret = __hci_req_sync(hdev, req, opt, timeout); | ||
264 | hci_req_unlock(hdev); | ||
265 | |||
266 | return ret; | ||
267 | } | ||
268 | |||
85 | struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, | 269 | struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, |
86 | const void *param) | 270 | const void *param) |
87 | { | 271 | { |