Diffstat (limited to 'drivers/net/wireless/ath/ath6kl/htc.c')
-rw-r--r--	drivers/net/wireless/ath/ath6kl/htc.c	| 2466
1 file changed, 2466 insertions, 0 deletions
diff --git a/drivers/net/wireless/ath/ath6kl/htc.c b/drivers/net/wireless/ath/ath6kl/htc.c
new file mode 100644
index 000000000000..95c47bbd1d78
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc.c
@@ -0,0 +1,2466 @@
/*
 * Copyright (c) 2007-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc_hif.h"
#include "debug.h"
#include "hif-ops.h"
#include <asm/unaligned.h>

#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))

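/*
 * Illustrative example (editorial note, not from the original source):
 * __ALIGN_MASK rounds a length up to the next multiple of the mbox
 * block size. Assuming a 128-byte block size, block_mask is 0x7f and a
 * 1540-byte message pads to (1540 + 0x7f) & ~0x7f == 1664 bytes.
 */
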
static void htc_prep_send_pkt(struct htc_packet *packet, u8 flags, int ctrl0,
			      int ctrl1)
{
	struct htc_frame_hdr *hdr;

	packet->buf -= HTC_HDR_LENGTH;
	hdr = (struct htc_frame_hdr *)packet->buf;

	/* Endianness? */
	put_unaligned((u16)packet->act_len, &hdr->payld_len);
	hdr->flags = flags;
	hdr->eid = packet->endpoint;
	hdr->ctrl[0] = ctrl0;
	hdr->ctrl[1] = ctrl1;
}
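
/*
 * Note (added for clarity, derived from the code above): callers must
 * reserve HTC_HDR_LENGTH bytes of headroom in front of packet->buf,
 * since htc_prep_send_pkt() rewinds the buffer pointer to write the
 * HTC frame header in place. htc_get_control_buf() below shows the
 * pattern: buf = buf_start + HTC_HDR_LENGTH.
 */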

static void htc_reclaim_txctrl_buf(struct htc_target *target,
				   struct htc_packet *pkt)
{
	spin_lock_bh(&target->htc_lock);
	list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
	spin_unlock_bh(&target->htc_lock);
}

static struct htc_packet *htc_get_control_buf(struct htc_target *target,
					      bool tx)
{
	struct htc_packet *packet = NULL;
	struct list_head *buf_list;

	buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;

	spin_lock_bh(&target->htc_lock);

	if (list_empty(buf_list)) {
		spin_unlock_bh(&target->htc_lock);
		return NULL;
	}

	packet = list_first_entry(buf_list, struct htc_packet, list);
	list_del(&packet->list);
	spin_unlock_bh(&target->htc_lock);

	if (tx)
		packet->buf = packet->buf_start + HTC_HDR_LENGTH;

	return packet;
}

static void htc_tx_comp_update(struct htc_target *target,
			       struct htc_endpoint *endpoint,
			       struct htc_packet *packet)
{
	packet->completion = NULL;
	packet->buf += HTC_HDR_LENGTH;

	if (!packet->status)
		return;

	ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
		   packet->status, packet->endpoint, packet->act_len,
		   packet->info.tx.cred_used);

	/* on failure to submit, reclaim credits for this packet */
	spin_lock_bh(&target->tx_lock);
	endpoint->cred_dist.cred_to_dist +=
		packet->info.tx.cred_used;
	endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
		   target->cred_dist_cntxt, &target->cred_dist_list);

	ath6k_credit_distribute(target->cred_dist_cntxt,
				&target->cred_dist_list,
				HTC_CREDIT_DIST_SEND_COMPLETE);

	spin_unlock_bh(&target->tx_lock);
}

static void htc_tx_complete(struct htc_endpoint *endpoint,
			    struct list_head *txq)
{
	if (list_empty(txq))
		return;

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
		   "send complete ep %d, (%d pkts)\n",
		   endpoint->eid, get_queue_depth(txq));

	ath6kl_tx_complete(endpoint->target->dev->ar, txq);
}

static void htc_tx_comp_handler(struct htc_target *target,
				struct htc_packet *packet)
{
	struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
	struct list_head container;

	htc_tx_comp_update(target, endpoint, packet);
	INIT_LIST_HEAD(&container);
	list_add_tail(&packet->list, &container);
	/* do completion */
	htc_tx_complete(endpoint, &container);
}

static void htc_async_tx_scat_complete(struct hif_scatter_req *scat_req)
{
	struct htc_endpoint *endpoint = scat_req->ep;
	struct htc_target *target = endpoint->target;
	struct htc_packet *packet;
	struct list_head tx_compq;
	int i;

	INIT_LIST_HEAD(&tx_compq);

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
		   "htc_async_tx_scat_complete total len: %d entries: %d\n",
		   scat_req->len, scat_req->scat_entries);

	if (scat_req->status)
		ath6kl_err("send scatter req failed: %d\n", scat_req->status);

	/* walk through the scatter list and process */
	for (i = 0; i < scat_req->scat_entries; i++) {
		packet = scat_req->scat_list[i].packet;
		if (!packet) {
			WARN_ON(1);
			return;
		}

		packet->status = scat_req->status;
		htc_tx_comp_update(target, endpoint, packet);
		list_add_tail(&packet->list, &tx_compq);
	}

	/* free scatter request */
	hif_scatter_req_add(target->dev->ar, scat_req);

	/* complete all packets */
	htc_tx_complete(endpoint, &tx_compq);
}

static int htc_issue_send(struct htc_target *target, struct htc_packet *packet)
{
	int status;
	bool sync = false;
	u32 padded_len, send_len;

	if (!packet->completion)
		sync = true;

	send_len = packet->act_len + HTC_HDR_LENGTH;

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s: transmit len : %d (%s)\n",
		   __func__, send_len, sync ? "sync" : "async");

	padded_len = CALC_TXRX_PADDED_LEN(target->dev, send_len);

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
		   "DevSendPacket, padded len: %d mbox:0x%X (mode:%s)\n",
		   padded_len,
		   target->dev->ar->mbox_info.htc_addr,
		   sync ? "sync" : "async");

	if (sync) {
		status = hif_read_write_sync(target->dev->ar,
				target->dev->ar->mbox_info.htc_addr,
				packet->buf, padded_len,
				HIF_WR_SYNC_BLOCK_INC);

		packet->status = status;
		packet->buf += HTC_HDR_LENGTH;
	} else
		status = hif_write_async(target->dev->ar,
				target->dev->ar->mbox_info.htc_addr,
				packet->buf, padded_len,
				HIF_WR_ASYNC_BLOCK_INC, packet);

	return status;
}

static int htc_check_credits(struct htc_target *target,
			     struct htc_endpoint *ep, u8 *flags,
			     enum htc_endpoint_id eid, unsigned int len,
			     int *req_cred)
{

	*req_cred = (len > target->tgt_cred_sz) ?
		     DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "creds required:%d got:%d\n",
		   *req_cred, ep->cred_dist.credits);

	if (ep->cred_dist.credits < *req_cred) {
		if (eid == ENDPOINT_0)
			return -EINVAL;

		/* Seek more credits */
		ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
			   target->cred_dist_cntxt, &ep->cred_dist);

		ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);

		ep->cred_dist.seek_cred = 0;

		if (ep->cred_dist.credits < *req_cred) {
			ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
				   "not enough credits for ep %d - leaving packet in queue\n",
				   eid);
			return -EINVAL;
		}
	}

	ep->cred_dist.credits -= *req_cred;
	ep->ep_st.cred_cosumd += *req_cred;

	/* When we are getting low on credits, ask for more */
	if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
		ep->cred_dist.seek_cred =
			ep->cred_dist.cred_per_msg - ep->cred_dist.credits;

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
			   target->cred_dist_cntxt, &ep->cred_dist);

		ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);

		/* see if we were successful in getting more */
		if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
			/* tell the target we need credits ASAP! */
			*flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
			ep->ep_st.cred_low_indicate += 1;
			ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "host needs credits\n");
		}
	}

	return 0;
}
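
/*
 * Worked example (illustrative; the credit size is a per-target value,
 * not a constant in this file): with tgt_cred_sz == 256, a padded
 * 1664-byte message needs DIV_ROUND_UP(1664, 256) == 7 credits, while
 * any message of 256 bytes or less needs exactly 1.
 */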

static void htc_tx_pkts_get(struct htc_target *target,
			    struct htc_endpoint *endpoint,
			    struct list_head *queue)
{
	int req_cred;
	u8 flags;
	struct htc_packet *packet;
	unsigned int len;

	while (true) {

		flags = 0;

		if (list_empty(&endpoint->txq))
			break;
		packet = list_first_entry(&endpoint->txq, struct htc_packet,
					  list);

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
			   "got head pkt:0x%p , queue depth: %d\n",
			   packet, get_queue_depth(&endpoint->txq));

		len = CALC_TXRX_PADDED_LEN(target->dev,
					   packet->act_len + HTC_HDR_LENGTH);

		if (htc_check_credits(target, endpoint, &flags,
				      packet->endpoint, len, &req_cred))
			break;

		/* now we can fully move onto caller's queue */
		packet = list_first_entry(&endpoint->txq, struct htc_packet,
					  list);
		list_move_tail(&packet->list, queue);

		/* save the number of credits this packet consumed */
		packet->info.tx.cred_used = req_cred;

		/* all TX packets are handled asynchronously */
		packet->completion = htc_tx_comp_handler;
		packet->context = target;
		endpoint->ep_st.tx_issued += 1;

		/* save send flags */
		packet->info.tx.flags = flags;
		packet->info.tx.seqno = endpoint->seqno;
		endpoint->seqno++;
	}
}

/* See if the padded tx length falls on a credit boundary */
static int htc_get_credit_padding(unsigned int cred_sz, int *len,
				  struct htc_endpoint *ep)
{
	int rem_cred, cred_pad;

	rem_cred = *len % cred_sz;

	/* No padding needed */
	if (!rem_cred)
		return 0;

	if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
		return -1;

	/*
	 * The transfer consumes a "partial" credit, this
	 * packet cannot be bundled unless we add
	 * additional "dummy" padding (max 255 bytes) to
	 * consume the entire credit.
	 */
	cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;

	if ((cred_pad > 0) && (cred_pad <= 255))
		*len += cred_pad;
	else
		/* The amount of padding is too large, send as non-bundled */
		return -1;

	return cred_pad;
}
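
/*
 * Worked example (illustrative, assuming a 256-byte credit): a 100-byte
 * transfer leaves rem_cred == 100, so cred_pad == 256 - 100 == 156 and
 * the padded length becomes one full credit. Were the required padding
 * to exceed 255 bytes, the packet would be sent non-bundled instead.
 */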

static int htc_setup_send_scat_list(struct htc_target *target,
				    struct htc_endpoint *endpoint,
				    struct hif_scatter_req *scat_req,
				    int n_scat,
				    struct list_head *queue)
{
	struct htc_packet *packet;
	int i, len, rem_scat, cred_pad;
	int status = 0;

	rem_scat = target->dev->max_tx_bndl_sz;

	for (i = 0; i < n_scat; i++) {
		scat_req->scat_list[i].packet = NULL;

		if (list_empty(queue))
			break;

		packet = list_first_entry(queue, struct htc_packet, list);
		len = CALC_TXRX_PADDED_LEN(target->dev,
					   packet->act_len + HTC_HDR_LENGTH);

		cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
						  &len, endpoint);
		if (cred_pad < 0) {
			status = -EINVAL;
			break;
		}

		if (rem_scat < len) {
			/* exceeds what we can transfer */
			status = -ENOSPC;
			break;
		}

		rem_scat -= len;
		/* now remove it from the queue */
		packet = list_first_entry(queue, struct htc_packet, list);
		list_del(&packet->list);

		scat_req->scat_list[i].packet = packet;
		/* prepare packet and flag message as part of a send bundle */
		htc_prep_send_pkt(packet,
				  packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE,
				  cred_pad, packet->info.tx.seqno);
		scat_req->scat_list[i].buf = packet->buf;
		scat_req->scat_list[i].len = len;

		scat_req->len += len;
		scat_req->scat_entries++;
		ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
			   "%d, adding pkt : 0x%p len:%d (remaining space:%d)\n",
			   i, packet, len, rem_scat);
	}

	/* Roll back scatter setup in case of any failure */
	if (status || (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
		for (i = scat_req->scat_entries - 1; i >= 0; i--) {
			packet = scat_req->scat_list[i].packet;
			if (packet) {
				packet->buf += HTC_HDR_LENGTH;
				list_add(&packet->list, queue);
			}
		}
		return -EINVAL;
	}

	return 0;
}
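
/*
 * Sketch of a successful pass through the loop above (illustrative
 * numbers only): three queued packets with padded lengths 256, 512 and
 * 256 fill scat_list[0..2], leaving scat_req->len == 1024 and
 * scat_req->scat_entries == 3, provided max_tx_bndl_sz is at least
 * 1024 and HTC_MIN_HTC_MSGS_TO_BUNDLE is satisfied.
 */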

/*
 * htc_issue_send_bundle: drain a queue and send as bundles.
 * This function may return without fully draining the queue
 * when
 *
 * 1. scatter resources are exhausted
 * 2. a message that will consume a partial credit will stop the
 *    bundling process early
 * 3. we drop below the minimum number of messages for a bundle
 */
static void htc_issue_send_bundle(struct htc_endpoint *endpoint,
				  struct list_head *queue,
				  int *sent_bundle, int *n_bundle_pkts)
{
	struct htc_target *target = endpoint->target;
	struct hif_scatter_req *scat_req = NULL;
	struct hif_dev_scat_sup_info hif_info;
	int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;

	hif_info = target->dev->hif_scat_info;

	while (true) {
		n_scat = get_queue_depth(queue);
		n_scat = min(n_scat, target->msg_per_bndl_max);

		if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
			/* not enough to bundle */
			break;

		scat_req = hif_scatter_req_get(target->dev->ar);

		if (!scat_req) {
			/* no scatter resources */
			ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
				   "no more scatter resources\n");
			break;
		}

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "pkts to scatter: %d\n",
			   n_scat);

		scat_req->len = 0;
		scat_req->scat_entries = 0;

		if (htc_setup_send_scat_list(target, endpoint, scat_req,
					     n_scat, queue)) {
			hif_scatter_req_add(target->dev->ar, scat_req);
			break;
		}

		/* send path is always asynchronous */
		scat_req->complete = htc_async_tx_scat_complete;
		scat_req->ep = endpoint;
		n_sent_bundle++;
		tot_pkts_bundle += scat_req->scat_entries;

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
			   "send scatter total bytes: %d , entries: %d\n",
			   scat_req->len, scat_req->scat_entries);
		ath6kldev_submit_scat_req(target->dev, scat_req, false);
	}

	*sent_bundle = n_sent_bundle;
	*n_bundle_pkts = tot_pkts_bundle;
	ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_issue_send_bundle (sent:%d)\n",
		   n_sent_bundle);
}

static void htc_tx_from_ep_txq(struct htc_target *target,
			       struct htc_endpoint *endpoint)
{
	struct list_head txq;
	struct htc_packet *packet;
	int bundle_sent;
	int n_pkts_bundle;

	spin_lock_bh(&target->tx_lock);

	endpoint->tx_proc_cnt++;
	if (endpoint->tx_proc_cnt > 1) {
		endpoint->tx_proc_cnt--;
		spin_unlock_bh(&target->tx_lock);
		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_try_send (busy)\n");
		return;
	}

	/*
	 * drain the endpoint TX queue for transmission as long
	 * as we have enough credits.
	 */
	INIT_LIST_HEAD(&txq);

	while (true) {

		if (list_empty(&endpoint->txq))
			break;

		htc_tx_pkts_get(target, endpoint, &txq);

		if (list_empty(&txq))
			break;

		spin_unlock_bh(&target->tx_lock);

		bundle_sent = 0;
		n_pkts_bundle = 0;

		while (true) {
			/* try to send a bundle on each pass */
			if ((target->tx_bndl_enable) &&
			    (get_queue_depth(&txq) >=
			     HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
				int temp1 = 0, temp2 = 0;

				htc_issue_send_bundle(endpoint, &txq,
						      &temp1, &temp2);
				bundle_sent += temp1;
				n_pkts_bundle += temp2;
			}

			if (list_empty(&txq))
				break;

			packet = list_first_entry(&txq, struct htc_packet,
						  list);
			list_del(&packet->list);

			htc_prep_send_pkt(packet, packet->info.tx.flags,
					  0, packet->info.tx.seqno);
			htc_issue_send(target, packet);
		}

		spin_lock_bh(&target->tx_lock);

		endpoint->ep_st.tx_bundles += bundle_sent;
		endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
	}

	endpoint->tx_proc_cnt = 0;
	spin_unlock_bh(&target->tx_lock);
}

static bool htc_try_send(struct htc_target *target,
			 struct htc_endpoint *endpoint,
			 struct htc_packet *tx_pkt)
{
	struct htc_ep_callbacks ep_cb;
	int txq_depth;
	bool overflow = false;

	ep_cb = endpoint->ep_cb;

	spin_lock_bh(&target->tx_lock);
	txq_depth = get_queue_depth(&endpoint->txq);
	spin_unlock_bh(&target->tx_lock);

	if (txq_depth >= endpoint->max_txq_depth)
		overflow = true;

	if (overflow)
		ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
			   "ep %d, tx queue will overflow :%d , tx depth:%d, max:%d\n",
			   endpoint->eid, overflow, txq_depth,
			   endpoint->max_txq_depth);

	if (overflow && ep_cb.tx_full) {
		ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
			   "indicating overflowed tx packet: 0x%p\n", tx_pkt);

		if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
		    HTC_SEND_FULL_DROP) {
			endpoint->ep_st.tx_dropped += 1;
			return false;
		}
	}

	spin_lock_bh(&target->tx_lock);
	list_add_tail(&tx_pkt->list, &endpoint->txq);
	spin_unlock_bh(&target->tx_lock);

	htc_tx_from_ep_txq(target, endpoint);

	return true;
}

static void htc_chk_ep_txq(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_endpoint_credit_dist *cred_dist;

	/*
	 * Run through the credit distribution list to see if there are
	 * packets queued. NOTE: no locks need to be taken since the
	 * distribution list is not dynamic (cannot be re-ordered) and we
	 * are not modifying any state.
	 */
	list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
		endpoint = (struct htc_endpoint *)cred_dist->htc_rsvd;

		spin_lock_bh(&target->tx_lock);
		if (!list_empty(&endpoint->txq)) {
			ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
				   "ep %d has %d credits and %d packets in tx queue\n",
				   cred_dist->endpoint,
				   endpoint->cred_dist.credits,
				   get_queue_depth(&endpoint->txq));
			spin_unlock_bh(&target->tx_lock);
			/*
			 * Try to start the stalled queue, this list is
			 * ordered by priority. If there are credits
			 * available the highest priority queue will get a
			 * chance to reclaim credits from lower priority
			 * ones.
			 */
			htc_tx_from_ep_txq(target, endpoint);
			spin_lock_bh(&target->tx_lock);
		}
		spin_unlock_bh(&target->tx_lock);
	}
}

static int htc_setup_tx_complete(struct htc_target *target)
{
	struct htc_packet *send_pkt = NULL;
	int status;

	send_pkt = htc_get_control_buf(target, true);

	if (!send_pkt)
		return -ENOMEM;

	if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
		struct htc_setup_comp_ext_msg *setup_comp_ext;
		u32 flags = 0;

		setup_comp_ext =
		    (struct htc_setup_comp_ext_msg *)send_pkt->buf;
		memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
		setup_comp_ext->msg_id =
			cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);

		if (target->msg_per_bndl_max > 0) {
			/* Indicate HTC bundling to the target */
			flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
			setup_comp_ext->msg_per_rxbndl =
				target->msg_per_bndl_max;
		}

		memcpy(&setup_comp_ext->flags, &flags,
		       sizeof(setup_comp_ext->flags));
		set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
				 sizeof(struct htc_setup_comp_ext_msg),
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

	} else {
		struct htc_setup_comp_msg *setup_comp;
		setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
		memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
		setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
		set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
				 sizeof(struct htc_setup_comp_msg),
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
	}

	/* we want synchronous operation */
	send_pkt->completion = NULL;
	htc_prep_send_pkt(send_pkt, 0, 0, 0);
	status = htc_issue_send(target, send_pkt);

	if (send_pkt != NULL)
		htc_reclaim_txctrl_buf(target, send_pkt);

	return status;
}

void htc_set_credit_dist(struct htc_target *target,
			 struct htc_credit_state_info *cred_dist_cntxt,
			 u16 srvc_pri_order[], int list_len)
{
	struct htc_endpoint *endpoint;
	int i, ep;

	target->cred_dist_cntxt = cred_dist_cntxt;

	list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
		      &target->cred_dist_list);

	for (i = 0; i < list_len; i++) {
		for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
			endpoint = &target->endpoint[ep];
			if (endpoint->svc_id == srvc_pri_order[i]) {
				list_add_tail(&endpoint->cred_dist.list,
					      &target->cred_dist_list);
				break;
			}
		}
		if (ep >= ENDPOINT_MAX) {
			WARN_ON(1);
			return;
		}
	}
}

int htc_tx(struct htc_target *target, struct htc_packet *packet)
{
	struct htc_endpoint *endpoint;
	struct list_head queue;

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
		   "htc_tx: ep id: %d, buf: 0x%p, len: %d\n",
		   packet->endpoint, packet->buf, packet->act_len);

	if (packet->endpoint >= ENDPOINT_MAX) {
		WARN_ON(1);
		return -EINVAL;
	}

	endpoint = &target->endpoint[packet->endpoint];

	if (!htc_try_send(target, endpoint, packet)) {
		packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
				 -ECANCELED : -ENOSPC;
		INIT_LIST_HEAD(&queue);
		list_add(&packet->list, &queue);
		htc_tx_complete(endpoint, &queue);
	}

	return 0;
}

/* flush endpoint TX queue */
void htc_flush_txep(struct htc_target *target,
		    enum htc_endpoint_id eid, u16 tag)
{
	struct htc_packet *packet, *tmp_pkt;
	struct list_head discard_q, container;
	struct htc_endpoint *endpoint = &target->endpoint[eid];

	if (!endpoint->svc_id) {
		WARN_ON(1);
		return;
	}

	/* initialize the discard queue */
	INIT_LIST_HEAD(&discard_q);

	spin_lock_bh(&target->tx_lock);

	list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
		if ((tag == HTC_TX_PACKET_TAG_ALL) ||
		    (tag == packet->info.tx.tag))
			list_move_tail(&packet->list, &discard_q);
	}

	spin_unlock_bh(&target->tx_lock);

	list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
		packet->status = -ECANCELED;
		list_del(&packet->list);
		ath6kl_dbg(ATH6KL_DBG_TRC,
			   "flushing tx pkt:0x%p, len:%d, ep:%d tag:0x%X\n",
			   packet, packet->act_len,
			   packet->endpoint, packet->info.tx.tag);

		INIT_LIST_HEAD(&container);
		list_add_tail(&packet->list, &container);
		htc_tx_complete(endpoint, &container);
	}
}

static void htc_flush_txep_all(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	int i;

	dump_cred_dist_stats(target);

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		if (endpoint->svc_id == 0)
			/* not in use.. */
			continue;
		htc_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
	}
}

void htc_indicate_activity_change(struct htc_target *target,
				  enum htc_endpoint_id eid, bool active)
{
	struct htc_endpoint *endpoint = &target->endpoint[eid];
	bool dist = false;

	if (endpoint->svc_id == 0) {
		WARN_ON(1);
		return;
	}

	spin_lock_bh(&target->tx_lock);

	if (active) {
		if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
			endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
			dist = true;
		}
	} else {
		if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
			endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
			dist = true;
		}
	}

	if (dist) {
		endpoint->cred_dist.txq_depth =
			get_queue_depth(&endpoint->txq);

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
			   target->cred_dist_cntxt, &target->cred_dist_list);

		ath6k_credit_distribute(target->cred_dist_cntxt,
					&target->cred_dist_list,
					HTC_CREDIT_DIST_ACTIVITY_CHANGE);
	}

	spin_unlock_bh(&target->tx_lock);

	if (dist && !active)
		htc_chk_ep_txq(target);
}

/* HTC Rx */

static inline void htc_update_rx_stats(struct htc_endpoint *endpoint,
				       int n_look_ahds)
{
	endpoint->ep_st.rx_pkts++;
	if (n_look_ahds == 1)
		endpoint->ep_st.rx_lkahds++;
	else if (n_look_ahds > 1)
		endpoint->ep_st.rx_bundle_lkahd++;
}

static inline bool htc_valid_rx_frame_len(struct htc_target *target,
					  enum htc_endpoint_id eid, int len)
{
	return (eid == target->dev->ar->ctrl_ep) ?
		len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
}

static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
{
	struct list_head queue;

	INIT_LIST_HEAD(&queue);
	list_add_tail(&packet->list, &queue);
	return htc_add_rxbuf_multiple(target, &queue);
}

static void htc_reclaim_rxbuf(struct htc_target *target,
			      struct htc_packet *packet,
			      struct htc_endpoint *ep)
{
	if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
		htc_rxpkt_reset(packet);
		packet->status = -ECANCELED;
		ep->ep_cb.rx(ep->target, packet);
	} else {
		htc_rxpkt_reset(packet);
		htc_add_rxbuf((void *)(target), packet);
	}
}

static void reclaim_rx_ctrl_buf(struct htc_target *target,
				struct htc_packet *packet)
{
	spin_lock_bh(&target->htc_lock);
	list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
	spin_unlock_bh(&target->htc_lock);
}

static int dev_rx_pkt(struct htc_target *target, struct htc_packet *packet,
		      u32 rx_len)
{
	struct ath6kl_device *dev = target->dev;
	u32 padded_len;
	int status;

	padded_len = CALC_TXRX_PADDED_LEN(dev, rx_len);

	if (padded_len > packet->buf_len) {
		ath6kl_err("not enough receive space for packet - padlen:%d recvlen:%d bufferlen:%d\n",
			   padded_len, rx_len, packet->buf_len);
		return -ENOMEM;
	}

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "dev_rx_pkt (0x%p : hdr:0x%X) padded len: %d mbox:0x%X (mode:%s)\n",
		   packet, packet->info.rx.exp_hdr,
		   padded_len, dev->ar->mbox_info.htc_addr, "sync");

	status = hif_read_write_sync(dev->ar,
				     dev->ar->mbox_info.htc_addr,
				     packet->buf, padded_len,
				     HIF_RD_SYNC_BLOCK_FIX);

	packet->status = status;

	return status;
}

/*
 * optimization for recv packets, we can indicate a
 * "hint" that there are more single-packets to fetch
 * on this endpoint.
 */
static void set_rxpkt_indication_flag(u32 lk_ahd,
				      struct htc_endpoint *endpoint,
				      struct htc_packet *packet)
{
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;

	if (htc_hdr->eid == packet->endpoint) {
		if (!list_empty(&endpoint->rx_bufq))
			packet->info.rx.indicat_flags |=
				HTC_RX_FLAGS_INDICATE_MORE_PKTS;
	}
}

static void chk_rx_water_mark(struct htc_endpoint *endpoint)
{
	struct htc_ep_callbacks ep_cb = endpoint->ep_cb;

	if (ep_cb.rx_refill_thresh > 0) {
		spin_lock_bh(&endpoint->target->rx_lock);
		if (get_queue_depth(&endpoint->rx_bufq)
		    < ep_cb.rx_refill_thresh) {
			spin_unlock_bh(&endpoint->target->rx_lock);
			ep_cb.rx_refill(endpoint->target, endpoint->eid);
			return;
		}
		spin_unlock_bh(&endpoint->target->rx_lock);
	}
}

/* This function is called with rx_lock held */
static int htc_setup_rxpkts(struct htc_target *target, struct htc_endpoint *ep,
			    u32 *lk_ahds, struct list_head *queue, int n_msg)
{
	struct htc_packet *packet;
	/* FIXME: type of lk_ahds can't be right */
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
	struct htc_ep_callbacks ep_cb;
	int status = 0, j, full_len;
	bool no_recycle;

	full_len = CALC_TXRX_PADDED_LEN(target->dev,
					le16_to_cpu(htc_hdr->payld_len) +
					sizeof(*htc_hdr));

	if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
		ath6kl_warn("Rx buffer requested with invalid length\n");
		return -EINVAL;
	}

	ep_cb = ep->ep_cb;
	for (j = 0; j < n_msg; j++) {

		/*
		 * Reset flag, any packets allocated using the
		 * rx_alloc() API cannot be recycled on
		 * cleanup, they must be explicitly returned.
		 */
		no_recycle = false;

		if (ep_cb.rx_allocthresh &&
		    (full_len > ep_cb.rx_alloc_thresh)) {
			ep->ep_st.rx_alloc_thresh_hit += 1;
			ep->ep_st.rxalloc_thresh_byte +=
				le16_to_cpu(htc_hdr->payld_len);

			spin_unlock_bh(&target->rx_lock);
			no_recycle = true;

			packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
						      full_len);
			spin_lock_bh(&target->rx_lock);
		} else {
			/* refill handler is being used */
			if (list_empty(&ep->rx_bufq)) {
				if (ep_cb.rx_refill) {
					spin_unlock_bh(&target->rx_lock);
					ep_cb.rx_refill(ep->target, ep->eid);
					spin_lock_bh(&target->rx_lock);
				}
			}

			if (list_empty(&ep->rx_bufq))
				packet = NULL;
			else {
				packet = list_first_entry(&ep->rx_bufq,
						struct htc_packet, list);
				list_del(&packet->list);
			}
		}

		if (!packet) {
			target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ep->eid;
			return -ENOSPC;
		}

		/* clear flags */
		packet->info.rx.rx_flags = 0;
		packet->info.rx.indicat_flags = 0;
		packet->status = 0;

		if (no_recycle)
			/*
			 * flag that these packets cannot be
			 * recycled, they have to be returned to
			 * the user
			 */
			packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;

		/* Caller needs to free this upon any failure */
		list_add_tail(&packet->list, queue);

		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
			status = -ECANCELED;
			break;
		}

		if (j) {
			packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
			packet->info.rx.exp_hdr = 0xFFFFFFFF;
		} else
			/* set expected look ahead */
			packet->info.rx.exp_hdr = *lk_ahds;

		packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
				  HTC_HDR_LENGTH;
	}

	return status;
}

static int alloc_and_prep_rxpkts(struct htc_target *target,
				 u32 lk_ahds[], int msg,
				 struct htc_endpoint *endpoint,
				 struct list_head *queue)
{
	int status = 0;
	struct htc_packet *packet, *tmp_pkt;
	struct htc_frame_hdr *htc_hdr;
	int i, n_msg;

	spin_lock_bh(&target->rx_lock);

	for (i = 0; i < msg; i++) {

		htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];

		if (htc_hdr->eid >= ENDPOINT_MAX) {
			ath6kl_err("invalid ep in look-ahead: %d\n",
				   htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->eid != endpoint->eid) {
			ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
				   htc_hdr->eid, endpoint->eid, i);
			status = -ENOMEM;
			break;
		}

		if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
			ath6kl_err("payload len %d exceeds max htc : %d !\n",
				   htc_hdr->payld_len,
				   (u32) HTC_MAX_PAYLOAD_LENGTH);
			status = -ENOMEM;
			break;
		}

		if (endpoint->svc_id == 0) {
			ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
			/*
			 * HTC header indicates that every packet to follow
			 * has the same padded length so that it can be
			 * optimally fetched as a full bundle.
			 */
			n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
				HTC_FLG_RX_BNDL_CNT_S;

			/* the count doesn't include the starter frame */
			n_msg++;
			if (n_msg > target->msg_per_bndl_max) {
				status = -ENOMEM;
				break;
			}

			endpoint->ep_st.rx_bundle_from_hdr += 1;
			ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
				   "htc hdr indicates :%d msg can be fetched as a bundle\n",
				   n_msg);
		} else
			/* HTC header only indicates 1 message to fetch */
			n_msg = 1;

		/* Setup packet buffers for each message */
		status = htc_setup_rxpkts(target, endpoint, &lk_ahds[i], queue,
					  n_msg);

		/*
		 * This is due to unavailability of buffers to rx entire data.
		 * Return no error so that free buffers from queue can be used
		 * to receive partial data.
		 */
		if (status == -ENOSPC) {
			spin_unlock_bh(&target->rx_lock);
			return 0;
		}

		if (status)
			break;
	}

	spin_unlock_bh(&target->rx_lock);

	if (status) {
		list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
			list_del(&packet->list);
			htc_reclaim_rxbuf(target, packet,
					  &target->endpoint[packet->endpoint]);
		}
	}

	return status;
}
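
/*
 * Decode example for the bundle count above (illustrative): if the
 * HTC_FLG_RX_BNDL_CNT bit-field in the look-ahead's flags byte holds
 * the value 3, n_msg becomes 4: the starter frame plus three follow-on
 * frames of the same padded length, fetched as one bundle.
 */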

static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
{
	if (packets->endpoint != ENDPOINT_0) {
		WARN_ON(1);
		return;
	}

	if (packets->status == -ECANCELED) {
		reclaim_rx_ctrl_buf(context, packets);
		return;
	}

	if (packets->act_len > 0) {
		ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
			   packets->act_len + HTC_HDR_LENGTH);

		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
				"Unexpected ENDPOINT 0 Message",
				packets->buf - HTC_HDR_LENGTH,
				packets->act_len + HTC_HDR_LENGTH);
	}

	htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
}

static void htc_proc_cred_rpt(struct htc_target *target,
			      struct htc_credit_report *rpt,
			      int n_entries,
			      enum htc_endpoint_id from_ep)
{
	struct htc_endpoint *endpoint;
	int tot_credits = 0, i;
	bool dist = false;

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
		   "htc_proc_cred_rpt, credit report entries:%d\n", n_entries);

	spin_lock_bh(&target->tx_lock);

	for (i = 0; i < n_entries; i++, rpt++) {
		if (rpt->eid >= ENDPOINT_MAX) {
			WARN_ON(1);
			spin_unlock_bh(&target->tx_lock);
			return;
		}

		endpoint = &target->endpoint[rpt->eid];

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, " ep %d got %d credits\n",
			   rpt->eid, rpt->credits);

		endpoint->ep_st.tx_cred_rpt += 1;
		endpoint->ep_st.cred_retnd += rpt->credits;

		if (from_ep == rpt->eid) {
			/*
			 * This credit report arrived on the same endpoint,
			 * indicating it arrived in an RX packet.
			 */
			endpoint->ep_st.cred_from_rx += rpt->credits;
			endpoint->ep_st.cred_rpt_from_rx += 1;
		} else if (from_ep == ENDPOINT_0) {
			/* credit arrived on endpoint 0 as a NULL message */
			endpoint->ep_st.cred_from_ep0 += rpt->credits;
			endpoint->ep_st.cred_rpt_ep0 += 1;
		} else {
			endpoint->ep_st.cred_from_other += rpt->credits;
			endpoint->ep_st.cred_rpt_from_other += 1;
		}

		if (ENDPOINT_0 == rpt->eid)
			/* always give endpoint 0 credits back */
			endpoint->cred_dist.credits += rpt->credits;
		else {
			endpoint->cred_dist.cred_to_dist += rpt->credits;
			dist = true;
		}

		/*
		 * Refresh tx depth for distribution function that will
		 * recover these credits. NOTE: this is only valid when
		 * there are credits to recover!
		 */
		endpoint->cred_dist.txq_depth =
			get_queue_depth(&endpoint->txq);

		tot_credits += rpt->credits;
	}

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
		   "report indicated %d credits to distribute\n",
		   tot_credits);

	if (dist) {
		/*
		 * This was a credit return based on a completed send
		 * operation. Note: this is done with the lock held.
		 */
		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
			   target->cred_dist_cntxt, &target->cred_dist_list);

		ath6k_credit_distribute(target->cred_dist_cntxt,
					&target->cred_dist_list,
					HTC_CREDIT_DIST_SEND_COMPLETE);
	}

	spin_unlock_bh(&target->tx_lock);

	if (tot_credits)
		htc_chk_ep_txq(target);
}

static int htc_parse_trailer(struct htc_target *target,
			     struct htc_record_hdr *record,
			     u8 *record_buf, u32 *next_lk_ahds,
			     enum htc_endpoint_id endpoint,
			     int *n_lk_ahds)
{
	struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
	struct htc_lookahead_report *lk_ahd;
	int len;

	switch (record->rec_id) {
	case HTC_RECORD_CREDITS:
		len = record->len / sizeof(struct htc_credit_report);
		if (!len) {
			WARN_ON(1);
			return -EINVAL;
		}

		htc_proc_cred_rpt(target,
				  (struct htc_credit_report *) record_buf,
				  len, endpoint);
		break;
	case HTC_RECORD_LOOKAHEAD:
		len = record->len / sizeof(*lk_ahd);
		if (!len) {
			WARN_ON(1);
			return -EINVAL;
		}

		lk_ahd = (struct htc_lookahead_report *) record_buf;
		if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
		    && next_lk_ahds) {

			ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
				   "lk_ahd report found (pre valid:0x%X, post valid:0x%X)\n",
				   lk_ahd->pre_valid, lk_ahd->post_valid);

			/* look ahead bytes are valid, copy them over */
			memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);

			ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Next Look Ahead",
					next_lk_ahds, 4);

			*n_lk_ahds = 1;
		}
		break;
	case HTC_RECORD_LOOKAHEAD_BUNDLE:
		len = record->len / sizeof(*bundle_lkahd_rpt);
		if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
			WARN_ON(1);
			return -EINVAL;
		}

		if (next_lk_ahds) {
			int i;

			bundle_lkahd_rpt =
				(struct htc_bundle_lkahd_rpt *) record_buf;

			ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Bundle lk_ahd",
					record_buf, record->len);

			for (i = 0; i < len; i++) {
				memcpy((u8 *)&next_lk_ahds[i],
				       bundle_lkahd_rpt->lk_ahd, 4);
				bundle_lkahd_rpt++;
			}

			*n_lk_ahds = i;
		}
		break;
	default:
		ath6kl_err("unhandled record: id:%d len:%d\n",
			   record->rec_id, record->len);
		break;
	}

	return 0;
}

static int htc_proc_trailer(struct htc_target *target,
			    u8 *buf, int len, u32 *next_lk_ahds,
			    int *n_lk_ahds, enum htc_endpoint_id endpoint)
{
	struct htc_record_hdr *record;
	int orig_len;
	int status;
	u8 *record_buf;
	u8 *orig_buf;

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "+htc_proc_trailer (len:%d)\n", len);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Recv Trailer", buf, len);

	orig_buf = buf;
	orig_len = len;
	status = 0;

	while (len > 0) {

		if (len < sizeof(struct htc_record_hdr)) {
			status = -ENOMEM;
			break;
		}
		/* these are byte aligned structs */
		record = (struct htc_record_hdr *) buf;
		len -= sizeof(struct htc_record_hdr);
		buf += sizeof(struct htc_record_hdr);

		if (record->len > len) {
			ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
				   record->len, record->rec_id, len);
			status = -ENOMEM;
			break;
		}
		record_buf = buf;

		status = htc_parse_trailer(target, record, record_buf,
					   next_lk_ahds, endpoint, n_lk_ahds);

		if (status)
			break;

		/* advance buffer past this record for next time around */
		buf += record->len;
		len -= record->len;
	}

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD Recv Trailer",
			orig_buf, orig_len);

	return status;
}
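
/*
 * Trailer layout walked above (editorial sketch derived from the
 * parsing code; field widths are those of struct htc_record_hdr):
 *
 *	+--------+-----+---------------------+--------+-----+-----
 *	| rec_id | len | payload (len bytes) | rec_id | len | ...
 *	+--------+-----+---------------------+--------+-----+-----
 *
 * Records are byte-aligned and parsed until the remaining trailer
 * length is exhausted or a record overruns the buffer.
 */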

static int htc_proc_rxhdr(struct htc_target *target,
			  struct htc_packet *packet,
			  u32 *next_lkahds, int *n_lkahds)
{
	int status = 0;
	u16 payload_len;
	u32 lk_ahd;
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;

	if (n_lkahds != NULL)
		*n_lkahds = 0;

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "HTC Recv PKT", packet->buf,
			packet->act_len);

	/*
	 * NOTE: we cannot assume the alignment of buf, so we use the safe
	 * macros to retrieve 16 bit fields.
	 */
	payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));

	memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));

	if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
		/*
		 * Refresh the expected header and the actual length as it
		 * was unknown when this packet was grabbed as part of the
		 * bundle.
		 */
		packet->info.rx.exp_hdr = lk_ahd;
		packet->act_len = payload_len + HTC_HDR_LENGTH;

		/* validate the actual header that was refreshed */
		if (packet->act_len > packet->buf_len) {
			ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
				   payload_len, lk_ahd);
			/*
			 * Limit this to max buffer just to print out some
			 * of the buffer.
			 */
			packet->act_len = min(packet->act_len, packet->buf_len);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->endpoint != htc_hdr->eid) {
			ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
				   htc_hdr->eid, packet->endpoint);
			status = -ENOMEM;
			goto fail_rx;
		}
	}

	if (lk_ahd != packet->info.rx.exp_hdr) {
		ath6kl_err("htc_proc_rxhdr, lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
			   packet, packet->info.rx.rx_flags);
		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Expected Message lk_ahd",
				&packet->info.rx.exp_hdr, 4);
		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Current Frame Header",
				(u8 *)&lk_ahd, sizeof(lk_ahd));
		status = -ENOMEM;
		goto fail_rx;
	}

	if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
		if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
		    htc_hdr->ctrl[0] > payload_len) {
			ath6kl_err("htc_proc_rxhdr, invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
				   payload_len, htc_hdr->ctrl[0]);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
			next_lkahds = NULL;
			n_lkahds = NULL;
		}

		status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
					  + payload_len - htc_hdr->ctrl[0],
					  htc_hdr->ctrl[0], next_lkahds,
					  n_lkahds, packet->endpoint);

		if (status)
			goto fail_rx;

		packet->act_len -= htc_hdr->ctrl[0];
	}

	packet->buf += HTC_HDR_LENGTH;
	packet->act_len -= HTC_HDR_LENGTH;

fail_rx:
	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD HTC Recv PKT",
				packet->buf,
				packet->act_len < 256 ? packet->act_len : 256);
	else {
		if (packet->act_len > 0)
			ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
					"HTC - Application Msg",
					packet->buf, packet->act_len);
	}

	return status;
}

static void do_rx_completion(struct htc_endpoint *endpoint,
			     struct htc_packet *packet)
{
	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "htc calling ep %d recv callback on packet 0x%p\n",
		   endpoint->eid, packet);
	endpoint->ep_cb.rx(endpoint->target, packet);
}

static int htc_issue_rxpkt_bundle(struct htc_target *target,
				  struct list_head *rxq,
				  struct list_head *sync_compq,
				  int *n_pkt_fetched, bool part_bundle)
{
	struct hif_scatter_req *scat_req;
	struct htc_packet *packet;
	int rem_space = target->dev->max_rx_bndl_sz;
	int n_scat_pkt, status = 0, i, len;

	n_scat_pkt = get_queue_depth(rxq);
	n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);

	if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
		/*
		 * We were forced to split this bundle receive operation;
		 * all packets in this partial bundle must have their
		 * lookaheads ignored.
		 */
		part_bundle = true;

		/*
		 * This would only happen if the target ignored our max
		 * bundle limit.
		 */
		ath6kl_warn("htc_issue_rxpkt_bundle : partial bundle detected num:%d , %d\n",
			    get_queue_depth(rxq), n_scat_pkt);
	}

	len = 0;

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "htc_issue_rxpkt_bundle (numpackets: %d , actual : %d)\n",
		   get_queue_depth(rxq), n_scat_pkt);

	scat_req = hif_scatter_req_get(target->dev->ar);

	if (scat_req == NULL)
		goto fail_rx_pkt;

	scat_req->flags = 0;

	if (part_bundle)
		scat_req->flags |=
			HTC_SCAT_REQ_FLG_PART_BNDL;

	for (i = 0; i < n_scat_pkt; i++) {
		int pad_len;

		packet = list_first_entry(rxq, struct htc_packet, list);
		list_del(&packet->list);

		pad_len = CALC_TXRX_PADDED_LEN(target->dev,
					       packet->act_len);

		if ((rem_space - pad_len) < 0) {
			list_add(&packet->list, rxq);
			break;
		}

		rem_space -= pad_len;

		if (part_bundle || (i < (n_scat_pkt - 1)))
			/*
			 * Packets 0..n-1 cannot be checked for look-aheads
			 * since we are fetching a bundle; the last packet,
			 * however, can have its lookahead used.
			 */
			packet->info.rx.rx_flags |=
				HTC_RX_PKT_IGNORE_LOOKAHEAD;

		/* NOTE: 1 HTC packet per scatter entry */
		scat_req->scat_list[i].buf = packet->buf;
		scat_req->scat_list[i].len = pad_len;

		packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;

		list_add_tail(&packet->list, sync_compq);

		WARN_ON(!scat_req->scat_list[i].len);
		len += scat_req->scat_list[i].len;
	}

	scat_req->len = len;
	scat_req->scat_entries = i;

	status = ath6kldev_submit_scat_req(target->dev, scat_req, true);

	if (!status)
		*n_pkt_fetched = i;

	/* free scatter request */
	hif_scatter_req_add(target->dev->ar, scat_req);

fail_rx_pkt:

	return status;
}
1624 | |||
1625 | static int htc_proc_fetched_rxpkts(struct htc_target *target, | ||
1626 | struct list_head *comp_pktq, u32 lk_ahds[], | ||
1627 | int *n_lk_ahd) | ||
1628 | { | ||
1629 | struct htc_packet *packet, *tmp_pkt; | ||
1630 | struct htc_endpoint *ep; | ||
1631 | int status = 0; | ||
1632 | |||
1633 | list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) { | ||
1634 | list_del(&packet->list); | ||
1635 | ep = &target->endpoint[packet->endpoint]; | ||
1636 | |||
1637 | /* process the header of each recv packet */ | ||
1638 | status = htc_proc_rxhdr(target, packet, lk_ahds, n_lk_ahd); | ||
1639 | if (status) | ||
1640 | return status; | ||
1641 | |||
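| /* | ||
| * Worked example (illustrative): for a fetched bundle of three | ||
| * packets, the first two still see entries left in comp_pktq and | ||
| * unconditionally get HTC_RX_FLAGS_INDICATE_MORE_PKTS; only for | ||
| * the third, with the queue empty, is the flag derived from the | ||
| * fresh lookahead, if the target reported one. | ||
| */ | ||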
1642 | if (list_empty(comp_pktq)) { | ||
1643 | /* | ||
1644 | * The last packet's more-packets flag is set | ||
1645 | * based on the lookahead. | ||
1646 | */ | ||
1647 | if (*n_lk_ahd > 0) | ||
1648 | set_rxpkt_indication_flag(lk_ahds[0], | ||
1649 | ep, packet); | ||
1650 | } else | ||
1651 | /* | ||
1652 | * Packets in a bundle automatically have | ||
1653 | * this flag set. | ||
1654 | */ | ||
1655 | packet->info.rx.indicat_flags |= | ||
1656 | HTC_RX_FLAGS_INDICATE_MORE_PKTS; | ||
1657 | |||
1658 | htc_update_rx_stats(ep, *n_lk_ahd); | ||
1659 | |||
1660 | if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE) | ||
1661 | ep->ep_st.rx_bundl += 1; | ||
1662 | |||
1663 | do_rx_completion(ep, packet); | ||
1664 | } | ||
1665 | |||
1666 | return status; | ||
1667 | } | ||
1668 | |||
1669 | static int htc_fetch_rxpkts(struct htc_target *target, | ||
1670 | struct list_head *rx_pktq, | ||
1671 | struct list_head *comp_pktq) | ||
1672 | { | ||
1673 | int fetched_pkts; | ||
1674 | bool part_bundle = false; | ||
1675 | int status = 0; | ||
1676 | |||
1677 | /* now go fetch the list of HTC packets */ | ||
1678 | while (!list_empty(rx_pktq)) { | ||
1679 | fetched_pkts = 0; | ||
1680 | |||
1681 | if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) { | ||
1682 | /* | ||
1683 | * There are enough packets to attempt a | ||
1684 | * bundle transfer and recv bundling is | ||
1685 | * allowed. | ||
1686 | */ | ||
1687 | status = htc_issue_rxpkt_bundle(target, rx_pktq, | ||
1688 | comp_pktq, | ||
1689 | &fetched_pkts, | ||
1690 | part_bundle); | ||
1691 | if (status) | ||
1692 | return status; | ||
1693 | |||
1694 | if (!list_empty(rx_pktq)) | ||
1695 | part_bundle = true; | ||
1696 | } | ||
1697 | |||
1698 | if (!fetched_pkts) { | ||
1699 | struct htc_packet *packet; | ||
1700 | |||
1701 | packet = list_first_entry(rx_pktq, struct htc_packet, | ||
1702 | list); | ||
1703 | |||
1704 | list_del(&packet->list); | ||
1705 | |||
1706 | /* fully synchronous */ | ||
1707 | packet->completion = NULL; | ||
1708 | |||
1709 | if (!list_empty(rx_pktq)) | ||
1710 | /* | ||
1711 | * look_aheads in all packets | ||
1712 | * except the last one in the | ||
1713 | * bundle must be ignored | ||
1714 | */ | ||
1715 | packet->info.rx.rx_flags |= | ||
1716 | HTC_RX_PKT_IGNORE_LOOKAHEAD; | ||
1717 | |||
1718 | /* go fetch the packet */ | ||
1719 | status = dev_rx_pkt(target, packet, packet->act_len); | ||
1720 | if (status) | ||
1721 | return status; | ||
1722 | |||
1723 | list_add_tail(&packet->list, comp_pktq); | ||
1724 | } | ||
1725 | } | ||
1726 | |||
1727 | return status; | ||
1728 | } | ||
1729 | |||
1730 | static int htc_rxmsg_pending_handler(struct htc_target *target, | ||
1731 | u32 msg_look_ahead[], | ||
1732 | int *num_pkts) | ||
1733 | { | ||
1734 | struct htc_packet *packets, *tmp_pkt; | ||
1735 | struct htc_endpoint *endpoint; | ||
1736 | struct list_head rx_pktq, comp_pktq; | ||
1737 | int status = 0; | ||
1738 | u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE]; | ||
1739 | int num_look_ahead = 1; | ||
1740 | enum htc_endpoint_id id; | ||
1741 | int n_fetched = 0; | ||
1742 | |||
1743 | *num_pkts = 0; | ||
1744 | |||
1745 | /* | ||
1746 | * On first entry, copy the look_aheads into our temp array | ||
1747 | * for processing. | ||
1748 | */ | ||
1749 | memcpy(look_aheads, msg_look_ahead, sizeof(look_aheads)); | ||
1750 | |||
1751 | while (true) { | ||
1752 | |||
1753 | INIT_LIST_HEAD(&rx_pktq); | ||
1754 | INIT_LIST_HEAD(&comp_pktq); | ||
1755 | |||
1756 | /* | ||
1757 | * First lookahead sets the expected endpoint IDs for all | ||
1758 | * packets in a bundle. | ||
1759 | */ | ||
1760 | id = ((struct htc_frame_hdr *)&look_aheads[0])->eid; | ||
1761 | |||
1762 | if (id >= ENDPOINT_MAX) { | ||
1763 | ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n", | ||
1764 | id); | ||
1765 | status = -EINVAL; | ||
1766 | break; | ||
1767 | } | ||
1768 | endpoint = &target->endpoint[id]; | ||
1769 | |||
1770 | /* | ||
1771 | * Try to allocate as many HTC RX packets as indicated by | ||
1772 | * the look_aheads. | ||
1773 | */ | ||
1774 | status = alloc_and_prep_rxpkts(target, look_aheads, | ||
1775 | num_look_ahead, endpoint, | ||
1776 | &rx_pktq); | ||
1777 | if (status) | ||
1778 | break; | ||
1779 | |||
1780 | if (get_queue_depth(&rx_pktq) >= 2) | ||
1781 | /* | ||
1782 | * A recv bundle was detected; force an IRQ status | ||
1783 | * re-check | ||
1784 | */ | ||
1785 | target->dev->chk_irq_status_cnt = 1; | ||
1786 | |||
1787 | n_fetched += get_queue_depth(&rx_pktq); | ||
1788 | |||
1789 | num_look_ahead = 0; | ||
1790 | |||
1791 | status = htc_fetch_rxpkts(target, &rx_pktq, &comp_pktq); | ||
1792 | |||
1793 | if (!status) | ||
1794 | chk_rx_water_mark(endpoint); | ||
1795 | |||
1796 | /* Process fetched packets */ | ||
1797 | status = htc_proc_fetched_rxpkts(target, &comp_pktq, | ||
1798 | look_aheads, &num_look_ahead); | ||
1799 | |||
1800 | if (!num_look_ahead || status) | ||
1801 | break; | ||
1802 | |||
1803 | /* | ||
1804 | * For SYNCH processing, if we get here, we are running | ||
1805 | * through the loop again due to a detected lookahead. Set | ||
1806 | * flag that we should re-check IRQ status registers again | ||
1807 | * before leaving IRQ processing; this can net better | ||
1808 | * performance in high-throughput situations. | ||
1809 | */ | ||
1810 | target->dev->chk_irq_status_cnt = 1; | ||
1811 | } | ||
1812 | |||
1813 | if (status) { | ||
1814 | ath6kl_err("failed to get pending recv messages: %d\n", | ||
1815 | status); | ||
1816 | /* | ||
1817 | * Clean up any packets we allocated but didn't | ||
1818 | * actually use to fetch anything. | ||
1819 | */ | ||
1820 | list_for_each_entry_safe(packets, tmp_pkt, &rx_pktq, list) { | ||
1821 | list_del(&packets->list); | ||
1822 | htc_reclaim_rxbuf(target, packets, | ||
1823 | &target->endpoint[packets->endpoint]); | ||
1824 | } | ||
1825 | |||
1826 | /* clean up any packets in the sync completion queue */ | ||
1827 | list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) { | ||
1828 | list_del(&packets->list); | ||
1829 | htc_reclaim_rxbuf(target, packets, | ||
1830 | &target->endpoint[packets->endpoint]); | ||
1831 | } | ||
1832 | |||
1833 | if (target->htc_flags & HTC_OP_STATE_STOPPING) { | ||
1834 | ath6kl_warn("host is going to stop, blocking receiver for htc_stop\n"); | ||
1835 | ath6kldev_rx_control(target->dev, false); | ||
1836 | } | ||
1837 | } | ||
1838 | |||
1839 | /* | ||
1840 | * Before leaving, check to see if host ran out of buffers and | ||
1841 | * needs to stop the receiver. | ||
1842 | */ | ||
1843 | if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) { | ||
1844 | ath6kl_warn("host has no rx buffers, blocking receiver to prevent overrun\n"); | ||
1845 | ath6kldev_rx_control(target->dev, false); | ||
1846 | } | ||
1847 | *num_pkts = n_fetched; | ||
1848 | |||
1849 | return status; | ||
1850 | } | ||
1851 | |||
1852 | /* | ||
1853 | * Synchronously wait for a control message from the target. | ||
1854 | * This function is used at initialization time ONLY. At init, | ||
1855 | * messages on ENDPOINT 0 are expected. | ||
1856 | */ | ||
1857 | static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target) | ||
1858 | { | ||
1859 | struct htc_packet *packet = NULL; | ||
1860 | struct htc_frame_hdr *htc_hdr; | ||
1861 | u32 look_ahead; | ||
1862 | |||
1863 | if (ath6kldev_poll_mboxmsg_rx(target->dev, &look_ahead, | ||
1864 | HTC_TARGET_RESPONSE_TIMEOUT)) | ||
1865 | return NULL; | ||
1866 | |||
1867 | ath6kl_dbg(ATH6KL_DBG_HTC_RECV, | ||
1868 | "htc_wait_for_ctrl_msg: look_ahead : 0x%X\n", look_ahead); | ||
1869 | |||
1870 | htc_hdr = (struct htc_frame_hdr *)&look_ahead; | ||
1871 | |||
1872 | if (htc_hdr->eid != ENDPOINT_0) | ||
1873 | return NULL; | ||
1874 | |||
1875 | packet = htc_get_control_buf(target, false); | ||
1876 | |||
1877 | if (!packet) | ||
1878 | return NULL; | ||
1879 | |||
1880 | packet->info.rx.rx_flags = 0; | ||
1881 | packet->info.rx.exp_hdr = look_ahead; | ||
1882 | packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH; | ||
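| /* | ||
| * E.g. (illustrative): for a lookahead of 0x00400201 on a | ||
| * little-endian host, the overlaid htc_frame_hdr decodes as | ||
| * eid = 0x01, flags = 0x02, payld_len = 0x0040, so act_len | ||
| * becomes 64 + HTC_HDR_LENGTH. | ||
| */ | ||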
1883 | |||
1884 | if (packet->act_len > packet->buf_len) | ||
1885 | goto fail_ctrl_rx; | ||
1886 | |||
1887 | /* we want synchronous operation */ | ||
1888 | packet->completion = NULL; | ||
1889 | |||
1890 | /* get the message from the device; this will block */ | ||
1891 | if (dev_rx_pkt(target, packet, packet->act_len)) | ||
1892 | goto fail_ctrl_rx; | ||
1893 | |||
1894 | /* process receive header */ | ||
1895 | packet->status = htc_proc_rxhdr(target, packet, NULL, NULL); | ||
1896 | |||
1897 | if (packet->status) { | ||
1898 | ath6kl_err("htc_wait_for_ctrl_msg, htc_proc_rxhdr failed (status = %d)\n", | ||
1899 | packet->status); | ||
1900 | goto fail_ctrl_rx; | ||
1901 | } | ||
1902 | |||
1903 | return packet; | ||
1904 | |||
1905 | fail_ctrl_rx: | ||
1906 | if (packet) { | ||
1907 | htc_rxpkt_reset(packet); | ||
1908 | reclaim_rx_ctrl_buf(target, packet); | ||
1909 | } | ||
1910 | |||
1911 | return NULL; | ||
1912 | } | ||
1913 | |||
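| /* | ||
| * Usage sketch (illustrative; assumes the caller allocates skbs | ||
| * with enough headroom to carry the htc_packet bookkeeping, and | ||
| * the buf_sz/eid values are placeholders): | ||
| * | ||
| *	INIT_LIST_HEAD(&queue); | ||
| *	for (i = 0; i < n; i++) { | ||
| *		skb = dev_alloc_skb(buf_sz + sizeof(struct htc_packet)); | ||
| *		packet = (struct htc_packet *) skb->head; | ||
| *		skb_reserve(skb, sizeof(struct htc_packet)); | ||
| *		set_htc_rxpkt_info(packet, skb, skb->data, buf_sz, eid); | ||
| *		list_add_tail(&packet->list, &queue); | ||
| *	} | ||
| *	htc_add_rxbuf_multiple(target, &queue); | ||
| */ | ||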
1914 | int htc_add_rxbuf_multiple(struct htc_target *target, | ||
1915 | struct list_head *pkt_queue) | ||
1916 | { | ||
1917 | struct htc_endpoint *endpoint; | ||
1918 | struct htc_packet *first_pkt; | ||
1919 | bool rx_unblock = false; | ||
1920 | int status = 0, depth; | ||
1921 | |||
1922 | if (list_empty(pkt_queue)) | ||
1923 | return -EINVAL; | ||
1924 | |||
1925 | first_pkt = list_first_entry(pkt_queue, struct htc_packet, list); | ||
1926 | |||
1927 | if (first_pkt->endpoint >= ENDPOINT_MAX) | ||
1928 | return -EINVAL; | ||
1929 | |||
1930 | depth = get_queue_depth(pkt_queue); | ||
1931 | |||
1932 | ath6kl_dbg(ATH6KL_DBG_HTC_RECV, | ||
1933 | "htc_add_rxbuf_multiple: ep id: %d, cnt:%d, len: %d\n", | ||
1934 | first_pkt->endpoint, depth, first_pkt->buf_len); | ||
1935 | |||
1936 | endpoint = &target->endpoint[first_pkt->endpoint]; | ||
1937 | |||
1938 | if (target->htc_flags & HTC_OP_STATE_STOPPING) { | ||
1939 | struct htc_packet *packet, *tmp_pkt; | ||
1940 | |||
1941 | /* walk through queue and mark each one canceled */ | ||
1942 | list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) { | ||
1943 | packet->status = -ECANCELED; | ||
1944 | list_del(&packet->list); | ||
1945 | do_rx_completion(endpoint, packet); | ||
1946 | } | ||
1947 | |||
1948 | return status; | ||
1949 | } | ||
1950 | |||
1951 | spin_lock_bh(&target->rx_lock); | ||
1952 | |||
1953 | list_splice_tail_init(pkt_queue, &endpoint->rx_bufq); | ||
1954 | |||
1955 | /* check if we are blocked waiting for a new buffer */ | ||
1956 | if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) { | ||
1957 | if (target->ep_waiting == first_pkt->endpoint) { | ||
1958 | ath6kl_dbg(ATH6KL_DBG_HTC_RECV, | ||
1959 | "receiver was blocked on ep:%d, unblocking.\n", | ||
1960 | target->ep_waiting); | ||
1961 | target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS; | ||
1962 | target->ep_waiting = ENDPOINT_MAX; | ||
1963 | rx_unblock = true; | ||
1964 | } | ||
1965 | } | ||
1966 | |||
1967 | spin_unlock_bh(&target->rx_lock); | ||
1968 | |||
1969 | if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING)) | ||
1970 | /* TODO: implement a buffer threshold count? */ | ||
1971 | ath6kldev_rx_control(target->dev, true); | ||
1972 | |||
1973 | return status; | ||
1974 | } | ||
1975 | |||
1976 | void htc_flush_rx_buf(struct htc_target *target) | ||
1977 | { | ||
1978 | struct htc_endpoint *endpoint; | ||
1979 | struct htc_packet *packet, *tmp_pkt; | ||
1980 | int i; | ||
1981 | |||
1982 | for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) { | ||
1983 | endpoint = &target->endpoint[i]; | ||
1984 | if (!endpoint->svc_id) | ||
1985 | /* not in use */ | ||
1986 | continue; | ||
1987 | |||
1988 | spin_lock_bh(&target->rx_lock); | ||
1989 | list_for_each_entry_safe(packet, tmp_pkt, | ||
1990 | &endpoint->rx_bufq, list) { | ||
1991 | list_del(&packet->list); | ||
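| /* | ||
| * The packet is already unlinked, so it is safe to drop | ||
| * rx_lock around the free below. | ||
| */ | ||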
1992 | spin_unlock_bh(&target->rx_lock); | ||
1993 | ath6kl_dbg(ATH6KL_DBG_HTC_RECV, | ||
1994 | "flushing rx pkt:0x%p, len:%d, ep:%d\n", | ||
1995 | packet, packet->buf_len, | ||
1996 | packet->endpoint); | ||
1997 | dev_kfree_skb(packet->pkt_cntxt); | ||
1998 | spin_lock_bh(&target->rx_lock); | ||
1999 | } | ||
2000 | spin_unlock_bh(&target->rx_lock); | ||
2001 | } | ||
2002 | } | ||
2003 | |||
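| /* | ||
| * Usage sketch (illustrative; the service ID, callback and queue | ||
| * depth below are placeholder values, not taken from this file): | ||
| * | ||
| *	struct htc_service_connect_req req; | ||
| *	struct htc_service_connect_resp resp; | ||
| * | ||
| *	memset(&req, 0, sizeof(req)); | ||
| *	memset(&resp, 0, sizeof(resp)); | ||
| *	req.svc_id = WMI_DATA_BE_SVC; | ||
| *	req.ep_cb.rx = my_rx_handler; | ||
| *	req.max_txq_depth = 32; | ||
| * | ||
| *	if (htc_conn_service(target, &req, &resp)) | ||
| *		return -EIO; | ||
| *	eid = resp.endpoint; | ||
| */ | ||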
2004 | int htc_conn_service(struct htc_target *target, | ||
2005 | struct htc_service_connect_req *conn_req, | ||
2006 | struct htc_service_connect_resp *conn_resp) | ||
2007 | { | ||
2008 | struct htc_packet *rx_pkt = NULL; | ||
2009 | struct htc_packet *tx_pkt = NULL; | ||
2010 | struct htc_conn_service_resp *resp_msg; | ||
2011 | struct htc_conn_service_msg *conn_msg; | ||
2012 | struct htc_endpoint *endpoint; | ||
2013 | enum htc_endpoint_id assigned_ep = ENDPOINT_MAX; | ||
2014 | unsigned int max_msg_sz = 0; | ||
2015 | int status = 0; | ||
2016 | |||
2017 | ath6kl_dbg(ATH6KL_DBG_TRC, | ||
2018 | "htc_conn_service, target:0x%p service id:0x%X\n", | ||
2019 | target, conn_req->svc_id); | ||
2020 | |||
2021 | if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) { | ||
2022 | /* special case for pseudo control service */ | ||
2023 | assigned_ep = ENDPOINT_0; | ||
2024 | max_msg_sz = HTC_MAX_CTRL_MSG_LEN; | ||
2025 | } else { | ||
2026 | /* allocate a packet to send to the target */ | ||
2027 | tx_pkt = htc_get_control_buf(target, true); | ||
2028 | |||
2029 | if (!tx_pkt) | ||
2030 | return -ENOMEM; | ||
2031 | |||
2032 | conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf; | ||
2033 | memset(conn_msg, 0, sizeof(*conn_msg)); | ||
2034 | conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID); | ||
2035 | conn_msg->svc_id = cpu_to_le16(conn_req->svc_id); | ||
2036 | conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags); | ||
2037 | |||
2038 | set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg, | ||
2039 | sizeof(*conn_msg) + conn_msg->svc_meta_len, | ||
2040 | ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG); | ||
2041 | |||
2042 | /* we want synchronous operation */ | ||
2043 | tx_pkt->completion = NULL; | ||
2044 | htc_prep_send_pkt(tx_pkt, 0, 0, 0); | ||
2045 | status = htc_issue_send(target, tx_pkt); | ||
2046 | |||
2047 | if (status) | ||
2048 | goto fail_tx; | ||
2049 | |||
2050 | /* wait for response */ | ||
2051 | rx_pkt = htc_wait_for_ctrl_msg(target); | ||
2052 | |||
2053 | if (!rx_pkt) { | ||
2054 | status = -ENOMEM; | ||
2055 | goto fail_tx; | ||
2056 | } | ||
2057 | |||
2058 | resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf; | ||
2059 | |||
2060 | if ((le16_to_cpu(resp_msg->msg_id) != HTC_MSG_CONN_SVC_RESP_ID) | ||
2061 | || (rx_pkt->act_len < sizeof(*resp_msg))) { | ||
2062 | status = -ENOMEM; | ||
2063 | goto fail_tx; | ||
2064 | } | ||
2065 | |||
2066 | conn_resp->resp_code = resp_msg->status; | ||
2067 | /* check response status */ | ||
2068 | if (resp_msg->status != HTC_SERVICE_SUCCESS) { | ||
2069 | ath6kl_err("target failed service 0x%X connect request (status:%d)\n", | ||
2070 | resp_msg->svc_id, resp_msg->status); | ||
2071 | status = -ENOMEM; | ||
2072 | goto fail_tx; | ||
2073 | } | ||
2074 | |||
2075 | assigned_ep = (enum htc_endpoint_id)resp_msg->eid; | ||
2076 | max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz); | ||
2077 | } | ||
2078 | |||
2079 | if (assigned_ep >= ENDPOINT_MAX || !max_msg_sz) { | ||
2080 | status = -ENOMEM; | ||
2081 | goto fail_tx; | ||
2082 | } | ||
2083 | |||
2084 | endpoint = &target->endpoint[assigned_ep]; | ||
2085 | endpoint->eid = assigned_ep; | ||
2086 | if (endpoint->svc_id) { | ||
2087 | status = -ENOMEM; | ||
2088 | goto fail_tx; | ||
2089 | } | ||
2090 | |||
2091 | /* return assigned endpoint to caller */ | ||
2092 | conn_resp->endpoint = assigned_ep; | ||
2093 | conn_resp->len_max = max_msg_sz; | ||
2094 | |||
2095 | /* setup the endpoint */ | ||
2096 | |||
2097 | /* this marks the endpoint in use */ | ||
2098 | endpoint->svc_id = conn_req->svc_id; | ||
2099 | |||
2100 | endpoint->max_txq_depth = conn_req->max_txq_depth; | ||
2101 | endpoint->len_max = max_msg_sz; | ||
2102 | endpoint->ep_cb = conn_req->ep_cb; | ||
2103 | endpoint->cred_dist.svc_id = conn_req->svc_id; | ||
2104 | endpoint->cred_dist.htc_rsvd = endpoint; | ||
2105 | endpoint->cred_dist.endpoint = assigned_ep; | ||
2106 | endpoint->cred_dist.cred_sz = target->tgt_cred_sz; | ||
2107 | |||
2108 | if (conn_req->max_rxmsg_sz) { | ||
2109 | /* | ||
2110 | * Override cred_per_msg calculation; this optimizes | ||
2111 | * the credit-low indications since the host will actually | ||
2112 | * issue smaller messages in the Send path. | ||
2113 | */ | ||
2114 | if (conn_req->max_rxmsg_sz > max_msg_sz) { | ||
2115 | status = -ENOMEM; | ||
2116 | goto fail_tx; | ||
2117 | } | ||
2118 | endpoint->cred_dist.cred_per_msg = | ||
2119 | conn_req->max_rxmsg_sz / target->tgt_cred_sz; | ||
2120 | } else | ||
2121 | endpoint->cred_dist.cred_per_msg = | ||
2122 | max_msg_sz / target->tgt_cred_sz; | ||
2123 | |||
2124 | if (!endpoint->cred_dist.cred_per_msg) | ||
2125 | endpoint->cred_dist.cred_per_msg = 1; | ||
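| /* | ||
| * E.g. (illustrative numbers): max_msg_sz = 1664 with a 512-byte | ||
| * target credit gives cred_per_msg = 3 (integer division); a | ||
| * credit larger than the message would floor to 0, which the | ||
| * check above bumps back up to 1. | ||
| */ | ||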
2126 | |||
2127 | /* save local connection flags */ | ||
2128 | endpoint->conn_flags = conn_req->flags; | ||
2129 | |||
2130 | fail_tx: | ||
2131 | if (tx_pkt) | ||
2132 | htc_reclaim_txctrl_buf(target, tx_pkt); | ||
2133 | |||
2134 | if (rx_pkt) { | ||
2135 | htc_rxpkt_reset(rx_pkt); | ||
2136 | reclaim_rx_ctrl_buf(target, rx_pkt); | ||
2137 | } | ||
2138 | |||
2139 | return status; | ||
2140 | } | ||
2141 | |||
2142 | static void reset_ep_state(struct htc_target *target) | ||
2143 | { | ||
2144 | struct htc_endpoint *endpoint; | ||
2145 | int i; | ||
2146 | |||
2147 | for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) { | ||
2148 | endpoint = &target->endpoint[i]; | ||
2149 | memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist)); | ||
2150 | endpoint->svc_id = 0; | ||
2151 | endpoint->len_max = 0; | ||
2152 | endpoint->max_txq_depth = 0; | ||
2153 | memset(&endpoint->ep_st, 0, | ||
2154 | sizeof(endpoint->ep_st)); | ||
2155 | INIT_LIST_HEAD(&endpoint->rx_bufq); | ||
2156 | INIT_LIST_HEAD(&endpoint->txq); | ||
2157 | endpoint->target = target; | ||
2158 | } | ||
2159 | |||
2160 | /* reset distribution list */ | ||
2161 | INIT_LIST_HEAD(&target->cred_dist_list); | ||
2162 | } | ||
2163 | |||
2164 | int htc_get_rxbuf_num(struct htc_target *target, enum htc_endpoint_id endpoint) | ||
2165 | { | ||
2166 | int num; | ||
2167 | |||
2168 | spin_lock_bh(&target->rx_lock); | ||
2169 | num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq)); | ||
2170 | spin_unlock_bh(&target->rx_lock); | ||
2171 | return num; | ||
2172 | } | ||
2173 | |||
2174 | static void htc_setup_msg_bndl(struct htc_target *target) | ||
2175 | { | ||
2176 | struct hif_dev_scat_sup_info *scat_info = &target->dev->hif_scat_info; | ||
2177 | |||
2178 | /* limit what HTC can handle */ | ||
2179 | target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE, | ||
2180 | target->msg_per_bndl_max); | ||
2181 | |||
2182 | if (ath6kldev_setup_msg_bndl(target->dev, target->msg_per_bndl_max)) { | ||
2183 | target->msg_per_bndl_max = 0; | ||
2184 | return; | ||
2185 | } | ||
2186 | |||
2187 | /* limit the bundle to what the device layer can handle */ | ||
2188 | target->msg_per_bndl_max = min(scat_info->max_scat_entries, | ||
2189 | target->msg_per_bndl_max); | ||
2190 | |||
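| /* | ||
| * E.g. (illustrative, assuming HTC_HOST_MAX_MSG_PER_BUNDLE is 8): | ||
| * a target advertising 16 messages per bundle is first trimmed to | ||
| * 8 above; a device layer reporting max_scat_entries = 4 then | ||
| * trims msg_per_bndl_max down to 4. | ||
| */ | ||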
2191 | ath6kl_dbg(ATH6KL_DBG_TRC, | ||
2192 | "htc bundling allowed. max msg per htc bundle: %d\n", | ||
2193 | target->msg_per_bndl_max); | ||
2194 | |||
2195 | /* Max rx bundle size is limited by the max transfer size per scatter request */ | ||
2196 | target->dev->max_rx_bndl_sz = scat_info->max_xfer_szper_scatreq; | ||
2197 | /* Max tx bundle size is limited by the extended mbox address range */ | ||
2198 | target->dev->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH, | ||
2199 | scat_info->max_xfer_szper_scatreq); | ||
2200 | |||
2201 | ath6kl_dbg(ATH6KL_DBG_ANY, "max recv: %d max send: %d\n", | ||
2202 | target->dev->max_rx_bndl_sz, target->dev->max_tx_bndl_sz); | ||
2203 | |||
2204 | if (target->dev->max_tx_bndl_sz) | ||
2205 | target->tx_bndl_enable = true; | ||
2206 | |||
2207 | if (target->dev->max_rx_bndl_sz) | ||
2208 | target->rx_bndl_enable = true; | ||
2209 | |||
2210 | if ((target->tgt_cred_sz % target->dev->block_sz) != 0) { | ||
2211 | ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n", | ||
2212 | target->tgt_cred_sz); | ||
2213 | |||
2214 | /* | ||
2215 | * Disallow send bundling since the credit size is | ||
2216 | * not aligned to a block size; the I/O block | ||
2217 | * padding will spill into the next credit buffer, | ||
2218 | * which is fatal. | ||
2219 | */ | ||
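| /* E.g. a 1600-byte credit with 128-byte blocks leaves 1600 % 128 = 64. */ | ||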
2220 | target->tx_bndl_enable = false; | ||
2221 | } | ||
2222 | } | ||
2223 | |||
2224 | int htc_wait_target(struct htc_target *target) | ||
2225 | { | ||
2226 | struct htc_packet *packet = NULL; | ||
2227 | struct htc_ready_ext_msg *rdy_msg; | ||
2228 | struct htc_service_connect_req connect; | ||
2229 | struct htc_service_connect_resp resp; | ||
2230 | int status; | ||
2231 | |||
2232 | /* we should be getting 1 control message that the target is ready */ | ||
2233 | packet = htc_wait_for_ctrl_msg(target); | ||
2234 | |||
2235 | if (!packet) | ||
2236 | return -ENOMEM; | ||
2237 | |||
2238 | /* we controlled the buffer creation so it's properly aligned */ | ||
2239 | rdy_msg = (struct htc_ready_ext_msg *)packet->buf; | ||
2240 | |||
2241 | if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) || | ||
2242 | (packet->act_len < sizeof(struct htc_ready_msg))) { | ||
2243 | status = -ENOMEM; | ||
2244 | goto fail_wait_target; | ||
2245 | } | ||
2246 | |||
2247 | if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) { | ||
2248 | status = -ENOMEM; | ||
2249 | goto fail_wait_target; | ||
2250 | } | ||
2251 | |||
2252 | target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt); | ||
2253 | target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz); | ||
2254 | |||
2255 | ath6kl_dbg(ATH6KL_DBG_HTC_RECV, | ||
2256 | "target ready: credits: %d credit size: %d\n", | ||
2257 | target->tgt_creds, target->tgt_cred_sz); | ||
2258 | |||
2259 | /* check if this is an extended ready message */ | ||
2260 | if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) { | ||
2261 | /* this is an extended message */ | ||
2262 | target->htc_tgt_ver = rdy_msg->htc_ver; | ||
2263 | target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl; | ||
2264 | } else { | ||
2265 | /* legacy */ | ||
2266 | target->htc_tgt_ver = HTC_VERSION_2P0; | ||
2267 | target->msg_per_bndl_max = 0; | ||
2268 | } | ||
2269 | |||
2270 | ath6kl_dbg(ATH6KL_DBG_TRC, "using htc protocol version: %s (%d)\n", | ||
2271 | (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1", | ||
2272 | target->htc_tgt_ver); | ||
2273 | |||
2274 | if (target->msg_per_bndl_max > 0) | ||
2275 | htc_setup_msg_bndl(target); | ||
2276 | |||
2277 | /* setup our pseudo HTC control endpoint connection */ | ||
2278 | memset(&connect, 0, sizeof(connect)); | ||
2279 | memset(&resp, 0, sizeof(resp)); | ||
2280 | connect.ep_cb.rx = htc_ctrl_rx; | ||
2281 | connect.ep_cb.rx_refill = NULL; | ||
2282 | connect.ep_cb.tx_full = NULL; | ||
2283 | connect.max_txq_depth = NUM_CONTROL_BUFFERS; | ||
2284 | connect.svc_id = HTC_CTRL_RSVD_SVC; | ||
2285 | |||
2286 | /* connect fake service */ | ||
2287 | status = htc_conn_service(target, &connect, &resp); | ||
2288 | |||
2289 | if (status) | ||
2290 | ath6kl_hif_cleanup_scatter(target->dev->ar); | ||
2291 | |||
2292 | fail_wait_target: | ||
2293 | if (packet) { | ||
2294 | htc_rxpkt_reset(packet); | ||
2295 | reclaim_rx_ctrl_buf(target, packet); | ||
2296 | } | ||
2297 | |||
2298 | return status; | ||
2299 | } | ||
2300 | |||
2301 | /* | ||
2302 | * Start HTC, enable interrupts and let the target know | ||
2303 | * host has finished setup. | ||
2304 | */ | ||
2305 | int htc_start(struct htc_target *target) | ||
2306 | { | ||
2307 | struct htc_packet *packet; | ||
2308 | int status; | ||
2309 | |||
2310 | /* Disable interrupts at the chip level */ | ||
2311 | ath6kldev_disable_intrs(target->dev); | ||
2312 | |||
2313 | target->htc_flags = 0; | ||
2314 | target->rx_st_flags = 0; | ||
2315 | |||
2316 | /* Push control receive buffers into htc control endpoint */ | ||
2317 | while ((packet = htc_get_control_buf(target, false)) != NULL) { | ||
2318 | status = htc_add_rxbuf(target, packet); | ||
2319 | if (status) | ||
2320 | return status; | ||
2321 | } | ||
2322 | |||
2323 | /* NOTE: the first entry in the distribution list is ENDPOINT_0 */ | ||
2324 | ath6k_credit_init(target->cred_dist_cntxt, &target->cred_dist_list, | ||
2325 | target->tgt_creds); | ||
2326 | |||
2327 | dump_cred_dist_stats(target); | ||
2328 | |||
2329 | /* Indicate setup completion to the target */ | ||
2330 | status = htc_setup_tx_complete(target); | ||
2331 | |||
2332 | if (status) | ||
2333 | return status; | ||
2334 | |||
2335 | /* unmask interrupts */ | ||
2336 | status = ath6kldev_unmask_intrs(target->dev); | ||
2337 | |||
2338 | if (status) | ||
2339 | htc_stop(target); | ||
2340 | |||
2341 | return status; | ||
2342 | } | ||
2343 | |||
2344 | /* htc_stop: stop interrupt reception, and flush all queued buffers */ | ||
2345 | void htc_stop(struct htc_target *target) | ||
2346 | { | ||
2347 | spin_lock_bh(&target->htc_lock); | ||
2348 | target->htc_flags |= HTC_OP_STATE_STOPPING; | ||
2349 | spin_unlock_bh(&target->htc_lock); | ||
2350 | |||
2351 | /* | ||
2352 | * Masking interrupts is a synchronous operation; when this | ||
2353 | * function returns, all pending HIF I/O has completed and we | ||
2354 | * can safely flush the queues. | ||
2355 | */ | ||
2356 | ath6kldev_mask_intrs(target->dev); | ||
2357 | |||
2358 | htc_flush_txep_all(target); | ||
2359 | |||
2360 | htc_flush_rx_buf(target); | ||
2361 | |||
2362 | reset_ep_state(target); | ||
2363 | } | ||
2364 | |||
2365 | void *htc_create(struct ath6kl *ar) | ||
2366 | { | ||
2367 | struct htc_target *target = NULL; | ||
2368 | struct htc_packet *packet; | ||
2369 | int status = 0, i = 0; | ||
2370 | u32 block_size, ctrl_bufsz; | ||
2371 | |||
2372 | target = kzalloc(sizeof(*target), GFP_KERNEL); | ||
2373 | if (!target) { | ||
2374 | ath6kl_err("unable to allocate memory\n"); | ||
2375 | return NULL; | ||
2376 | } | ||
2377 | |||
2378 | target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL); | ||
2379 | if (!target->dev) { | ||
2380 | ath6kl_err("unable to allocate memory\n"); | ||
2381 | status = -ENOMEM; | ||
2382 | goto fail_create_htc; | ||
2383 | } | ||
2384 | |||
2385 | spin_lock_init(&target->htc_lock); | ||
2386 | spin_lock_init(&target->rx_lock); | ||
2387 | spin_lock_init(&target->tx_lock); | ||
2388 | |||
2389 | INIT_LIST_HEAD(&target->free_ctrl_txbuf); | ||
2390 | INIT_LIST_HEAD(&target->free_ctrl_rxbuf); | ||
2391 | INIT_LIST_HEAD(&target->cred_dist_list); | ||
2392 | |||
2393 | target->dev->ar = ar; | ||
2394 | target->dev->htc_cnxt = target; | ||
2395 | target->dev->msg_pending = htc_rxmsg_pending_handler; | ||
2396 | target->ep_waiting = ENDPOINT_MAX; | ||
2397 | |||
2398 | reset_ep_state(target); | ||
2399 | |||
2400 | status = ath6kldev_setup(target->dev); | ||
2401 | |||
2402 | if (status) | ||
2403 | goto fail_create_htc; | ||
2404 | |||
2405 | block_size = ar->mbox_info.block_size; | ||
2406 | |||
2407 | ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ? | ||
2408 | (block_size + HTC_HDR_LENGTH) : | ||
2409 | (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH); | ||
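| /* | ||
| * E.g. (illustrative sizes): with 128-byte blocks and a 256-byte | ||
| * HTC_MAX_CTRL_MSG_LEN, ctrl_bufsz is 256 + HTC_HDR_LENGTH; a | ||
| * hypothetical 512-byte block size would give 512 + HTC_HDR_LENGTH | ||
| * instead. | ||
| */ | ||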
2410 | |||
2411 | for (i = 0; i < NUM_CONTROL_BUFFERS; i++) { | ||
2412 | packet = kzalloc(sizeof(*packet), GFP_KERNEL); | ||
2413 | if (!packet) | ||
2414 | break; | ||
2415 | |||
2416 | packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL); | ||
2417 | if (!packet->buf_start) { | ||
2418 | kfree(packet); | ||
2419 | break; | ||
2420 | } | ||
2421 | |||
2422 | packet->buf_len = ctrl_bufsz; | ||
2423 | if (i < NUM_CONTROL_RX_BUFFERS) { | ||
2424 | packet->act_len = 0; | ||
2425 | packet->buf = packet->buf_start; | ||
2426 | packet->endpoint = ENDPOINT_0; | ||
2427 | list_add_tail(&packet->list, &target->free_ctrl_rxbuf); | ||
2428 | } else | ||
2429 | list_add_tail(&packet->list, &target->free_ctrl_txbuf); | ||
2430 | } | ||
2431 | |||
2432 | fail_create_htc: | ||
2433 | if (i != NUM_CONTROL_BUFFERS || status) { | ||
2434 | if (target) { | ||
2435 | htc_cleanup(target); | ||
2436 | target = NULL; | ||
2437 | } | ||
2438 | } | ||
2439 | |||
2440 | return target; | ||
2441 | } | ||
2442 | |||
2443 | /* cleanup the HTC instance */ | ||
2444 | void htc_cleanup(struct htc_target *target) | ||
2445 | { | ||
2446 | struct htc_packet *packet, *tmp_packet; | ||
2447 | |||
2448 | ath6kl_hif_cleanup_scatter(target->dev->ar); | ||
2449 | |||
2450 | list_for_each_entry_safe(packet, tmp_packet, | ||
2451 | &target->free_ctrl_txbuf, list) { | ||
2452 | list_del(&packet->list); | ||
2453 | kfree(packet->buf_start); | ||
2454 | kfree(packet); | ||
2455 | } | ||
2456 | |||
2457 | list_for_each_entry_safe(packet, tmp_packet, | ||
2458 | &target->free_ctrl_rxbuf, list) { | ||
2459 | list_del(&packet->list); | ||
2460 | kfree(packet->buf_start); | ||
2461 | kfree(packet); | ||
2462 | } | ||
2463 | |||
2464 | kfree(target->dev); | ||
2465 | kfree(target); | ||
2466 | } | ||