aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/wireless/ath/ath6kl/hif.c
diff options
context:
space:
mode:
authorKalle Valo <kvalo@qca.qualcomm.com>2011-10-05 05:23:33 -0400
committerKalle Valo <kvalo@qca.qualcomm.com>2011-11-11 05:50:55 -0500
commit8e8ddb2b8d19a952e1dff7a2a8a9d606e52fc3e3 (patch)
treebf5387a9f4707642838d83a071835695e62ee6d7 /drivers/net/wireless/ath/ath6kl/hif.c
parentb4be8959c2cca0a0d3136f9d3bf06a52252911f4 (diff)
ath6kl: move htc_hif to hif.c
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
Diffstat (limited to 'drivers/net/wireless/ath/ath6kl/hif.c')
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.c643
1 file changed, 643 insertions, 0 deletions
diff --git a/drivers/net/wireless/ath/ath6kl/hif.c b/drivers/net/wireless/ath/ath6kl/hif.c
new file mode 100644
index 000000000000..629e16cdce32
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/hif.c
@@ -0,0 +1,643 @@
1/*
2 * Copyright (c) 2007-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "target.h"
19#include "hif-ops.h"
20#include "htc_hif.h"
21#include "debug.h"
22
23#define MAILBOX_FOR_BLOCK_SIZE 1
24
25#define ATH6KL_TIME_QUANTUM 10 /* in ms */
26
27static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req,
28 bool from_dma)
29{
30 u8 *buf;
31 int i;
32
33 buf = req->virt_dma_buf;
34
35 for (i = 0; i < req->scat_entries; i++) {
36
37 if (from_dma)
38 memcpy(req->scat_list[i].buf, buf,
39 req->scat_list[i].len);
40 else
41 memcpy(buf, req->scat_list[i].buf,
42 req->scat_list[i].len);
43
44 buf += req->scat_list[i].len;
45 }
46
47 return 0;
48}
49
50int ath6kl_hif_rw_comp_handler(void *context, int status)
51{
52 struct htc_packet *packet = context;
53
54 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
55 "ath6kl_hif_rw_comp_handler (pkt:0x%p , status: %d\n",
56 packet, status);
57
58 packet->status = status;
59 packet->completion(packet->context, packet);
60
61 return 0;
62}
63
64static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
65{
66 u32 dummy;
67 int status;
68
69 ath6kl_err("target debug interrupt\n");
70
71 ath6kl_target_failure(dev->ar);
72
73 /*
74 * read counter to clear the interrupt, the debug error interrupt is
75 * counter 0.
76 */
77 status = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
78 (u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC);
79 if (status)
80 WARN_ON(1);
81
82 return status;
83}
84
/*
 * Poll the mailbox for a pending receive message.
 *
 * Repeatedly reads the HTC IRQ processing register table (one
 * synchronous byte-incrementing transfer per iteration) until the HTC
 * mailbox reports data with a valid lookahead, or @timeout ms (in
 * ATH6KL_TIME_QUANTUM steps) expire.  On success the lookahead word is
 * returned through @lk_ahd.
 *
 * Returns 0 on success, -ETIME on timeout, or the I/O error from the
 * register read.  On timeout, a pending target debug assertion
 * (counter interrupt 0) is additionally serviced via
 * ath6kl_hif_proc_dbg_intr().
 */
int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
			       int timeout)
{
	struct ath6kl_irq_proc_registers *rg;
	int status = 0, i;
	u8 htc_mbox = 1 << HTC_MAILBOX;

	for (i = timeout / ATH6KL_TIME_QUANTUM; i > 0; i--) {
		/* this is the standard HIF way, load the reg table */
		status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
					     (u8 *) &dev->irq_proc_reg,
					     sizeof(dev->irq_proc_reg),
					     HIF_RD_SYNC_BYTE_INC);

		if (status) {
			ath6kl_err("failed to read reg table\n");
			return status;
		}

		/* check for MBOX data and valid lookahead */
		if (dev->irq_proc_reg.host_int_status & htc_mbox) {
			if (dev->irq_proc_reg.rx_lkahd_valid &
			    htc_mbox) {
				/*
				 * Mailbox has a message and the look ahead
				 * is valid.
				 */
				rg = &dev->irq_proc_reg;
				*lk_ahd =
					le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
				break;
			}
		}

		/* delay a little before re-reading the register table */
		mdelay(ATH6KL_TIME_QUANTUM);
		ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "retry mbox poll : %d\n", i);
	}

	/* i == 0 means the for loop above exhausted the timeout */
	if (i == 0) {
		ath6kl_err("timeout waiting for recv message\n");
		status = -ETIME;
		/* check if the target asserted */
		if (dev->irq_proc_reg.counter_int_status &
		    ATH6KL_TARGET_DEBUG_INTR_MASK)
			/*
			 * Target failure handler will be called in case of
			 * an assert.
			 */
			ath6kl_hif_proc_dbg_intr(dev);
	}

	return status;
}
140
141/*
142 * Disable packet reception (used in case the host runs out of buffers)
143 * using the interrupt enable registers through the host I/F
144 */
145int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx)
146{
147 struct ath6kl_irq_enable_reg regs;
148 int status = 0;
149
150 /* take the lock to protect interrupt enable shadows */
151 spin_lock_bh(&dev->lock);
152
153 if (enable_rx)
154 dev->irq_en_reg.int_status_en |=
155 SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
156 else
157 dev->irq_en_reg.int_status_en &=
158 ~SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
159
160 memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
161
162 spin_unlock_bh(&dev->lock);
163
164 status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
165 &regs.int_status_en,
166 sizeof(struct ath6kl_irq_enable_reg),
167 HIF_WR_SYNC_BYTE_INC);
168
169 return status;
170}
171
/*
 * Submit a scatter request to the HIF layer.
 *
 * @read selects direction and mode: reads are issued as synchronous
 * fixed-address block transfers from the HTC mailbox address; writes
 * are issued as asynchronous incrementing block transfers, using the
 * extended mailbox address when the total length exceeds
 * HIF_MBOX_WIDTH.
 *
 * For virtual-scatter writes the scatter entries are first gathered
 * into the contiguous DMA bounce buffer; if that fails, the request is
 * completed through its completion callback and 0 is returned (the
 * error travels in scat_req->status, not in the return value).
 *
 * For reads (synchronous), scat_req->status is updated in place and,
 * on success, virtual-scatter requests have the bounce buffer
 * scattered back out to the entries.
 */
int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev,
			       struct hif_scatter_req *scat_req, bool read)
{
	int status = 0;

	if (read) {
		/* sync read from the fixed HTC mailbox address */
		scat_req->req = HIF_RD_SYNC_BLOCK_FIX;
		scat_req->addr = dev->ar->mbox_info.htc_addr;
	} else {
		scat_req->req = HIF_WR_ASYNC_BLOCK_INC;

		/* large writes go through the extended mailbox window */
		scat_req->addr =
			(scat_req->len > HIF_MBOX_WIDTH) ?
			dev->ar->mbox_info.htc_ext_addr :
			dev->ar->mbox_info.htc_addr;
	}

	ath6kl_dbg((ATH6KL_DBG_HTC_RECV | ATH6KL_DBG_HTC_SEND),
		   "ath6kl_hif_submit_scat_req, entries: %d, total len: %d mbox:0x%X (mode: %s : %s)\n",
		   scat_req->scat_entries, scat_req->len,
		   scat_req->addr, !read ? "async" : "sync",
		   (read) ? "rd" : "wr");

	if (!read && scat_req->virt_scat) {
		/* gather the entries into the DMA bounce buffer */
		status = ath6kl_hif_cp_scat_dma_buf(scat_req, false);
		if (status) {
			/* report the failure via the completion callback */
			scat_req->status = status;
			scat_req->complete(dev->ar->htc_target, scat_req);
			return 0;
		}
	}

	status = ath6kl_hif_scat_req_rw(dev->ar, scat_req);

	if (read) {
		/* in sync mode, we can touch the scatter request */
		scat_req->status = status;
		if (!status && scat_req->virt_scat)
			scat_req->status =
				ath6kl_hif_cp_scat_dma_buf(scat_req, true);
	}

	return status;
}
216
217static int ath6kl_hif_proc_counter_intr(struct ath6kl_device *dev)
218{
219 u8 counter_int_status;
220
221 ath6kl_dbg(ATH6KL_DBG_IRQ, "counter interrupt\n");
222
223 counter_int_status = dev->irq_proc_reg.counter_int_status &
224 dev->irq_en_reg.cntr_int_status_en;
225
226 ath6kl_dbg(ATH6KL_DBG_IRQ,
227 "valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n",
228 counter_int_status);
229
230 /*
231 * NOTE: other modules like GMBOX may use the counter interrupt for
232 * credit flow control on other counters, we only need to check for
233 * the debug assertion counter interrupt.
234 */
235 if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK)
236 return ath6kl_hif_proc_dbg_intr(dev);
237
238 return 0;
239}
240
241static int ath6kl_hif_proc_err_intr(struct ath6kl_device *dev)
242{
243 int status;
244 u8 error_int_status;
245 u8 reg_buf[4];
246
247 ath6kl_dbg(ATH6KL_DBG_IRQ, "error interrupt\n");
248
249 error_int_status = dev->irq_proc_reg.error_int_status & 0x0F;
250 if (!error_int_status) {
251 WARN_ON(1);
252 return -EIO;
253 }
254
255 ath6kl_dbg(ATH6KL_DBG_IRQ,
256 "valid interrupt source(s) in ERROR_INT_STATUS: 0x%x\n",
257 error_int_status);
258
259 if (MS(ERROR_INT_STATUS_WAKEUP, error_int_status))
260 ath6kl_dbg(ATH6KL_DBG_IRQ, "error : wakeup\n");
261
262 if (MS(ERROR_INT_STATUS_RX_UNDERFLOW, error_int_status))
263 ath6kl_err("rx underflow\n");
264
265 if (MS(ERROR_INT_STATUS_TX_OVERFLOW, error_int_status))
266 ath6kl_err("tx overflow\n");
267
268 /* Clear the interrupt */
269 dev->irq_proc_reg.error_int_status &= ~error_int_status;
270
271 /* set W1C value to clear the interrupt, this hits the register first */
272 reg_buf[0] = error_int_status;
273 reg_buf[1] = 0;
274 reg_buf[2] = 0;
275 reg_buf[3] = 0;
276
277 status = hif_read_write_sync(dev->ar, ERROR_INT_STATUS_ADDRESS,
278 reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
279
280 if (status)
281 WARN_ON(1);
282
283 return status;
284}
285
286static int ath6kl_hif_proc_cpu_intr(struct ath6kl_device *dev)
287{
288 int status;
289 u8 cpu_int_status;
290 u8 reg_buf[4];
291
292 ath6kl_dbg(ATH6KL_DBG_IRQ, "cpu interrupt\n");
293
294 cpu_int_status = dev->irq_proc_reg.cpu_int_status &
295 dev->irq_en_reg.cpu_int_status_en;
296 if (!cpu_int_status) {
297 WARN_ON(1);
298 return -EIO;
299 }
300
301 ath6kl_dbg(ATH6KL_DBG_IRQ,
302 "valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n",
303 cpu_int_status);
304
305 /* Clear the interrupt */
306 dev->irq_proc_reg.cpu_int_status &= ~cpu_int_status;
307
308 /*
309 * Set up the register transfer buffer to hit the register 4 times ,
310 * this is done to make the access 4-byte aligned to mitigate issues
311 * with host bus interconnects that restrict bus transfer lengths to
312 * be a multiple of 4-bytes.
313 */
314
315 /* set W1C value to clear the interrupt, this hits the register first */
316 reg_buf[0] = cpu_int_status;
317 /* the remaining are set to zero which have no-effect */
318 reg_buf[1] = 0;
319 reg_buf[2] = 0;
320 reg_buf[3] = 0;
321
322 status = hif_read_write_sync(dev->ar, CPU_INT_STATUS_ADDRESS,
323 reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
324
325 if (status)
326 WARN_ON(1);
327
328 return status;
329}
330
/*
 * Process pending target interrupts synchronously.
 *
 * Reads the first 28 bytes of the HTC register table in a single
 * transfer, extracts any valid mailbox lookahead, hands mailbox data to
 * the HTC rx pending handler, then dispatches CPU, error and counter
 * interrupts in that order.  Sets *done = true when no interrupt source
 * remains pending (or when the upper layer has cleared
 * chk_irq_status_cnt to skip status re-checks).
 */
static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
{
	struct ath6kl_irq_proc_registers *rg;
	int status = 0;
	u8 host_int_status = 0;
	u32 lk_ahd = 0;
	u8 htc_mbox = 1 << HTC_MAILBOX;

	ath6kl_dbg(ATH6KL_DBG_IRQ, "proc_pending_irqs: (dev: 0x%p)\n", dev);

	/*
	 * NOTE: HIF implementation guarantees that the context of this
	 * call allows us to perform SYNCHRONOUS I/O, that is we can block,
	 * sleep or call any API that can block or switch thread/task
	 * contexts. This is a fully schedulable context.
	 */

	/*
	 * Only read the status registers while some interrupt source is
	 * still enabled in the shadow copy; otherwise the bus transaction
	 * is unnecessary and the target may be unresponsive at the time.
	 */
	if (dev->irq_en_reg.int_status_en) {
		/*
		 * Read the first 28 bytes of the HTC register table. This
		 * will yield us the value of different int status
		 * registers and the lookahead registers.
		 *
		 * length = sizeof(int_status) + sizeof(cpu_int_status)
		 *          + sizeof(error_int_status) +
		 *          sizeof(counter_int_status) +
		 *          sizeof(mbox_frame) + sizeof(rx_lkahd_valid)
		 *          + sizeof(hole) + sizeof(rx_lkahd) +
		 *          sizeof(int_status_en) +
		 *          sizeof(cpu_int_status_en) +
		 *          sizeof(err_int_status_en) +
		 *          sizeof(cntr_int_status_en);
		 */
		status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
					     (u8 *) &dev->irq_proc_reg,
					     sizeof(dev->irq_proc_reg),
					     HIF_RD_SYNC_BYTE_INC);
		if (status)
			goto out;

		if (AR_DBG_LVL_CHECK(ATH6KL_DBG_IRQ))
			ath6kl_dump_registers(dev, &dev->irq_proc_reg,
					      &dev->irq_en_reg);

		/* Update only those registers that are enabled */
		host_int_status = dev->irq_proc_reg.host_int_status &
				  dev->irq_en_reg.int_status_en;

		/* Look at mbox status */
		if (host_int_status & htc_mbox) {
			/*
			 * Mask out the pending mbox bit; "lookAhead" is
			 * used as the real flag for mbox processing.
			 */
			host_int_status &= ~htc_mbox;
			if (dev->irq_proc_reg.rx_lkahd_valid &
			    htc_mbox) {
				rg = &dev->irq_proc_reg;
				lk_ahd = le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
				if (!lk_ahd)
					ath6kl_err("lookAhead is zero!\n");
			}
		}
	}

	/* nothing pending at all: tell the caller to stop iterating */
	if (!host_int_status && !lk_ahd) {
		*done = true;
		goto out;
	}

	if (lk_ahd) {
		int fetched = 0;

		ath6kl_dbg(ATH6KL_DBG_IRQ,
			   "pending mailbox msg, lk_ahd: 0x%X\n", lk_ahd);
		/*
		 * Mailbox Interrupt, the HTC layer may issue async
		 * requests to empty the mailbox. When emptying the recv
		 * mailbox we use the async handler above called from the
		 * completion routine of the callers read request. This can
		 * improve performance by reducing context switching when
		 * we rapidly pull packets.
		 */
		status = ath6kl_htc_rxmsg_pending_handler(dev->htc_cnxt,
							  lk_ahd, &fetched);
		if (status)
			goto out;

		if (!fetched)
			/*
			 * HTC could not pull any messages out due to lack
			 * of resources.
			 */
			dev->htc_cnxt->chk_irq_status_cnt = 0;
	}

	/* now handle the rest of them */
	ath6kl_dbg(ATH6KL_DBG_IRQ,
		   "valid interrupt source(s) for other interrupts: 0x%x\n",
		   host_int_status);

	if (MS(HOST_INT_STATUS_CPU, host_int_status)) {
		/* CPU Interrupt */
		status = ath6kl_hif_proc_cpu_intr(dev);
		if (status)
			goto out;
	}

	if (MS(HOST_INT_STATUS_ERROR, host_int_status)) {
		/* Error Interrupt */
		status = ath6kl_hif_proc_err_intr(dev);
		if (status)
			goto out;
	}

	if (MS(HOST_INT_STATUS_COUNTER, host_int_status))
		/* Counter Interrupt */
		status = ath6kl_hif_proc_counter_intr(dev);

out:
	/*
	 * An optimization to bypass reading the IRQ status registers
	 * unnecessarily which can re-wake the target, if upper layers
	 * determine that we are in a low-throughput mode, we can rely on
	 * taking another interrupt rather than re-checking the status
	 * registers which can re-wake the target.
	 *
	 * NOTE : for host interfaces that makes use of detecting pending
	 * mbox messages at hif can not use this optimization due to
	 * possible side effects, SPI requires the host to drain all
	 * messages from the mailbox before exiting the ISR routine.
	 */

	ath6kl_dbg(ATH6KL_DBG_IRQ,
		   "bypassing irq status re-check, forcing done\n");

	if (!dev->htc_cnxt->chk_irq_status_cnt)
		*done = true;

	ath6kl_dbg(ATH6KL_DBG_IRQ,
		   "proc_pending_irqs: (done:%d, status=%d\n", *done, status);

	return status;
}
481
482/* interrupt handler, kicks off all interrupt processing */
483int ath6kl_hif_intr_bh_handler(struct ath6kl *ar)
484{
485 struct ath6kl_device *dev = ar->htc_target->dev;
486 int status = 0;
487 bool done = false;
488
489 /*
490 * Reset counter used to flag a re-scan of IRQ status registers on
491 * the target.
492 */
493 dev->htc_cnxt->chk_irq_status_cnt = 0;
494
495 /*
496 * IRQ processing is synchronous, interrupt status registers can be
497 * re-read.
498 */
499 while (!done) {
500 status = proc_pending_irqs(dev, &done);
501 if (status)
502 break;
503 }
504
505 return status;
506}
507
508static int ath6kl_hif_enable_intrs(struct ath6kl_device *dev)
509{
510 struct ath6kl_irq_enable_reg regs;
511 int status;
512
513 spin_lock_bh(&dev->lock);
514
515 /* Enable all but ATH6KL CPU interrupts */
516 dev->irq_en_reg.int_status_en =
517 SM(INT_STATUS_ENABLE_ERROR, 0x01) |
518 SM(INT_STATUS_ENABLE_CPU, 0x01) |
519 SM(INT_STATUS_ENABLE_COUNTER, 0x01);
520
521 /*
522 * NOTE: There are some cases where HIF can do detection of
523 * pending mbox messages which is disabled now.
524 */
525 dev->irq_en_reg.int_status_en |= SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
526
527 /* Set up the CPU Interrupt status Register */
528 dev->irq_en_reg.cpu_int_status_en = 0;
529
530 /* Set up the Error Interrupt status Register */
531 dev->irq_en_reg.err_int_status_en =
532 SM(ERROR_STATUS_ENABLE_RX_UNDERFLOW, 0x01) |
533 SM(ERROR_STATUS_ENABLE_TX_OVERFLOW, 0x1);
534
535 /*
536 * Enable Counter interrupt status register to get fatal errors for
537 * debugging.
538 */
539 dev->irq_en_reg.cntr_int_status_en = SM(COUNTER_INT_STATUS_ENABLE_BIT,
540 ATH6KL_TARGET_DEBUG_INTR_MASK);
541 memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
542
543 spin_unlock_bh(&dev->lock);
544
545 status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
546 &regs.int_status_en, sizeof(regs),
547 HIF_WR_SYNC_BYTE_INC);
548
549 if (status)
550 ath6kl_err("failed to update interrupt ctl reg err: %d\n",
551 status);
552
553 return status;
554}
555
556int ath6kl_hif_disable_intrs(struct ath6kl_device *dev)
557{
558 struct ath6kl_irq_enable_reg regs;
559
560 spin_lock_bh(&dev->lock);
561 /* Disable all interrupts */
562 dev->irq_en_reg.int_status_en = 0;
563 dev->irq_en_reg.cpu_int_status_en = 0;
564 dev->irq_en_reg.err_int_status_en = 0;
565 dev->irq_en_reg.cntr_int_status_en = 0;
566 memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
567 spin_unlock_bh(&dev->lock);
568
569 return hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
570 &regs.int_status_en, sizeof(regs),
571 HIF_WR_SYNC_BYTE_INC);
572}
573
574/* enable device interrupts */
575int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev)
576{
577 int status = 0;
578
579 /*
580 * Make sure interrupt are disabled before unmasking at the HIF
581 * layer. The rationale here is that between device insertion
582 * (where we clear the interrupts the first time) and when HTC
583 * is finally ready to handle interrupts, other software can perform
584 * target "soft" resets. The ATH6KL interrupt enables reset back to an
585 * "enabled" state when this happens.
586 */
587 ath6kl_hif_disable_intrs(dev);
588
589 /* unmask the host controller interrupts */
590 ath6kl_hif_irq_enable(dev->ar);
591 status = ath6kl_hif_enable_intrs(dev);
592
593 return status;
594}
595
596/* disable all device interrupts */
597int ath6kl_hif_mask_intrs(struct ath6kl_device *dev)
598{
599 /*
600 * Mask the interrupt at the HIF layer to avoid any stray interrupt
601 * taken while we zero out our shadow registers in
602 * ath6kl_hif_disable_intrs().
603 */
604 ath6kl_hif_irq_disable(dev->ar);
605
606 return ath6kl_hif_disable_intrs(dev);
607}
608
609int ath6kl_hif_setup(struct ath6kl_device *dev)
610{
611 int status = 0;
612
613 spin_lock_init(&dev->lock);
614
615 /*
616 * NOTE: we actually get the block size of a mailbox other than 0,
617 * for SDIO the block size on mailbox 0 is artificially set to 1.
618 * So we use the block size that is set for the other 3 mailboxes.
619 */
620 dev->htc_cnxt->block_sz = dev->ar->mbox_info.block_size;
621
622 /* must be a power of 2 */
623 if ((dev->htc_cnxt->block_sz & (dev->htc_cnxt->block_sz - 1)) != 0) {
624 WARN_ON(1);
625 status = -EINVAL;
626 goto fail_setup;
627 }
628
629 /* assemble mask, used for padding to a block */
630 dev->htc_cnxt->block_mask = dev->htc_cnxt->block_sz - 1;
631
632 ath6kl_dbg(ATH6KL_DBG_TRC, "block size: %d, mbox addr:0x%X\n",
633 dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr);
634
635 ath6kl_dbg(ATH6KL_DBG_TRC,
636 "hif interrupt processing is sync only\n");
637
638 status = ath6kl_hif_disable_intrs(dev);
639
640fail_setup:
641 return status;
642
643}