Diffstat (limited to 'drivers/net/wireless/ath5k/dma.c')
-rw-r--r-- | drivers/net/wireless/ath5k/dma.c | 566
1 files changed, 566 insertions, 0 deletions
diff --git a/drivers/net/wireless/ath5k/dma.c b/drivers/net/wireless/ath5k/dma.c
new file mode 100644
index 000000000000..a28090be9603
--- /dev/null
+++ b/drivers/net/wireless/ath5k/dma.c
@@ -0,0 +1,566 @@
1 | /* | ||
2 | * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org> | ||
3 | * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com> | ||
4 | * | ||
5 | * Permission to use, copy, modify, and distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | /*************************************\ | ||
20 | * DMA and interrupt masking functions * | ||
21 | \*************************************/ | ||
22 | |||
23 | /* | ||
24 | * dma.c - DMA and interrupt masking functions | ||
25 | * | ||
26 | * Here we set up the descriptor pointers (rxdp/txdp), start/stop the DMA | ||
27 | * engine and handle queue setup for the 5210 chipset (the rest are handled | ||
28 | * in qcu.c). We also set up the interrupt mask register (IMR) and read the | ||
29 | * various interrupt status registers (ISR). | ||
30 | * | ||
31 | * TODO: Handle SISR on 5211+ and introduce a function to return the queue | ||
32 | * number that generated the interrupt. | ||
33 | */ | ||
34 | |||
35 | #include "ath5k.h" | ||
36 | #include "reg.h" | ||
37 | #include "debug.h" | ||
38 | #include "base.h" | ||
39 | |||
40 | /*********\ | ||
41 | * Receive * | ||
42 | \*********/ | ||
43 | |||
44 | /** | ||
45 | * ath5k_hw_start_rx_dma - Start DMA receive | ||
46 | * | ||
47 | * @ah: The &struct ath5k_hw | ||
48 | */ | ||
49 | void ath5k_hw_start_rx_dma(struct ath5k_hw *ah) | ||
50 | { | ||
51 | ATH5K_TRACE(ah->ah_sc); | ||
52 | ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR); | ||
53 | ath5k_hw_reg_read(ah, AR5K_CR); | ||
54 | } | ||
55 | |||
56 | /** | ||
57 | * ath5k_hw_stop_rx_dma - Stop DMA receive | ||
58 | * | ||
59 | * @ah: The &struct ath5k_hw | ||
60 | */ | ||
61 | int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah) | ||
62 | { | ||
63 | unsigned int i; | ||
64 | |||
65 | ATH5K_TRACE(ah->ah_sc); | ||
66 | ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR); | ||
67 | |||
68 | /* | ||
69 | * It may take some time to disable the DMA receive unit | ||
70 | */ | ||
71 | for (i = 2000; i > 0 && | ||
72 | (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0; | ||
73 | i--) | ||
74 | udelay(10); | ||
75 | |||
76 | return i ? 0 : -EBUSY; | ||
77 | } | ||
78 | |||
79 | /** | ||
80 | * ath5k_hw_get_rxdp - Get RX Descriptor's address | ||
81 | * | ||
82 | * @ah: The &struct ath5k_hw | ||
83 | * | ||
84 | * XXX: Is RXDP read and clear ? | ||
85 | */ | ||
86 | u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah) | ||
87 | { | ||
88 | return ath5k_hw_reg_read(ah, AR5K_RXDP); | ||
89 | } | ||
90 | |||
91 | /** | ||
92 | * ath5k_hw_set_rxdp - Set RX Descriptor's address | ||
93 | * | ||
94 | * @ah: The &struct ath5k_hw | ||
95 | * @phys_addr: RX descriptor address | ||
96 | * | ||
97 | * XXX: Should we check if rx is enabled before setting rxdp ? | ||
98 | */ | ||
99 | void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr) | ||
100 | { | ||
101 | ATH5K_TRACE(ah->ah_sc); | ||
102 | |||
103 | ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP); | ||
104 | } | ||
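
/*
 * Illustrative sketch (not part of this file): how the RX helpers above
 * would typically be combined by the driver to (re)start reception.
 * "first_rx_desc_addr" is a hypothetical DMA address of the first
 * descriptor in the driver's RX ring.
 *
 *	if (ath5k_hw_stop_rx_dma(ah))
 *		return;			(RXE never cleared, give up)
 *	ath5k_hw_set_rxdp(ah, first_rx_desc_addr);
 *	ath5k_hw_start_rx_dma(ah);
 */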
105 | |||
106 | |||
107 | /**********\ | ||
108 | * Transmit * | ||
109 | \**********/ | ||
110 | |||
111 | /** | ||
112 | * ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue | ||
113 | * | ||
114 | * @ah: The &struct ath5k_hw | ||
115 | * @queue: The hw queue number | ||
116 | * | ||
117 | * Start DMA transmit for a specific queue. Since the 5210 doesn't have a | ||
118 | * QCU/DCU, queue parameters for the 5210 are also set up here based on queue | ||
119 | * type (one queue for normal data and one queue for beacons). For queue setup | ||
120 | * on newer chips check out qcu.c. Returns -EINVAL if the queue number is out | ||
121 | * of range and -EIO if the queue is inactive or already disabled. | ||
122 | * | ||
123 | * NOTE: Must be called after setting up tx control descriptor for that | ||
124 | * queue (see below). | ||
125 | */ | ||
126 | int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue) | ||
127 | { | ||
128 | u32 tx_queue; | ||
129 | |||
130 | ATH5K_TRACE(ah->ah_sc); | ||
131 | AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); | ||
132 | |||
133 | /* Return if queue is declared inactive */ | ||
134 | if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE) | ||
135 | return -EIO; | ||
136 | |||
137 | if (ah->ah_version == AR5K_AR5210) { | ||
138 | tx_queue = ath5k_hw_reg_read(ah, AR5K_CR); | ||
139 | |||
140 | /* | ||
141 | * Set the queue by type on 5210 | ||
142 | */ | ||
143 | switch (ah->ah_txq[queue].tqi_type) { | ||
144 | case AR5K_TX_QUEUE_DATA: | ||
145 | tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0; | ||
146 | break; | ||
147 | case AR5K_TX_QUEUE_BEACON: | ||
148 | tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1; | ||
149 | ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE, | ||
150 | AR5K_BSR); | ||
151 | break; | ||
152 | case AR5K_TX_QUEUE_CAB: | ||
153 | tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1; | ||
154 | ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V | | ||
155 | AR5K_BCR_BDMAE, AR5K_BSR); | ||
156 | break; | ||
157 | default: | ||
158 | return -EINVAL; | ||
159 | } | ||
160 | /* Start queue */ | ||
161 | ath5k_hw_reg_write(ah, tx_queue, AR5K_CR); | ||
162 | ath5k_hw_reg_read(ah, AR5K_CR); | ||
163 | } else { | ||
164 | /* Return if queue is disabled */ | ||
165 | if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue)) | ||
166 | return -EIO; | ||
167 | |||
168 | /* Start queue */ | ||
169 | AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue); | ||
170 | } | ||
171 | |||
172 | return 0; | ||
173 | } | ||
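
/*
 * Illustrative sketch (not part of this file): per the NOTE above, the
 * queue's TX descriptor pointer has to be programmed before the queue is
 * started.  "qnum" and "first_tx_desc_addr" are hypothetical driver-side
 * values.
 *
 *	if (!ath5k_hw_set_txdp(ah, qnum, first_tx_desc_addr))
 *		ath5k_hw_start_tx_dma(ah, qnum);
 */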
174 | |||
175 | /** | ||
176 | * ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue | ||
177 | * | ||
178 | * @ah: The &struct ath5k_hw | ||
179 | * @queue: The hw queue number | ||
180 | * | ||
181 | * Stop DMA transmit on a specific hw queue and drain the queue so we don't | ||
182 | * have any pending frames. Returns -EBUSY if we still have pending frames, | ||
183 | * -EINVAL if queue number is out of range. | ||
184 | * | ||
185 | * TODO: Test queue drain code | ||
186 | */ | ||
187 | int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue) | ||
188 | { | ||
189 | unsigned int i = 100; | ||
190 | u32 tx_queue, pending; | ||
191 | |||
192 | ATH5K_TRACE(ah->ah_sc); | ||
193 | AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); | ||
194 | |||
195 | /* Return if queue is declared inactive */ | ||
196 | if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE) | ||
197 | return -EIO; | ||
198 | |||
199 | if (ah->ah_version == AR5K_AR5210) { | ||
200 | tx_queue = ath5k_hw_reg_read(ah, AR5K_CR); | ||
201 | |||
202 | /* | ||
203 | * Set by queue type | ||
204 | */ | ||
205 | switch (ah->ah_txq[queue].tqi_type) { | ||
206 | case AR5K_TX_QUEUE_DATA: | ||
207 | tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0; | ||
208 | break; | ||
209 | case AR5K_TX_QUEUE_BEACON: | ||
210 | case AR5K_TX_QUEUE_CAB: | ||
211 | /* XXX Fix me... */ | ||
212 | tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXE1; | ||
213 | ath5k_hw_reg_write(ah, 0, AR5K_BSR); | ||
214 | break; | ||
215 | default: | ||
216 | return -EINVAL; | ||
217 | } | ||
218 | |||
219 | /* Stop queue */ | ||
220 | ath5k_hw_reg_write(ah, tx_queue, AR5K_CR); | ||
221 | ath5k_hw_reg_read(ah, AR5K_CR); | ||
222 | } else { | ||
223 | /* | ||
224 | * Schedule TX disable and wait until queue is empty | ||
225 | */ | ||
226 | AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue); | ||
227 | |||
228 | /*Check for pending frames*/ | ||
229 | do { | ||
230 | pending = ath5k_hw_reg_read(ah, | ||
231 | AR5K_QUEUE_STATUS(queue)) & | ||
232 | AR5K_QCU_STS_FRMPENDCNT; | ||
233 | udelay(100); | ||
234 | } while (--i && pending); | ||
235 | |||
236 | /* Clear register */ | ||
237 | ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD); | ||
238 | if (pending) | ||
239 | return -EBUSY; | ||
240 | } | ||
241 | |||
242 | /* TODO: Check for success else return error */ | ||
243 | return 0; | ||
244 | } | ||
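
/*
 * Illustrative sketch (not part of this file): before a reset or channel
 * change the driver would normally drain every queue it has set up and
 * treat -EBUSY as "frames are still stuck in hw".  "qnum" is a
 * hypothetical loop counter.
 *
 *	for (qnum = 0; qnum < ah->ah_capabilities.cap_queues.q_tx_num; qnum++)
 *		if (ath5k_hw_stop_tx_dma(ah, qnum) == -EBUSY)
 *			(queue still has pending frames, reset the chip)
 */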
245 | |||
246 | /** | ||
247 | * ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue | ||
248 | * | ||
249 | * @ah: The &struct ath5k_hw | ||
250 | * @queue: The hw queue number | ||
251 | * | ||
252 | * Get TX descriptor's address for a specific queue. For 5210 we ignore | ||
253 | * the queue number and use the tx queue type since we only have 2 queues. | ||
254 | * We use TXDP0 for the normal data queue and TXDP1 for the beacon queue. | ||
255 | * For newer chips with QCU/DCU we just read the corresponding TXDP register. | ||
256 | * | ||
257 | * XXX: Is TXDP read and clear ? | ||
258 | */ | ||
259 | u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue) | ||
260 | { | ||
261 | u16 tx_reg; | ||
262 | |||
263 | ATH5K_TRACE(ah->ah_sc); | ||
264 | AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); | ||
265 | |||
266 | /* | ||
267 | * Get the transmit queue descriptor pointer from the selected queue | ||
268 | */ | ||
269 | /*5210 doesn't have QCU*/ | ||
270 | if (ah->ah_version == AR5K_AR5210) { | ||
271 | switch (ah->ah_txq[queue].tqi_type) { | ||
272 | case AR5K_TX_QUEUE_DATA: | ||
273 | tx_reg = AR5K_NOQCU_TXDP0; | ||
274 | break; | ||
275 | case AR5K_TX_QUEUE_BEACON: | ||
276 | case AR5K_TX_QUEUE_CAB: | ||
277 | tx_reg = AR5K_NOQCU_TXDP1; | ||
278 | break; | ||
279 | default: | ||
280 | return 0xffffffff; | ||
281 | } | ||
282 | } else { | ||
283 | tx_reg = AR5K_QUEUE_TXDP(queue); | ||
284 | } | ||
285 | |||
286 | return ath5k_hw_reg_read(ah, tx_reg); | ||
287 | } | ||
288 | |||
289 | /** | ||
290 | * ath5k_hw_set_txdp - Set TX Descriptor's address for a specific queue | ||
291 | * | ||
292 | * @ah: The &struct ath5k_hw | ||
293 | * @queue: The hw queue number | ||
294 | * | ||
295 | * Set TX descriptor's address for a specific queue. For 5210 we ignore | ||
296 | * the queue number and use the tx queue type since we only have 2 queues; | ||
297 | * as above, we use TXDP0 for the data queue and TXDP1 for the beacon queue. | ||
298 | * For newer chips with QCU/DCU we just set the corresponding TXDP register. | ||
299 | * Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still | ||
300 | * active. | ||
301 | */ | ||
302 | int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr) | ||
303 | { | ||
304 | u16 tx_reg; | ||
305 | |||
306 | ATH5K_TRACE(ah->ah_sc); | ||
307 | AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); | ||
308 | |||
309 | /* | ||
310 | * Set the transmit queue descriptor pointer register by type | ||
311 | * on 5210 | ||
312 | */ | ||
313 | if (ah->ah_version == AR5K_AR5210) { | ||
314 | switch (ah->ah_txq[queue].tqi_type) { | ||
315 | case AR5K_TX_QUEUE_DATA: | ||
316 | tx_reg = AR5K_NOQCU_TXDP0; | ||
317 | break; | ||
318 | case AR5K_TX_QUEUE_BEACON: | ||
319 | case AR5K_TX_QUEUE_CAB: | ||
320 | tx_reg = AR5K_NOQCU_TXDP1; | ||
321 | break; | ||
322 | default: | ||
323 | return -EINVAL; | ||
324 | } | ||
325 | } else { | ||
326 | /* | ||
327 | * Set the transmit queue descriptor pointer for | ||
328 | * the selected queue on QCU for 5211+ | ||
329 | * (this won't work if the queue is still active) | ||
330 | */ | ||
331 | if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue)) | ||
332 | return -EIO; | ||
333 | |||
334 | tx_reg = AR5K_QUEUE_TXDP(queue); | ||
335 | } | ||
336 | |||
337 | /* Set descriptor pointer */ | ||
338 | ath5k_hw_reg_write(ah, phys_addr, tx_reg); | ||
339 | |||
340 | return 0; | ||
341 | } | ||
342 | |||
343 | /** | ||
344 | * ath5k_hw_update_tx_triglevel - Update tx trigger level | ||
345 | * | ||
346 | * @ah: The &struct ath5k_hw | ||
347 | * @increase: Flag to force increase of trigger level | ||
348 | * | ||
349 | * This function increases/decreases the tx trigger level for the tx fifo | ||
350 | * buffer (aka FIFO threshold) that is used to indicate when the PCU flushes | ||
351 | * the buffer and transmits its data. Lowering this results in sending small | ||
352 | * frames more quickly but can lead to tx underruns; raising it a lot can | ||
353 | * result in other problems (beacon misses seem to be related). Right now we | ||
354 | * start with the lowest possible value (64 bytes) and on tx underrun we | ||
355 | * increase it using the increase flag. Returns -EIO if we hit the minimum. | ||
356 | * | ||
357 | * XXX: Link this with tx DMA size ? | ||
358 | * XXX: Use it to save interrupts ? | ||
359 | * TODO: Needs testing, I think it's related to bmiss... | ||
360 | */ | ||
361 | int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase) | ||
362 | { | ||
363 | u32 trigger_level, imr; | ||
364 | int ret = -EIO; | ||
365 | |||
366 | ATH5K_TRACE(ah->ah_sc); | ||
367 | |||
368 | /* | ||
369 | * Disable interrupts by setting the mask | ||
370 | */ | ||
371 | imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL); | ||
372 | |||
373 | trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG), | ||
374 | AR5K_TXCFG_TXFULL); | ||
375 | |||
376 | if (!increase) { | ||
377 | if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES) | ||
378 | goto done; | ||
379 | } else | ||
380 | trigger_level += | ||
381 | ((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2); | ||
382 | |||
383 | /* | ||
384 | * Update trigger level on success | ||
385 | */ | ||
386 | if (ah->ah_version == AR5K_AR5210) | ||
387 | ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL); | ||
388 | else | ||
389 | AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG, | ||
390 | AR5K_TXCFG_TXFULL, trigger_level); | ||
391 | |||
392 | ret = 0; | ||
393 | |||
394 | done: | ||
395 | /* | ||
396 | * Restore interrupt mask | ||
397 | */ | ||
398 | ath5k_hw_set_imr(ah, imr); | ||
399 | |||
400 | return ret; | ||
401 | } | ||
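
/*
 * Illustrative sketch (not part of this file): the driver's interrupt path
 * can raise the trigger level whenever the hardware reports a tx fifo
 * underrun.  "status" is assumed to be the mask returned by
 * ath5k_hw_get_isr() below, and AR5K_INT_TXURN is assumed to be part of
 * &ath5k_int (the AR5K_IMR_TXURN mapping in ath5k_hw_set_imr suggests so).
 *
 *	if (status & AR5K_INT_TXURN)
 *		ath5k_hw_update_tx_triglevel(ah, true);
 */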
402 | |||
403 | /*******************\ | ||
404 | * Interrupt masking * | ||
405 | \*******************/ | ||
406 | |||
407 | /** | ||
408 | * ath5k_hw_is_intr_pending - Check if we have pending interrupts | ||
409 | * | ||
410 | * @ah: The &struct ath5k_hw | ||
411 | * | ||
412 | * Check if we have pending interrupts to process. Returns 1 if we | ||
413 | * have pending interrupts and 0 if we don't. | ||
414 | */ | ||
415 | bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah) | ||
416 | { | ||
417 | ATH5K_TRACE(ah->ah_sc); | ||
418 | return ath5k_hw_reg_read(ah, AR5K_INTPEND); | ||
419 | } | ||
420 | |||
421 | /** | ||
422 | * ath5k_hw_get_isr - Get interrupt status | ||
423 | * | ||
424 | * @ah: The &struct ath5k_hw | ||
425 | * @interrupt_mask: Driver's interrupt mask used to filter out | ||
426 | * interrupts in sw. | ||
427 | * | ||
428 | * This function is used inside our interrupt handler to determine the reason | ||
429 | * for the interrupt by reading the Primary Interrupt Status Register. Returns | ||
430 | * an abstract interrupt status mask which is mostly ISR with some uncommon | ||
431 | * bits mapped to standard non hw-specific positions | ||
432 | * (check out &ath5k_int). | ||
433 | * | ||
434 | * NOTE: We use read-and-clear register, so after this function is called ISR | ||
435 | * is zeroed. | ||
436 | * | ||
437 | * XXX: Why filter interrupts in sw with interrupt_mask ? No benefit at all | ||
438 | * plus it can be misleading (one might think that we save interrupts this way) | ||
439 | */ | ||
440 | int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask) | ||
441 | { | ||
442 | u32 data; | ||
443 | |||
444 | ATH5K_TRACE(ah->ah_sc); | ||
445 | |||
446 | /* | ||
447 | * Read interrupt status from the Interrupt Status register | ||
448 | * on 5210 | ||
449 | */ | ||
450 | if (ah->ah_version == AR5K_AR5210) { | ||
451 | data = ath5k_hw_reg_read(ah, AR5K_ISR); | ||
452 | if (unlikely(data == AR5K_INT_NOCARD)) { | ||
453 | *interrupt_mask = data; | ||
454 | return -ENODEV; | ||
455 | } | ||
456 | } else { | ||
457 | /* | ||
458 | * Read interrupt status from the Read-And-Clear | ||
459 | * shadow register. | ||
460 | * Note: PISR/SISR Not available on 5210 | ||
461 | */ | ||
462 | data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR); | ||
463 | } | ||
464 | |||
465 | /* | ||
466 | * Get abstract interrupt mask (driver-compatible) | ||
467 | */ | ||
468 | *interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr; | ||
469 | |||
470 | if (unlikely(data == AR5K_INT_NOCARD)) | ||
471 | return -ENODEV; | ||
472 | |||
473 | if (data & (AR5K_ISR_RXOK | AR5K_ISR_RXERR)) | ||
474 | *interrupt_mask |= AR5K_INT_RX; | ||
475 | |||
476 | if (data & (AR5K_ISR_TXOK | AR5K_ISR_TXERR | ||
477 | | AR5K_ISR_TXDESC | AR5K_ISR_TXEOL)) | ||
478 | *interrupt_mask |= AR5K_INT_TX; | ||
479 | |||
480 | if (ah->ah_version != AR5K_AR5210) { | ||
481 | /*HIU = Host Interface Unit (PCI etc)*/ | ||
482 | if (unlikely(data & (AR5K_ISR_HIUERR))) | ||
483 | *interrupt_mask |= AR5K_INT_FATAL; | ||
484 | |||
485 | /*Beacon Not Ready*/ | ||
486 | if (unlikely(data & (AR5K_ISR_BNR))) | ||
487 | *interrupt_mask |= AR5K_INT_BNR; | ||
488 | } | ||
489 | |||
490 | /* | ||
491 | * XXX: BMISS interrupts may occur after association. | ||
492 | * I found this in the 5210 code but it needs testing. If this is | ||
493 | * true we should disable them before assoc and re-enable them | ||
494 | * after a successful assoc + some jiffies. | ||
495 | */ | ||
496 | #if 0 | ||
497 | *interrupt_mask &= ~AR5K_INT_BMISS; | ||
498 | #endif | ||
499 | |||
500 | /* | ||
501 | * In case we didn't handle anything, | ||
502 | * print the register value. | ||
503 | */ | ||
504 | if (unlikely(*interrupt_mask == 0 && net_ratelimit())) | ||
505 | ATH5K_PRINTF("0x%08x\n", data); | ||
506 | |||
507 | return 0; | ||
508 | } | ||
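
/*
 * Illustrative sketch (not part of this file): the skeleton of an interrupt
 * handler built on ath5k_hw_is_intr_pending() and ath5k_hw_get_isr().
 * Per-bit processing and error handling are left out.
 *
 *	enum ath5k_int status;
 *
 *	if (!ath5k_hw_is_intr_pending(ah))
 *		return IRQ_NONE;		(shared irq, not ours)
 *
 *	ath5k_hw_get_isr(ah, &status);		(reads and clears ISR)
 *	if (status & AR5K_INT_RX)
 *		(handle received frames)
 *	if (status & AR5K_INT_TX)
 *		(handle tx completions)
 */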
509 | |||
510 | /** | ||
511 | * ath5k_hw_set_imr - Set interrupt mask | ||
512 | * | ||
513 | * @ah: The &struct ath5k_hw | ||
514 | * @new_mask: The new interrupt mask to be set | ||
515 | * | ||
516 | * Set the interrupt mask in hw so that we only get the interrupts we care | ||
517 | * about. We do that by mapping &ath5k_int bits to hw-specific bits to remove | ||
518 | * abstraction and writing the Interrupt Mask Register. | ||
519 | */ | ||
520 | enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask) | ||
521 | { | ||
522 | enum ath5k_int old_mask, int_mask; | ||
523 | |||
524 | /* | ||
525 | * Disable card interrupts to prevent any race conditions | ||
526 | * (they will be re-enabled afterwards). | ||
527 | */ | ||
528 | ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER); | ||
529 | ath5k_hw_reg_read(ah, AR5K_IER); | ||
530 | |||
531 | old_mask = ah->ah_imr; | ||
532 | |||
533 | /* | ||
534 | * Add additional, chipset-dependent interrupt mask flags | ||
535 | * and write them to the IMR (interrupt mask register). | ||
536 | */ | ||
537 | int_mask = new_mask & AR5K_INT_COMMON; | ||
538 | |||
539 | if (new_mask & AR5K_INT_RX) | ||
540 | int_mask |= AR5K_IMR_RXOK | AR5K_IMR_RXERR | AR5K_IMR_RXORN | | ||
541 | AR5K_IMR_RXDESC; | ||
542 | |||
543 | if (new_mask & AR5K_INT_TX) | ||
544 | int_mask |= AR5K_IMR_TXOK | AR5K_IMR_TXERR | AR5K_IMR_TXDESC | | ||
545 | AR5K_IMR_TXURN; | ||
546 | |||
547 | if (ah->ah_version != AR5K_AR5210) { | ||
548 | if (new_mask & AR5K_INT_FATAL) { | ||
549 | int_mask |= AR5K_IMR_HIUERR; | ||
550 | AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_MCABT | | ||
551 | AR5K_SIMR2_SSERR | AR5K_SIMR2_DPERR); | ||
552 | } | ||
553 | } | ||
554 | |||
555 | ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR); | ||
556 | |||
557 | /* Store new interrupt mask */ | ||
558 | ah->ah_imr = new_mask; | ||
559 | |||
560 | /* ..re-enable interrupts */ | ||
561 | ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER); | ||
562 | ath5k_hw_reg_read(ah, AR5K_IER); | ||
563 | |||
564 | return old_mask; | ||
565 | } | ||
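
/*
 * Illustrative sketch (not part of this file): enabling the interrupts the
 * driver typically cares about; the exact set of &ath5k_int bits is a
 * driver decision.
 *
 *	ath5k_hw_set_imr(ah, AR5K_INT_RX | AR5K_INT_TX |
 *			AR5K_INT_FATAL | AR5K_INT_GLOBAL);
 */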
566 | |||