author	Michael Buesch <mb@bu3sch.de>	2007-09-18 15:39:42 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-10-10 19:51:37 -0400
commit	e4d6b7951812d98417feb10784e400e253caf633 (patch)
tree	4f653c52b4cffd5ade2eb166a56b306c9181ed08 /drivers/net/wireless/b43/dma.c
parent	61e115a56d1aafd6e6a8a9fee8ac099a6128ac7b (diff)

[B43]: add mac80211-based driver for modern BCM43xx devices

Signed-off-by: Michael Buesch <mb@bu3sch.de>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Diffstat (limited to 'drivers/net/wireless/b43/dma.c')
-rw-r--r--	drivers/net/wireless/b43/dma.c	1494
1 file changed, 1494 insertions(+), 0 deletions(-)

diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
new file mode 100644
index 000000000000..5e8f8ac0f1dd
--- /dev/null
+++ b/drivers/net/wireless/b43/dma.c
@@ -0,0 +1,1494 @@
/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>

/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ssb_dma_translation(ring->dev->dev);
	ctl = (bufsize - ring->frameoffset)
	    & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}
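
/*
 * Editor's aside (illustrative sketch, not part of the original commit):
 * op32_fill_descriptor() splits a DMA address into a base and an "address
 * extension" field that is routed through the SSB host translation.  A
 * standalone userspace sketch of that split, assuming the upstream values
 * SSB_DMA_TRANSLATION_MASK = 0xC0000000 and SSB_DMA_TRANSLATION_SHIFT = 30
 * (names prefixed DEMO_ here are demo-only):
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_TRANSLATION_MASK	0xC0000000u	/* assumed SSB_DMA_TRANSLATION_MASK */
#define DEMO_TRANSLATION_SHIFT	30		/* assumed SSB_DMA_TRANSLATION_SHIFT */

int main(void)
{
	uint32_t dmaaddr = 0x8F001000u;
	uint32_t addr = dmaaddr & ~DEMO_TRANSLATION_MASK;
	uint32_t addrext = (dmaaddr & DEMO_TRANSLATION_MASK) >> DEMO_TRANSLATION_SHIFT;

	/* Prints: addr=0x0F001000 addrext=2 */
	printf("addr=0x%08X addrext=%u\n", addr, addrext);
	return 0;
}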

static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= ssb_dma_translation(ring->dev->dev);
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= (bufsize - ring->frameoffset)
	    & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};
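
/*
 * Editor's aside (illustrative sketch, not part of the original commit):
 * dma32_ops and dma64_ops hide the two descriptor layouts behind one
 * function-pointer table, so the rest of the driver can call
 * ring->ops->... without caring about the descriptor width.  The same
 * pattern in miniature, with demo-only names:
 */
#include <stdio.h>

struct ring;
struct ring_ops {
	void (*poke_tx)(struct ring *r, int slot);
};

static void poke32(struct ring *r, int slot) { (void)r; printf("32bit poke %d\n", slot); }
static void poke64(struct ring *r, int slot) { (void)r; printf("64bit poke %d\n", slot); }

static const struct ring_ops ops32 = { .poke_tx = poke32 };
static const struct ring_ops ops64 = { .poke_tx = poke64 };

struct ring { const struct ring_ops *ops; };

int main(void)
{
	struct ring r = { .ops = &ops64 };	/* chosen once at setup time, as b43_setup_dmaring() does */

	r.ops->poke_tx(&r, 3);
	return 0;
}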

static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}
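
/*
 * Editor's aside (illustrative sketch, not part of the original commit):
 * next_slot()/prev_slot() implement modular increment/decrement around the
 * ring without the '%' operator.  A standalone check of the wraparound:
 */
#include <assert.h>

static int next_slot_demo(int nr_slots, int slot)
{
	return (slot == nr_slots - 1) ? 0 : slot + 1;	/* same as (slot + 1) % nr_slots */
}

int main(void)
{
	assert(next_slot_demo(256, 255) == 0);	/* wraps at the end of the table */
	assert(next_slot_demo(256, 10) == 11);
	return 0;
}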

#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

/* Mac80211-queue to b43-ring mapping */
static struct b43_dmaring *priority_to_txring(struct b43_wldev *dev,
					      int queue_priority)
{
	struct b43_dmaring *ring;

	/*FIXME: For now we always run on TX-ring-1 */
	return dev->dma.tx_ring1;

	/* 0 = highest priority */
	switch (queue_priority) {
	default:
		B43_WARN_ON(1);
		/* fallthrough */
	case 0:
		ring = dev->dma.tx_ring3;
		break;
	case 1:
		ring = dev->dma.tx_ring2;
		break;
	case 2:
		ring = dev->dma.tx_ring1;
		break;
	case 3:
		ring = dev->dma.tx_ring0;
		break;
	case 4:
		ring = dev->dma.tx_ring4;
		break;
	case 5:
		ring = dev->dma.tx_ring5;
		break;
	}

	return ring;
}

/* Bcm43xx-ring to mac80211-queue mapping */
static inline int txring_to_priority(struct b43_dmaring *ring)
{
	static const u8 idx_to_prio[] = { 3, 2, 1, 0, 4, 5, };

	/*FIXME: have only one queue, for now */
	return 0;

	return idx_to_prio[ring->index];
}
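
/*
 * Editor's aside (illustrative sketch, not part of the original commit):
 * the switch in priority_to_txring() maps mac80211 queue priority to a
 * ring index (0->3, 1->2, 2->1, 3->0, 4->4, 5->5), and idx_to_prio[] is
 * its inverse.  The table happens to be its own inverse, which a quick
 * standalone check confirms:
 */
#include <assert.h>

int main(void)
{
	static const int prio_to_idx[] = { 3, 2, 1, 0, 4, 5 };	/* from the switch above */
	static const int idx_to_prio[] = { 3, 2, 1, 0, 4, 5 };	/* from txring_to_priority() */
	int prio;

	for (prio = 0; prio < 6; prio++)
		assert(idx_to_prio[prio_to_idx[prio]] == prio);
	return 0;
}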

u16 b43_dmacontroller_base(int dma64bit, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (dma64bit) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			  unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dev,
					 buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dev,
					 buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43_dmaring *ring,
		      dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dev, addr, len, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dev,
				 addr, len, DMA_FROM_DEVICE);
	}
}

static inline
void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
			     dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_cpu(ring->dev->dev->dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43_dmaring *ring,
				dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_device(ring->dev->dev->dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43_dmaring *ring,
			    struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
	struct device *dev = ring->dev->dev->dev;

	ring->descbase = dma_alloc_coherent(dev, B43_DMA_RINGMEMSIZE,
					    &(ring->dmabase), GFP_KERNEL);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
	struct device *dev = ring->dev->dev->dev;

	dma_free_coherent(dev, B43_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = dma64 ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = dma64 ? B43_DMA64_RXSTATUS : B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (dma64) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (dma64) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = dma64 ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (dma64) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	struct b43_rxhdr_fw4 *rxhdr;
	struct b43_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (dma_mapping_error(dmaaddr)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (dma_mapping_error(dmaaddr)) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct b43_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ssb_dma_translation(ring->dev->dev);

	if (ring->tx) {
		if (ring->dma64) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_TXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->dma64) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_RXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA64_RXINDEX, 200);
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA32_RXINDEX, 200);
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->dma64);
		if (ring->dma64) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->dma64);
		if (ring->dma64) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}

static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_64BIT_MASK;
	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_32BIT_MASK;

	return DMA_30BIT_MASK;
}
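
/*
 * Editor's aside (illustrative sketch, not part of the original commit):
 * the probe above picks one of three address-width masks.  A DMA_nBIT_MASK
 * is simply the largest n-bit address, i.e. (1 << n) - 1; a standalone
 * check with a demo-only macro:
 */
#include <assert.h>

#define DMA_BIT_MASK_DEMO(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	assert(DMA_BIT_MASK_DEMO(30) == 0x3FFFFFFFULL);
	assert(DMA_BIT_MASK_DEMO(32) == 0xFFFFFFFFULL);
	assert(DMA_BIT_MASK_DEMO(64) == 0xFFFFFFFFFFFFFFFFULL);
	return 0;
}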

/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx, int dma64)
{
	struct b43_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					    sizeof(struct b43_txhdr_fw4),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dev,
					  ring->txhdr_cache,
					  sizeof(struct b43_txhdr_fw4),
					  DMA_TO_DEVICE);

		if (dma_mapping_error(dma_test)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
						    sizeof(struct b43_txhdr_fw4),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dev,
						  ring->txhdr_cache,
						  sizeof(struct b43_txhdr_fw4),
						  DMA_TO_DEVICE);

			if (dma_mapping_error(dma_test))
				goto err_kfree_txhdr_cache;
		}

		dma_unmap_single(dev->dev->dev,
				 dma_test, sizeof(struct b43_txhdr_fw4),
				 DMA_TO_DEVICE);
	}

	ring->dev = dev;
	ring->nr_slots = nr_slots;
	ring->mmio_base = b43_dmacontroller_base(dma64, controller_index);
	ring->index = controller_index;
	ring->dma64 = !!dma64;
	if (dma64)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA3_RX_FRAMEOFFSET;
		} else
			B43_WARN_ON(1);
	}
	spin_lock_init(&ring->lock);
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring)
{
	if (!ring)
		return;

	b43dbg(ring->dev->wl, "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
	       (ring->dma64) ? "64" : "32",
	       ring->mmio_base,
	       (ring->tx) ? "TX" : "RX", ring->max_used_slots, ring->nr_slots);
	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with the rx handler.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma;

	if (b43_using_pio(dev))
		return;
	dma = &dev->dma;

	b43_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	b43_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;

	b43_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	b43_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	b43_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	b43_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	b43_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	b43_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}

int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring;
	int err;
	u64 dmamask;
	int dma64 = 0;

	dmamask = supported_dma_mask(dev);
	if (dmamask == DMA_64BIT_MASK)
		dma64 = 1;

	err = ssb_dma_set_mask(dev->dev, dmamask);
	if (err) {
#ifdef B43_PIO
		b43warn(dev->wl, "DMA for this device not supported. "
			"Falling back to PIO\n");
		dev->__using_pio = 1;
		return -EAGAIN;
#else
		b43err(dev->wl, "DMA for this device not supported and "
		       "no PIO support compiled in\n");
		return -EOPNOTSUPP;
#endif
	}

	err = -ENOMEM;
	/* setup TX DMA channels. */
	ring = b43_setup_dmaring(dev, 0, 1, dma64);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = b43_setup_dmaring(dev, 1, 1, dma64);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = b43_setup_dmaring(dev, 2, 1, dma64);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = b43_setup_dmaring(dev, 3, 1, dma64);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = b43_setup_dmaring(dev, 4, 1, dma64);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = b43_setup_dmaring(dev, 5, 1, dma64);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = b43_setup_dmaring(dev, 0, 0, dma64);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (dev->dev->id.revision < 5) {
		ring = b43_setup_dmaring(dev, 3, 0, dma64);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	b43dbg(dev->wl, "%d-bit DMA initialized\n",
	       (dmamask == DMA_64BIT_MASK) ? 64 :
	       (dmamask == DMA_32BIT_MASK) ? 32 : 30);
	err = 0;
out:
	return err;

err_destroy_rx0:
	b43_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	b43_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	b43_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	b43_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	b43_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	b43_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	b43_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in the RX path.
	 */
	switch (ring->index) {
	case 0:
		cookie = 0xA000;
		break;
	case 1:
		cookie = 0xB000;
		break;
	case 2:
		cookie = 0xC000;
		break;
	case 3:
		cookie = 0xD000;
		break;
	case 4:
		cookie = 0xE000;
		break;
	case 5:
		cookie = 0xF000;
		break;
	}
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16) slot;

	return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		B43_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}
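
/*
 * Editor's aside (illustrative sketch, not part of the original commit):
 * generate_cookie() packs the ring index into the top nibble (0xA..0xF, so
 * the cookie can never be 0) and the slot into the low 12 bits;
 * parse_cookie() undoes it.  A standalone round-trip check, with the
 * demo-only helper pack_cookie():
 */
#include <assert.h>
#include <stdint.h>

static uint16_t pack_cookie(int ring_index, int slot)
{
	return (uint16_t)(((0xA + ring_index) << 12) | (slot & 0x0FFF));
}

int main(void)
{
	uint16_t cookie = pack_cookie(2, 5);	/* tx_ring2, slot 5 */

	assert(cookie == 0xC005);
	assert((cookie & 0xF000) == 0xC000);	/* ring selector */
	assert((cookie & 0x0FFF) == 5);		/* slot number */
	return 0;
}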

static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb,
			   struct ieee80211_tx_control *ctl)
{
	const struct b43_dma_ops *ops = ring->ops;
	u8 *header;
	int slot;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;

#define SLOTS_PER_PACKET  2
	B43_WARN_ON(skb_shinfo(skb)->nr_frags);

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * sizeof(struct b43_txhdr_fw4)]);
	b43_generate_txhdr(ring->dev, header,
			   skb->data, skb->len, ctl,
			   generate_cookie(ring, slot));

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   sizeof(struct b43_txhdr_fw4), 1);
	if (dma_mapping_error(meta_hdr->dmaaddr))
		return -EIO;
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     sizeof(struct b43_txhdr_fw4), 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
	meta->skb = skb;
	meta->is_last_fragment = 1;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (dma_mapping_error(meta->dmaaddr)) {
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (dma_mapping_error(meta->dmaaddr)) {
			err = -EIO;
			goto out_free_bounce;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 sizeof(struct b43_txhdr_fw4), 1);
	return err;
}

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}

int b43_dma_tx(struct b43_wldev *dev,
	       struct sk_buff *skb, struct ieee80211_tx_control *ctl)
{
	struct b43_dmaring *ring;
	int err = 0;
	unsigned long flags;

	ring = priority_to_txring(dev, ctl->queue);
	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
		b43warn(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out_unlock;
	}
	/* Check if the queue was stopped in mac80211,
	 * but we got called nevertheless.
	 * That would be a mac80211 bug. */
	B43_WARN_ON(ring->stopped);

	err = dma_tx_fragment(ring, skb, ctl);
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out_unlock;
	}
	ring->nr_tx_packets++;
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = 1;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out_unlock:
	spin_unlock_irqrestore(&ring->lock, flags);

	return err;
}

void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int slot;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!irqs_disabled());
	spin_lock(&ring->lock);

	B43_WARN_ON(!ring->tx);
	ops = ring->ops;
	while (1) {
		B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		desc = ops->idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len,
					 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 sizeof(struct b43_txhdr_fw4), 1);

		if (meta->is_last_fragment) {
			B43_WARN_ON(!meta->skb);
			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			if (status->acked) {
				meta->txstat.flags |= IEEE80211_TX_STATUS_ACK;
			} else {
				if (!(meta->txstat.control.flags
				      & IEEE80211_TXCTL_NO_ACK))
					meta->txstat.excessive_retries = 1;
			}
			if (status->frame_count == 0) {
				/* The frame was not transmitted at all. */
				meta->txstat.retry_count = 0;
			} else
				meta->txstat.retry_count = status->frame_count - 1;
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb,
						    &(meta->txstat));
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43_WARN_ON(meta->skb);
		}

		/* Everything unmapped and freed. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = 0;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}

	spin_unlock(&ring->lock);
}

void b43_dma_get_tx_stats(struct b43_wldev *dev,
			  struct ieee80211_tx_queue_stats *stats)
{
	const int nr_queues = dev->wl->hw->queues;
	struct b43_dmaring *ring;
	struct ieee80211_tx_queue_stats_data *data;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_queues; i++) {
		data = &(stats->data[i]);
		ring = priority_to_txring(dev, i);

		spin_lock_irqsave(&ring->lock, flags);
		data->len = ring->used_slots / SLOTS_PER_PACKET;
		data->limit = ring->nr_slots / SLOTS_PER_PACKET;
		data->count = ring->nr_tx_packets;
		spin_unlock_irqrestore(&ring->lock, flags);
	}
}

static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct b43_hwtxstatus *hw = (struct b43_hwtxstatus *)skb->data;
		int i = 0;

		while (hw->cookie == 0) {
			if (i > 100)
				break;
			i++;
			udelay(2);
			barrier();
		}
		b43_handle_hwtxstatus(ring->dev, hw);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr,
					   ring->rx_buffersize);

		return;
	}
	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;
}
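
/*
 * Editor's aside (illustrative sketch, not part of the original commit):
 * the skb_put()+skb_pull() pair at the end of dma_rx() first extends the
 * buffer to cover frameoffset+len bytes, then strips the frameoffset-sized
 * hardware header, leaving exactly the len frame bytes.  The pointer
 * arithmetic, standalone (frameoffset/len values here are demo-only):
 */
#include <assert.h>

int main(void)
{
	char buf[64];
	int frameoffset = 30, len = 20;
	char *data = buf;
	int datalen = 0;

	datalen += frameoffset + len;	/* like skb_put(skb, len + frameoffset) */
	data += frameoffset;		/* like skb_pull(skb, frameoffset) */
	datalen -= frameoffset;

	assert(data == buf + frameoffset);	/* data now points at the frame */
	assert(datalen == len);			/* exactly len bytes remain */
	return 0;
}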

void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring0);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring1);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring2);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring3);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring4);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring5);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring5);
	b43_dma_tx_resume_ring(dev->dma.tx_ring4);
	b43_dma_tx_resume_ring(dev->dma.tx_ring3);
	b43_dma_tx_resume_ring(dev->dma.tx_ring2);
	b43_dma_tx_resume_ring(dev->dma.tx_ring1);
	b43_dma_tx_resume_ring(dev->dma.tx_ring0);
	b43_power_saving_ctl_bits(dev, 0);
}