Patch: add drivers/net/wireless/b43/pio.c (new file; 835 insertions, 0 deletions)

diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
new file mode 100644
index 000000000000..e73769ac027f
--- /dev/null
+++ b/drivers/net/wireless/b43/pio.c
@@ -0,0 +1,835 @@
1/*
2
3 Broadcom B43 wireless driver
4
5 PIO data transfer
6
7 Copyright (c) 2005-2008 Michael Buesch <mb@bu3sch.de>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING. If not, write to
21 the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
22 Boston, MA 02110-1301, USA.
23
24*/
25
26#include "b43.h"
27#include "pio.h"
28#include "dma.h"
29#include "main.h"
30#include "xmit.h"
31
32#include <linux/delay.h>
33
34
35static void b43_pio_rx_work(struct work_struct *work);
36
37
38static u16 generate_cookie(struct b43_pio_txqueue *q,
39 struct b43_pio_txpacket *pack)
40{
41 u16 cookie;
42
43 /* Use the upper 4 bits of the cookie as
44 * PIO controller ID and store the packet index number
45 * in the lower 12 bits.
46 * Note that the cookie must never be 0, as this
47 * is a special value used in RX path.
48 * It can also not be 0xFFFF because that is special
49 * for multicast frames.
50 */
51 cookie = (((u16)q->index + 1) << 12);
52 cookie |= pack->index;
53
54 return cookie;
55}
56
57static
58struct b43_pio_txqueue * parse_cookie(struct b43_wldev *dev,
59 u16 cookie,
60 struct b43_pio_txpacket **pack)
61{
62 struct b43_pio *pio = &dev->pio;
63 struct b43_pio_txqueue *q = NULL;
64 unsigned int pack_index;
65
66 switch (cookie & 0xF000) {
67 case 0x1000:
68 q = pio->tx_queue_AC_BK;
69 break;
70 case 0x2000:
71 q = pio->tx_queue_AC_BE;
72 break;
73 case 0x3000:
74 q = pio->tx_queue_AC_VI;
75 break;
76 case 0x4000:
77 q = pio->tx_queue_AC_VO;
78 break;
79 case 0x5000:
80 q = pio->tx_queue_mcast;
81 break;
82 }
83 if (B43_WARN_ON(!q))
84 return NULL;
85 pack_index = (cookie & 0x0FFF);
86 if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q->packets)))
87 return NULL;
88 *pack = &q->packets[pack_index];
89
90 return q;
91}
92
/* Translate a PIO engine index to its MMIO register base.
 * Core revisions >= 11 use a different (and shorter) set of base
 * addresses than older cores. Out-of-range indices trigger a warning
 * and would read past the table — callers must pass valid indices. */
static u16 index_to_pioqueue_base(struct b43_wldev *dev,
				  unsigned int index)
{
	static const u16 bases[] = {
		B43_MMIO_PIO_BASE0,
		B43_MMIO_PIO_BASE1,
		B43_MMIO_PIO_BASE2,
		B43_MMIO_PIO_BASE3,
		B43_MMIO_PIO_BASE4,
		B43_MMIO_PIO_BASE5,
		B43_MMIO_PIO_BASE6,
		B43_MMIO_PIO_BASE7,
	};
	static const u16 bases_rev11[] = {
		B43_MMIO_PIO11_BASE0,
		B43_MMIO_PIO11_BASE1,
		B43_MMIO_PIO11_BASE2,
		B43_MMIO_PIO11_BASE3,
		B43_MMIO_PIO11_BASE4,
		B43_MMIO_PIO11_BASE5,
	};

	if (dev->dev->id.revision >= 11) {
		B43_WARN_ON(index >= ARRAY_SIZE(bases_rev11));
		return bases_rev11[index];
	}
	B43_WARN_ON(index >= ARRAY_SIZE(bases));
	return bases[index];
}
122
123static u16 pio_txqueue_offset(struct b43_wldev *dev)
124{
125 if (dev->dev->id.revision >= 11)
126 return 0x18;
127 return 0;
128}
129
130static u16 pio_rxqueue_offset(struct b43_wldev *dev)
131{
132 if (dev->dev->id.revision >= 11)
133 return 0x38;
134 return 8;
135}
136
/* Allocate and initialize one PIO TX queue for engine `index`.
 * Returns the queue, or NULL on allocation failure.
 * All packet slots start out on the free list (packets_list). */
static struct b43_pio_txqueue * b43_setup_pioqueue_tx(struct b43_wldev *dev,
						      unsigned int index)
{
	struct b43_pio_txqueue *q;
	struct b43_pio_txpacket *p;
	unsigned int i;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return NULL;
	spin_lock_init(&q->lock);
	q->dev = dev;
	q->rev = dev->dev->id.revision;
	q->mmio_base = index_to_pioqueue_base(dev, index) +
		       pio_txqueue_offset(dev);
	q->index = index;

	q->free_packet_slots = B43_PIO_MAX_NR_TXPACKETS;
	if (q->rev >= 8) {
		/* Rev >= 8 cores do not report the buffer size in
		 * B43_PIO_TXQBUFSIZE; a fixed value is used instead. */
		q->buffer_size = 1920; //FIXME this constant is wrong.
	} else {
		/* Older cores report the queue buffer size in hardware.
		 * 80 bytes are subtracted from it — presumably reserved
		 * space; TODO confirm against the hardware docs. */
		q->buffer_size = b43_piotx_read16(q, B43_PIO_TXQBUFSIZE);
		q->buffer_size -= 80;
	}

	/* Put every packet slot onto the free-slots list. */
	INIT_LIST_HEAD(&q->packets_list);
	for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
		p = &(q->packets[i]);
		INIT_LIST_HEAD(&p->list);
		p->index = i;
		p->queue = q;
		list_add(&p->list, &q->packets_list);
	}

	return q;
}
173
/* Allocate and initialize the PIO RX queue for engine `index`.
 * Returns the queue, or NULL on allocation failure.
 * Also switches the DMA engine into direct-FIFO (PIO) RX mode. */
static struct b43_pio_rxqueue * b43_setup_pioqueue_rx(struct b43_wldev *dev,
						      unsigned int index)
{
	struct b43_pio_rxqueue *q;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return NULL;
	spin_lock_init(&q->lock);
	q->dev = dev;
	q->rev = dev->dev->id.revision;
	q->mmio_base = index_to_pioqueue_base(dev, index) +
		       pio_rxqueue_offset(dev);
	/* RX frames are processed from a workqueue; see b43_pio_rx_work. */
	INIT_WORK(&q->rx_work, b43_pio_rx_work);

	/* Enable Direct FIFO RX (PIO) on the engine. */
	b43_dma_direct_fifo_rx(dev, index, 1);

	return q;
}
194
195static void b43_pio_cancel_tx_packets(struct b43_pio_txqueue *q)
196{
197 struct b43_pio_txpacket *pack;
198 unsigned int i;
199
200 for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
201 pack = &(q->packets[i]);
202 if (pack->skb) {
203 dev_kfree_skb_any(pack->skb);
204 pack->skb = NULL;
205 }
206 }
207}
208
/* Free a TX queue, dropping any skbs still queued on it.
 * A NULL queue is silently ignored. `name` is currently unused
 * (kept for the destroy_queue_tx() macro interface). */
static void b43_destroy_pioqueue_tx(struct b43_pio_txqueue *q,
				    const char *name)
{
	if (q) {
		b43_pio_cancel_tx_packets(q);
		kfree(q);
	}
}
217
/* Free an RX queue. A NULL queue is silently ignored.
 * `name` is currently unused (kept for the destroy_queue_rx()
 * macro interface). */
static void b43_destroy_pioqueue_rx(struct b43_pio_rxqueue *q,
				    const char *name)
{
	if (q)
		kfree(q);
}
225
/* Destroy one queue of the b43_pio struct and NULL out its pointer.
 * The queue field name is stringified and passed to the destroy
 * function (only useful for debug messages). */
#define destroy_queue_tx(pio, queue) do { \
	b43_destroy_pioqueue_tx((pio)->queue, __stringify(queue)); \
	(pio)->queue = NULL; \
	} while (0)

#define destroy_queue_rx(pio, queue) do { \
	b43_destroy_pioqueue_rx((pio)->queue, __stringify(queue)); \
	(pio)->queue = NULL; \
	} while (0)
235
/* Tear down all PIO queues of the device.
 * No-op when the device is not using PIO transfers. */
void b43_pio_free(struct b43_wldev *dev)
{
	struct b43_pio *pio;

	if (!b43_using_pio_transfers(dev))
		return;
	pio = &dev->pio;

	/* Destroy RX first, then TX queues in reverse setup order. */
	destroy_queue_rx(pio, rx_queue);
	destroy_queue_tx(pio, tx_queue_mcast);
	destroy_queue_tx(pio, tx_queue_AC_VO);
	destroy_queue_tx(pio, tx_queue_AC_VI);
	destroy_queue_tx(pio, tx_queue_AC_BE);
	destroy_queue_tx(pio, tx_queue_AC_BK);
}
251
/* Stop PIO operation: wait for any in-flight RX work item to finish.
 * No-op when the device is not using PIO transfers. */
void b43_pio_stop(struct b43_wldev *dev)
{
	if (!b43_using_pio_transfers(dev))
		return;
	cancel_work_sync(&dev->pio.rx_queue->rx_work);
}
258
/* Set up all PIO queues: one TX queue per QoS access category
 * (BK/BE/VI/VO), one multicast TX queue, and one RX queue.
 * Returns 0 on success or -ENOMEM on allocation failure; on error,
 * all queues created so far are destroyed again (goto unwind chain). */
int b43_pio_init(struct b43_wldev *dev)
{
	struct b43_pio *pio = &dev->pio;
	int err = -ENOMEM;

	/* Clear the BigEndian bit in MAC control and reset the
	 * RX padding offset in shared memory. */
	b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL)
		    & ~B43_MACCTL_BE);
	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_RXPADOFF, 0);

	pio->tx_queue_AC_BK = b43_setup_pioqueue_tx(dev, 0);
	if (!pio->tx_queue_AC_BK)
		goto out;

	pio->tx_queue_AC_BE = b43_setup_pioqueue_tx(dev, 1);
	if (!pio->tx_queue_AC_BE)
		goto err_destroy_bk;

	pio->tx_queue_AC_VI = b43_setup_pioqueue_tx(dev, 2);
	if (!pio->tx_queue_AC_VI)
		goto err_destroy_be;

	pio->tx_queue_AC_VO = b43_setup_pioqueue_tx(dev, 3);
	if (!pio->tx_queue_AC_VO)
		goto err_destroy_vi;

	pio->tx_queue_mcast = b43_setup_pioqueue_tx(dev, 4);
	if (!pio->tx_queue_mcast)
		goto err_destroy_vo;

	pio->rx_queue = b43_setup_pioqueue_rx(dev, 0);
	if (!pio->rx_queue)
		goto err_destroy_mcast;

	b43dbg(dev->wl, "PIO initialized\n");
	err = 0;
out:
	return err;

	/* Error unwind: destroy in reverse order of creation. */
err_destroy_mcast:
	destroy_queue_tx(pio, tx_queue_mcast);
err_destroy_vo:
	destroy_queue_tx(pio, tx_queue_AC_VO);
err_destroy_vi:
	destroy_queue_tx(pio, tx_queue_AC_VI);
err_destroy_be:
	destroy_queue_tx(pio, tx_queue_AC_BE);
err_destroy_bk:
	destroy_queue_tx(pio, tx_queue_AC_BK);
	return err;
}
309
310/* Static mapping of mac80211's queues (priorities) to b43 PIO queues. */
311static struct b43_pio_txqueue * select_queue_by_priority(struct b43_wldev *dev,
312 u8 queue_prio)
313{
314 struct b43_pio_txqueue *q;
315
316 if (b43_modparam_qos) {
317 /* 0 = highest priority */
318 switch (queue_prio) {
319 default:
320 B43_WARN_ON(1);
321 /* fallthrough */
322 case 0:
323 q = dev->pio.tx_queue_AC_VO;
324 break;
325 case 1:
326 q = dev->pio.tx_queue_AC_VI;
327 break;
328 case 2:
329 q = dev->pio.tx_queue_AC_BE;
330 break;
331 case 3:
332 q = dev->pio.tx_queue_AC_BK;
333 break;
334 }
335 } else
336 q = dev->pio.tx_queue_AC_BE;
337
338 return q;
339}
340
/* Push a data buffer into a 2-byte-wide (rev < 8) TX FIFO.
 * *ctl is the current TXCTL shadow; it is updated in place so the
 * caller's next transfer starts from the correct control state.
 * For an odd-length buffer, the WRITEHI bit is cleared before the
 * final 16-bit word so the hardware only consumes the low byte. */
static inline void tx_write_2byte_queue(struct b43_pio_txqueue *q,
					u16 *ctl,
					const void *_data,
					unsigned int data_len)
{
	const u8 *data = _data;
	unsigned int i;
	u16 value;

	*ctl |= B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_WRITEHI;
	b43_piotx_write16(q, B43_PIO_TXCTL, *ctl);
	for (i = 0; i < data_len; i += 2) {
		value = data[i];
		if (i + 1 < data_len) {
			value |= (u16)(data[i + 1]) << 8;
		} else {
			/* Last, odd byte: only the low byte is valid. */
			*ctl &= ~B43_PIO_TXCTL_WRITEHI;
			b43_piotx_write16(q, B43_PIO_TXCTL, *ctl);
		}
		b43_piotx_write16(q, B43_PIO_TXDATA, value);
	}
}
363
/* Transmit one frame (device TX header + 802.11 frame data) through
 * a 2-byte-wide (rev < 8) TX FIFO. FREADY is set before the data and
 * EOF is asserted afterwards to hand the frame to the hardware. */
static void pio_tx_frame_2byte_queue(struct b43_pio_txpacket *pack,
				     const u8 *hdr, unsigned int hdrlen)
{
	struct b43_pio_txqueue *q = pack->queue;
	const char *frame = pack->skb->data;
	unsigned int frame_len = pack->skb->len;
	u16 ctl;

	ctl = b43_piotx_read16(q, B43_PIO_TXCTL);
	ctl |= B43_PIO_TXCTL_FREADY;
	ctl &= ~B43_PIO_TXCTL_EOF;

	/* Transfer the header data. */
	tx_write_2byte_queue(q, &ctl, hdr, hdrlen);
	/* Transfer the frame data. */
	tx_write_2byte_queue(q, &ctl, frame, frame_len);

	/* Mark end-of-frame to start transmission. */
	ctl |= B43_PIO_TXCTL_EOF;
	b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
}
384
385static inline void tx_write_4byte_queue(struct b43_pio_txqueue *q,
386 u32 *ctl,
387 const void *_data,
388 unsigned int data_len)
389{
390 const u8 *data = _data;
391 unsigned int i;
392 u32 value;
393 bool ctl_changed = 0;
394
395 *ctl |= B43_PIO8_TXCTL_0_7 | B43_PIO8_TXCTL_8_15 |
396 B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_24_31;
397 b43_piotx_write32(q, B43_PIO8_TXCTL, *ctl);
398 for (i = 0; i < data_len; i += 4) {
399 value = data[i];
400 if (i + 1 < data_len) {
401 value |= (u32)(data[i + 1]) << 8;
402 } else {
403 *ctl &= ~B43_PIO8_TXCTL_8_15;
404 ctl_changed = 1;
405 }
406 if (i + 2 < data_len) {
407 value |= (u32)(data[i + 2]) << 16;
408 } else {
409 *ctl &= ~B43_PIO8_TXCTL_16_23;
410 ctl_changed = 1;
411 }
412 if (i + 3 < data_len) {
413 value |= (u32)(data[i + 3]) << 24;
414 } else {
415 *ctl &= ~B43_PIO8_TXCTL_24_31;
416 ctl_changed = 1;
417 }
418 if (ctl_changed)
419 b43_piotx_write32(q, B43_PIO8_TXCTL, *ctl);
420 b43_piotx_write32(q, B43_PIO8_TXDATA, value);
421 }
422}
423
424static void pio_tx_frame_4byte_queue(struct b43_pio_txpacket *pack,
425 const u8 *hdr, unsigned int hdrlen)
426{
427 struct b43_pio_txqueue *q = pack->queue;
428 const char *frame = pack->skb->data;
429 unsigned int frame_len = pack->skb->len;
430 u32 ctl;
431
432 ctl = b43_piotx_read32(q, B43_PIO8_TXCTL);
433 ctl |= B43_PIO8_TXCTL_FREADY;
434 ctl &= ~B43_PIO8_TXCTL_EOF;
435
436 /* Transfer the header data. */
437 tx_write_4byte_queue(q, &ctl, hdr, hdrlen);
438 /* Transfer the frame data. */
439 tx_write_4byte_queue(q, &ctl, frame, frame_len);
440
441 ctl |= B43_PIO8_TXCTL_EOF;
442 b43_piotx_write32(q, B43_PIO_TXCTL, ctl);
443}
444
/* Take a free packet slot, generate the device TX header and push the
 * frame into the hardware FIFO. Returns 0 on success or a negative
 * error from b43_generate_txhdr() (e.g. -ENOKEY).
 * Caller must hold q->lock and must have verified that a free slot
 * and enough FIFO buffer space exist. */
static int pio_tx_frame(struct b43_pio_txqueue *q,
			struct sk_buff *skb,
			struct ieee80211_tx_control *ctl)
{
	struct b43_pio_txpacket *pack;
	struct b43_txhdr txhdr;
	u16 cookie;
	int err;
	unsigned int hdrlen;

	B43_WARN_ON(list_empty(&q->packets_list));
	/* Grab the first free packet slot. */
	pack = list_entry(q->packets_list.next,
			  struct b43_pio_txpacket, list);
	memset(&pack->txstat, 0, sizeof(pack->txstat));
	memcpy(&pack->txstat.control, ctl, sizeof(*ctl));

	cookie = generate_cookie(q, pack);
	hdrlen = b43_txhdr_size(q->dev);
	err = b43_generate_txhdr(q->dev, (u8 *)&txhdr, skb->data,
				 skb->len, ctl, cookie);
	if (err)
		return err;

	if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(q->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}

	pack->skb = skb;
	/* Rev >= 8 cores use the 4-byte-wide FIFO. */
	if (q->rev >= 8)
		pio_tx_frame_4byte_queue(pack, (const u8 *)&txhdr, hdrlen);
	else
		pio_tx_frame_2byte_queue(pack, (const u8 *)&txhdr, hdrlen);

	/* Remove it from the list of available packet slots.
	 * It will be put back when we receive the status report. */
	list_del(&pack->list);

	/* Update the queue statistics. */
	q->buffer_used += roundup(skb->len + hdrlen, 4);
	q->free_packet_slots -= 1;

	return 0;
}
491
/* mac80211 TX entry point for PIO mode.
 * Selects the target queue (multicast after DTIM, otherwise by
 * priority), checks buffer/slot availability, and transmits the frame.
 * Returns 0 on success (including a deliberately dropped -ENOKEY
 * frame), -ENOBUFS if the frame can never fit, or -EBUSY if the queue
 * is temporarily full (in which case the mac80211 queue is stopped). */
int b43_pio_tx(struct b43_wldev *dev,
	       struct sk_buff *skb, struct ieee80211_tx_control *ctl)
{
	struct b43_pio_txqueue *q;
	struct ieee80211_hdr *hdr;
	unsigned long flags;
	unsigned int hdrlen, total_len;
	int err = 0;

	hdr = (struct ieee80211_hdr *)skb->data;
	if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
		/* The multicast queue will be sent after the DTIM. */
		q = dev->pio.tx_queue_mcast;
		/* Set the frame More-Data bit. Ucode will clear it
		 * for us on the last frame. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		q = select_queue_by_priority(dev, ctl->queue);
	}

	spin_lock_irqsave(&q->lock, flags);

	/* Total FIFO footprint: frame + device header, 4-byte aligned. */
	hdrlen = b43_txhdr_size(dev);
	total_len = roundup(skb->len + hdrlen, 4);

	if (unlikely(total_len > q->buffer_size)) {
		/* The frame can never fit this queue — hard error. */
		err = -ENOBUFS;
		b43dbg(dev->wl, "PIO: TX packet longer than queue.\n");
		goto out_unlock;
	}
	if (unlikely(q->free_packet_slots == 0)) {
		err = -ENOBUFS;
		b43warn(dev->wl, "PIO: TX packet overflow.\n");
		goto out_unlock;
	}
	B43_WARN_ON(q->buffer_used > q->buffer_size);

	if (total_len > (q->buffer_size - q->buffer_used)) {
		/* Not enough memory on the queue. */
		err = -EBUSY;
		ieee80211_stop_queue(dev->wl->hw, ctl->queue);
		q->stopped = 1;
		goto out_unlock;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The mac80211-queue to b43-queue
	 * mapping is static, so we don't need to store it per frame. */
	q->queue_prio = ctl->queue;

	err = pio_tx_frame(q, skb, ctl);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out_unlock;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "PIO transmission failure\n");
		goto out_unlock;
	}
	q->nr_tx_packets++;

	B43_WARN_ON(q->buffer_used > q->buffer_size);
	/* Stop the queue proactively if even a minimum-sized frame
	 * would no longer fit, or if all packet slots are used up. */
	if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
	    (q->free_packet_slots == 0)) {
		/* The queue is full. */
		ieee80211_stop_queue(dev->wl->hw, ctl->queue);
		q->stopped = 1;
	}

out_unlock:
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}
570
/* Handle a hardware TX status report: locate the packet by cookie,
 * report the status to mac80211, return the packet slot to the free
 * list and release its FIFO buffer space. Wakes the mac80211 queue
 * again if it had been stopped for lack of space.
 * Called with IRQs disabled. */
void b43_pio_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	struct b43_pio_txqueue *q;
	struct b43_pio_txpacket *pack = NULL;
	unsigned int total_len;

	q = parse_cookie(dev, status->cookie, &pack);
	if (unlikely(!q))
		return;
	B43_WARN_ON(!pack);

	spin_lock(&q->lock); /* IRQs are already disabled. */

	b43_fill_txstatus_report(&(pack->txstat), status);

	/* Release the FIFO buffer space this frame occupied
	 * (must mirror the accounting in pio_tx_frame()). */
	total_len = pack->skb->len + b43_txhdr_size(dev);
	total_len = roundup(total_len, 4);
	q->buffer_used -= total_len;
	q->free_packet_slots += 1;

	ieee80211_tx_status_irqsafe(dev->wl->hw, pack->skb,
				    &(pack->txstat));
	pack->skb = NULL;
	list_add(&pack->list, &q->packets_list);

	if (q->stopped) {
		ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
		q->stopped = 0;
	}

	spin_unlock(&q->lock);
}
605
606void b43_pio_get_tx_stats(struct b43_wldev *dev,
607 struct ieee80211_tx_queue_stats *stats)
608{
609 const int nr_queues = dev->wl->hw->queues;
610 struct b43_pio_txqueue *q;
611 struct ieee80211_tx_queue_stats_data *data;
612 unsigned long flags;
613 int i;
614
615 for (i = 0; i < nr_queues; i++) {
616 data = &(stats->data[i]);
617 q = select_queue_by_priority(dev, i);
618
619 spin_lock_irqsave(&q->lock, flags);
620 data->len = B43_PIO_MAX_NR_TXPACKETS - q->free_packet_slots;
621 data->limit = B43_PIO_MAX_NR_TXPACKETS;
622 data->count = q->nr_tx_packets;
623 spin_unlock_irqrestore(&q->lock, flags);
624 }
625}
626
627/* Returns whether we should fetch another frame. */
628static bool pio_rx_frame(struct b43_pio_rxqueue *q)
629{
630 struct b43_rxhdr_fw4 rxhdr;
631 u16 len;
632 u32 macstat;
633 unsigned int i, padding;
634 struct sk_buff *skb;
635 const char *err_msg = NULL;
636
637 memset(&rxhdr, 0, sizeof(rxhdr));
638
639 /* Check if we have data and wait for it to get ready. */
640 if (q->rev >= 8) {
641 u32 ctl;
642
643 ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
644 if (!(ctl & B43_PIO8_RXCTL_FRAMERDY))
645 return 0;
646 b43_piorx_write32(q, B43_PIO8_RXCTL,
647 B43_PIO8_RXCTL_FRAMERDY);
648 for (i = 0; i < 10; i++) {
649 ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
650 if (ctl & B43_PIO8_RXCTL_DATARDY)
651 goto data_ready;
652 udelay(10);
653 }
654 } else {
655 u16 ctl;
656
657 ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
658 if (!(ctl & B43_PIO_RXCTL_FRAMERDY))
659 return 0;
660 b43_piorx_write16(q, B43_PIO_RXCTL,
661 B43_PIO_RXCTL_FRAMERDY);
662 for (i = 0; i < 10; i++) {
663 ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
664 if (ctl & B43_PIO_RXCTL_DATARDY)
665 goto data_ready;
666 udelay(10);
667 }
668 }
669 b43dbg(q->dev->wl, "PIO RX timed out\n");
670 return 1;
671data_ready:
672
673 /* Get the preamble (RX header) */
674 if (q->rev >= 8) {
675 u32 *preamble = (u32 *)&rxhdr;
676 u32 value;
677
678 for (i = 0; i < sizeof(rxhdr); i += 4) {
679 value = b43_piorx_read32(q, B43_PIO8_RXDATA);
680 preamble[i / 4] = cpu_to_le32(value);
681 }
682 } else {
683 u16 *preamble = (u16 *)&rxhdr;
684 u16 value;
685
686 for (i = 0; i < sizeof(rxhdr); i += 2) {
687 value = b43_piorx_read16(q, B43_PIO_RXDATA);
688 preamble[i / 2] = cpu_to_le16(value);
689 }
690 }
691 /* Sanity checks. */
692 len = le16_to_cpu(rxhdr.frame_len);
693 if (unlikely(len > 0x700)) {
694 err_msg = "len > 0x700";
695 goto rx_error;
696 }
697 if (unlikely(len == 0)) {
698 err_msg = "len == 0";
699 goto rx_error;
700 }
701
702 macstat = le32_to_cpu(rxhdr.mac_status);
703 if (macstat & B43_RX_MAC_FCSERR) {
704 if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
705 /* Drop frames with failed FCS. */
706 err_msg = "Frame FCS error";
707 goto rx_error;
708 }
709 }
710
711 /* We always pad 2 bytes, as that's what upstream code expects
712 * due to the RX-header being 30 bytes. In case the frame is
713 * unaligned, we pad another 2 bytes. */
714 padding = (macstat & B43_RX_MAC_PADDING) ? 2 : 0;
715 skb = dev_alloc_skb(len + padding + 2);
716 if (unlikely(!skb)) {
717 err_msg = "Out of memory";
718 goto rx_error;
719 }
720 skb_reserve(skb, 2);
721 skb_put(skb, len + padding);
722 if (q->rev >= 8) {
723 u32 value;
724
725 for (i = padding; i < len + padding; i += 4) {
726 value = b43_piorx_read32(q, B43_PIO8_RXDATA);
727 skb->data[i] = value;
728 if ((i + 1) < (len + padding))
729 skb->data[i + 1] = value >> 8;
730 if ((i + 2) < (len + padding))
731 skb->data[i + 2] = value >> 16;
732 if ((i + 3) < (len + padding))
733 skb->data[i + 3] = value >> 24;
734 }
735 } else {
736 u16 value;
737
738 for (i = padding; i < len + padding; i += 2) {
739 value = b43_piorx_read16(q, B43_PIO_RXDATA);
740 skb->data[i] = value;
741 if ((i + 1) < (len + padding))
742 skb->data[i + 1] = value >> 8;
743 }
744 }
745
746 b43_rx(q->dev, skb, &rxhdr);
747
748 return 1;
749
750rx_error:
751 if (err_msg)
752 b43dbg(q->dev->wl, "PIO RX error: %s\n", err_msg);
753 b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY);
754 return 1;
755}
756
757/* RX workqueue. We can sleep, yay! */
758static void b43_pio_rx_work(struct work_struct *work)
759{
760 struct b43_pio_rxqueue *q = container_of(work, struct b43_pio_rxqueue,
761 rx_work);
762 unsigned int budget = 50;
763 bool stop;
764
765 do {
766 spin_lock_irq(&q->lock);
767 stop = (pio_rx_frame(q) == 0);
768 spin_unlock_irq(&q->lock);
769 cond_resched();
770 if (stop)
771 break;
772 } while (--budget);
773}
774
/* Schedule RX processing. Called with IRQs disabled (from the
 * interrupt path), so the actual work is deferred to a workqueue. */
void b43_pio_rx(struct b43_pio_rxqueue *q)
{
	/* Due to latency issues we must run the RX path in
	 * a workqueue to be able to schedule between packets. */
	queue_work(q->dev->wl->hw->workqueue, &q->rx_work);
}
782
/* Request hardware TX suspend on one queue by setting the SUSPREQ
 * bit in the queue's TX control register (32-bit register on
 * rev >= 8 cores, 16-bit otherwise). */
static void b43_pio_tx_suspend_queue(struct b43_pio_txqueue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->rev >= 8) {
		b43_piotx_write32(q, B43_PIO8_TXCTL,
				  b43_piotx_read32(q, B43_PIO8_TXCTL)
				  | B43_PIO8_TXCTL_SUSPREQ);
	} else {
		b43_piotx_write16(q, B43_PIO_TXCTL,
				  b43_piotx_read16(q, B43_PIO_TXCTL)
				  | B43_PIO_TXCTL_SUSPREQ);
	}
	spin_unlock_irqrestore(&q->lock, flags);
}
799
/* Clear the SUSPREQ bit in the queue's TX control register to
 * resume hardware TX on this queue (32-bit register on rev >= 8
 * cores, 16-bit otherwise). */
static void b43_pio_tx_resume_queue(struct b43_pio_txqueue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->rev >= 8) {
		b43_piotx_write32(q, B43_PIO8_TXCTL,
				  b43_piotx_read32(q, B43_PIO8_TXCTL)
				  & ~B43_PIO8_TXCTL_SUSPREQ);
	} else {
		b43_piotx_write16(q, B43_PIO_TXCTL,
				  b43_piotx_read16(q, B43_PIO_TXCTL)
				  & ~B43_PIO_TXCTL_SUSPREQ);
	}
	spin_unlock_irqrestore(&q->lock, flags);
}
816
/* Suspend TX on all PIO queues. The device is forced awake first
 * so the register writes reach the hardware. */
void b43_pio_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_BK);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_BE);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_VI);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_VO);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_mcast);
}
826
/* Resume TX on all PIO queues (reverse order of the suspend) and
 * drop the forced-awake power-saving state again. */
void b43_pio_tx_resume(struct b43_wldev *dev)
{
	b43_pio_tx_resume_queue(dev->pio.tx_queue_mcast);
	b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_VO);
	b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_VI);
	b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_BE);
	b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}