diff options
author | Sujith <Sujith.Manoharan@atheros.com> | 2008-12-07 11:14:03 -0500 |
---|---|---|
committer | John W. Linville <linville@tuxdriver.com> | 2008-12-12 14:02:17 -0500 |
commit | b77f483fcf0579de28873828897f53371a33a0ea (patch) | |
tree | a08d0f942d4b5d0cd8a7893753f9b7554ebc89e4 /drivers/net/wireless/ath9k/recv.c | |
parent | 59651e89187293e88863891b821c7379391ef75c (diff) |
ath9k: Refactor struct ath_softc
Split ath_softc into smaller structures for rx, tx and beacon
handling.
Signed-off-by: Sujith <Sujith.Manoharan@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/ath9k/recv.c')
-rw-r--r-- | drivers/net/wireless/ath9k/recv.c | 107 |
1 files changed, 53 insertions, 54 deletions
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c index b182ef570f88..cb449f0b4171 100644 --- a/drivers/net/wireless/ath9k/recv.c +++ b/drivers/net/wireless/ath9k/recv.c | |||
@@ -41,20 +41,19 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf) | |||
41 | ASSERT(skb != NULL); | 41 | ASSERT(skb != NULL); |
42 | ds->ds_vdata = skb->data; | 42 | ds->ds_vdata = skb->data; |
43 | 43 | ||
44 | /* setup rx descriptors. The sc_rxbufsize here tells the hardware | 44 | /* setup rx descriptors. The rx.bufsize here tells the hardware |
45 | * how much data it can DMA to us and that we are prepared | 45 | * how much data it can DMA to us and that we are prepared |
46 | * to process */ | 46 | * to process */ |
47 | ath9k_hw_setuprxdesc(ah, | 47 | ath9k_hw_setuprxdesc(ah, ds, |
48 | ds, | 48 | sc->rx.bufsize, |
49 | sc->sc_rxbufsize, | ||
50 | 0); | 49 | 0); |
51 | 50 | ||
52 | if (sc->sc_rxlink == NULL) | 51 | if (sc->rx.rxlink == NULL) |
53 | ath9k_hw_putrxbuf(ah, bf->bf_daddr); | 52 | ath9k_hw_putrxbuf(ah, bf->bf_daddr); |
54 | else | 53 | else |
55 | *sc->sc_rxlink = bf->bf_daddr; | 54 | *sc->rx.rxlink = bf->bf_daddr; |
56 | 55 | ||
57 | sc->sc_rxlink = &ds->ds_link; | 56 | sc->rx.rxlink = &ds->ds_link; |
58 | ath9k_hw_rxena(ah); | 57 | ath9k_hw_rxena(ah); |
59 | } | 58 | } |
60 | 59 | ||
@@ -62,8 +61,8 @@ static void ath_setdefantenna(struct ath_softc *sc, u32 antenna) | |||
62 | { | 61 | { |
63 | /* XXX block beacon interrupts */ | 62 | /* XXX block beacon interrupts */ |
64 | ath9k_hw_setantenna(sc->sc_ah, antenna); | 63 | ath9k_hw_setantenna(sc->sc_ah, antenna); |
65 | sc->sc_defant = antenna; | 64 | sc->rx.defant = antenna; |
66 | sc->sc_rxotherant = 0; | 65 | sc->rx.rxotherant = 0; |
67 | } | 66 | } |
68 | 67 | ||
69 | /* | 68 | /* |
@@ -272,20 +271,20 @@ int ath_rx_init(struct ath_softc *sc, int nbufs) | |||
272 | int error = 0; | 271 | int error = 0; |
273 | 272 | ||
274 | do { | 273 | do { |
275 | spin_lock_init(&sc->sc_rxflushlock); | 274 | spin_lock_init(&sc->rx.rxflushlock); |
276 | sc->sc_flags &= ~SC_OP_RXFLUSH; | 275 | sc->sc_flags &= ~SC_OP_RXFLUSH; |
277 | spin_lock_init(&sc->sc_rxbuflock); | 276 | spin_lock_init(&sc->rx.rxbuflock); |
278 | 277 | ||
279 | sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN, | 278 | sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN, |
280 | min(sc->sc_cachelsz, | 279 | min(sc->sc_cachelsz, |
281 | (u16)64)); | 280 | (u16)64)); |
282 | 281 | ||
283 | DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n", | 282 | DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n", |
284 | sc->sc_cachelsz, sc->sc_rxbufsize); | 283 | sc->sc_cachelsz, sc->rx.bufsize); |
285 | 284 | ||
286 | /* Initialize rx descriptors */ | 285 | /* Initialize rx descriptors */ |
287 | 286 | ||
288 | error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf, | 287 | error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf, |
289 | "rx", nbufs, 1); | 288 | "rx", nbufs, 1); |
290 | if (error != 0) { | 289 | if (error != 0) { |
291 | DPRINTF(sc, ATH_DBG_FATAL, | 290 | DPRINTF(sc, ATH_DBG_FATAL, |
@@ -293,8 +292,8 @@ int ath_rx_init(struct ath_softc *sc, int nbufs) | |||
293 | break; | 292 | break; |
294 | } | 293 | } |
295 | 294 | ||
296 | list_for_each_entry(bf, &sc->sc_rxbuf, list) { | 295 | list_for_each_entry(bf, &sc->rx.rxbuf, list) { |
297 | skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize); | 296 | skb = ath_rxbuf_alloc(sc, sc->rx.bufsize); |
298 | if (skb == NULL) { | 297 | if (skb == NULL) { |
299 | error = -ENOMEM; | 298 | error = -ENOMEM; |
300 | break; | 299 | break; |
@@ -302,8 +301,8 @@ int ath_rx_init(struct ath_softc *sc, int nbufs) | |||
302 | 301 | ||
303 | bf->bf_mpdu = skb; | 302 | bf->bf_mpdu = skb; |
304 | bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data, | 303 | bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data, |
305 | sc->sc_rxbufsize, | 304 | sc->rx.bufsize, |
306 | PCI_DMA_FROMDEVICE); | 305 | PCI_DMA_FROMDEVICE); |
307 | if (unlikely(pci_dma_mapping_error(sc->pdev, | 306 | if (unlikely(pci_dma_mapping_error(sc->pdev, |
308 | bf->bf_buf_addr))) { | 307 | bf->bf_buf_addr))) { |
309 | dev_kfree_skb_any(skb); | 308 | dev_kfree_skb_any(skb); |
@@ -315,7 +314,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs) | |||
315 | } | 314 | } |
316 | bf->bf_dmacontext = bf->bf_buf_addr; | 315 | bf->bf_dmacontext = bf->bf_buf_addr; |
317 | } | 316 | } |
318 | sc->sc_rxlink = NULL; | 317 | sc->rx.rxlink = NULL; |
319 | 318 | ||
320 | } while (0); | 319 | } while (0); |
321 | 320 | ||
@@ -330,14 +329,14 @@ void ath_rx_cleanup(struct ath_softc *sc) | |||
330 | struct sk_buff *skb; | 329 | struct sk_buff *skb; |
331 | struct ath_buf *bf; | 330 | struct ath_buf *bf; |
332 | 331 | ||
333 | list_for_each_entry(bf, &sc->sc_rxbuf, list) { | 332 | list_for_each_entry(bf, &sc->rx.rxbuf, list) { |
334 | skb = bf->bf_mpdu; | 333 | skb = bf->bf_mpdu; |
335 | if (skb) | 334 | if (skb) |
336 | dev_kfree_skb(skb); | 335 | dev_kfree_skb(skb); |
337 | } | 336 | } |
338 | 337 | ||
339 | if (sc->sc_rxdma.dd_desc_len != 0) | 338 | if (sc->rx.rxdma.dd_desc_len != 0) |
340 | ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); | 339 | ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf); |
341 | } | 340 | } |
342 | 341 | ||
343 | /* | 342 | /* |
@@ -375,7 +374,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc) | |||
375 | 374 | ||
376 | /* Can't set HOSTAP into promiscuous mode */ | 375 | /* Can't set HOSTAP into promiscuous mode */ |
377 | if (((sc->sc_ah->ah_opmode != NL80211_IFTYPE_AP) && | 376 | if (((sc->sc_ah->ah_opmode != NL80211_IFTYPE_AP) && |
378 | (sc->rx_filter & FIF_PROMISC_IN_BSS)) || | 377 | (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) || |
379 | (sc->sc_ah->ah_opmode == NL80211_IFTYPE_MONITOR)) { | 378 | (sc->sc_ah->ah_opmode == NL80211_IFTYPE_MONITOR)) { |
380 | rfilt |= ATH9K_RX_FILTER_PROM; | 379 | rfilt |= ATH9K_RX_FILTER_PROM; |
381 | /* ??? To prevent from sending ACK */ | 380 | /* ??? To prevent from sending ACK */ |
@@ -401,25 +400,25 @@ int ath_startrecv(struct ath_softc *sc) | |||
401 | struct ath_hal *ah = sc->sc_ah; | 400 | struct ath_hal *ah = sc->sc_ah; |
402 | struct ath_buf *bf, *tbf; | 401 | struct ath_buf *bf, *tbf; |
403 | 402 | ||
404 | spin_lock_bh(&sc->sc_rxbuflock); | 403 | spin_lock_bh(&sc->rx.rxbuflock); |
405 | if (list_empty(&sc->sc_rxbuf)) | 404 | if (list_empty(&sc->rx.rxbuf)) |
406 | goto start_recv; | 405 | goto start_recv; |
407 | 406 | ||
408 | sc->sc_rxlink = NULL; | 407 | sc->rx.rxlink = NULL; |
409 | list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) { | 408 | list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) { |
410 | ath_rx_buf_link(sc, bf); | 409 | ath_rx_buf_link(sc, bf); |
411 | } | 410 | } |
412 | 411 | ||
413 | /* We could have deleted elements so the list may be empty now */ | 412 | /* We could have deleted elements so the list may be empty now */ |
414 | if (list_empty(&sc->sc_rxbuf)) | 413 | if (list_empty(&sc->rx.rxbuf)) |
415 | goto start_recv; | 414 | goto start_recv; |
416 | 415 | ||
417 | bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list); | 416 | bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); |
418 | ath9k_hw_putrxbuf(ah, bf->bf_daddr); | 417 | ath9k_hw_putrxbuf(ah, bf->bf_daddr); |
419 | ath9k_hw_rxena(ah); | 418 | ath9k_hw_rxena(ah); |
420 | 419 | ||
421 | start_recv: | 420 | start_recv: |
422 | spin_unlock_bh(&sc->sc_rxbuflock); | 421 | spin_unlock_bh(&sc->rx.rxbuflock); |
423 | ath_opmode_init(sc); | 422 | ath_opmode_init(sc); |
424 | ath9k_hw_startpcureceive(ah); | 423 | ath9k_hw_startpcureceive(ah); |
425 | 424 | ||
@@ -435,25 +434,25 @@ bool ath_stoprecv(struct ath_softc *sc) | |||
435 | ath9k_hw_setrxfilter(ah, 0); | 434 | ath9k_hw_setrxfilter(ah, 0); |
436 | stopped = ath9k_hw_stopdmarecv(ah); | 435 | stopped = ath9k_hw_stopdmarecv(ah); |
437 | mdelay(3); /* 3ms is long enough for 1 frame */ | 436 | mdelay(3); /* 3ms is long enough for 1 frame */ |
438 | sc->sc_rxlink = NULL; | 437 | sc->rx.rxlink = NULL; |
439 | 438 | ||
440 | return stopped; | 439 | return stopped; |
441 | } | 440 | } |
442 | 441 | ||
443 | void ath_flushrecv(struct ath_softc *sc) | 442 | void ath_flushrecv(struct ath_softc *sc) |
444 | { | 443 | { |
445 | spin_lock_bh(&sc->sc_rxflushlock); | 444 | spin_lock_bh(&sc->rx.rxflushlock); |
446 | sc->sc_flags |= SC_OP_RXFLUSH; | 445 | sc->sc_flags |= SC_OP_RXFLUSH; |
447 | ath_rx_tasklet(sc, 1); | 446 | ath_rx_tasklet(sc, 1); |
448 | sc->sc_flags &= ~SC_OP_RXFLUSH; | 447 | sc->sc_flags &= ~SC_OP_RXFLUSH; |
449 | spin_unlock_bh(&sc->sc_rxflushlock); | 448 | spin_unlock_bh(&sc->rx.rxflushlock); |
450 | } | 449 | } |
451 | 450 | ||
452 | int ath_rx_tasklet(struct ath_softc *sc, int flush) | 451 | int ath_rx_tasklet(struct ath_softc *sc, int flush) |
453 | { | 452 | { |
454 | #define PA2DESC(_sc, _pa) \ | 453 | #define PA2DESC(_sc, _pa) \ |
455 | ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ | 454 | ((struct ath_desc *)((caddr_t)(_sc)->rx.rxdma.dd_desc + \ |
456 | ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) | 455 | ((_pa) - (_sc)->rx.rxdma.dd_desc_paddr))) |
457 | 456 | ||
458 | struct ath_buf *bf; | 457 | struct ath_buf *bf; |
459 | struct ath_desc *ds; | 458 | struct ath_desc *ds; |
@@ -465,19 +464,19 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush) | |||
465 | bool decrypt_error = false; | 464 | bool decrypt_error = false; |
466 | u8 keyix; | 465 | u8 keyix; |
467 | 466 | ||
468 | spin_lock_bh(&sc->sc_rxbuflock); | 467 | spin_lock_bh(&sc->rx.rxbuflock); |
469 | 468 | ||
470 | do { | 469 | do { |
471 | /* If handling rx interrupt and flush is in progress => exit */ | 470 | /* If handling rx interrupt and flush is in progress => exit */ |
472 | if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0)) | 471 | if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0)) |
473 | break; | 472 | break; |
474 | 473 | ||
475 | if (list_empty(&sc->sc_rxbuf)) { | 474 | if (list_empty(&sc->rx.rxbuf)) { |
476 | sc->sc_rxlink = NULL; | 475 | sc->rx.rxlink = NULL; |
477 | break; | 476 | break; |
478 | } | 477 | } |
479 | 478 | ||
480 | bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list); | 479 | bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); |
481 | ds = bf->bf_desc; | 480 | ds = bf->bf_desc; |
482 | 481 | ||
483 | /* | 482 | /* |
@@ -499,8 +498,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush) | |||
499 | struct ath_buf *tbf; | 498 | struct ath_buf *tbf; |
500 | struct ath_desc *tds; | 499 | struct ath_desc *tds; |
501 | 500 | ||
502 | if (list_is_last(&bf->list, &sc->sc_rxbuf)) { | 501 | if (list_is_last(&bf->list, &sc->rx.rxbuf)) { |
503 | sc->sc_rxlink = NULL; | 502 | sc->rx.rxlink = NULL; |
504 | break; | 503 | break; |
505 | } | 504 | } |
506 | 505 | ||
@@ -540,7 +539,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush) | |||
540 | goto requeue; | 539 | goto requeue; |
541 | 540 | ||
542 | /* The status portion of the descriptor could get corrupted. */ | 541 | /* The status portion of the descriptor could get corrupted. */ |
543 | if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen) | 542 | if (sc->rx.bufsize < ds->ds_rxstat.rs_datalen) |
544 | goto requeue; | 543 | goto requeue; |
545 | 544 | ||
546 | if (!ath_rx_prepare(skb, ds, &rx_status, &decrypt_error, sc)) | 545 | if (!ath_rx_prepare(skb, ds, &rx_status, &decrypt_error, sc)) |
@@ -548,21 +547,21 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush) | |||
548 | 547 | ||
549 | /* Ensure we always have an skb to requeue once we are done | 548 | /* Ensure we always have an skb to requeue once we are done |
550 | * processing the current buffer's skb */ | 549 | * processing the current buffer's skb */ |
551 | requeue_skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize); | 550 | requeue_skb = ath_rxbuf_alloc(sc, sc->rx.bufsize); |
552 | 551 | ||
553 | /* If there is no memory we ignore the current RX'd frame, | 552 | /* If there is no memory we ignore the current RX'd frame, |
554 | * tell hardware it can give us a new frame using the old | 553 | * tell hardware it can give us a new frame using the old |
555 | * skb and put it at the tail of the sc->sc_rxbuf list for | 554 | * skb and put it at the tail of the sc->rx.rxbuf list for |
556 | * processing. */ | 555 | * processing. */ |
557 | if (!requeue_skb) | 556 | if (!requeue_skb) |
558 | goto requeue; | 557 | goto requeue; |
559 | 558 | ||
560 | pci_dma_sync_single_for_cpu(sc->pdev, | 559 | /* Sync and unmap the frame */ |
561 | bf->bf_buf_addr, | 560 | pci_dma_sync_single_for_cpu(sc->pdev, bf->bf_buf_addr, |
562 | sc->sc_rxbufsize, | 561 | sc->rx.bufsize, |
563 | PCI_DMA_FROMDEVICE); | 562 | PCI_DMA_FROMDEVICE); |
564 | pci_unmap_single(sc->pdev, bf->bf_buf_addr, | 563 | pci_unmap_single(sc->pdev, bf->bf_buf_addr, |
565 | sc->sc_rxbufsize, | 564 | sc->rx.bufsize, |
566 | PCI_DMA_FROMDEVICE); | 565 | PCI_DMA_FROMDEVICE); |
567 | 566 | ||
568 | skb_put(skb, ds->ds_rxstat.rs_datalen); | 567 | skb_put(skb, ds->ds_rxstat.rs_datalen); |
@@ -596,7 +595,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush) | |||
596 | /* We will now give hardware our shiny new allocated skb */ | 595 | /* We will now give hardware our shiny new allocated skb */ |
597 | bf->bf_mpdu = requeue_skb; | 596 | bf->bf_mpdu = requeue_skb; |
598 | bf->bf_buf_addr = pci_map_single(sc->pdev, requeue_skb->data, | 597 | bf->bf_buf_addr = pci_map_single(sc->pdev, requeue_skb->data, |
599 | sc->sc_rxbufsize, | 598 | sc->rx.bufsize, |
600 | PCI_DMA_FROMDEVICE); | 599 | PCI_DMA_FROMDEVICE); |
601 | if (unlikely(pci_dma_mapping_error(sc->pdev, | 600 | if (unlikely(pci_dma_mapping_error(sc->pdev, |
602 | bf->bf_buf_addr))) { | 601 | bf->bf_buf_addr))) { |
@@ -612,18 +611,18 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush) | |||
612 | * change the default rx antenna if rx diversity chooses the | 611 | * change the default rx antenna if rx diversity chooses the |
613 | * other antenna 3 times in a row. | 612 | * other antenna 3 times in a row. |
614 | */ | 613 | */ |
615 | if (sc->sc_defant != ds->ds_rxstat.rs_antenna) { | 614 | if (sc->rx.defant != ds->ds_rxstat.rs_antenna) { |
616 | if (++sc->sc_rxotherant >= 3) | 615 | if (++sc->rx.rxotherant >= 3) |
617 | ath_setdefantenna(sc, ds->ds_rxstat.rs_antenna); | 616 | ath_setdefantenna(sc, ds->ds_rxstat.rs_antenna); |
618 | } else { | 617 | } else { |
619 | sc->sc_rxotherant = 0; | 618 | sc->rx.rxotherant = 0; |
620 | } | 619 | } |
621 | requeue: | 620 | requeue: |
622 | list_move_tail(&bf->list, &sc->sc_rxbuf); | 621 | list_move_tail(&bf->list, &sc->rx.rxbuf); |
623 | ath_rx_buf_link(sc, bf); | 622 | ath_rx_buf_link(sc, bf); |
624 | } while (1); | 623 | } while (1); |
625 | 624 | ||
626 | spin_unlock_bh(&sc->sc_rxbuflock); | 625 | spin_unlock_bh(&sc->rx.rxbuflock); |
627 | 626 | ||
628 | return 0; | 627 | return 0; |
629 | #undef PA2DESC | 628 | #undef PA2DESC |