author		Michael Buesch <mb@bu3sch.de>	2008-03-29 16:01:16 -0400
committer	John W. Linville <linville@tuxdriver.com>	2008-04-08 15:05:56 -0400
commit		5100d5ac81b9330dc57e35adbe50923ba6107b8f
tree		48224236b50703606c97c05ec077fde4880fc3b9 /drivers/net/wireless/b43/dma.c
parent		3109ece1114293b8201d9c140d02d7ce9a9fa387
b43: Add PIO support for PCMCIA devices
This adds PIO support back (D'oh!) for PCMCIA devices.

This is a complete rewrite of the old PIO code. It does actually work,
and we get reasonable performance out of it on a modern machine. On a
PowerBook G4 I get a few MBit for TX and a few more for RX. So it
doesn't work as well as DMA (of course), but it's a _lot_ faster than
the old PIO code (which only got a few kBit). The limiting factor is
the host CPU speed, so CPU usage will hit 100% when the network
interface is heavily loaded. A voluntary preemption point in the RX
path makes sure desktop latency isn't hurt.

PIO is needed for 16-bit PCMCIA devices, as we really don't want to
poke at the braindead DMA mechanisms on PCMCIA sockets. Additionally,
not all PCMCIA sockets actually support DMA in 16-bit mode (mine
doesn't).

Signed-off-by: Michael Buesch <mb@bu3sch.de>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
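For context, a voluntary preemption point in a process-context RX loop
usually amounts to a cond_resched() call between frames. The sketch
below is illustrative only and not taken from this patch; the
rx_frame_pending() and pio_rx_one_frame() helpers are hypothetical:

	#include <linux/sched.h>

	/* Minimal sketch: drain pending RX frames, yielding the CPU
	 * between frames so a fully loaded interface does not hurt
	 * desktop latency. Assumes process context (e.g. a workqueue),
	 * since cond_resched() may sleep. */
	static void pio_rx_loop(struct b43_wldev *dev)
	{
		while (rx_frame_pending(dev)) {	/* hypothetical helper */
			pio_rx_one_frame(dev);	/* hypothetical helper */
			cond_resched();	/* voluntary preemption point */
		}
	}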
Diffstat (limited to 'drivers/net/wireless/b43/dma.c')
-rw-r--r--	drivers/net/wireless/b43/dma.c	120
1 file changed, 64 insertions(+), 56 deletions(-)
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 70db0570adc5..f1b983cb9c1f 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -550,7 +550,6 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
 			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
 {
 	struct b43_rxhdr_fw4 *rxhdr;
-	struct b43_hwtxstatus *txstat;
 	dma_addr_t dmaaddr;
 	struct sk_buff *skb;
 
@@ -586,8 +585,6 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
 
 	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
 	rxhdr->frame_len = 0;
-	txstat = (struct b43_hwtxstatus *)(skb->data);
-	txstat->cookie = 0;
 
 	return 0;
 }
@@ -776,6 +773,18 @@ static u64 supported_dma_mask(struct b43_wldev *dev)
 	return DMA_30BIT_MASK;
 }
 
+static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
+{
+	if (dmamask == DMA_30BIT_MASK)
+		return B43_DMA_30BIT;
+	if (dmamask == DMA_32BIT_MASK)
+		return B43_DMA_32BIT;
+	if (dmamask == DMA_64BIT_MASK)
+		return B43_DMA_64BIT;
+	B43_WARN_ON(1);
+	return B43_DMA_30BIT;
+}
+
 /* Main initialization function. */
 static
 struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
@@ -956,7 +965,11 @@ static void b43_destroy_dmaring(struct b43_dmaring *ring,
 
 void b43_dma_free(struct b43_wldev *dev)
 {
-	struct b43_dma *dma = &dev->dma;
+	struct b43_dma *dma;
+
+	if (b43_using_pio_transfers(dev))
+		return;
+	dma = &dev->dma;
 
 	destroy_ring(dma, rx_ring);
 	destroy_ring(dma, tx_ring_AC_BK);
@@ -974,19 +987,7 @@ int b43_dma_init(struct b43_wldev *dev)
 	enum b43_dmatype type;
 
 	dmamask = supported_dma_mask(dev);
-	switch (dmamask) {
-	default:
-		B43_WARN_ON(1);
-	case DMA_30BIT_MASK:
-		type = B43_DMA_30BIT;
-		break;
-	case DMA_32BIT_MASK:
-		type = B43_DMA_32BIT;
-		break;
-	case DMA_64BIT_MASK:
-		type = B43_DMA_64BIT;
-		break;
-	}
+	type = dma_mask_to_engine_type(dmamask);
 	err = ssb_dma_set_mask(dev->dev, dmamask);
 	if (err) {
 		b43err(dev->wl, "The machine/kernel does not support "
@@ -1113,7 +1114,6 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 	size_t hdrsize = b43_txhdr_size(ring->dev);
 
 #define SLOTS_PER_PACKET 2
-	B43_WARN_ON(skb_shinfo(skb)->nr_frags);
 
 	old_top_slot = ring->current_slot;
 	old_used_slots = ring->used_slots;
@@ -1257,11 +1257,6 @@ int b43_dma_tx(struct b43_wldev *dev,
 	int err = 0;
 	unsigned long flags;
 
-	if (unlikely(skb->len < 2 + 2 + 6)) {
-		/* Too short, this can't be a valid frame. */
-		return -EINVAL;
-	}
-
 	hdr = (struct ieee80211_hdr *)skb->data;
 	if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
 		/* The multicast ring will be sent after the DTIM */
@@ -1319,38 +1314,6 @@ out_unlock:
 	return err;
 }
 
-static void b43_fill_txstatus_report(struct b43_dmaring *ring,
-				     struct ieee80211_tx_status *report,
-				     const struct b43_txstatus *status)
-{
-	bool frame_failed = 0;
-
-	if (status->acked) {
-		/* The frame was ACKed. */
-		report->flags |= IEEE80211_TX_STATUS_ACK;
-	} else {
-		/* The frame was not ACKed... */
-		if (!(report->control.flags & IEEE80211_TXCTL_NO_ACK)) {
-			/* ...but we expected an ACK. */
-			frame_failed = 1;
-			report->excessive_retries = 1;
-		}
-	}
-	if (status->frame_count == 0) {
-		/* The frame was not transmitted at all. */
-		report->retry_count = 0;
-	} else {
-		report->retry_count = status->frame_count - 1;
-#ifdef CONFIG_B43_DEBUG
-		if (frame_failed)
-			ring->nr_failed_tx_packets++;
-		else
-			ring->nr_succeed_tx_packets++;
-		ring->nr_total_packet_tries += status->frame_count;
-#endif /* DEBUG */
-	}
-}
-
 /* Called with IRQs disabled. */
 void b43_dma_handle_txstatus(struct b43_wldev *dev,
 			     const struct b43_txstatus *status)
@@ -1360,6 +1323,7 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
 	struct b43_dmadesc_generic *desc;
 	struct b43_dmadesc_meta *meta;
 	int slot;
+	bool frame_succeed;
 
 	ring = parse_cookie(dev, status->cookie, &slot);
 	if (unlikely(!ring))
@@ -1386,7 +1350,15 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
 	 * status of the transmission.
 	 * Some fields of txstat are already filled in dma_tx().
 	 */
-	b43_fill_txstatus_report(ring, &(meta->txstat), status);
+	frame_succeed = b43_fill_txstatus_report(
+			&(meta->txstat), status);
+#ifdef CONFIG_B43_DEBUG
+	if (frame_succeed)
+		ring->nr_succeed_tx_packets++;
+	else
+		ring->nr_failed_tx_packets++;
+	ring->nr_total_packet_tries += status->frame_count;
+#endif /* DEBUG */
 	ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb,
 				    &(meta->txstat));
 	/* skb is freed by ieee80211_tx_status_irqsafe() */
@@ -1573,3 +1545,39 @@ void b43_dma_tx_resume(struct b43_wldev *dev)
 	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
 	b43_power_saving_ctl_bits(dev, 0);
 }
+
+#ifdef CONFIG_B43_PIO
+static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
+			   u16 mmio_base, bool enable)
+{
+	u32 ctl;
+
+	if (type == B43_DMA_64BIT) {
+		ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
+		ctl &= ~B43_DMA64_RXDIRECTFIFO;
+		if (enable)
+			ctl |= B43_DMA64_RXDIRECTFIFO;
+		b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
+	} else {
+		ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
+		ctl &= ~B43_DMA32_RXDIRECTFIFO;
+		if (enable)
+			ctl |= B43_DMA32_RXDIRECTFIFO;
+		b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
+	}
+}
+
+/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
+ * This is called from PIO code, so DMA structures are not available. */
+void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
+			    unsigned int engine_index, bool enable)
+{
+	enum b43_dmatype type;
+	u16 mmio_base;
+
+	type = dma_mask_to_engine_type(supported_dma_mask(dev));
+
+	mmio_base = b43_dmacontroller_base(type, engine_index);
+	direct_fifo_rx(dev, type, mmio_base, enable);
+}
+#endif /* CONFIG_B43_PIO */
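
The b43_dma_direct_fifo_rx() entry point added in the last hunk is,
per its own comment, meant to be called from the PIO code, where no
DMA ring structures exist. A minimal usage sketch under that
assumption (the caller function and the engine index 0 are
hypothetical, chosen only for illustration):

	/* Hypothetical caller: switch RX engine 0 into direct FIFO
	 * (PIO) receive mode before the PIO code starts using it. */
	static void example_enable_pio_rx(struct b43_wldev *dev)
	{
		b43_dma_direct_fifo_rx(dev, 0, 1);
	}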