diff options
author | Michael Buesch <mb@bu3sch.de> | 2009-09-04 16:55:00 -0400 |
---|---|---|
committer | John W. Linville <linville@tuxdriver.com> | 2009-09-08 16:31:07 -0400 |
commit | 637dae3f637eb7dab447e74362e0dfeded775c7c (patch) | |
tree | 2589ed0c29c0b8f360e100ee0e82ad79f2df3129 /drivers/net/wireless/b43/dma.c | |
parent | f5d40eedb32aa9a0e226d468e1f89fb676824694 (diff) |
b43: Remove DMA/PIO queue locks
This removes the DMA/PIO queue locks. Locking is handled by
wl->mutex now.
Signed-off-by: Michael Buesch <mb@bu3sch.de>
Tested-by: Larry Finger <Larry.Finger@lwfinger.net>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/b43/dma.c')
-rw-r--r-- | drivers/net/wireless/b43/dma.c | 30 |
1 file changed, 5 insertions(+), 25 deletions(-)
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index 25ced8bdec8f..a467ee260a19 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c | |||
@@ -856,7 +856,6 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, | |||
856 | } else | 856 | } else |
857 | B43_WARN_ON(1); | 857 | B43_WARN_ON(1); |
858 | } | 858 | } |
859 | spin_lock_init(&ring->lock); | ||
860 | #ifdef CONFIG_B43_DEBUG | 859 | #ifdef CONFIG_B43_DEBUG |
861 | ring->last_injected_overflow = jiffies; | 860 | ring->last_injected_overflow = jiffies; |
862 | #endif | 861 | #endif |
@@ -1315,7 +1314,6 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb) | |||
1315 | struct b43_dmaring *ring; | 1314 | struct b43_dmaring *ring; |
1316 | struct ieee80211_hdr *hdr; | 1315 | struct ieee80211_hdr *hdr; |
1317 | int err = 0; | 1316 | int err = 0; |
1318 | unsigned long flags; | ||
1319 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 1317 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
1320 | 1318 | ||
1321 | hdr = (struct ieee80211_hdr *)skb->data; | 1319 | hdr = (struct ieee80211_hdr *)skb->data; |
@@ -1331,8 +1329,6 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb) | |||
1331 | dev, skb_get_queue_mapping(skb)); | 1329 | dev, skb_get_queue_mapping(skb)); |
1332 | } | 1330 | } |
1333 | 1331 | ||
1334 | spin_lock_irqsave(&ring->lock, flags); | ||
1335 | |||
1336 | B43_WARN_ON(!ring->tx); | 1332 | B43_WARN_ON(!ring->tx); |
1337 | 1333 | ||
1338 | if (unlikely(ring->stopped)) { | 1334 | if (unlikely(ring->stopped)) { |
@@ -1343,7 +1339,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb) | |||
1343 | if (b43_debug(dev, B43_DBG_DMAVERBOSE)) | 1339 | if (b43_debug(dev, B43_DBG_DMAVERBOSE)) |
1344 | b43err(dev->wl, "Packet after queue stopped\n"); | 1340 | b43err(dev->wl, "Packet after queue stopped\n"); |
1345 | err = -ENOSPC; | 1341 | err = -ENOSPC; |
1346 | goto out_unlock; | 1342 | goto out; |
1347 | } | 1343 | } |
1348 | 1344 | ||
1349 | if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) { | 1345 | if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) { |
@@ -1351,7 +1347,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb) | |||
1351 | * full, but queues not stopped. */ | 1347 | * full, but queues not stopped. */ |
1352 | b43err(dev->wl, "DMA queue overflow\n"); | 1348 | b43err(dev->wl, "DMA queue overflow\n"); |
1353 | err = -ENOSPC; | 1349 | err = -ENOSPC; |
1354 | goto out_unlock; | 1350 | goto out; |
1355 | } | 1351 | } |
1356 | 1352 | ||
1357 | /* Assign the queue number to the ring (if not already done before) | 1353 | /* Assign the queue number to the ring (if not already done before) |
@@ -1365,11 +1361,11 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb) | |||
1365 | * anymore and must not transmit it unencrypted. */ | 1361 | * anymore and must not transmit it unencrypted. */ |
1366 | dev_kfree_skb_any(skb); | 1362 | dev_kfree_skb_any(skb); |
1367 | err = 0; | 1363 | err = 0; |
1368 | goto out_unlock; | 1364 | goto out; |
1369 | } | 1365 | } |
1370 | if (unlikely(err)) { | 1366 | if (unlikely(err)) { |
1371 | b43err(dev->wl, "DMA tx mapping failure\n"); | 1367 | b43err(dev->wl, "DMA tx mapping failure\n"); |
1372 | goto out_unlock; | 1368 | goto out; |
1373 | } | 1369 | } |
1374 | ring->nr_tx_packets++; | 1370 | ring->nr_tx_packets++; |
1375 | if ((free_slots(ring) < TX_SLOTS_PER_FRAME) || | 1371 | if ((free_slots(ring) < TX_SLOTS_PER_FRAME) || |
@@ -1381,8 +1377,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb) | |||
1381 | b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index); | 1377 | b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index); |
1382 | } | 1378 | } |
1383 | } | 1379 | } |
1384 | out_unlock: | 1380 | out: |
1385 | spin_unlock_irqrestore(&ring->lock, flags); | ||
1386 | 1381 | ||
1387 | return err; | 1382 | return err; |
1388 | } | 1383 | } |
@@ -1401,8 +1396,6 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, | |||
1401 | if (unlikely(!ring)) | 1396 | if (unlikely(!ring)) |
1402 | return; | 1397 | return; |
1403 | 1398 | ||
1404 | spin_lock_irq(&ring->lock); | ||
1405 | |||
1406 | B43_WARN_ON(!ring->tx); | 1399 | B43_WARN_ON(!ring->tx); |
1407 | ops = ring->ops; | 1400 | ops = ring->ops; |
1408 | while (1) { | 1401 | while (1) { |
@@ -1461,8 +1454,6 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, | |||
1461 | b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index); | 1454 | b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index); |
1462 | } | 1455 | } |
1463 | } | 1456 | } |
1464 | |||
1465 | spin_unlock_irq(&ring->lock); | ||
1466 | } | 1457 | } |
1467 | 1458 | ||
1468 | void b43_dma_get_tx_stats(struct b43_wldev *dev, | 1459 | void b43_dma_get_tx_stats(struct b43_wldev *dev, |
@@ -1470,17 +1461,14 @@ void b43_dma_get_tx_stats(struct b43_wldev *dev, | |||
1470 | { | 1461 | { |
1471 | const int nr_queues = dev->wl->hw->queues; | 1462 | const int nr_queues = dev->wl->hw->queues; |
1472 | struct b43_dmaring *ring; | 1463 | struct b43_dmaring *ring; |
1473 | unsigned long flags; | ||
1474 | int i; | 1464 | int i; |
1475 | 1465 | ||
1476 | for (i = 0; i < nr_queues; i++) { | 1466 | for (i = 0; i < nr_queues; i++) { |
1477 | ring = select_ring_by_priority(dev, i); | 1467 | ring = select_ring_by_priority(dev, i); |
1478 | 1468 | ||
1479 | spin_lock_irqsave(&ring->lock, flags); | ||
1480 | stats[i].len = ring->used_slots / TX_SLOTS_PER_FRAME; | 1469 | stats[i].len = ring->used_slots / TX_SLOTS_PER_FRAME; |
1481 | stats[i].limit = ring->nr_slots / TX_SLOTS_PER_FRAME; | 1470 | stats[i].limit = ring->nr_slots / TX_SLOTS_PER_FRAME; |
1482 | stats[i].count = ring->nr_tx_packets; | 1471 | stats[i].count = ring->nr_tx_packets; |
1483 | spin_unlock_irqrestore(&ring->lock, flags); | ||
1484 | } | 1472 | } |
1485 | } | 1473 | } |
1486 | 1474 | ||
@@ -1591,22 +1579,14 @@ void b43_dma_rx(struct b43_dmaring *ring) | |||
1591 | 1579 | ||
1592 | static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring) | 1580 | static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring) |
1593 | { | 1581 | { |
1594 | unsigned long flags; | ||
1595 | |||
1596 | spin_lock_irqsave(&ring->lock, flags); | ||
1597 | B43_WARN_ON(!ring->tx); | 1582 | B43_WARN_ON(!ring->tx); |
1598 | ring->ops->tx_suspend(ring); | 1583 | ring->ops->tx_suspend(ring); |
1599 | spin_unlock_irqrestore(&ring->lock, flags); | ||
1600 | } | 1584 | } |
1601 | 1585 | ||
1602 | static void b43_dma_tx_resume_ring(struct b43_dmaring *ring) | 1586 | static void b43_dma_tx_resume_ring(struct b43_dmaring *ring) |
1603 | { | 1587 | { |
1604 | unsigned long flags; | ||
1605 | |||
1606 | spin_lock_irqsave(&ring->lock, flags); | ||
1607 | B43_WARN_ON(!ring->tx); | 1588 | B43_WARN_ON(!ring->tx); |
1608 | ring->ops->tx_resume(ring); | 1589 | ring->ops->tx_resume(ring); |
1609 | spin_unlock_irqrestore(&ring->lock, flags); | ||
1610 | } | 1590 | } |
1611 | 1591 | ||
1612 | void b43_dma_tx_suspend(struct b43_wldev *dev) | 1592 | void b43_dma_tx_suspend(struct b43_wldev *dev) |