author		Michael Buesch <mb@bu3sch.de>			2009-09-04 16:55:00 -0400
committer	John W. Linville <linville@tuxdriver.com>	2009-09-08 16:31:07 -0400
commit		637dae3f637eb7dab447e74362e0dfeded775c7c (patch)
tree		2589ed0c29c0b8f360e100ee0e82ad79f2df3129 /drivers
parent		f5d40eedb32aa9a0e226d468e1f89fb676824694 (diff)
b43: Remove DMA/PIO queue locks
This removes the DMA/PIO queue locks. Locking is handled by wl->mutex now.

Signed-off-by: Michael Buesch <mb@bu3sch.de>
Tested-by: Larry Finger <Larry.Finger@lwfinger.net>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/wireless/b43/dma.c	30
-rw-r--r--	drivers/net/wireless/b43/dma.h	3
-rw-r--r--	drivers/net/wireless/b43/pio.c	38
-rw-r--r--	drivers/net/wireless/b43/pio.h	2
4 files changed, 13 insertions, 60 deletions
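
Nearly every hunk below deletes the same pattern: a per-queue spinlock taken around queue state. The replacement is the serialization named in the commit message, wl->mutex, which the callers of these TX/RX paths already hold. The following is a minimal sketch of that before/after shape, not b43 code; the demo_* identifiers are invented for illustration.

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

/* Illustrative only -- invented types, not from the driver. */
struct demo_wl {
	struct mutex mutex;		/* stands in for the driver-wide wl->mutex */
};

struct demo_ring {
	struct demo_wl *wl;
	spinlock_t lock;		/* the kind of per-queue lock this patch removes */
	int used_slots;
};

/* Before: every entry point bracketed the queue state with its own lock. */
static int demo_tx_old(struct demo_ring *ring)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->used_slots >= 64) {
		err = -ENOSPC;
		goto out_unlock;
	}
	ring->used_slots++;
out_unlock:
	spin_unlock_irqrestore(&ring->lock, flags);
	return err;
}

/* After: the queue code assumes the caller is already serialized,
 * so the per-queue lock and the out_unlock label disappear. */
static int demo_tx_new(struct demo_ring *ring)
{
	if (ring->used_slots >= 64)
		return -ENOSPC;
	ring->used_slots++;
	return 0;
}

/* Caller-side pattern after the patch: one coarse mutex around the call. */
static int demo_caller(struct demo_ring *ring)
{
	int err;

	mutex_lock(&ring->wl->mutex);
	err = demo_tx_new(ring);
	mutex_unlock(&ring->wl->mutex);
	return err;
}

The one hunk that adds locking instead of deleting it is in b43_pio_rx_work() in pio.c: the PIO RX worker runs from a workqueue rather than from a caller that already holds wl->mutex, so it now takes and drops the mutex itself around each pio_rx_frame() call.
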
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 25ced8bdec8f..a467ee260a19 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -856,7 +856,6 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 		} else
 			B43_WARN_ON(1);
 	}
-	spin_lock_init(&ring->lock);
 #ifdef CONFIG_B43_DEBUG
 	ring->last_injected_overflow = jiffies;
 #endif
@@ -1315,7 +1314,6 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
 	struct b43_dmaring *ring;
 	struct ieee80211_hdr *hdr;
 	int err = 0;
-	unsigned long flags;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
 	hdr = (struct ieee80211_hdr *)skb->data;
@@ -1331,8 +1329,6 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
 			dev, skb_get_queue_mapping(skb));
 	}
 
-	spin_lock_irqsave(&ring->lock, flags);
-
 	B43_WARN_ON(!ring->tx);
 
 	if (unlikely(ring->stopped)) {
@@ -1343,7 +1339,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
 		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
 			b43err(dev->wl, "Packet after queue stopped\n");
 		err = -ENOSPC;
-		goto out_unlock;
+		goto out;
 	}
 
 	if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
@@ -1351,7 +1347,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
 		 * full, but queues not stopped. */
 		b43err(dev->wl, "DMA queue overflow\n");
 		err = -ENOSPC;
-		goto out_unlock;
+		goto out;
 	}
 
 	/* Assign the queue number to the ring (if not already done before)
@@ -1365,11 +1361,11 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
 		 * anymore and must not transmit it unencrypted. */
 		dev_kfree_skb_any(skb);
 		err = 0;
-		goto out_unlock;
+		goto out;
 	}
 	if (unlikely(err)) {
 		b43err(dev->wl, "DMA tx mapping failure\n");
-		goto out_unlock;
+		goto out;
 	}
 	ring->nr_tx_packets++;
 	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
@@ -1381,8 +1377,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
 			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
 		}
 	}
-out_unlock:
-	spin_unlock_irqrestore(&ring->lock, flags);
+out:
 
 	return err;
 }
@@ -1401,8 +1396,6 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
 	if (unlikely(!ring))
 		return;
 
-	spin_lock_irq(&ring->lock);
-
 	B43_WARN_ON(!ring->tx);
 	ops = ring->ops;
 	while (1) {
@@ -1461,8 +1454,6 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
 			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
 		}
 	}
-
-	spin_unlock_irq(&ring->lock);
 }
 
 void b43_dma_get_tx_stats(struct b43_wldev *dev,
@@ -1470,17 +1461,14 @@ void b43_dma_get_tx_stats(struct b43_wldev *dev,
 {
 	const int nr_queues = dev->wl->hw->queues;
 	struct b43_dmaring *ring;
-	unsigned long flags;
 	int i;
 
 	for (i = 0; i < nr_queues; i++) {
 		ring = select_ring_by_priority(dev, i);
 
-		spin_lock_irqsave(&ring->lock, flags);
 		stats[i].len = ring->used_slots / TX_SLOTS_PER_FRAME;
 		stats[i].limit = ring->nr_slots / TX_SLOTS_PER_FRAME;
 		stats[i].count = ring->nr_tx_packets;
-		spin_unlock_irqrestore(&ring->lock, flags);
 	}
 }
 
@@ -1591,22 +1579,14 @@ void b43_dma_rx(struct b43_dmaring *ring)
 
 static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&ring->lock, flags);
 	B43_WARN_ON(!ring->tx);
 	ring->ops->tx_suspend(ring);
-	spin_unlock_irqrestore(&ring->lock, flags);
 }
 
 static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&ring->lock, flags);
 	B43_WARN_ON(!ring->tx);
 	ring->ops->tx_resume(ring);
-	spin_unlock_irqrestore(&ring->lock, flags);
 }
 
 void b43_dma_tx_suspend(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index 05dde646d831..f0b0838fb5ba 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -2,7 +2,6 @@
 #define B43_DMA_H_
 
 #include <linux/ieee80211.h>
-#include <linux/spinlock.h>
 
 #include "b43.h"
 
@@ -244,8 +243,6 @@ struct b43_dmaring {
 	/* The QOS priority assigned to this ring. Only used for TX rings.
 	 * This is the mac80211 "queue" value. */
 	u8 queue_prio;
-	/* Lock, only used for TX. */
-	spinlock_t lock;
 	struct b43_wldev *dev;
 #ifdef CONFIG_B43_DEBUG
 	/* Maximum number of used slots. */
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index ce6f36eb88ca..4635baa9b998 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -144,7 +144,6 @@ static struct b43_pio_txqueue *b43_setup_pioqueue_tx(struct b43_wldev *dev,
 	q = kzalloc(sizeof(*q), GFP_KERNEL);
 	if (!q)
 		return NULL;
-	spin_lock_init(&q->lock);
 	q->dev = dev;
 	q->rev = dev->dev->id.revision;
 	q->mmio_base = index_to_pioqueue_base(dev, index) +
@@ -179,7 +178,6 @@ static struct b43_pio_rxqueue *b43_setup_pioqueue_rx(struct b43_wldev *dev,
 	q = kzalloc(sizeof(*q), GFP_KERNEL);
 	if (!q)
 		return NULL;
-	spin_lock_init(&q->lock);
 	q->dev = dev;
 	q->rev = dev->dev->id.revision;
 	q->mmio_base = index_to_pioqueue_base(dev, index) +
@@ -494,7 +492,6 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
 {
 	struct b43_pio_txqueue *q;
 	struct ieee80211_hdr *hdr;
-	unsigned long flags;
 	unsigned int hdrlen, total_len;
 	int err = 0;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -512,20 +509,18 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
 		q = select_queue_by_priority(dev, skb_get_queue_mapping(skb));
 	}
 
-	spin_lock_irqsave(&q->lock, flags);
-
 	hdrlen = b43_txhdr_size(dev);
 	total_len = roundup(skb->len + hdrlen, 4);
 
 	if (unlikely(total_len > q->buffer_size)) {
 		err = -ENOBUFS;
 		b43dbg(dev->wl, "PIO: TX packet longer than queue.\n");
-		goto out_unlock;
+		goto out;
 	}
 	if (unlikely(q->free_packet_slots == 0)) {
 		err = -ENOBUFS;
 		b43warn(dev->wl, "PIO: TX packet overflow.\n");
-		goto out_unlock;
+		goto out;
 	}
 	B43_WARN_ON(q->buffer_used > q->buffer_size);
 
@@ -534,7 +529,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
 		err = -EBUSY;
 		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
 		q->stopped = 1;
-		goto out_unlock;
+		goto out;
 	}
 
 	/* Assign the queue number to the ring (if not already done before)
@@ -548,11 +543,11 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
 		 * anymore and must not transmit it unencrypted. */
 		dev_kfree_skb_any(skb);
 		err = 0;
-		goto out_unlock;
+		goto out;
 	}
 	if (unlikely(err)) {
 		b43err(dev->wl, "PIO transmission failure\n");
-		goto out_unlock;
+		goto out;
 	}
 	q->nr_tx_packets++;
 
@@ -564,9 +559,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
 		q->stopped = 1;
 	}
 
-out_unlock:
-	spin_unlock_irqrestore(&q->lock, flags);
-
+out:
 	return err;
 }
 
@@ -583,8 +576,6 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
 		return;
 	B43_WARN_ON(!pack);
 
-	spin_lock_irq(&q->lock);
-
 	info = IEEE80211_SKB_CB(pack->skb);
 
 	b43_fill_txstatus_report(dev, info, status);
@@ -602,8 +593,6 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
 		ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
 		q->stopped = 0;
 	}
-
-	spin_unlock_irq(&q->lock);
 }
 
 void b43_pio_get_tx_stats(struct b43_wldev *dev,
@@ -611,17 +600,14 @@ void b43_pio_get_tx_stats(struct b43_wldev *dev,
 {
 	const int nr_queues = dev->wl->hw->queues;
 	struct b43_pio_txqueue *q;
-	unsigned long flags;
 	int i;
 
 	for (i = 0; i < nr_queues; i++) {
 		q = select_queue_by_priority(dev, i);
 
-		spin_lock_irqsave(&q->lock, flags);
 		stats[i].len = B43_PIO_MAX_NR_TXPACKETS - q->free_packet_slots;
 		stats[i].limit = B43_PIO_MAX_NR_TXPACKETS;
 		stats[i].count = q->nr_tx_packets;
-		spin_unlock_irqrestore(&q->lock, flags);
 	}
 }
 
@@ -768,9 +754,9 @@ static void b43_pio_rx_work(struct work_struct *work)
 	bool stop;
 
 	do {
-		spin_lock_irq(&q->lock);
+		mutex_lock(&q->dev->wl->mutex);
 		stop = (pio_rx_frame(q) == 0);
-		spin_unlock_irq(&q->lock);
+		mutex_unlock(&q->dev->wl->mutex);
 		cond_resched();
 		if (stop)
 			break;
@@ -787,9 +773,6 @@ void b43_pio_rx(struct b43_pio_rxqueue *q)
 
 static void b43_pio_tx_suspend_queue(struct b43_pio_txqueue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&q->lock, flags);
 	if (q->rev >= 8) {
 		b43_piotx_write32(q, B43_PIO8_TXCTL,
 				  b43_piotx_read32(q, B43_PIO8_TXCTL)
@@ -799,14 +782,10 @@ static void b43_pio_tx_suspend_queue(struct b43_pio_txqueue *q)
 				  b43_piotx_read16(q, B43_PIO_TXCTL)
 				  | B43_PIO_TXCTL_SUSPREQ);
 	}
-	spin_unlock_irqrestore(&q->lock, flags);
 }
 
 static void b43_pio_tx_resume_queue(struct b43_pio_txqueue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&q->lock, flags);
 	if (q->rev >= 8) {
 		b43_piotx_write32(q, B43_PIO8_TXCTL,
 				  b43_piotx_read32(q, B43_PIO8_TXCTL)
@@ -816,7 +795,6 @@ static void b43_pio_tx_resume_queue(struct b43_pio_txqueue *q)
 				  b43_piotx_read16(q, B43_PIO_TXCTL)
 				  & ~B43_PIO_TXCTL_SUSPREQ);
 	}
-	spin_unlock_irqrestore(&q->lock, flags);
 }
 
 void b43_pio_tx_suspend(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/b43/pio.h b/drivers/net/wireless/b43/pio.h
index 6c174c91ca20..a976bbdd6f44 100644
--- a/drivers/net/wireless/b43/pio.h
+++ b/drivers/net/wireless/b43/pio.h
@@ -70,7 +70,6 @@ struct b43_pio_txpacket {
 
 struct b43_pio_txqueue {
 	struct b43_wldev *dev;
-	spinlock_t lock;
 	u16 mmio_base;
 
 	/* The device queue buffer size in bytes. */
@@ -103,7 +102,6 @@ struct b43_pio_txqueue {
 
 struct b43_pio_rxqueue {
 	struct b43_wldev *dev;
-	spinlock_t lock;
 	u16 mmio_base;
 
 	/* Work to reduce latency issues on RX. */