author		Abhijeet Kolekar <abhijeet.kolekar@intel.com>	2009-04-30 16:56:26 -0400
committer	John W. Linville <linville@tuxdriver.com>	2009-05-06 15:14:59 -0400
commit		722404983b9deb21e4f786224201ca2ab27a1c48 (patch)
tree		97b1e9de442c5c3aad267743abf23a6177e33cdb
parent		84379cba44042a4caf793f821be8bd1a9b8235c2 (diff)
iwl3945: fix lock dependency
The patch separates rx_used and rx_free handling into two different atomic contexts, so the skb allocation no longer needs GFP_ATOMIC and can use GFP_KERNEL instead.

Signed-off-by: Abhijeet Kolekar <abhijeet.kolekar@intel.com>
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl3945-base.c	37
1 file changed, 17 insertions(+), 20 deletions(-)
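The core of the fix is a standard locking pattern: hold the rx queue spinlock only while detaching or re-attaching a list element, and drop it across the skb allocation so that a sleeping GFP_KERNEL allocation is legal. The sketch below is illustrative only, not the driver code; the example_queue/example_buf structures and the example_refill() name are placeholders invented for this sketch.

#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Placeholder types standing in for iwl_rx_queue / iwl_rx_mem_buffer. */
struct example_buf {
	struct list_head list;
	struct sk_buff *skb;
};

struct example_queue {
	spinlock_t lock;
	struct list_head rx_used;	/* buffers waiting for an skb */
	struct list_head rx_free;	/* buffers ready to hand to hardware */
	unsigned int buf_size;
};

static void example_refill(struct example_queue *q)
{
	unsigned long flags;

	while (1) {
		struct example_buf *buf;

		/* Lock only long enough to detach one used buffer. */
		spin_lock_irqsave(&q->lock, flags);
		if (list_empty(&q->rx_used)) {
			spin_unlock_irqrestore(&q->lock, flags);
			return;
		}
		buf = list_first_entry(&q->rx_used, struct example_buf, list);
		list_del(&buf->list);
		spin_unlock_irqrestore(&q->lock, flags);

		/* No spinlock held here, so a sleeping GFP_KERNEL allocation is fine. */
		buf->skb = alloc_skb(q->buf_size, GFP_KERNEL);
		if (!buf->skb)
			return;	/* a production version might put buf back on rx_used */

		/* Retake the lock only to publish the refilled buffer. */
		spin_lock_irqsave(&q->lock, flags);
		list_add_tail(&buf->list, &q->rx_free);
		spin_unlock_irqrestore(&q->lock, flags);
	}
}

The trade-off is that the lock is taken and released once per buffer instead of once per batch, which the patch accepts in exchange for being able to sleep in the allocator.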
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 5cd4321d7cf5..dc0359ce1ec0 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -1344,15 +1344,24 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv)
 	struct list_head *element;
 	struct iwl_rx_mem_buffer *rxb;
 	unsigned long flags;
-	spin_lock_irqsave(&rxq->lock, flags);
-	while (!list_empty(&rxq->rx_used)) {
+
+	while (1) {
+		spin_lock_irqsave(&rxq->lock, flags);
+
+		if (list_empty(&rxq->rx_used)) {
+			spin_unlock_irqrestore(&rxq->lock, flags);
+			return;
+		}
+
 		element = rxq->rx_used.next;
 		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+		list_del(element);
+		spin_unlock_irqrestore(&rxq->lock, flags);
 
 		/* Alloc a new receive buffer */
 		rxb->skb =
 		    alloc_skb(priv->hw_params.rx_buf_size,
-				__GFP_NOWARN | GFP_ATOMIC);
+				GFP_KERNEL);
 		if (!rxb->skb) {
 			if (net_ratelimit())
 				IWL_CRIT(priv, ": Can not allocate SKB buffers\n");
@@ -1370,18 +1379,18 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv)
 		 */
 		skb_reserve(rxb->skb, 4);
 
-		priv->alloc_rxb_skb++;
-		list_del(element);
-
 		/* Get physical address of RB/SKB */
 		rxb->real_dma_addr = pci_map_single(priv->pci_dev,
 						rxb->skb->data,
 						priv->hw_params.rx_buf_size,
 						PCI_DMA_FROMDEVICE);
+
+		spin_lock_irqsave(&rxq->lock, flags);
 		list_add_tail(&rxb->list, &rxq->rx_free);
+		priv->alloc_rxb_skb++;
 		rxq->free_count++;
+		spin_unlock_irqrestore(&rxq->lock, flags);
 	}
-	spin_unlock_irqrestore(&rxq->lock, flags);
 }
 
 void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
@@ -1414,18 +1423,6 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 	spin_unlock_irqrestore(&rxq->lock, flags);
 }
 
-/*
- * this should be called while priv->lock is locked
- */
-static void __iwl3945_rx_replenish(void *data)
-{
-	struct iwl_priv *priv = data;
-
-	iwl3945_rx_allocate(priv);
-	iwl3945_rx_queue_restock(priv);
-}
-
-
 void iwl3945_rx_replenish(void *data)
 {
 	struct iwl_priv *priv = data;
@@ -1644,7 +1641,7 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
 		count++;
 		if (count >= 8) {
 			priv->rxq.read = i;
-			__iwl3945_rx_replenish(priv);
+			iwl3945_rx_queue_restock(priv);
 			count = 0;
 		}
 	}