author		Zhu Yi <yi.zhu@intel.com>	2008-12-17 03:52:33 -0500
committer	John W. Linville <linville@tuxdriver.com>	2008-12-19 15:23:41 -0500
commit		f1bc4ac61f2c08515afd80c6dc3962aa6d0b138b (patch)
tree		d9c765de135de818dbf12d5910d38658cac5be47 /drivers/net/wireless/iwlwifi/iwl-agn.c
parent		4087f6f68cdbd2845c7e54236bae1b058a7b827b (diff)
iwlwifi: use GFP_KERNEL to allocate Rx SKB memory
Previously we allocated the Rx SKBs with the GFP_ATOMIC flag. This was necessary because a spinlock has to be held to protect the operations on the rx_used and rx_free lists in the rxq:

    spin_lock();
    ...
    element = rxq->rx_used.next;
    element->skb = alloc_skb(..., GFP_ATOMIC);
    list_del(element);
    list_add_tail(&element->list, &rxq->rx_free);
    ...
    spin_unlock();

After splitting the rx_used delete and the rx_free insert into two separate operations, the SKB allocation no longer has to run in atomic context (the function itself is scheduled from a workqueue):

    spin_lock();
    ...
    element = rxq->rx_used.next;
    list_del(element);
    ...
    spin_unlock();
    ...
    element->skb = alloc_skb(..., GFP_KERNEL);
    ...
    spin_lock();
    ...
    list_add_tail(&element->list, &rxq->rx_free);
    ...
    spin_unlock();

This patch should fix the "iwlagn: Can not allocate SKB buffers" warning seen recently.

Signed-off-by: Zhu Yi <yi.zhu@intel.com>
Acked-by: Tomas Winkler <tomas.winkler@intel.com>
Cc: stable@kernel.org
Signed-off-by: John W. Linville <linville@tuxdriver.com>
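To make the pattern concrete, below is a minimal, self-contained sketch of the reworked allocation loop. The names used here (example_rxq, example_rx_mem_buffer, example_rx_allocate, EXAMPLE_RX_BUF_SIZE) are hypothetical stand-ins, not the driver's actual code in iwl_rx_allocate(); the sketch only illustrates the shape of the change: unlink from rx_used under the lock, allocate with GFP_KERNEL with the lock dropped, then re-take the lock to queue the element on rx_free.

    #include <linux/list.h>
    #include <linux/skbuff.h>
    #include <linux/spinlock.h>
    #include <linux/gfp.h>

    /* Hypothetical, simplified structures standing in for the driver's rxq. */
    struct example_rx_mem_buffer {
    	struct list_head list;
    	struct sk_buff *skb;
    };

    struct example_rxq {
    	struct list_head rx_used;
    	struct list_head rx_free;
    	int free_count;
    	spinlock_t lock;
    };

    #define EXAMPLE_RX_BUF_SIZE 4096	/* placeholder buffer size */

    /* Sketch of the reworked replenish loop; it is assumed to run from a
     * workqueue, i.e. process context, so GFP_KERNEL may sleep safely. */
    static void example_rx_allocate(struct example_rxq *rxq)
    {
    	struct example_rx_mem_buffer *rxb;
    	unsigned long flags;

    	for (;;) {
    		spin_lock_irqsave(&rxq->lock, flags);
    		if (list_empty(&rxq->rx_used)) {
    			spin_unlock_irqrestore(&rxq->lock, flags);
    			return;
    		}
    		/* Unlink the element from rx_used while holding the lock. */
    		rxb = list_first_entry(&rxq->rx_used,
    				       struct example_rx_mem_buffer, list);
    		list_del(&rxb->list);
    		spin_unlock_irqrestore(&rxq->lock, flags);

    		/* Allocate with the lock dropped; GFP_KERNEL is allowed here. */
    		rxb->skb = alloc_skb(EXAMPLE_RX_BUF_SIZE, GFP_KERNEL);
    		if (!rxb->skb) {
    			/* Put the element back so it can be retried later. */
    			spin_lock_irqsave(&rxq->lock, flags);
    			list_add(&rxb->list, &rxq->rx_used);
    			spin_unlock_irqrestore(&rxq->lock, flags);
    			return;
    		}

    		/* Re-take the lock only to queue the element on rx_free. */
    		spin_lock_irqsave(&rxq->lock, flags);
    		list_add_tail(&rxb->list, &rxq->rx_free);
    		rxq->free_count++;
    		spin_unlock_irqrestore(&rxq->lock, flags);
    	}
    }

Dropping the lock around the allocation is what removes the need for GFP_ATOMIC; the unlinked element is on neither list while the lock is released, so nothing else can touch it in the meantime.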
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-agn.c')
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-agn.c	12
1 file changed, 1 insertion(+), 11 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index a0051928d298..ff8635f0ed45 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1110,16 +1110,6 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
 	priv->cfg->ops->lib->rx_handler_setup(priv);
 }
 
-/*
- * this should be called while priv->lock is locked
- */
-static void __iwl_rx_replenish(struct iwl_priv *priv)
-{
-	iwl_rx_allocate(priv);
-	iwl_rx_queue_restock(priv);
-}
-
-
 /**
  * iwl_rx_handle - Main entry function for receiving responses from uCode
  *
@@ -1228,7 +1218,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
 			count++;
 			if (count >= 8) {
 				priv->rxq.read = i;
-				__iwl_rx_replenish(priv);
+				iwl_rx_queue_restock(priv);
 				count = 0;
 			}
 		}