author    Zhu Yi <yi.zhu@intel.com>    2008-12-17 03:52:33 -0500
committer John W. Linville <linville@tuxdriver.com>    2008-12-19 15:23:41 -0500
commit    f1bc4ac61f2c08515afd80c6dc3962aa6d0b138b (patch)
tree      d9c765de135de818dbf12d5910d38658cac5be47 /drivers/net/wireless/iwlwifi/iwl-rx.c
parent    4087f6f68cdbd2845c7e54236bae1b058a7b827b (diff)
iwlwifi: use GFP_KERNEL to allocate Rx SKB memory
Previously we allocated Rx SKBs with the GFP_ATOMIC flag. This was because
we needed to hold a spinlock to protect the operations on the two lists,
rx_used and rx_free, in the rxq:

	spin_lock();
	...
	element = rxq->rx_used.next;
	element->skb = alloc_skb(..., GFP_ATOMIC);
	list_del(element);
	list_add_tail(&element->list, &rxq->rx_free);
	...
	spin_unlock();

After splitting the rx_used delete and the rx_free insert into two separate
operations, the skb allocation no longer needs to run in atomic context
(the function itself is scheduled from a workqueue):

	spin_lock();
	...
	element = rxq->rx_used.next;
	list_del(element);
	...
	spin_unlock();
	...
	element->skb = alloc_skb(..., GFP_KERNEL);
	...
	spin_lock();
	...
	list_add_tail(&element->list, &rxq->rx_free);
	...
	spin_unlock();

This patch should fix the "iwlagn: Can not allocate SKB buffers" warning
seen recently.

Signed-off-by: Zhu Yi <yi.zhu@intel.com>
Acked-by: Tomas Winkler <tomas.winkler@intel.com>
Cc: stable@kernel.org
Signed-off-by: John W. Linville <linville@tuxdriver.com>
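Why this works: a GFP_KERNEL allocation may sleep and let the VM reclaim
memory, while GFP_ATOMIC must succeed immediately or fail, which is what
produced the warning under memory pressure. Below is a minimal, self-contained
userspace sketch of the same unlink-under-lock / allocate-unlocked /
relink-under-lock pattern, with a pthread mutex standing in for the rxq
spinlock, malloc() for alloc_skb(), and a toy list for <linux/list.h>. All
names are illustrative, and the failure path (re-queuing the node on rx_used)
is a simplification rather than the driver's actual behavior:

	/*
	 * Sketch of the pattern from the commit message: unlink a node
	 * while holding the lock, do the (possibly sleeping) allocation
	 * with the lock dropped, then re-take the lock to put the node
	 * on the free list.  Build with: cc -pthread sketch.c
	 */
	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct list_head { struct list_head *prev, *next; };

	static void list_init(struct list_head *h) { h->prev = h->next = h; }
	static int  list_empty(const struct list_head *h) { return h->next == h; }
	static void list_del(struct list_head *e)
	{
		e->prev->next = e->next;
		e->next->prev = e->prev;
	}
	static void list_add_tail(struct list_head *e, struct list_head *h)
	{
		e->prev = h->prev;
		e->next = h;
		h->prev->next = e;
		h->prev = e;
	}

	struct rx_buf {
		struct list_head list;	/* must stay first: see cast below */
		void *skb;		/* stand-in for the driver's sk_buff */
	};

	static struct list_head rx_used, rx_free;
	static pthread_mutex_t rxq_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Refill rx_free from rx_used, allocating outside the lock. */
	static void rx_allocate(size_t buf_size)
	{
		for (;;) {
			pthread_mutex_lock(&rxq_lock);
			if (list_empty(&rx_used)) {
				pthread_mutex_unlock(&rxq_lock);
				return;
			}
			/* container_of in miniature: list is the first member */
			struct rx_buf *rxb = (struct rx_buf *)rx_used.next;
			list_del(&rxb->list);		/* unlink under the lock */
			pthread_mutex_unlock(&rxq_lock);

			/* Lock dropped: this call may block without stalling
			 * other threads that only need the lists. */
			rxb->skb = malloc(buf_size);
			if (!rxb->skb) {
				fprintf(stderr, "Can not allocate SKB buffers\n");
				/* Put the node back so it is not lost. */
				pthread_mutex_lock(&rxq_lock);
				list_add_tail(&rxb->list, &rx_used);
				pthread_mutex_unlock(&rxq_lock);
				return;
			}

			pthread_mutex_lock(&rxq_lock);
			list_add_tail(&rxb->list, &rx_free);
			pthread_mutex_unlock(&rxq_lock);
		}
	}

	int main(void)
	{
		struct rx_buf bufs[4];

		list_init(&rx_used);
		list_init(&rx_free);
		for (int i = 0; i < 4; i++) {
			bufs[i].skb = NULL;
			list_add_tail(&bufs[i].list, &rx_used);
		}

		rx_allocate(2048);
		printf("rx_free populated: %s\n", list_empty(&rx_free) ? "no" : "yes");
		for (int i = 0; i < 4; i++)
			free(bufs[i].skb);
		return 0;
	}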
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-rx.c')
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-rx.c	29
1 file changed, 19 insertions(+), 10 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 919a775121e4..c5f1aa0feac8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -244,25 +244,31 @@ void iwl_rx_allocate(struct iwl_priv *priv)
 	struct list_head *element;
 	struct iwl_rx_mem_buffer *rxb;
 	unsigned long flags;
-	spin_lock_irqsave(&rxq->lock, flags);
-	while (!list_empty(&rxq->rx_used)) {
+
+	while (1) {
+		spin_lock_irqsave(&rxq->lock, flags);
+
+		if (list_empty(&rxq->rx_used)) {
+			spin_unlock_irqrestore(&rxq->lock, flags);
+			return;
+		}
 		element = rxq->rx_used.next;
 		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+		list_del(element);
+
+		spin_unlock_irqrestore(&rxq->lock, flags);
 
 		/* Alloc a new receive buffer */
 		rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
-				__GFP_NOWARN | GFP_ATOMIC);
+				GFP_KERNEL);
 		if (!rxb->skb) {
-			if (net_ratelimit())
-				printk(KERN_CRIT DRV_NAME
-				       ": Can not allocate SKB buffers\n");
+			printk(KERN_CRIT DRV_NAME
+				"Can not allocate SKB buffers\n");
 			/* We don't reschedule replenish work here -- we will
 			 * call the restock method and if it still needs
 			 * more buffers it will schedule replenish */
 			break;
 		}
-		priv->alloc_rxb_skb++;
-		list_del(element);
 
 		/* Get physical address of RB/SKB */
 		rxb->real_dma_addr = pci_map_single(
@@ -276,12 +282,15 @@ void iwl_rx_allocate(struct iwl_priv *priv)
 		rxb->aligned_dma_addr = ALIGN(rxb->real_dma_addr, 256);
 		skb_reserve(rxb->skb, rxb->aligned_dma_addr - rxb->real_dma_addr);
 
+		spin_lock_irqsave(&rxq->lock, flags);
+
 		list_add_tail(&rxb->list, &rxq->rx_free);
 		rxq->free_count++;
+		priv->alloc_rxb_skb++;
+
+		spin_unlock_irqrestore(&rxq->lock, flags);
 	}
-	spin_unlock_irqrestore(&rxq->lock, flags);
 }
-EXPORT_SYMBOL(iwl_rx_allocate);
 
 void iwl_rx_replenish(struct iwl_priv *priv)
 {
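Taken together, the two hunks leave iwl_rx_allocate with the shape below. This
is a reading aid assembled from the context and '+' lines above, not a verbatim
copy of the post-patch file; the declarations and the DMA-mapping details
between the two hunks are outside this diff and are elided with "...":

	void iwl_rx_allocate(struct iwl_priv *priv)
	{
		...
		while (1) {
			spin_lock_irqsave(&rxq->lock, flags);
			if (list_empty(&rxq->rx_used)) {
				spin_unlock_irqrestore(&rxq->lock, flags);
				return;			/* nothing left to refill */
			}
			element = rxq->rx_used.next;
			rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
			list_del(element);		/* unlink under the lock */
			spin_unlock_irqrestore(&rxq->lock, flags);

			/* lock dropped: the allocation may now sleep */
			rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
					     GFP_KERNEL);
			if (!rxb->skb) {
				printk(KERN_CRIT DRV_NAME
					"Can not allocate SKB buffers\n");
				break;	/* restock will reschedule replenish */
			}

			...			/* pci_map_single() etc., elided */

			spin_lock_irqsave(&rxq->lock, flags);
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
			priv->alloc_rxb_skb++;
			spin_unlock_irqrestore(&rxq->lock, flags);
		}
	}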