path: root/drivers/net/wireless/iwlwifi/iwl-rx.c
author    Reinette Chatre <reinette.chatre@intel.com>    2009-09-11 13:38:12 -0400
committer John W. Linville <linville@tuxdriver.com>      2009-09-23 11:35:40 -0400
commit    de0bd50845eb5935ce3d503c5d2f565d6cb9ece1 (patch)
tree      11403d40a5985886bf5fa04f6006654e11130c83 /drivers/net/wireless/iwlwifi/iwl-rx.c
parent    c929c5a1281b666fabc9aa94c607932ec6d28c6d (diff)
iwlwifi: fix potential rx buffer loss
RX handling maintains a few lists that keep track of the RX buffers. Buffers move from one list to the other as they are used, replenished, and again made available for usage. In one such instance, when a buffer is used it enters the "rx_used" list. When buffers are replenished an skb is attached to the buffer and it is moved to the "rx_free" list. The problem here is that the buffer is first removed from the "rx_used" list _before_ the skb is allocated. Thus, if the skb allocation fails, this buffer remains removed from the "rx_used" list and is lost for future usage.

Fix this by first allocating the skb before trying to attach it to a list. We add an additional check to not do this unnecessarily.

Reported-by: Rick Farrington <rickdic@hotmail.com>
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
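The ordering fix is easiest to see in isolation from the driver. Below is a minimal userspace sketch of the same pattern, with a pthread mutex and malloc() standing in for the kernel spinlock and alloc_skb(); the names and types here (rx_buf, rx_used, refill_one) are simplified stand-ins for illustration, not the driver's actual structures. The point it illustrates matches the patch: allocate first, and only unlink a buffer from "rx_used" once allocation has succeeded, so a failed allocation cannot lose the buffer.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct rx_buf {
        struct rx_buf *next;
        void *skb;                      /* stands in for the attached sk_buff */
};

static struct rx_buf *rx_used;          /* buffers waiting to be replenished */
static pthread_mutex_t rxq_lock = PTHREAD_MUTEX_INITIALIZER;

/* Replenish one buffer, mirroring the patched iwl_rx_allocate() ordering. */
static bool refill_one(size_t buf_size)
{
        struct rx_buf *rxb;
        void *skb;

        /* Cheap early exit so we do not allocate unnecessarily -- this is
         * the "additional check" mentioned in the commit message. */
        pthread_mutex_lock(&rxq_lock);
        if (!rx_used) {
                pthread_mutex_unlock(&rxq_lock);
                return false;
        }
        pthread_mutex_unlock(&rxq_lock);

        /* Allocate outside the lock; the allocation may fail. */
        skb = malloc(buf_size);
        if (!skb)
                return false;           /* rx_used is untouched: nothing lost */

        /* Re-check under the lock: the list may have been drained while
         * the lock was dropped for the allocation. */
        pthread_mutex_lock(&rxq_lock);
        if (!rx_used) {
                pthread_mutex_unlock(&rxq_lock);
                free(skb);              /* counterpart of dev_kfree_skb_any() */
                return false;
        }
        rxb = rx_used;                  /* unlink only after success */
        rx_used = rxb->next;
        pthread_mutex_unlock(&rxq_lock);

        rxb->skb = skb;
        /* ...the real driver now maps the skb for DMA and moves the
         * buffer to rx_free... */
        return true;
}

In the buggy version, the list_del() happened before the allocation, so the allocation-failure path returned with the buffer already off every list.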
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-rx.c')
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-rx.c | 24 +++++++++++++++++-------
1 file changed, 17 insertions(+), 7 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 8150c5c3a16b..b90adcb73b06 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -239,26 +239,22 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 	struct iwl_rx_queue *rxq = &priv->rxq;
 	struct list_head *element;
 	struct iwl_rx_mem_buffer *rxb;
+	struct sk_buff *skb;
 	unsigned long flags;
 
 	while (1) {
 		spin_lock_irqsave(&rxq->lock, flags);
-
 		if (list_empty(&rxq->rx_used)) {
 			spin_unlock_irqrestore(&rxq->lock, flags);
 			return;
 		}
-		element = rxq->rx_used.next;
-		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
-		list_del(element);
-
 		spin_unlock_irqrestore(&rxq->lock, flags);
 
 		/* Alloc a new receive buffer */
-		rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
+		skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
 				priority);
 
-		if (!rxb->skb) {
+		if (!skb) {
 			IWL_CRIT(priv, "Can not allocate SKB buffers\n");
 			/* We don't reschedule replenish work here -- we will
 			 * call the restock method and if it still needs
@@ -266,6 +262,20 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 			break;
 		}
 
+		spin_lock_irqsave(&rxq->lock, flags);
+
+		if (list_empty(&rxq->rx_used)) {
+			spin_unlock_irqrestore(&rxq->lock, flags);
+			dev_kfree_skb_any(skb);
+			return;
+		}
+		element = rxq->rx_used.next;
+		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+		list_del(element);
+
+		spin_unlock_irqrestore(&rxq->lock, flags);
+
+		rxb->skb = skb;
 		/* Get physical address of RB/SKB */
 		rxb->real_dma_addr = pci_map_single(
 					priv->pci_dev,
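A note on the error path added in the second hunk: the freshly allocated skb is released with dev_kfree_skb_any() rather than plain dev_kfree_skb(). Presumably because iwl_rx_allocate() takes a gfp_t priority and can therefore be invoked from atomic context, the _any variant, which is safe in both interrupt and process context, is the appropriate choice here.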