author     Michal Kazior <michal.kazior@tieto.com>    2014-05-14 09:23:31 -0400
committer  Kalle Valo <kvalo@qca.qualcomm.com>        2014-05-14 09:40:47 -0400
commit     3e841fd0a54745c8ad24ecfb0303c505b7460ffb (patch)
tree       e53cf63c1784fbfb497ff6adcf1e97d83e01aedb
parent     e21353576df1fae38710bdbff1c3abfe49f651cd (diff)
ath10k: fix htt rx ring clean up
msdu_payld was read before the txrx tasklet was killed, so it was possible
to end up using an invalid sk_buff pointer, leading to a panic.

Make sure to sanitize the rx ring sk_buff pointers and make the clean up
go through all possible entries instead of relying on the coherent-DMA
mapped u32 index, which could (in theory) be corrupted by the device as
well.

Reported-by: Avery Pennarun <apenwarr@gmail.com>
Reported-by: Ben Greear <greearb@candelatech.com>
Signed-off-by: Michal Kazior <michal.kazior@tieto.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_rx.c  36
1 file changed, 21 insertions(+), 15 deletions(-)
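
For context, a small stand-alone C model of the two clean-up strategies follows
(the helper ring_clean_up and the toy struct ring are invented for illustration;
only the idea of an array of buffer pointers plus a device-shared index mirrors
the driver). The old loop walked from the software read index until it matched
an index kept in DMA-coherent memory, which the device can leave stale or
corrupt; the patched code instead scans every slot, frees whatever buffer is
still there, and clears the slot, so no shared index has to be trusted.

/*
 * Toy userspace model of the ring clean-up introduced by this patch.
 * Names are invented for the example and do not exist in the driver.
 */
#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 8

struct ring {
	void *netbufs[RING_SIZE];	/* one buffer pointer per slot */
	unsigned int sw_rd_idx;		/* driver-owned read index */
	unsigned int alloc_idx;		/* index shared with the device */
};

/*
 * Patched approach: visit every slot, free whatever is left and clear
 * the pointer. Correct even if alloc_idx is stale or corrupted.
 */
static void ring_clean_up(struct ring *r)
{
	int i;

	for (i = 0; i < RING_SIZE; i++) {
		if (!r->netbufs[i])
			continue;
		free(r->netbufs[i]);
		r->netbufs[i] = NULL;
	}
}

int main(void)
{
	struct ring r = { { NULL }, 0, 0 };
	int i;

	/* Fill a few slots, as the replenish path would. */
	for (i = 0; i < 5; i++)
		r.netbufs[i] = malloc(64);

	/* A pop consumes slot 0 and clears it (mirrors the netbuf_pop hunk). */
	free(r.netbufs[0]);
	r.netbufs[0] = NULL;
	r.sw_rd_idx = 1;

	/* Pretend the device left alloc_idx at a bogus value. */
	r.alloc_idx = 3;

	/*
	 * An old-style "while (sw_rd_idx != alloc_idx)" walk would stop after
	 * slot 2 and leak slots 3-4; the scan below frees exactly slots 1-4.
	 */
	ring_clean_up(&r);

	for (i = 0; i < RING_SIZE; i++)
		printf("slot %d: %s\n", i, r.netbufs[i] ? "busy" : "empty");

	return 0;
}
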
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index f85a3cf6da31..db6c8af0c9b1 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -225,10 +225,26 @@ static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
 	ath10k_htt_rx_msdu_buff_replenish(htt);
 }
 
-void ath10k_htt_rx_detach(struct ath10k_htt *htt)
+static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt)
 {
-	int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
+	struct sk_buff *skb;
+	int i;
+
+	for (i = 0; i < htt->rx_ring.size; i++) {
+		skb = htt->rx_ring.netbufs_ring[i];
+		if (!skb)
+			continue;
+
+		dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr,
+				 skb->len + skb_tailroom(skb),
+				 DMA_FROM_DEVICE);
+		dev_kfree_skb_any(skb);
+		htt->rx_ring.netbufs_ring[i] = NULL;
+	}
+}
 
+void ath10k_htt_rx_detach(struct ath10k_htt *htt)
+{
 	del_timer_sync(&htt->rx_ring.refill_retry_timer);
 	tasklet_kill(&htt->rx_replenish_task);
 	tasklet_kill(&htt->txrx_compl_task);
@@ -236,18 +252,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt)
 	skb_queue_purge(&htt->tx_compl_q);
 	skb_queue_purge(&htt->rx_compl_q);
 
-	while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
-		struct sk_buff *skb =
-				htt->rx_ring.netbufs_ring[sw_rd_idx];
-		struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
-
-		dma_unmap_single(htt->ar->dev, cb->paddr,
-				 skb->len + skb_tailroom(skb),
-				 DMA_FROM_DEVICE);
-		dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
-		sw_rd_idx++;
-		sw_rd_idx &= htt->rx_ring.size_mask;
-	}
+	ath10k_htt_rx_ring_clean_up(htt);
 
 	dma_free_coherent(htt->ar->dev,
 			  (htt->rx_ring.size *
@@ -277,6 +282,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
 
 	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
 	msdu = htt->rx_ring.netbufs_ring[idx];
+	htt->rx_ring.netbufs_ring[idx] = NULL;
 
 	idx++;
 	idx &= htt->rx_ring.size_mask;
@@ -494,7 +500,7 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt)
 	htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);
 
 	htt->rx_ring.netbufs_ring =
-		kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
+		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
 			GFP_KERNEL);
 	if (!htt->rx_ring.netbufs_ring)
 		goto err_netbuf;