aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMichal Kazior <michal.kazior@tieto.com>2014-08-22 08:33:14 -0400
committerKalle Valo <kvalo@qca.qualcomm.com>2014-08-25 04:31:47 -0400
commit728f95eef5238bffdb20e511f5cd553321d404c3 (patch)
treef3603b906f383d3c33908ba32aafef90fd56db2e
parent5c771e7454d148af35e8b4297d00f880de79ea49 (diff)
ath10k: rework posting pci rx buffers
It was possible on a host system running low on memory to end up with no rx buffers on pci pipes. This makes the driver more robust as it won't fail to start if it can't allocate all rx buffers right away. If it is fatal, then upper layers will notice trouble anyway.

Signed-off-by: Michal Kazior <michal.kazior@tieto.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c71
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h16
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c232
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h2
4 files changed, 160 insertions, 161 deletions
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 8cbc0ab8ad42..f6668162a0a8 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -385,44 +385,59 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
385 return delta; 385 return delta;
386} 386}
387 387
388int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state, 388
389 void *per_recv_context, 389int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
390 u32 buffer)
391{ 390{
392 struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; 391 struct ath10k *ar = pipe->ar;
393 u32 ctrl_addr = ce_state->ctrl_addr;
394 struct ath10k *ar = ce_state->ar;
395 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 392 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
393 struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
396 unsigned int nentries_mask = dest_ring->nentries_mask; 394 unsigned int nentries_mask = dest_ring->nentries_mask;
397 unsigned int write_index; 395 unsigned int write_index = dest_ring->write_index;
398 unsigned int sw_index; 396 unsigned int sw_index = dest_ring->sw_index;
399 int ret;
400 397
401 spin_lock_bh(&ar_pci->ce_lock); 398 lockdep_assert_held(&ar_pci->ce_lock);
402 write_index = dest_ring->write_index;
403 sw_index = dest_ring->sw_index;
404 399
405 if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) { 400 return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
406 struct ce_desc *base = dest_ring->base_addr_owner_space; 401}
407 struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index); 402
403int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
404{
405 struct ath10k *ar = pipe->ar;
406 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
407 struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
408 unsigned int nentries_mask = dest_ring->nentries_mask;
409 unsigned int write_index = dest_ring->write_index;
410 unsigned int sw_index = dest_ring->sw_index;
411 struct ce_desc *base = dest_ring->base_addr_owner_space;
412 struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
413 u32 ctrl_addr = pipe->ctrl_addr;
408 414
409 /* Update destination descriptor */ 415 lockdep_assert_held(&ar_pci->ce_lock);
410 desc->addr = __cpu_to_le32(buffer);
411 desc->nbytes = 0;
412 416
413 dest_ring->per_transfer_context[write_index] = 417 if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
414 per_recv_context; 418 return -EIO;
415 419
416 /* Update Destination Ring Write Index */ 420 desc->addr = __cpu_to_le32(paddr);
417 write_index = CE_RING_IDX_INCR(nentries_mask, write_index); 421 desc->nbytes = 0;
418 ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index); 422
419 dest_ring->write_index = write_index; 423 dest_ring->per_transfer_context[write_index] = ctx;
420 ret = 0; 424 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
421 } else { 425 ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
422 ret = -EIO; 426 dest_ring->write_index = write_index;
423 }
424 427
428 return 0;
429}
430
431int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
432{
433 struct ath10k *ar = pipe->ar;
434 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
435 int ret;
436
437 spin_lock_bh(&ar_pci->ce_lock);
438 ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr);
425 spin_unlock_bh(&ar_pci->ce_lock); 439 spin_unlock_bh(&ar_pci->ce_lock);
440
426 return ret; 441 return ret;
427} 442}
428 443
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index d48dbb972004..82d1f23546b9 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -166,19 +166,9 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
166 166
167/*==================Recv=======================*/ 167/*==================Recv=======================*/
168 168
169/* 169int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
170 * Make a buffer available to receive. The buffer must be at least of a 170int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
171 * minimal size appropriate for this copy engine (src_sz_max attribute). 171int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
172 * ce - which copy engine to use
173 * per_transfer_recv_context - context passed back to caller's recv_cb
174 * buffer - address of buffer in CE space
175 * Returns 0 on success; otherwise an error status.
176 *
177 * Implementation note: Pushes a buffer to Dest ring.
178 */
179int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
180 void *per_transfer_recv_context,
181 u32 buffer);
182 172
183/* recv flags */ 173/* recv flags */
184/* Data is byte-swapped */ 174/* Data is byte-swapped */
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index ffb980c5080d..21f7dc3fb57e 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -67,10 +67,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
67static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address, 67static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
68 u32 *data); 68 u32 *data);
69 69
70static int ath10k_pci_post_rx(struct ath10k *ar); 70static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
71static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
72 int num);
73static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
74static int ath10k_pci_cold_reset(struct ath10k *ar); 71static int ath10k_pci_cold_reset(struct ath10k *ar);
75static int ath10k_pci_warm_reset(struct ath10k *ar); 72static int ath10k_pci_warm_reset(struct ath10k *ar);
76static int ath10k_pci_wait_for_target_init(struct ath10k *ar); 73static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
@@ -278,6 +275,101 @@ static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
278 return "legacy"; 275 return "legacy";
279} 276}
280 277
278static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
279{
280 struct ath10k *ar = pipe->hif_ce_state;
281 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
282 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
283 struct sk_buff *skb;
284 dma_addr_t paddr;
285 int ret;
286
287 lockdep_assert_held(&ar_pci->ce_lock);
288
289 skb = dev_alloc_skb(pipe->buf_sz);
290 if (!skb)
291 return -ENOMEM;
292
293 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
294
295 paddr = dma_map_single(ar->dev, skb->data,
296 skb->len + skb_tailroom(skb),
297 DMA_FROM_DEVICE);
298 if (unlikely(dma_mapping_error(ar->dev, paddr))) {
299 ath10k_warn("failed to dma map pci rx buf\n");
300 dev_kfree_skb_any(skb);
301 return -EIO;
302 }
303
304 ATH10K_SKB_CB(skb)->paddr = paddr;
305
306 ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
307 if (ret) {
308 ath10k_warn("failed to post pci rx buf: %d\n", ret);
309 dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
310 DMA_FROM_DEVICE);
311 dev_kfree_skb_any(skb);
312 return ret;
313 }
314
315 return 0;
316}
317
318static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
319{
320 struct ath10k *ar = pipe->hif_ce_state;
321 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
322 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
323 int ret, num;
324
325 lockdep_assert_held(&ar_pci->ce_lock);
326
327 if (pipe->buf_sz == 0)
328 return;
329
330 if (!ce_pipe->dest_ring)
331 return;
332
333 num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
334 while (num--) {
335 ret = __ath10k_pci_rx_post_buf(pipe);
336 if (ret) {
337 ath10k_warn("failed to post pci rx buf: %d\n", ret);
338 mod_timer(&ar_pci->rx_post_retry, jiffies +
339 ATH10K_PCI_RX_POST_RETRY_MS);
340 break;
341 }
342 }
343}
344
345static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
346{
347 struct ath10k *ar = pipe->hif_ce_state;
348 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
349
350 spin_lock_bh(&ar_pci->ce_lock);
351 __ath10k_pci_rx_post_pipe(pipe);
352 spin_unlock_bh(&ar_pci->ce_lock);
353}
354
355static void ath10k_pci_rx_post(struct ath10k *ar)
356{
357 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
358 int i;
359
360 spin_lock_bh(&ar_pci->ce_lock);
361 for (i = 0; i < CE_COUNT; i++)
362 __ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
363 spin_unlock_bh(&ar_pci->ce_lock);
364}
365
366static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
367{
368 struct ath10k *ar = (void *)ptr;
369
370 ath10k_pci_rx_post(ar);
371}
372
281/* 373/*
282 * Diagnostic read/write access is provided for startup/config/debug usage. 374 * Diagnostic read/write access is provided for startup/config/debug usage.
283 * Caller must guarantee proper alignment, when applicable, and single user 375 * Caller must guarantee proper alignment, when applicable, and single user
@@ -344,7 +436,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
344 nbytes = min_t(unsigned int, remaining_bytes, 436 nbytes = min_t(unsigned int, remaining_bytes,
345 DIAG_TRANSFER_LIMIT); 437 DIAG_TRANSFER_LIMIT);
346 438
347 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data); 439 ret = ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
348 if (ret != 0) 440 if (ret != 0)
349 goto done; 441 goto done;
350 442
@@ -534,7 +626,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
534 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT); 626 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
535 627
536 /* Set up to receive directly into Target(!) address */ 628 /* Set up to receive directly into Target(!) address */
537 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address); 629 ret = ath10k_ce_rx_post_buf(ce_diag, NULL, address);
538 if (ret != 0) 630 if (ret != 0)
539 goto done; 631 goto done;
540 632
@@ -696,12 +788,10 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
696 unsigned int nbytes, max_nbytes; 788 unsigned int nbytes, max_nbytes;
697 unsigned int transfer_id; 789 unsigned int transfer_id;
698 unsigned int flags; 790 unsigned int flags;
699 int err, num_replenish = 0;
700 791
701 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context, 792 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
702 &ce_data, &nbytes, &transfer_id, 793 &ce_data, &nbytes, &transfer_id,
703 &flags) == 0) { 794 &flags) == 0) {
704 num_replenish++;
705 skb = transfer_context; 795 skb = transfer_context;
706 max_nbytes = skb->len + skb_tailroom(skb); 796 max_nbytes = skb->len + skb_tailroom(skb);
707 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, 797 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
@@ -718,12 +808,7 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
718 cb->rx_completion(ar, skb, pipe_info->pipe_num); 808 cb->rx_completion(ar, skb, pipe_info->pipe_num);
719 } 809 }
720 810
721 err = ath10k_pci_post_rx_pipe(pipe_info, num_replenish); 811 ath10k_pci_rx_post_pipe(pipe_info);
722 if (unlikely(err)) {
723 /* FIXME: retry */
724 ath10k_warn("failed to replenish CE rx ring %d (%d bufs): %d\n",
725 pipe_info->pipe_num, num_replenish, err);
726 }
727} 812}
728 813
729static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, 814static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
@@ -911,6 +996,8 @@ static void ath10k_pci_kill_tasklet(struct ath10k *ar)
911 996
912 for (i = 0; i < CE_COUNT; i++) 997 for (i = 0; i < CE_COUNT; i++)
913 tasklet_kill(&ar_pci->pipe_info[i].intr); 998 tasklet_kill(&ar_pci->pipe_info[i].intr);
999
1000 del_timer_sync(&ar_pci->rx_post_retry);
914} 1001}
915 1002
916/* TODO - temporary mapping while we have too few CE's */ 1003/* TODO - temporary mapping while we have too few CE's */
@@ -992,94 +1079,6 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
992 &dl_is_polled); 1079 &dl_is_polled);
993} 1080}
994 1081
995static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
996 int num)
997{
998 struct ath10k *ar = pipe_info->hif_ce_state;
999 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1000 struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
1001 struct sk_buff *skb;
1002 dma_addr_t ce_data;
1003 int i, ret = 0;
1004
1005 if (pipe_info->buf_sz == 0)
1006 return 0;
1007
1008 for (i = 0; i < num; i++) {
1009 skb = dev_alloc_skb(pipe_info->buf_sz);
1010 if (!skb) {
1011 ath10k_warn("failed to allocate skbuff for pipe %d\n",
1012 num);
1013 ret = -ENOMEM;
1014 goto err;
1015 }
1016
1017 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
1018
1019 ce_data = dma_map_single(ar->dev, skb->data,
1020 skb->len + skb_tailroom(skb),
1021 DMA_FROM_DEVICE);
1022
1023 if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
1024 ath10k_warn("failed to DMA map sk_buff\n");
1025 dev_kfree_skb_any(skb);
1026 ret = -EIO;
1027 goto err;
1028 }
1029
1030 ATH10K_SKB_CB(skb)->paddr = ce_data;
1031
1032 pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
1033 pipe_info->buf_sz,
1034 PCI_DMA_FROMDEVICE);
1035
1036 ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
1037 ce_data);
1038 if (ret) {
1039 ath10k_warn("failed to enqueue to pipe %d: %d\n",
1040 num, ret);
1041 goto err;
1042 }
1043 }
1044
1045 return ret;
1046
1047err:
1048 ath10k_pci_rx_pipe_cleanup(pipe_info);
1049 return ret;
1050}
1051
1052static int ath10k_pci_post_rx(struct ath10k *ar)
1053{
1054 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1055 struct ath10k_pci_pipe *pipe_info;
1056 const struct ce_attr *attr;
1057 int pipe_num, ret = 0;
1058
1059 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1060 pipe_info = &ar_pci->pipe_info[pipe_num];
1061 attr = &host_ce_config_wlan[pipe_num];
1062
1063 if (attr->dest_nentries == 0)
1064 continue;
1065
1066 ret = ath10k_pci_post_rx_pipe(pipe_info,
1067 attr->dest_nentries - 1);
1068 if (ret) {
1069 ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
1070 pipe_num, ret);
1071
1072 for (; pipe_num >= 0; pipe_num--) {
1073 pipe_info = &ar_pci->pipe_info[pipe_num];
1074 ath10k_pci_rx_pipe_cleanup(pipe_info);
1075 }
1076 return ret;
1077 }
1078 }
1079
1080 return 0;
1081}
1082
1083static void ath10k_pci_irq_disable(struct ath10k *ar) 1082static void ath10k_pci_irq_disable(struct ath10k *ar)
1084{ 1083{
1085 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1084 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -1117,28 +1116,14 @@ static void ath10k_pci_irq_enable(struct ath10k *ar)
1117static int ath10k_pci_hif_start(struct ath10k *ar) 1116static int ath10k_pci_hif_start(struct ath10k *ar)
1118{ 1117{
1119 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1118 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1120 int ret;
1121 1119
1122 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n"); 1120 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");
1123 1121
1124 ath10k_pci_irq_enable(ar); 1122 ath10k_pci_irq_enable(ar);
1125 1123 ath10k_pci_rx_post(ar);
1126 /* Post buffers once to start things off. */
1127 ret = ath10k_pci_post_rx(ar);
1128 if (ret) {
1129 ath10k_warn("failed to post RX buffers for all pipes: %d\n",
1130 ret);
1131 goto err_stop;
1132 }
1133 1124
1134 ar_pci->started = 1; 1125 ar_pci->started = 1;
1135 return 0; 1126 return 0;
1136
1137err_stop:
1138 ath10k_pci_irq_disable(ar);
1139 ath10k_pci_kill_tasklet(ar);
1140
1141 return ret;
1142} 1127}
1143 1128
1144static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info) 1129static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
@@ -1240,6 +1225,12 @@ static void ath10k_pci_ce_deinit(struct ath10k *ar)
1240 ath10k_ce_deinit_pipe(ar, i); 1225 ath10k_ce_deinit_pipe(ar, i);
1241} 1226}
1242 1227
1228static void ath10k_pci_flush(struct ath10k *ar)
1229{
1230 ath10k_pci_kill_tasklet(ar);
1231 ath10k_pci_buffer_cleanup(ar);
1232}
1233
1243static void ath10k_pci_hif_stop(struct ath10k *ar) 1234static void ath10k_pci_hif_stop(struct ath10k *ar)
1244{ 1235{
1245 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1236 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -1250,8 +1241,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
1250 return; 1241 return;
1251 1242
1252 ath10k_pci_irq_disable(ar); 1243 ath10k_pci_irq_disable(ar);
1253 ath10k_pci_kill_tasklet(ar); 1244 ath10k_pci_flush(ar);
1254 ath10k_pci_buffer_cleanup(ar);
1255 1245
1256 /* Make the sure the device won't access any structures on the host by 1246 /* Make the sure the device won't access any structures on the host by
1257 * resetting it. The device was fed with PCI CE ringbuffer 1247 * resetting it. The device was fed with PCI CE ringbuffer
@@ -1311,7 +1301,7 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1311 xfer.wait_for_resp = true; 1301 xfer.wait_for_resp = true;
1312 xfer.resp_len = 0; 1302 xfer.resp_len = 0;
1313 1303
1314 ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr); 1304 ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
1315 } 1305 }
1316 1306
1317 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0); 1307 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
@@ -2513,6 +2503,8 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
2513 ar_pci->ar = ar; 2503 ar_pci->ar = ar;
2514 2504
2515 spin_lock_init(&ar_pci->ce_lock); 2505 spin_lock_init(&ar_pci->ce_lock);
2506 setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
2507 (unsigned long)ar);
2516 2508
2517 ret = ath10k_pci_claim(ar); 2509 ret = ath10k_pci_claim(ar);
2518 if (ret) { 2510 if (ret) {
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index caed918c7102..b9aa692d22f1 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -181,6 +181,7 @@ struct ath10k_pci {
181 181
182 /* Map CE id to ce_state */ 182 /* Map CE id to ce_state */
183 struct ath10k_ce_pipe ce_states[CE_COUNT_MAX]; 183 struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
184 struct timer_list rx_post_retry;
184}; 185};
185 186
186static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) 187static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
@@ -188,6 +189,7 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
188 return (struct ath10k_pci *)ar->drv_priv; 189 return (struct ath10k_pci *)ar->drv_priv;
189} 190}
190 191
192#define ATH10K_PCI_RX_POST_RETRY_MS 50
191#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */ 193#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
192#define PCIE_WAKE_TIMEOUT 5000 /* 5ms */ 194#define PCIE_WAKE_TIMEOUT 5000 /* 5ms */
193 195