author     Michal Kazior <michal.kazior@tieto.com>    2015-01-24 05:14:51 -0500
committer  Kalle Valo <kvalo@qca.qualcomm.com>        2015-01-27 09:16:59 -0500
commit     89d6d83565e9a18ae77f4542348d8a34c264c9b1 (patch)
tree       1786a6a36372f48289ea7a7f5e6f69da4e7fbf1c /drivers/net/wireless
parent     20de2229c634382777eb3b8fc54a34be9669ff8a (diff)
ath10k: use idr api for msdu_ids
HTT Tx protocol uses arbitrary host-assigned ids to associate with MSDUs when delivering completions. Instead of rolling our own id generation scheme, use the tools provided in the kernel. This should have little to no effect on performance.

Signed-off-by: Michal Kazior <michal.kazior@tieto.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
Diffstat (limited to 'drivers/net/wireless')
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt.h    |  7
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_tx.c | 78
-rw-r--r--  drivers/net/wireless/ath/ath10k/txrx.c   |  9
3 files changed, 35 insertions(+), 59 deletions(-)
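The patch replaces the driver's hand-rolled id scheme (a bitmap of used ids plus a parallel sk_buff pointer array) with the kernel's generic ID allocator from include/linux/idr.h, which hands out small integer ids and maps them back to pointers. Below is a minimal, hedged sketch of that lifecycle, not ath10k code; the my_tx_state structure and function names are hypothetical, and the 0x10000 upper bound simply mirrors the 16-bit msdu_id space used in the patch.

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>

/* Hypothetical per-device state: one id space for in-flight frames. */
struct my_tx_state {
	spinlock_t lock;	/* serializes access to @pending */
	struct idr pending;	/* id -> struct sk_buff * */
};

static void my_tx_init(struct my_tx_state *st)
{
	spin_lock_init(&st->lock);
	idr_init(&st->pending);
}

/* Store @skb and get an id for it; returns the id or a negative errno.
 * GFP_ATOMIC because the allocation happens under a spinlock.
 */
static int my_tx_alloc_id(struct my_tx_state *st, struct sk_buff *skb)
{
	int id;

	spin_lock_bh(&st->lock);
	id = idr_alloc(&st->pending, skb, 0, 0x10000, GFP_ATOMIC);
	spin_unlock_bh(&st->lock);

	return id;
}

/* idr_for_each() callback: called once for every still-pending entry. */
static int my_tx_clean_one(int id, void *ptr, void *ctx)
{
	kfree_skb(ptr);		/* whatever per-entry teardown applies */
	return 0;		/* non-zero would abort the walk */
}

static void my_tx_teardown(struct my_tx_state *st)
{
	idr_for_each(&st->pending, my_tx_clean_one, NULL);
	idr_destroy(&st->pending);	/* frees the idr's internal nodes */
}

Freeing a single completed id is one idr_remove() call, which is what the reworked ath10k_htt_tx_free_msdu_id() in the diff below boils down to.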
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index d1f6eb287a10..874bf44ff7a2 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1328,12 +1328,11 @@ struct ath10k_htt {
 
 	unsigned int prefetch_len;
 
-	/* Protects access to %pending_tx, %used_msdu_ids */
+	/* Protects access to pending_tx, num_pending_tx */
 	spinlock_t tx_lock;
 	int max_num_pending_tx;
 	int num_pending_tx;
-	struct sk_buff **pending_tx;
-	unsigned long *used_msdu_ids; /* bitmap */
+	struct idr pending_tx;
 	wait_queue_head_t empty_tx_wq;
 	struct dma_pool *tx_pool;
 
@@ -1424,7 +1423,7 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
 				u8 max_subfrms_amsdu);
 
 void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
-int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt);
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
 void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
 int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
 int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 5c64139161fc..2a8667e95c46 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -56,21 +56,18 @@ exit:
 	return ret;
 }
 
-int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
 {
 	struct ath10k *ar = htt->ar;
-	int msdu_id;
+	int ret;
 
 	lockdep_assert_held(&htt->tx_lock);
 
-	msdu_id = find_first_zero_bit(htt->used_msdu_ids,
-				      htt->max_num_pending_tx);
-	if (msdu_id == htt->max_num_pending_tx)
-		return -ENOBUFS;
+	ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
 
-	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
-	__set_bit(msdu_id, htt->used_msdu_ids);
-	return msdu_id;
+	return ret;
 }
 
 void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
@@ -79,74 +76,53 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
 
 	lockdep_assert_held(&htt->tx_lock);
 
-	if (!test_bit(msdu_id, htt->used_msdu_ids))
-		ath10k_warn(ar, "trying to free unallocated msdu_id %d\n",
-			    msdu_id);
-
 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
-	__clear_bit(msdu_id, htt->used_msdu_ids);
+
+	idr_remove(&htt->pending_tx, msdu_id);
 }
 
 int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
 
-	spin_lock_init(&htt->tx_lock);
-
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
 		   htt->max_num_pending_tx);
 
-	htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
-				  htt->max_num_pending_tx, GFP_KERNEL);
-	if (!htt->pending_tx)
-		return -ENOMEM;
-
-	htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
-				     BITS_TO_LONGS(htt->max_num_pending_tx),
-				     GFP_KERNEL);
-	if (!htt->used_msdu_ids) {
-		kfree(htt->pending_tx);
-		return -ENOMEM;
-	}
+	spin_lock_init(&htt->tx_lock);
+	idr_init(&htt->pending_tx);
 
 	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
 				       sizeof(struct ath10k_htt_txbuf), 4, 0);
 	if (!htt->tx_pool) {
-		kfree(htt->used_msdu_ids);
-		kfree(htt->pending_tx);
+		idr_destroy(&htt->pending_tx);
 		return -ENOMEM;
 	}
 
 	return 0;
 }
 
-static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt)
+static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
 {
-	struct ath10k *ar = htt->ar;
+	struct ath10k *ar = ctx;
+	struct ath10k_htt *htt = &ar->htt;
 	struct htt_tx_done tx_done = {0};
-	int msdu_id;
-
-	spin_lock_bh(&htt->tx_lock);
-	for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
-		if (!test_bit(msdu_id, htt->used_msdu_ids))
-			continue;
 
-		ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
-			   msdu_id);
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
 
-		tx_done.discard = 1;
-		tx_done.msdu_id = msdu_id;
+	tx_done.discard = 1;
+	tx_done.msdu_id = msdu_id;
 
-		ath10k_txrx_tx_unref(htt, &tx_done);
-	}
+	spin_lock_bh(&htt->tx_lock);
+	ath10k_txrx_tx_unref(htt, &tx_done);
 	spin_unlock_bh(&htt->tx_lock);
+
+	return 0;
 }
 
 void ath10k_htt_tx_free(struct ath10k_htt *htt)
 {
-	ath10k_htt_tx_free_pending(htt);
-	kfree(htt->pending_tx);
-	kfree(htt->used_msdu_ids);
+	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
+	idr_destroy(&htt->pending_tx);
 	dma_pool_destroy(htt->tx_pool);
 }
 
@@ -378,13 +354,12 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 	len += sizeof(cmd->mgmt_tx);
 
 	spin_lock_bh(&htt->tx_lock);
-	res = ath10k_htt_tx_alloc_msdu_id(htt);
+	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
 	if (res < 0) {
 		spin_unlock_bh(&htt->tx_lock);
 		goto err_tx_dec;
 	}
 	msdu_id = res;
-	htt->pending_tx[msdu_id] = msdu;
 	spin_unlock_bh(&htt->tx_lock);
 
 	txdesc = ath10k_htc_alloc_skb(ar, len);
@@ -423,7 +398,6 @@ err_free_txdesc:
 	dev_kfree_skb_any(txdesc);
 err_free_msdu_id:
 	spin_lock_bh(&htt->tx_lock);
-	htt->pending_tx[msdu_id] = NULL;
 	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
 	spin_unlock_bh(&htt->tx_lock);
 err_tx_dec:
@@ -455,13 +429,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 		goto err;
 
 	spin_lock_bh(&htt->tx_lock);
-	res = ath10k_htt_tx_alloc_msdu_id(htt);
+	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
 	if (res < 0) {
 		spin_unlock_bh(&htt->tx_lock);
 		goto err_tx_dec;
 	}
 	msdu_id = res;
-	htt->pending_tx[msdu_id] = msdu;
 	spin_unlock_bh(&htt->tx_lock);
 
 	prefetch_len = min(htt->prefetch_len, msdu->len);
@@ -595,7 +568,6 @@ err_free_txbuf:
 		      skb_cb->htt.txbuf_paddr);
 err_free_msdu_id:
 	spin_lock_bh(&htt->tx_lock);
-	htt->pending_tx[msdu_id] = NULL;
 	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
 	spin_unlock_bh(&htt->tx_lock);
 err_tx_dec:
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 7579de8e7a8c..3f00cec8aef5 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -64,7 +64,13 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
 		return;
 	}
 
-	msdu = htt->pending_tx[tx_done->msdu_id];
+	msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
+	if (!msdu) {
+		ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
+			    tx_done->msdu_id);
+		return;
+	}
+
 	skb_cb = ATH10K_SKB_CB(msdu);
 
 	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
@@ -95,7 +101,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
 	/* we do not own the msdu anymore */
 
 exit:
-	htt->pending_tx[tx_done->msdu_id] = NULL;
 	ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
 	__ath10k_htt_tx_dec_pending(htt);
 	if (htt->num_pending_tx == 0)
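On the completion side, idr_find() recovers the pointer stored by idr_alloc(), which is what lets ath10k_txrx_tx_unref() warn about an unknown msdu_id instead of blindly indexing an array, and ath10k_htt_tx_free_msdu_id() then releases the id via idr_remove(). A self-contained, hedged sketch of that lookup-and-release pattern; pending_tx_complete() and its parameters are hypothetical, not the driver's helpers:

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/printk.h>

/* Map a firmware-reported id back to the skb stored at allocation time
 * and release the id.  @lock is the same lock that serialized idr_alloc()
 * on the transmit side.
 */
static struct sk_buff *pending_tx_complete(struct idr *pending,
					   spinlock_t *lock, int id)
{
	struct sk_buff *skb;

	spin_lock_bh(lock);
	skb = idr_find(pending, id);
	if (!skb) {
		/* never allocated, or already completed */
		pr_warn("tx completion for unknown id %d\n", id);
		spin_unlock_bh(lock);
		return NULL;
	}
	idr_remove(pending, id);	/* id becomes reusable immediately */
	spin_unlock_bh(lock);

	return skb;	/* caller unmaps DMA and frees the skb */
}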