Diffstat (limited to 'drivers/net/wireless/ath/ath10k/htt_tx.c')
-rw-r--r--	drivers/net/wireless/ath/ath10k/htt_tx.c	99
1 file changed, 38 insertions, 61 deletions
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 4bc51d8a14a3..cbd2bc9e6202 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -56,21 +56,18 @@ exit:
 	return ret;
 }
 
-int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
 {
 	struct ath10k *ar = htt->ar;
-	int msdu_id;
+	int ret;
 
 	lockdep_assert_held(&htt->tx_lock);
 
-	msdu_id = find_first_zero_bit(htt->used_msdu_ids,
-				      htt->max_num_pending_tx);
-	if (msdu_id == htt->max_num_pending_tx)
-		return -ENOBUFS;
+	ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);
 
-	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
-	__set_bit(msdu_id, htt->used_msdu_ids);
-	return msdu_id;
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
+
+	return ret;
 }
 
 void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
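This hunk swaps the driver's hand-rolled bitmap allocator (find_first_zero_bit() plus __set_bit()) for the kernel's IDR, which hands out an integer ID and records the skb pointer against it in a single idr_alloc() call. A minimal standalone sketch of that pattern, with made-up names (my_ctx, my_id_alloc) rather than ath10k code:

	#include <linux/idr.h>
	#include <linux/spinlock.h>

	struct my_ctx {
		struct idr pending;	/* id -> object map; replaces bitmap + pointer array */
		spinlock_t lock;
	};

	static int my_id_alloc(struct my_ctx *c, void *obj)
	{
		int id;

		spin_lock_bh(&c->lock);
		/* any id in [0, 0x10000); returns the new id, or a negative
		 * errno (-ENOSPC/-ENOMEM) that callers can test with < 0 */
		id = idr_alloc(&c->pending, obj, 0, 0x10000, GFP_ATOMIC);
		spin_unlock_bh(&c->lock);

		return id;
	}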
@@ -79,79 +76,53 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
 
 	lockdep_assert_held(&htt->tx_lock);
 
-	if (!test_bit(msdu_id, htt->used_msdu_ids))
-		ath10k_warn(ar, "trying to free unallocated msdu_id %d\n",
-			    msdu_id);
-
 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
-	__clear_bit(msdu_id, htt->used_msdu_ids);
+
+	idr_remove(&htt->pending_tx, msdu_id);
 }
 
 int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
 
-	spin_lock_init(&htt->tx_lock);
-
-	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features))
-		htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
-	else
-		htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC;
-
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
 		   htt->max_num_pending_tx);
 
-	htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
-				  htt->max_num_pending_tx, GFP_KERNEL);
-	if (!htt->pending_tx)
-		return -ENOMEM;
-
-	htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
-				     BITS_TO_LONGS(htt->max_num_pending_tx),
-				     GFP_KERNEL);
-	if (!htt->used_msdu_ids) {
-		kfree(htt->pending_tx);
-		return -ENOMEM;
-	}
+	spin_lock_init(&htt->tx_lock);
+	idr_init(&htt->pending_tx);
 
 	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
 				       sizeof(struct ath10k_htt_txbuf), 4, 0);
 	if (!htt->tx_pool) {
-		kfree(htt->used_msdu_ids);
-		kfree(htt->pending_tx);
+		idr_destroy(&htt->pending_tx);
 		return -ENOMEM;
 	}
 
 	return 0;
 }
 
-static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt)
+static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
 {
-	struct ath10k *ar = htt->ar;
+	struct ath10k *ar = ctx;
+	struct ath10k_htt *htt = &ar->htt;
 	struct htt_tx_done tx_done = {0};
-	int msdu_id;
-
-	spin_lock_bh(&htt->tx_lock);
-	for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
-		if (!test_bit(msdu_id, htt->used_msdu_ids))
-			continue;
 
-		ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
-			   msdu_id);
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
 
-		tx_done.discard = 1;
-		tx_done.msdu_id = msdu_id;
+	tx_done.discard = 1;
+	tx_done.msdu_id = msdu_id;
 
-		ath10k_txrx_tx_unref(htt, &tx_done);
-	}
-	spin_unlock_bh(&htt->tx_lock);
+	spin_lock_bh(&htt->tx_lock);
+	ath10k_txrx_tx_unref(htt, &tx_done);
+	spin_unlock_bh(&htt->tx_lock);
+
+	return 0;
 }
 
 void ath10k_htt_tx_free(struct ath10k_htt *htt)
 {
-	ath10k_htt_tx_free_pending(htt);
-	kfree(htt->pending_tx);
-	kfree(htt->used_msdu_ids);
+	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
+	idr_destroy(&htt->pending_tx);
 	dma_pool_destroy(htt->tx_pool);
 }
 
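At teardown, idr_for_each() above visits every msdu_id still pending and completes it as discarded before idr_destroy() releases the IDR's internal nodes; idr_destroy() does not free the stored objects themselves, which is why the walk comes first. A hedged sketch of that walk-then-destroy pattern, again with hypothetical names:

	/* callback shape required by idr_for_each(): the id, the stored
	 * pointer, and the opaque data argument passed to the walk */
	static int my_clean_one(int id, void *obj, void *data)
	{
		struct my_ctx *c = data;

		/* release whatever state "obj" pins here ... */

		return 0;	/* non-zero would abort the iteration */
	}

	static void my_ctx_free(struct my_ctx *c)
	{
		idr_for_each(&c->pending, my_clean_one, c);
		idr_destroy(&c->pending);	/* frees only the IDR bookkeeping */
	}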
@@ -383,13 +354,12 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 	len += sizeof(cmd->mgmt_tx);
 
 	spin_lock_bh(&htt->tx_lock);
-	res = ath10k_htt_tx_alloc_msdu_id(htt);
+	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
 	if (res < 0) {
 		spin_unlock_bh(&htt->tx_lock);
 		goto err_tx_dec;
 	}
 	msdu_id = res;
-	htt->pending_tx[msdu_id] = msdu;
 	spin_unlock_bh(&htt->tx_lock);
 
 	txdesc = ath10k_htc_alloc_skb(ar, len);
@@ -428,7 +398,6 @@ err_free_txdesc:
 	dev_kfree_skb_any(txdesc);
 err_free_msdu_id:
 	spin_lock_bh(&htt->tx_lock);
-	htt->pending_tx[msdu_id] = NULL;
 	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
 	spin_unlock_bh(&htt->tx_lock);
 err_tx_dec:
@@ -460,13 +429,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 		goto err;
 
 	spin_lock_bh(&htt->tx_lock);
-	res = ath10k_htt_tx_alloc_msdu_id(htt);
+	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
 	if (res < 0) {
 		spin_unlock_bh(&htt->tx_lock);
 		goto err_tx_dec;
 	}
 	msdu_id = res;
-	htt->pending_tx[msdu_id] = msdu;
 	spin_unlock_bh(&htt->tx_lock);
 
 	prefetch_len = min(htt->prefetch_len, msdu->len);
@@ -480,10 +448,18 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 
 	skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
 					   &paddr);
-	if (!skb_cb->htt.txbuf)
+	if (!skb_cb->htt.txbuf) {
+		res = -ENOMEM;
 		goto err_free_msdu_id;
+	}
 	skb_cb->htt.txbuf_paddr = paddr;
 
+	if ((ieee80211_is_action(hdr->frame_control) ||
+	     ieee80211_is_deauth(hdr->frame_control) ||
+	     ieee80211_is_disassoc(hdr->frame_control)) &&
+	    ieee80211_has_protected(hdr->frame_control))
+		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+
 	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
 				       DMA_TO_DEVICE);
 	res = dma_mapping_error(dev, skb_cb->paddr);
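The added frame_control test grows the skb by IEEE80211_CCMP_MIC_LEN (8 bytes) before dma_map_single(), presumably so the mapped buffer already covers the MIC carried by protected robust management frames (action, deauth, disassoc). The classification reads as a small predicate; a sketch using the real helpers from <linux/ieee80211.h> (needs_mic_room is an illustrative name):

	#include <linux/ieee80211.h>

	static bool needs_mic_room(const struct ieee80211_hdr *hdr)
	{
		__le16 fc = hdr->frame_control;

		/* protected robust mgmt frames carry a trailing CCMP MIC,
		 * so reserve IEEE80211_CCMP_MIC_LEN bytes of tailroom */
		return (ieee80211_is_action(fc) ||
			ieee80211_is_deauth(fc) ||
			ieee80211_is_disassoc(fc)) &&
		       ieee80211_has_protected(fc);
	}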
@@ -539,8 +515,10 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 
 	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
 	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
-	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
-	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+	if (msdu->ip_summed == CHECKSUM_PARTIAL) {
+		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+	}
 
 	/* Prevent firmware from sending up tx inspection requests. There's
 	 * nothing ath10k can do with frames requested for inspection so force
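Gating the offload bits on CHECKSUM_PARTIAL means the target is only asked to compute L3/L4 checksums when the network stack actually deferred them; frames whose checksums are already filled in are left alone instead of being unconditionally flagged as before. The same gate as a helper, for illustration (my_tx_cksum_flags is a made-up name; the HTT_* flags are the driver's own):

	#include <linux/skbuff.h>

	static u16 my_tx_cksum_flags(const struct sk_buff *skb, u16 flags1)
	{
		/* request hw checksumming only when the stack deferred it */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
			flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		}
		return flags1;
	}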
@@ -598,7 +576,6 @@ err_free_txbuf:
 		   skb_cb->htt.txbuf_paddr);
 err_free_msdu_id:
 	spin_lock_bh(&htt->tx_lock);
-	htt->pending_tx[msdu_id] = NULL;
 	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
 	spin_unlock_bh(&htt->tx_lock);
 err_tx_dec: