Diffstat (limited to 'drivers/net/wireless/ath/ath10k/htt_tx.c')
 drivers/net/wireless/ath/ath10k/htt_tx.c | 94 ++++++++++++++++------------------------
 1 file changed, 38 insertions(+), 56 deletions(-)
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index a1bda41fb543..cbd2bc9e6202 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -56,21 +56,18 @@ exit:
 	return ret;
 }
 
-int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
 {
 	struct ath10k *ar = htt->ar;
-	int msdu_id;
+	int ret;
 
 	lockdep_assert_held(&htt->tx_lock);
 
-	msdu_id = find_first_zero_bit(htt->used_msdu_ids,
-				      htt->max_num_pending_tx);
-	if (msdu_id == htt->max_num_pending_tx)
-		return -ENOBUFS;
+	ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
 
-	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
-	__set_bit(msdu_id, htt->used_msdu_ids);
-	return msdu_id;
+	return ret;
 }
 
 void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
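
Note: idr_alloc() allocates the lowest free ID in [start, end) and binds the caller's pointer to it in one step; it returns the new ID on success and a negative errno on failure (-ENOMEM, or -ENOSPC once the range is exhausted), which is why the rewritten function can return ret without translating error codes, and why the old -ENOBUFS bookkeeping disappears. A minimal sketch of the pattern, where everything named demo_* is illustrative rather than driver code:

#include <linux/idr.h>
#include <linux/skbuff.h>

static DEFINE_IDR(demo_pending);	/* statically initialised, empty IDR */

static int demo_track_skb(struct sk_buff *skb)
{
	/* Allocate the lowest free ID in [0, 0x10000) and map it to skb.
	 * GFP_ATOMIC because the real caller holds htt->tx_lock, a spinlock.
	 * A negative return (-ENOMEM or -ENOSPC) can be handed back
	 * unchecked, exactly as ath10k_htt_tx_alloc_msdu_id() now does.
	 */
	return idr_alloc(&demo_pending, skb, 0, 0x10000, GFP_ATOMIC);
}

The 0x10000 upper bound keeps IDs within the u16 range that ath10k_htt_tx_free_msdu_id() and the HTT descriptors use for msdu_id.
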
@@ -79,74 +76,53 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
 
 	lockdep_assert_held(&htt->tx_lock);
 
-	if (!test_bit(msdu_id, htt->used_msdu_ids))
-		ath10k_warn(ar, "trying to free unallocated msdu_id %d\n",
-			    msdu_id);
-
 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
-	__clear_bit(msdu_id, htt->used_msdu_ids);
+
+	idr_remove(&htt->pending_tx, msdu_id);
 }
 
 int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
 
-	spin_lock_init(&htt->tx_lock);
-
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
 		   htt->max_num_pending_tx);
 
-	htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
-				  htt->max_num_pending_tx, GFP_KERNEL);
-	if (!htt->pending_tx)
-		return -ENOMEM;
-
-	htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
-				     BITS_TO_LONGS(htt->max_num_pending_tx),
-				     GFP_KERNEL);
-	if (!htt->used_msdu_ids) {
-		kfree(htt->pending_tx);
-		return -ENOMEM;
-	}
+	spin_lock_init(&htt->tx_lock);
+	idr_init(&htt->pending_tx);
 
 	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
 				       sizeof(struct ath10k_htt_txbuf), 4, 0);
 	if (!htt->tx_pool) {
-		kfree(htt->used_msdu_ids);
-		kfree(htt->pending_tx);
+		idr_destroy(&htt->pending_tx);
 		return -ENOMEM;
 	}
 
 	return 0;
 }
 
-static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt)
+static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
 {
-	struct ath10k *ar = htt->ar;
+	struct ath10k *ar = ctx;
+	struct ath10k_htt *htt = &ar->htt;
 	struct htt_tx_done tx_done = {0};
-	int msdu_id;
-
-	spin_lock_bh(&htt->tx_lock);
-	for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
-		if (!test_bit(msdu_id, htt->used_msdu_ids))
-			continue;
 
-		ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
-			   msdu_id);
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
 
-		tx_done.discard = 1;
-		tx_done.msdu_id = msdu_id;
+	tx_done.discard = 1;
+	tx_done.msdu_id = msdu_id;
 
-		ath10k_txrx_tx_unref(htt, &tx_done);
-	}
+	spin_lock_bh(&htt->tx_lock);
+	ath10k_txrx_tx_unref(htt, &tx_done);
 	spin_unlock_bh(&htt->tx_lock);
+
+	return 0;
 }
 
 void ath10k_htt_tx_free(struct ath10k_htt *htt)
 {
-	ath10k_htt_tx_free_pending(htt);
-	kfree(htt->pending_tx);
-	kfree(htt->used_msdu_ids);
+	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
+	idr_destroy(&htt->pending_tx);
 	dma_pool_destroy(htt->tx_pool);
 }
 
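
Note: idr_for_each() invokes the supplied function as fn(id, ptr, data) for every allocated ID and aborts the walk if it returns non-zero, which is what the new ath10k_htt_tx_clean_up_pending() signature matches. Removal of each entry happens indirectly: ath10k_txrx_tx_unref() is expected to end up in ath10k_htt_tx_free_msdu_id() and thus idr_remove(), and idr_destroy() afterwards releases the IDR's internal memory. A hedged standalone sketch of that teardown shape, with demo_* names standing in for the driver's:

#include <linux/idr.h>
#include <linux/netdevice.h>

/* Matches the int (*fn)(int id, void *p, void *data) callback type
 * that idr_for_each() expects.
 */
static int demo_drain_one(int id, void *ptr, void *data)
{
	struct idr *pending = data;

	dev_kfree_skb_any(ptr);		/* release the tracked skb */
	idr_remove(pending, id);	/* drop the ID mapping itself */
	return 0;			/* non-zero would abort the walk */
}

static void demo_teardown(struct idr *pending)
{
	idr_for_each(pending, demo_drain_one, pending);
	idr_destroy(pending);		/* frees internal IDR memory */
}
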
@@ -378,13 +354,12 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 	len += sizeof(cmd->mgmt_tx);
 
 	spin_lock_bh(&htt->tx_lock);
-	res = ath10k_htt_tx_alloc_msdu_id(htt);
+	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
 	if (res < 0) {
 		spin_unlock_bh(&htt->tx_lock);
 		goto err_tx_dec;
 	}
 	msdu_id = res;
-	htt->pending_tx[msdu_id] = msdu;
 	spin_unlock_bh(&htt->tx_lock);
 
 	txdesc = ath10k_htc_alloc_skb(ar, len);
@@ -423,7 +398,6 @@ err_free_txdesc:
 	dev_kfree_skb_any(txdesc);
 err_free_msdu_id:
 	spin_lock_bh(&htt->tx_lock);
-	htt->pending_tx[msdu_id] = NULL;
 	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
 	spin_unlock_bh(&htt->tx_lock);
 err_tx_dec:
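
Note: since ath10k_htt_tx_alloc_msdu_id() now stores the skb at the moment it allocates the ID, the old htt->pending_tx[msdu_id] = msdu assignment and its NULL-ing on this error path simply disappear rather than being replaced. The completion path presumably resolves a firmware-reported msdu_id back to its skb with idr_find(); that code is outside this file, so the following one-liner is an assumption about the shape, not the driver's implementation:

#include <linux/idr.h>
#include <linux/skbuff.h>

/* Map a firmware-reported msdu_id back to its sk_buff.  Returns NULL
 * when the ID was never allocated or has already been removed.
 */
static struct sk_buff *demo_find_pending(struct idr *pending, int msdu_id)
{
	return idr_find(pending, msdu_id);
}
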
@@ -455,13 +429,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 		goto err;
 
 	spin_lock_bh(&htt->tx_lock);
-	res = ath10k_htt_tx_alloc_msdu_id(htt);
+	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
 	if (res < 0) {
 		spin_unlock_bh(&htt->tx_lock);
 		goto err_tx_dec;
 	}
 	msdu_id = res;
-	htt->pending_tx[msdu_id] = msdu;
 	spin_unlock_bh(&htt->tx_lock);
 
 	prefetch_len = min(htt->prefetch_len, msdu->len);
@@ -475,10 +448,18 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 
 	skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
 					   &paddr);
-	if (!skb_cb->htt.txbuf)
+	if (!skb_cb->htt.txbuf) {
+		res = -ENOMEM;
 		goto err_free_msdu_id;
+	}
 	skb_cb->htt.txbuf_paddr = paddr;
 
+	if ((ieee80211_is_action(hdr->frame_control) ||
+	     ieee80211_is_deauth(hdr->frame_control) ||
+	     ieee80211_is_disassoc(hdr->frame_control)) &&
+	    ieee80211_has_protected(hdr->frame_control))
+		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+
 	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
 				       DMA_TO_DEVICE);
 	res = dma_mapping_error(dev, skb_cb->paddr);
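
Note: protected robust management frames (action, deauth, disassoc) have an 8-byte CCMP MIC appended during encryption, so the skb is grown by IEEE80211_CCMP_MIC_LEN before dma_map_single() runs, ensuring the mapping covers the bytes the firmware will write at the tail. The predicate in isolation, with only the demo_* wrapper name added:

#include <linux/ieee80211.h>

static bool demo_needs_mic_tailroom(const struct ieee80211_hdr *hdr)
{
	__le16 fc = hdr->frame_control;

	/* Robust management frames that are protected carry a CCMP MIC
	 * (IEEE80211_CCMP_MIC_LEN == 8 bytes) at the tail.
	 */
	return (ieee80211_is_action(fc) || ieee80211_is_deauth(fc) ||
		ieee80211_is_disassoc(fc)) &&
	       ieee80211_has_protected(fc);
}
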
@@ -534,8 +515,10 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 
 	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
 	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
-	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
-	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+	if (msdu->ip_summed == CHECKSUM_PARTIAL) {
+		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+	}
 
 	/* Prevent firmware from sending up tx inspection requests. There's
 	 * nothing ath10k can do with frames requested for inspection so force
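
Note: ip_summed == CHECKSUM_PARTIAL means the stack deferred the L3/L4 checksum to the device; any other value means the frame already carries its final checksum bytes and must go out untouched. Unconditionally requesting offload, as the removed lines did, would have the firmware recompute checksums even on such frames. A self-contained sketch of the gating, with DEMO_* bits standing in for the real HTT flag values:

#include <linux/bitops.h>
#include <linux/skbuff.h>

#define DEMO_CKSUM_L3_OFFLOAD	BIT(0)	/* stand-in flag values */
#define DEMO_CKSUM_L4_OFFLOAD	BIT(1)

static u16 demo_tx_csum_flags(const struct sk_buff *skb, u16 flags1)
{
	/* Offload only when the stack deferred checksumming to us. */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		flags1 |= DEMO_CKSUM_L3_OFFLOAD | DEMO_CKSUM_L4_OFFLOAD;

	return flags1;
}
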
@@ -593,7 +576,6 @@ err_free_txbuf:
 					  skb_cb->htt.txbuf_paddr);
 err_free_msdu_id:
 	spin_lock_bh(&htt->tx_lock);
-	htt->pending_tx[msdu_id] = NULL;
 	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
 	spin_unlock_bh(&htt->tx_lock);
 err_tx_dec: