author    Rajkumar Manoharan <rmanohar@qti.qualcomm.com>  2015-10-23 08:31:05 -0400
committer Kalle Valo <kvalo@qca.qualcomm.com>  2015-10-29 06:58:19 -0400
commit    765952e40dbadd4a74a44f55ac1344c338571bb9 (patch)
tree      774e4b6a163927a9598276bf22f819fbbd085302 /drivers/net/wireless/ath
parent    1e8f86d9cbe9431dcda36fdd85a9f342d639dca5 (diff)
ath10k: cleanup copy engine send completion
The physical address necessary to unmap DMA ('bufferp') is stored in
ath10k_skb_cb as 'paddr'. ath10k doesn't rely on the meta/transfer_id when
handling send completion (the htc ep id is stored in the sk_buff control
buffer). So the unused output arguments {bufferp, nbytesp and transfer_idp}
are removed from the CE send completion path. This change is needed before
removing the shadow copy of copy engine (CE) descriptors in a follow-up patch.

Signed-off-by: Rajkumar Manoharan <rmanohar@qti.qualcomm.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
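For context, a minimal sketch (not part of the patch; example_htc_tx_cb is a
hypothetical handler) of what a completion handler looks like once the extra
output arguments are gone: it pops only the per-transfer context, and the DMA
address needed for unmapping is taken from 'paddr' in ath10k_skb_cb rather
than from the removed 'bufferp':

/*
 * Illustrative sketch only: a hypothetical send-completion handler
 * using the reduced two-argument ath10k_ce_completed_send_next().
 * The DMA address comes from the skb control buffer ('paddr' in
 * ath10k_skb_cb) instead of the removed 'bufferp' output argument.
 */
static void example_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff *skb;

	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		if (!skb)
			continue;

		/* unmap using the address cached at send time */
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len, DMA_TO_DEVICE);
	}
}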
Diffstat (limited to 'drivers/net/wireless/ath')
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c  | 24
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.h  | 10
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c | 28
3 files changed, 12 insertions(+), 50 deletions(-)
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index f63376b3e258..52021a9d28d7 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -578,17 +578,13 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
  * The caller takes responsibility for any necessary locking.
  */
 int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
-					  void **per_transfer_contextp,
-					  u32 *bufferp,
-					  unsigned int *nbytesp,
-					  unsigned int *transfer_idp)
+					  void **per_transfer_contextp)
 {
 	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
 	u32 ctrl_addr = ce_state->ctrl_addr;
 	struct ath10k *ar = ce_state->ar;
 	unsigned int nentries_mask = src_ring->nentries_mask;
 	unsigned int sw_index = src_ring->sw_index;
-	struct ce_desc *sdesc, *sbase;
 	unsigned int read_index;
 
 	if (src_ring->hw_index == sw_index) {
@@ -613,15 +609,6 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
 	if (read_index == sw_index)
 		return -EIO;
 
-	sbase = src_ring->base_addr_owner_space;
-	sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
-
-	/* Return data from completed source descriptor */
-	*bufferp = __le32_to_cpu(sdesc->addr);
-	*nbytesp = __le16_to_cpu(sdesc->nbytes);
-	*transfer_idp = MS(__le16_to_cpu(sdesc->flags),
-			   CE_DESC_FLAGS_META_DATA);
-
 	if (per_transfer_contextp)
 		*per_transfer_contextp =
 			src_ring->per_transfer_context[sw_index];
@@ -696,10 +683,7 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
 }
 
 int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
-				  void **per_transfer_contextp,
-				  u32 *bufferp,
-				  unsigned int *nbytesp,
-				  unsigned int *transfer_idp)
+				  void **per_transfer_contextp)
 {
 	struct ath10k *ar = ce_state->ar;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -707,9 +691,7 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
 
 	spin_lock_bh(&ar_pci->ce_lock);
 	ret = ath10k_ce_completed_send_next_nolock(ce_state,
-						   per_transfer_contextp,
-						   bufferp, nbytesp,
-						   transfer_idp);
+						   per_transfer_contextp);
 	spin_unlock_bh(&ar_pci->ce_lock);
 
 	return ret;
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index dbb94fdb274b..67f90ec5de53 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -192,16 +192,10 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
  * Pops 1 completed send buffer from Source ring.
  */
 int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
-				  void **per_transfer_contextp,
-				  u32 *bufferp,
-				  unsigned int *nbytesp,
-				  unsigned int *transfer_idp);
+				  void **per_transfer_contextp);
 
 int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
-					  void **per_transfer_contextp,
-					  u32 *bufferp,
-					  unsigned int *nbytesp,
-					  unsigned int *transfer_idp);
+					  void **per_transfer_contextp);
 
 /*==================CE Engine Initialization=======================*/
 
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 6f3c3e0ad907..81000257bc1d 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -910,9 +910,8 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
 			goto done;
 
 		i = 0;
-		while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
-							    &completed_nbytes,
-							    &id) != 0) {
+		while (ath10k_ce_completed_send_next_nolock(ce_diag,
+							    NULL) != 0) {
 			mdelay(1);
 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
 				ret = -EBUSY;
@@ -1073,9 +1072,8 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
 			goto done;
 
 		i = 0;
-		while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
-							    &completed_nbytes,
-							    &id) != 0) {
+		while (ath10k_ce_completed_send_next_nolock(ce_diag,
+							    NULL) != 0) {
 			mdelay(1);
 
 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -1139,13 +1137,9 @@ static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
 	struct ath10k *ar = ce_state->ar;
 	struct sk_buff_head list;
 	struct sk_buff *skb;
-	u32 ce_data;
-	unsigned int nbytes;
-	unsigned int transfer_id;
 
 	__skb_queue_head_init(&list);
-	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
-					     &nbytes, &transfer_id) == 0) {
+	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
 		/* no need to call tx completion for NULL pointers */
 		if (skb == NULL)
 			continue;
@@ -1215,12 +1209,8 @@ static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
 {
 	struct ath10k *ar = ce_state->ar;
 	struct sk_buff *skb;
-	u32 ce_data;
-	unsigned int nbytes;
-	unsigned int transfer_id;
 
-	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
-					     &nbytes, &transfer_id) == 0) {
+	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
 		/* no need to call tx completion for NULL pointers */
 		if (!skb)
 			continue;
@@ -1796,12 +1786,8 @@ err_dma:
 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
 {
 	struct bmi_xfer *xfer;
-	u32 ce_data;
-	unsigned int nbytes;
-	unsigned int transfer_id;
 
-	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
-					  &nbytes, &transfer_id))
+	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
 		return;
 
 	xfer->tx_done = true;