Diffstat (limited to 'drivers/net/wireless/ath/ath10k/pci.c')
-rw-r--r--	drivers/net/wireless/ath/ath10k/pci.c	536
1 file changed, 215 insertions(+), 321 deletions(-)
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 29fd197d1fd8..9d242d801d9d 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -58,13 +58,12 @@ static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
 				       u32 *data);
 
-static void ath10k_pci_process_ce(struct ath10k *ar);
 static int ath10k_pci_post_rx(struct ath10k *ar);
 static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
 				   int num);
 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
-static void ath10k_pci_stop_ce(struct ath10k *ar);
-static int ath10k_pci_device_reset(struct ath10k *ar);
+static int ath10k_pci_cold_reset(struct ath10k *ar);
+static int ath10k_pci_warm_reset(struct ath10k *ar);
 static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
 static int ath10k_pci_init_irq(struct ath10k *ar);
 static int ath10k_pci_deinit_irq(struct ath10k *ar);
@@ -73,7 +72,6 @@ static void ath10k_pci_free_irq(struct ath10k *ar);
 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
 			       struct ath10k_ce_pipe *rx_pipe,
 			       struct bmi_xfer *xfer);
-static void ath10k_pci_cleanup_ce(struct ath10k *ar);
 
 static const struct ce_attr host_ce_config_wlan[] = {
 	/* CE0: host->target HTC control and raw streams */
@@ -678,34 +676,12 @@ void ath10k_do_pci_sleep(struct ath10k *ar)
 	}
 }
 
-/*
- * FIXME: Handle OOM properly.
- */
-static inline
-struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
-{
-	struct ath10k_pci_compl *compl = NULL;
-
-	spin_lock_bh(&pipe_info->pipe_lock);
-	if (list_empty(&pipe_info->compl_free)) {
-		ath10k_warn("Completion buffers are full\n");
-		goto exit;
-	}
-	compl = list_first_entry(&pipe_info->compl_free,
-				 struct ath10k_pci_compl, list);
-	list_del(&compl->list);
-exit:
-	spin_unlock_bh(&pipe_info->pipe_lock);
-	return compl;
-}
-
 /* Called by lower (CE) layer when a send to Target completes. */
 static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
 {
 	struct ath10k *ar = ce_state->ar;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
-	struct ath10k_pci_compl *compl;
+	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
 	void *transfer_context;
 	u32 ce_data;
 	unsigned int nbytes;
@@ -714,27 +690,12 @@ static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
 	while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
 					     &ce_data, &nbytes,
 					     &transfer_id) == 0) {
-		compl = get_free_compl(pipe_info);
-		if (!compl)
-			break;
-
-		compl->state = ATH10K_PCI_COMPL_SEND;
-		compl->ce_state = ce_state;
-		compl->pipe_info = pipe_info;
-		compl->skb = transfer_context;
-		compl->nbytes = nbytes;
-		compl->transfer_id = transfer_id;
-		compl->flags = 0;
+		/* no need to call tx completion for NULL pointers */
+		if (transfer_context == NULL)
+			continue;
 
-		/*
-		 * Add the completion to the processing queue.
-		 */
-		spin_lock_bh(&ar_pci->compl_lock);
-		list_add_tail(&compl->list, &ar_pci->compl_process);
-		spin_unlock_bh(&ar_pci->compl_lock);
+		cb->tx_completion(ar, transfer_context, transfer_id);
 	}
-
-	ath10k_pci_process_ce(ar);
 }
 
 /* Called by lower (CE) layer when data is received from the Target. */
@@ -743,77 +704,100 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
 	struct ath10k *ar = ce_state->ar;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
-	struct ath10k_pci_compl *compl;
+	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
 	struct sk_buff *skb;
 	void *transfer_context;
 	u32 ce_data;
-	unsigned int nbytes;
+	unsigned int nbytes, max_nbytes;
 	unsigned int transfer_id;
 	unsigned int flags;
+	int err;
 
 	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
 					     &ce_data, &nbytes, &transfer_id,
 					     &flags) == 0) {
-		compl = get_free_compl(pipe_info);
-		if (!compl)
-			break;
-
-		compl->state = ATH10K_PCI_COMPL_RECV;
-		compl->ce_state = ce_state;
-		compl->pipe_info = pipe_info;
-		compl->skb = transfer_context;
-		compl->nbytes = nbytes;
-		compl->transfer_id = transfer_id;
-		compl->flags = flags;
+		err = ath10k_pci_post_rx_pipe(pipe_info, 1);
+		if (unlikely(err)) {
+			/* FIXME: retry */
+			ath10k_warn("failed to replenish CE rx ring %d: %d\n",
+				    pipe_info->pipe_num, err);
+		}
 
 		skb = transfer_context;
+		max_nbytes = skb->len + skb_tailroom(skb);
 		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
-				 skb->len + skb_tailroom(skb),
-				 DMA_FROM_DEVICE);
-		/*
-		 * Add the completion to the processing queue.
-		 */
-		spin_lock_bh(&ar_pci->compl_lock);
-		list_add_tail(&compl->list, &ar_pci->compl_process);
-		spin_unlock_bh(&ar_pci->compl_lock);
-	}
+				 max_nbytes, DMA_FROM_DEVICE);
 
-	ath10k_pci_process_ce(ar);
+		if (unlikely(max_nbytes < nbytes)) {
+			ath10k_warn("rxed more than expected (nbytes %d, max %d)",
+				    nbytes, max_nbytes);
+			dev_kfree_skb_any(skb);
+			continue;
+		}
+
+		skb_put(skb, nbytes);
+		cb->rx_completion(ar, skb, pipe_info->pipe_num);
+	}
 }
 
-/* Send the first nbytes bytes of the buffer */
-static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
-				    unsigned int transfer_id,
-				    unsigned int bytes, struct sk_buff *nbuf)
+static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+				struct ath10k_hif_sg_item *items, int n_items)
 {
-	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
-	struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
-	unsigned int len;
-	u32 flags = 0;
-	int ret;
+	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
+	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
+	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
+	unsigned int nentries_mask = src_ring->nentries_mask;
+	unsigned int sw_index = src_ring->sw_index;
+	unsigned int write_index = src_ring->write_index;
+	int err, i;
 
-	len = min(bytes, nbuf->len);
-	bytes -= len;
+	spin_lock_bh(&ar_pci->ce_lock);
 
-	if (len & 3)
-		ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);
+	if (unlikely(CE_RING_DELTA(nentries_mask,
+				   write_index, sw_index - 1) < n_items)) {
+		err = -ENOBUFS;
+		goto unlock;
+	}
 
-	ath10k_dbg(ATH10K_DBG_PCI,
-		   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
-		   nbuf->data, (unsigned long long) skb_cb->paddr,
-		   nbuf->len, len);
-	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
-			"ath10k tx: data: ",
-			nbuf->data, nbuf->len);
-
-	ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
-			     flags);
-	if (ret)
-		ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf);
+	for (i = 0; i < n_items - 1; i++) {
+		ath10k_dbg(ATH10K_DBG_PCI,
+			   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
+			   i, items[i].paddr, items[i].len, n_items);
+		ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
+				items[i].vaddr, items[i].len);
 
-	return ret;
+		err = ath10k_ce_send_nolock(ce_pipe,
+					    items[i].transfer_context,
+					    items[i].paddr,
+					    items[i].len,
+					    items[i].transfer_id,
+					    CE_SEND_FLAG_GATHER);
+		if (err)
+			goto unlock;
+	}
+
+	/* `i` is equal to `n_items -1` after for() */
+
+	ath10k_dbg(ATH10K_DBG_PCI,
+		   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
+		   i, items[i].paddr, items[i].len, n_items);
+	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
+			items[i].vaddr, items[i].len);
+
+	err = ath10k_ce_send_nolock(ce_pipe,
+				    items[i].transfer_context,
+				    items[i].paddr,
+				    items[i].len,
+				    items[i].transfer_id,
+				    0);
+	if (err)
+		goto unlock;
+
+	err = 0;
+unlock:
+	spin_unlock_bh(&ar_pci->ce_lock);
+	return err;
 }
 
 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
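The new tx_sg() entry point replaces send_head()'s one-buffer-at-a-time model: ring space for the whole batch is reserved up front under ce_lock (returning -ENOBUFS if the CE source ring cannot hold n_items), every item except the last is posted with CE_SEND_FLAG_GATHER, and the final item is posted with flags 0 to terminate the scatter-gather list. As a hypothetical caller sketch only (not part of this patch; the field names follow the items[i].* accesses in the hunk above, while the skb layout and helper name are purely illustrative):

	/* Hypothetical example: post a two-fragment scatter-gather transfer.
	 * Only the last item carries a non-NULL transfer_context, so
	 * ath10k_pci_ce_send_done() fires cb->tx_completion() exactly once
	 * for the whole batch (NULL contexts are skipped, see above).
	 */
	static int example_tx_two_fragments(struct ath10k *ar, u8 pipe_id,
					    struct sk_buff *skb, u32 hdr_len)
	{
		struct ath10k_hif_sg_item items[2];

		items[0].transfer_id = 0;
		items[0].transfer_context = NULL;	/* no per-fragment completion */
		items[0].vaddr = skb->data;		/* used for debug dumps only */
		items[0].paddr = ATH10K_SKB_CB(skb)->paddr;
		items[0].len = hdr_len;

		items[1].transfer_id = 0;
		items[1].transfer_context = skb;	/* completed via cb->tx_completion() */
		items[1].vaddr = skb->data + hdr_len;
		items[1].paddr = ATH10K_SKB_CB(skb)->paddr + hdr_len;
		items[1].len = skb->len - hdr_len;

		return ath10k_pci_hif_tx_sg(ar, pipe_id, items, ARRAY_SIZE(items));
	}
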
@@ -833,9 +817,7 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
 	ath10k_err("firmware crashed!\n");
 	ath10k_err("hardware name %s version 0x%x\n",
 		   ar->hw_params.name, ar->target_version);
-	ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
-		   ar->fw_version_minor, ar->fw_version_release,
-		   ar->fw_version_build);
+	ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);
 
 	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
 	ret = ath10k_pci_diag_read_mem(ar, host_addr,
@@ -904,52 +886,6 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
 	       sizeof(ar_pci->msg_callbacks_current));
 }
 
-static int ath10k_pci_alloc_compl(struct ath10k *ar)
-{
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	const struct ce_attr *attr;
-	struct ath10k_pci_pipe *pipe_info;
-	struct ath10k_pci_compl *compl;
-	int i, pipe_num, completions;
-
-	spin_lock_init(&ar_pci->compl_lock);
-	INIT_LIST_HEAD(&ar_pci->compl_process);
-
-	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
-		pipe_info = &ar_pci->pipe_info[pipe_num];
-
-		spin_lock_init(&pipe_info->pipe_lock);
-		INIT_LIST_HEAD(&pipe_info->compl_free);
-
-		/* Handle Diagnostic CE specially */
-		if (pipe_info->ce_hdl == ar_pci->ce_diag)
-			continue;
-
-		attr = &host_ce_config_wlan[pipe_num];
-		completions = 0;
-
-		if (attr->src_nentries)
-			completions += attr->src_nentries;
-
-		if (attr->dest_nentries)
-			completions += attr->dest_nentries;
-
-		for (i = 0; i < completions; i++) {
-			compl = kmalloc(sizeof(*compl), GFP_KERNEL);
-			if (!compl) {
-				ath10k_warn("No memory for completion state\n");
-				ath10k_pci_cleanup_ce(ar);
-				return -ENOMEM;
-			}
-
-			compl->state = ATH10K_PCI_COMPL_FREE;
-			list_add_tail(&compl->list, &pipe_info->compl_free);
-		}
-	}
-
-	return 0;
-}
-
 static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -994,147 +930,6 @@ static void ath10k_pci_kill_tasklet(struct ath10k *ar)
 		tasklet_kill(&ar_pci->pipe_info[i].intr);
 }
 
-static void ath10k_pci_stop_ce(struct ath10k *ar)
-{
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ath10k_pci_compl *compl;
-	struct sk_buff *skb;
-
-	/* Mark pending completions as aborted, so that upper layers free up
-	 * their associated resources */
-	spin_lock_bh(&ar_pci->compl_lock);
-	list_for_each_entry(compl, &ar_pci->compl_process, list) {
-		skb = compl->skb;
-		ATH10K_SKB_CB(skb)->is_aborted = true;
-	}
-	spin_unlock_bh(&ar_pci->compl_lock);
-}
-
-static void ath10k_pci_cleanup_ce(struct ath10k *ar)
-{
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ath10k_pci_compl *compl, *tmp;
-	struct ath10k_pci_pipe *pipe_info;
-	struct sk_buff *netbuf;
-	int pipe_num;
-
-	/* Free pending completions. */
-	spin_lock_bh(&ar_pci->compl_lock);
-	if (!list_empty(&ar_pci->compl_process))
-		ath10k_warn("pending completions still present! possible memory leaks.\n");
-
-	list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
-		list_del(&compl->list);
-		netbuf = compl->skb;
-		dev_kfree_skb_any(netbuf);
-		kfree(compl);
-	}
-	spin_unlock_bh(&ar_pci->compl_lock);
-
-	/* Free unused completions for each pipe. */
-	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
-		pipe_info = &ar_pci->pipe_info[pipe_num];
-
-		spin_lock_bh(&pipe_info->pipe_lock);
-		list_for_each_entry_safe(compl, tmp,
-					 &pipe_info->compl_free, list) {
-			list_del(&compl->list);
-			kfree(compl);
-		}
-		spin_unlock_bh(&pipe_info->pipe_lock);
-	}
-}
-
-static void ath10k_pci_process_ce(struct ath10k *ar)
-{
-	struct ath10k_pci *ar_pci = ar->hif.priv;
-	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
-	struct ath10k_pci_compl *compl;
-	struct sk_buff *skb;
-	unsigned int nbytes;
-	int ret, send_done = 0;
-
-	/* Upper layers aren't ready to handle tx/rx completions in parallel so
-	 * we must serialize all completion processing. */
-
-	spin_lock_bh(&ar_pci->compl_lock);
-	if (ar_pci->compl_processing) {
-		spin_unlock_bh(&ar_pci->compl_lock);
-		return;
-	}
-	ar_pci->compl_processing = true;
-	spin_unlock_bh(&ar_pci->compl_lock);
-
-	for (;;) {
-		spin_lock_bh(&ar_pci->compl_lock);
-		if (list_empty(&ar_pci->compl_process)) {
-			spin_unlock_bh(&ar_pci->compl_lock);
-			break;
-		}
-		compl = list_first_entry(&ar_pci->compl_process,
-					 struct ath10k_pci_compl, list);
-		list_del(&compl->list);
-		spin_unlock_bh(&ar_pci->compl_lock);
-
-		switch (compl->state) {
-		case ATH10K_PCI_COMPL_SEND:
-			cb->tx_completion(ar,
-					  compl->skb,
-					  compl->transfer_id);
-			send_done = 1;
-			break;
-		case ATH10K_PCI_COMPL_RECV:
-			ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
-			if (ret) {
-				ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
-					    compl->pipe_info->pipe_num, ret);
-				break;
-			}
-
-			skb = compl->skb;
-			nbytes = compl->nbytes;
-
-			ath10k_dbg(ATH10K_DBG_PCI,
-				   "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
-				   skb, nbytes);
-			ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
-					"ath10k rx: ", skb->data, nbytes);
-
-			if (skb->len + skb_tailroom(skb) >= nbytes) {
-				skb_trim(skb, 0);
-				skb_put(skb, nbytes);
-				cb->rx_completion(ar, skb,
-						  compl->pipe_info->pipe_num);
-			} else {
-				ath10k_warn("rxed more than expected (nbytes %d, max %d)",
-					    nbytes,
-					    skb->len + skb_tailroom(skb));
-			}
-			break;
-		case ATH10K_PCI_COMPL_FREE:
-			ath10k_warn("free completion cannot be processed\n");
-			break;
-		default:
-			ath10k_warn("invalid completion state (%d)\n",
-				    compl->state);
-			break;
-		}
-
-		compl->state = ATH10K_PCI_COMPL_FREE;
-
-		/*
-		 * Add completion back to the pipe's free list.
-		 */
-		spin_lock_bh(&compl->pipe_info->pipe_lock);
-		list_add_tail(&compl->list, &compl->pipe_info->compl_free);
-		spin_unlock_bh(&compl->pipe_info->pipe_lock);
-	}
-
-	spin_lock_bh(&ar_pci->compl_lock);
-	ar_pci->compl_processing = false;
-	spin_unlock_bh(&ar_pci->compl_lock);
-}
-
 /* TODO - temporary mapping while we have too few CE's */
 static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
 					      u16 service_id, u8 *ul_pipe,
@@ -1306,17 +1101,11 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
 	ath10k_pci_free_early_irq(ar);
 	ath10k_pci_kill_tasklet(ar);
 
-	ret = ath10k_pci_alloc_compl(ar);
-	if (ret) {
-		ath10k_warn("failed to allocate CE completions: %d\n", ret);
-		goto err_early_irq;
-	}
-
 	ret = ath10k_pci_request_irq(ar);
 	if (ret) {
 		ath10k_warn("failed to post RX buffers for all pipes: %d\n",
 			    ret);
-		goto err_free_compl;
+		goto err_early_irq;
 	}
 
 	ret = ath10k_pci_setup_ce_irq(ar);
@@ -1340,10 +1129,6 @@ err_stop:
 	ath10k_ce_disable_interrupts(ar);
 	ath10k_pci_free_irq(ar);
 	ath10k_pci_kill_tasklet(ar);
-	ath10k_pci_stop_ce(ar);
-	ath10k_pci_process_ce(ar);
-err_free_compl:
-	ath10k_pci_cleanup_ce(ar);
 err_early_irq:
 	/* Though there should be no interrupts (device was reset)
 	 * power_down() expects the early IRQ to be installed as per the
@@ -1414,18 +1199,10 @@ static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
 
 	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
 					  &ce_data, &nbytes, &id) == 0) {
-		/*
-		 * Indicate the completion to higer layer to free
-		 * the buffer
-		 */
-
-		if (!netbuf) {
-			ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n",
-				    ce_hdl->id);
+		/* no need to call tx completion for NULL pointers */
+		if (!netbuf)
 			continue;
-		}
 
-		ATH10K_SKB_CB(netbuf)->is_aborted = true;
 		ar_pci->msg_callbacks_current.tx_completion(ar,
 							    netbuf,
 							    id);
@@ -1483,7 +1260,6 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
 
 	ath10k_pci_free_irq(ar);
 	ath10k_pci_kill_tasklet(ar);
-	ath10k_pci_stop_ce(ar);
 
 	ret = ath10k_pci_request_early_irq(ar);
 	if (ret)
@@ -1493,8 +1269,6 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
 	 * not DMA nor interrupt. We process the leftovers and then free
 	 * everything else up. */
 
-	ath10k_pci_process_ce(ar);
-	ath10k_pci_cleanup_ce(ar);
 	ath10k_pci_buffer_cleanup(ar);
 
 	/* Make the sure the device won't access any structures on the host by
@@ -1502,7 +1276,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
 	 * configuration during init. If ringbuffers are freed and the device
 	 * were to access them this could lead to memory corruption on the
 	 * host. */
-	ath10k_pci_device_reset(ar);
+	ath10k_pci_warm_reset(ar);
 
 	ar_pci->started = 0;
 }
@@ -1993,7 +1767,94 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
 	ath10k_pci_sleep(ar);
 }
 
-static int ath10k_pci_hif_power_up(struct ath10k *ar)
+static int ath10k_pci_warm_reset(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ret = 0;
+	u32 val;
+
+	ath10k_dbg(ATH10K_DBG_BOOT, "boot performing warm chip reset\n");
+
+	ret = ath10k_do_pci_wake(ar);
+	if (ret) {
+		ath10k_err("failed to wake up target: %d\n", ret);
+		return ret;
+	}
+
+	/* debug */
+	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+				PCIE_INTR_CAUSE_ADDRESS);
+	ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
+
+	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+				CPU_INTR_ADDRESS);
+	ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
+		   val);
+
+	/* disable pending irqs */
+	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+			   PCIE_INTR_ENABLE_ADDRESS, 0);
+
+	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+			   PCIE_INTR_CLR_ADDRESS, ~0);
+
+	msleep(100);
+
+	/* clear fw indicator */
+	ath10k_pci_write32(ar, ar_pci->fw_indicator_address, 0);
+
+	/* clear target LF timer interrupts */
+	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+				SOC_LF_TIMER_CONTROL0_ADDRESS);
+	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
+			   SOC_LF_TIMER_CONTROL0_ADDRESS,
+			   val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
+
+	/* reset CE */
+	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+				SOC_RESET_CONTROL_ADDRESS);
+	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+			   val | SOC_RESET_CONTROL_CE_RST_MASK);
+	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+				SOC_RESET_CONTROL_ADDRESS);
+	msleep(10);
+
+	/* unreset CE */
+	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+			   val & ~SOC_RESET_CONTROL_CE_RST_MASK);
+	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+				SOC_RESET_CONTROL_ADDRESS);
+	msleep(10);
+
+	/* debug */
+	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+				PCIE_INTR_CAUSE_ADDRESS);
+	ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
+
+	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+				CPU_INTR_ADDRESS);
+	ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
+		   val);
+
+	/* CPU warm reset */
+	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+				SOC_RESET_CONTROL_ADDRESS);
+	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+			   val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
+
+	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+				SOC_RESET_CONTROL_ADDRESS);
+	ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);
+
+	msleep(100);
+
+	ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");
+
+	ath10k_do_pci_sleep(ar);
+	return ret;
+}
+
+static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	const char *irq_mode;
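The warm reset sequence added above repeats one pattern several times: read SOC_RESET_CONTROL, set or clear a single mask, write the value back, then read once more so the posted write is flushed before the following msleep(). As a sketch only (this helper does not exist in the patch; it merely names the pattern):

	/* Hypothetical helper illustrating the read-modify-write-readback
	 * pattern used repeatedly by ath10k_pci_warm_reset() above.
	 */
	static void example_soc_reset_rmw(struct ath10k *ar, u32 mask, bool set)
	{
		u32 val;

		val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
					SOC_RESET_CONTROL_ADDRESS);
		if (set)
			val |= mask;
		else
			val &= ~mask;

		ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
				   SOC_RESET_CONTROL_ADDRESS, val);

		/* read back to flush the posted write before any delay */
		(void)ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
					SOC_RESET_CONTROL_ADDRESS);
	}
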
@@ -2009,7 +1870,11 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
 	 * is in an unexpected state. We try to catch that here in order to
 	 * reset the Target and retry the probe.
 	 */
-	ret = ath10k_pci_device_reset(ar);
+	if (cold_reset)
+		ret = ath10k_pci_cold_reset(ar);
+	else
+		ret = ath10k_pci_warm_reset(ar);
+
 	if (ret) {
 		ath10k_err("failed to reset target: %d\n", ret);
 		goto err;
@@ -2079,7 +1944,7 @@ err_deinit_irq:
 	ath10k_pci_deinit_irq(ar);
 err_ce:
 	ath10k_pci_ce_deinit(ar);
-	ath10k_pci_device_reset(ar);
+	ath10k_pci_warm_reset(ar);
 err_ps:
 	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
 		ath10k_do_pci_sleep(ar);
@@ -2087,6 +1952,34 @@ err:
 	return ret;
 }
 
+static int ath10k_pci_hif_power_up(struct ath10k *ar)
+{
+	int ret;
+
+	/*
+	 * Hardware CUS232 version 2 has some issues with cold reset and the
+	 * preferred (and safer) way to perform a device reset is through a
+	 * warm reset.
+	 *
+	 * Warm reset doesn't always work though (notably after a firmware
+	 * crash) so fall back to cold reset if necessary.
+	 */
+	ret = __ath10k_pci_hif_power_up(ar, false);
+	if (ret) {
+		ath10k_warn("failed to power up target using warm reset (%d), trying cold reset\n",
+			    ret);
+
+		ret = __ath10k_pci_hif_power_up(ar, true);
+		if (ret) {
+			ath10k_err("failed to power up target using cold reset too (%d)\n",
+				   ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
 static void ath10k_pci_hif_power_down(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -2094,7 +1987,7 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
 	ath10k_pci_free_early_irq(ar);
 	ath10k_pci_kill_tasklet(ar);
 	ath10k_pci_deinit_irq(ar);
-	ath10k_pci_device_reset(ar);
+	ath10k_pci_warm_reset(ar);
 
 	ath10k_pci_ce_deinit(ar);
 	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
@@ -2151,7 +2044,7 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
 #endif
 
 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
-	.send_head		= ath10k_pci_hif_send_head,
+	.tx_sg			= ath10k_pci_hif_tx_sg,
 	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
 	.start			= ath10k_pci_hif_start,
 	.stop			= ath10k_pci_hif_stop,
@@ -2411,11 +2304,10 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
 	/* Try MSI-X */
 	if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
 		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
-		ret = pci_enable_msi_block(ar_pci->pdev, ar_pci->num_msi_intrs);
-		if (ret == 0)
-			return 0;
+		ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
+					   ar_pci->num_msi_intrs);
 		if (ret > 0)
-			pci_disable_msi(ar_pci->pdev);
+			return 0;
 
 		/* fall-through */
 	}
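The return conventions differ: pci_enable_msi_block() returned 0 on success and a positive vector count on partial failure, which is why the old code had to call pci_disable_msi() on a positive return. pci_enable_msi_range(pdev, minvec, maxvec) instead returns the number of vectors actually allocated or a negative errno, and leaves MSI disabled on failure; passing the same value as both bounds makes the allocation all-or-nothing, so a positive return now simply means success. A minimal sketch of the new convention, assuming the 3.14-era API:

	/* Hypothetical helper, illustrative only: returns 0 with exactly
	 * `want` MSI vectors enabled, or a negative errno with MSI left
	 * disabled (no cleanup needed on the error path).
	 */
	static int example_enable_exact_msi(struct pci_dev *pdev, int want)
	{
		int nvec;

		/* min == max forces an all-or-nothing allocation */
		nvec = pci_enable_msi_range(pdev, want, want);
		if (nvec < 0)
			return nvec;

		return 0;	/* exactly `want` vectors granted */
	}
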
@@ -2482,6 +2374,8 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
 	case MSI_NUM_REQUEST:
 		pci_disable_msi(ar_pci->pdev);
 		return 0;
+	default:
+		pci_disable_msi(ar_pci->pdev);
 	}
 
 	ath10k_warn("unknown irq configuration upon deinit\n");
@@ -2523,7 +2417,7 @@ out:
 	return ret;
 }
 
-static int ath10k_pci_device_reset(struct ath10k *ar)
+static int ath10k_pci_cold_reset(struct ath10k *ar)
 {
 	int i, ret;
 	u32 val;