author     Michal Kazior <michal.kazior@tieto.com>    2014-03-28 04:02:38 -0400
committer  Kalle Valo <kvalo@qca.qualcomm.com>        2014-03-28 08:32:10 -0400
commit     25d0dbcbd5c746631ec1ee08bbbc4eba86bb9163
tree       e7325922a9bba365c3eb0ad1a88fcde66d66abfa
parent     68c03249f388aafe74f0e87e2743294d4384c00c
ath10k: split ce initialization and allocation
Definitions by which copy engine structures are allocated do not change, so it
doesn't make much sense to re-create those structures each time the device is
booted (e.g. due to firmware recovery). This should decrease the chance of
memory allocation failures.

While at it, remove the per_transfer_context pointer indirection. The array has
been trailing the copy engine ringbuffer structure anyway. This also saves a
pointer's worth of bytes for each copy engine ringbuffer.

Reported-By: Avery Pennarun <apenwarr@gmail.com>
Signed-off-by: Michal Kazior <michal.kazior@tieto.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c  | 307
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.h  |  15
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c |  64
3 files changed, 228 insertions, 158 deletions
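To make the shape of the change easier to follow before reading the hunks: the patch allocates each copy engine ring once (the ring struct plus a trailing per-transfer-context array in a single kzalloc) and afterwards only re-initializes that memory on every boot or firmware recovery. Below is a minimal, standalone userspace sketch of that pattern under stated assumptions; it is not ath10k code. demo_ring and its helpers are made-up names, calloc stands in for kzalloc, and the C99 flexible array member ([]) is used where the kernel patch uses the older [0] idiom.

/*
 * Illustrative sketch only: allocate once, re-initialize many times.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_ring {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	/* keep last: contexts trail the struct, no separate pointer needed */
	void *per_transfer_context[];
};

/* One-time allocation: struct and context array in a single allocation. */
static struct demo_ring *demo_ring_alloc(unsigned int nentries)
{
	struct demo_ring *ring;

	ring = calloc(1, sizeof(*ring) +
			 nentries * sizeof(ring->per_transfer_context[0]));
	if (!ring)
		return NULL;

	ring->nentries = nentries;		/* assumed power of two */
	ring->nentries_mask = nentries - 1;
	return ring;
}

/* Per-boot (re)initialization: wipe contexts, reset indices, no realloc. */
static void demo_ring_init(struct demo_ring *ring)
{
	memset(ring->per_transfer_context, 0,
	       ring->nentries * sizeof(ring->per_transfer_context[0]));
	ring->sw_index = 0;
	ring->write_index = 0;
}

int main(void)
{
	struct demo_ring *ring = demo_ring_alloc(8);

	if (!ring)
		return 1;

	demo_ring_init(ring);	/* first boot */
	demo_ring_init(ring);	/* firmware recovery: same memory reused */
	printf("ring of %u entries reused across boots\n", ring->nentries);

	free(ring);
	return 0;
}

The trailing array removes the separate per_transfer_context pointer, so the contexts live in the same allocation as the ring itself and one allocation failure path disappears, which matches the rationale in the commit message above.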
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 653a240142e5..1e4cad8632b5 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -840,34 +840,17 @@ void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
 
 static int ath10k_ce_init_src_ring(struct ath10k *ar,
 				   unsigned int ce_id,
-				   struct ath10k_ce_pipe *ce_state,
 				   const struct ce_attr *attr)
 {
-	struct ath10k_ce_ring *src_ring;
-	unsigned int nentries = attr->src_nentries;
-	unsigned int ce_nbytes;
-	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
-	dma_addr_t base_addr;
-	char *ptr;
-
-	nentries = roundup_pow_of_two(nentries);
-
-	if (ce_state->src_ring) {
-		WARN_ON(ce_state->src_ring->nentries != nentries);
-		return 0;
-	}
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+	u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
 
-	ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
-	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
-	if (ptr == NULL)
-		return -ENOMEM;
+	nentries = roundup_pow_of_two(attr->src_nentries);
 
-	ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
-	src_ring = ce_state->src_ring;
-
-	ptr += sizeof(struct ath10k_ce_ring);
-	src_ring->nentries = nentries;
-	src_ring->nentries_mask = nentries - 1;
+	memset(src_ring->per_transfer_context, 0,
+	       nentries * sizeof(*src_ring->per_transfer_context));
 
 	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
 	src_ring->sw_index &= src_ring->nentries_mask;
@@ -877,7 +860,74 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
 		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
 	src_ring->write_index &= src_ring->nentries_mask;
 
-	src_ring->per_transfer_context = (void **)ptr;
+	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
+					 src_ring->base_addr_ce_space);
+	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
+	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
+	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
+	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
+	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
+
+	ath10k_dbg(ATH10K_DBG_BOOT,
+		   "boot init ce src ring id %d entries %d base_addr %p\n",
+		   ce_id, nentries, src_ring->base_addr_owner_space);
+
+	return 0;
+}
+
+static int ath10k_ce_init_dest_ring(struct ath10k *ar,
+				    unsigned int ce_id,
+				    const struct ce_attr *attr)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+	u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
+
+	nentries = roundup_pow_of_two(attr->dest_nentries);
+
+	memset(dest_ring->per_transfer_context, 0,
+	       nentries * sizeof(*dest_ring->per_transfer_context));
+
+	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
+	dest_ring->sw_index &= dest_ring->nentries_mask;
+	dest_ring->write_index =
+		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+	dest_ring->write_index &= dest_ring->nentries_mask;
+
+	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
+					  dest_ring->base_addr_ce_space);
+	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
+	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
+	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
+	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
+
+	ath10k_dbg(ATH10K_DBG_BOOT,
+		   "boot ce dest ring id %d entries %d base_addr %p\n",
+		   ce_id, nentries, dest_ring->base_addr_owner_space);
+
+	return 0;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
+			 const struct ce_attr *attr)
+{
+	struct ath10k_ce_ring *src_ring;
+	u32 nentries = attr->src_nentries;
+	dma_addr_t base_addr;
+
+	nentries = roundup_pow_of_two(nentries);
+
+	src_ring = kzalloc(sizeof(*src_ring) +
+			   (nentries *
+			    sizeof(*src_ring->per_transfer_context)),
+			   GFP_KERNEL);
+	if (src_ring == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	src_ring->nentries = nentries;
+	src_ring->nentries_mask = nentries - 1;
 
 	/*
 	 * Legacy platforms that do not support cache
@@ -889,9 +939,8 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
 					       CE_DESC_RING_ALIGN),
 					       &base_addr, GFP_KERNEL);
 	if (!src_ring->base_addr_owner_space_unaligned) {
-		kfree(ce_state->src_ring);
-		ce_state->src_ring = NULL;
-		return -ENOMEM;
+		kfree(src_ring);
+		return ERR_PTR(-ENOMEM);
 	}
 
 	src_ring->base_addr_ce_space_unaligned = base_addr;
@@ -916,69 +965,37 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
 					CE_DESC_RING_ALIGN),
 				src_ring->base_addr_owner_space,
 				src_ring->base_addr_ce_space);
-		kfree(ce_state->src_ring);
-		ce_state->src_ring = NULL;
-		return -ENOMEM;
+		kfree(src_ring);
+		return ERR_PTR(-ENOMEM);
 	}
 
 	src_ring->shadow_base = PTR_ALIGN(
 			src_ring->shadow_base_unaligned,
 			CE_DESC_RING_ALIGN);
 
-	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
-					 src_ring->base_addr_ce_space);
-	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
-	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
-	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
-	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
-	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
-
-	ath10k_dbg(ATH10K_DBG_BOOT,
-		   "boot ce src ring id %d entries %d base_addr %p\n",
-		   ce_id, nentries, src_ring->base_addr_owner_space);
-
-	return 0;
+	return src_ring;
 }
 
-static int ath10k_ce_init_dest_ring(struct ath10k *ar,
-				    unsigned int ce_id,
-				    struct ath10k_ce_pipe *ce_state,
-				    const struct ce_attr *attr)
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
+			  const struct ce_attr *attr)
 {
 	struct ath10k_ce_ring *dest_ring;
-	unsigned int nentries = attr->dest_nentries;
-	unsigned int ce_nbytes;
-	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+	u32 nentries;
 	dma_addr_t base_addr;
-	char *ptr;
 
-	nentries = roundup_pow_of_two(nentries);
+	nentries = roundup_pow_of_two(attr->dest_nentries);
 
-	if (ce_state->dest_ring) {
-		WARN_ON(ce_state->dest_ring->nentries != nentries);
-		return 0;
-	}
-
-	ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
-	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
-	if (ptr == NULL)
-		return -ENOMEM;
-
-	ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
-	dest_ring = ce_state->dest_ring;
+	dest_ring = kzalloc(sizeof(*dest_ring) +
+			    (nentries *
+			     sizeof(*dest_ring->per_transfer_context)),
+			    GFP_KERNEL);
+	if (dest_ring == NULL)
+		return ERR_PTR(-ENOMEM);
 
-	ptr += sizeof(struct ath10k_ce_ring);
 	dest_ring->nentries = nentries;
 	dest_ring->nentries_mask = nentries - 1;
 
-	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
-	dest_ring->sw_index &= dest_ring->nentries_mask;
-	dest_ring->write_index =
-		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
-	dest_ring->write_index &= dest_ring->nentries_mask;
-
-	dest_ring->per_transfer_context = (void **)ptr;
-
 	/*
 	 * Legacy platforms that do not support cache
 	 * coherent DMA are unsupported
@@ -989,9 +1006,8 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
 					       CE_DESC_RING_ALIGN),
 					       &base_addr, GFP_KERNEL);
 	if (!dest_ring->base_addr_owner_space_unaligned) {
-		kfree(ce_state->dest_ring);
-		ce_state->dest_ring = NULL;
-		return -ENOMEM;
+		kfree(dest_ring);
+		return ERR_PTR(-ENOMEM);
 	}
 
 	dest_ring->base_addr_ce_space_unaligned = base_addr;
@@ -1010,39 +1026,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
 			dest_ring->base_addr_ce_space_unaligned,
 			CE_DESC_RING_ALIGN);
 
-	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
-					  dest_ring->base_addr_ce_space);
-	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
-	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
-	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
-	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
-
-	ath10k_dbg(ATH10K_DBG_BOOT,
-		   "boot ce dest ring id %d entries %d base_addr %p\n",
-		   ce_id, nentries, dest_ring->base_addr_owner_space);
-
-	return 0;
-}
-
-static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
-						   unsigned int ce_id,
-						   const struct ce_attr *attr)
-{
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
-	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
-
-	spin_lock_bh(&ar_pci->ce_lock);
-
-	ce_state->ar = ar;
-	ce_state->id = ce_id;
-	ce_state->ctrl_addr = ctrl_addr;
-	ce_state->attr_flags = attr->flags;
-	ce_state->src_sz_max = attr->src_sz_max;
-
-	spin_unlock_bh(&ar_pci->ce_lock);
-
-	return ce_state;
+	return dest_ring;
 }
 
 /*
@@ -1052,11 +1036,11 @@ static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
  * initialization. It may be that only one side or the other is
  * initialized by software/firmware.
  */
-struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
-				      unsigned int ce_id,
-				      const struct ce_attr *attr)
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+			const struct ce_attr *attr)
 {
-	struct ath10k_ce_pipe *ce_state;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
 	int ret;
 
 	/*
@@ -1072,44 +1056,109 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
 
 	ret = ath10k_pci_wake(ar);
 	if (ret)
-		return NULL;
+		return ret;
 
-	ce_state = ath10k_ce_init_state(ar, ce_id, attr);
-	if (!ce_state) {
-		ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
-		goto out;
-	}
+	spin_lock_bh(&ar_pci->ce_lock);
+	ce_state->ar = ar;
+	ce_state->id = ce_id;
+	ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
+	ce_state->attr_flags = attr->flags;
+	ce_state->src_sz_max = attr->src_sz_max;
+	spin_unlock_bh(&ar_pci->ce_lock);
 
 	if (attr->src_nentries) {
-		ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
+		ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
 		if (ret) {
 			ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
 				   ce_id, ret);
-			ath10k_ce_deinit(ce_state);
-			ce_state = NULL;
 			goto out;
 		}
 	}
 
 	if (attr->dest_nentries) {
-		ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
+		ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
 		if (ret) {
 			ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
 				   ce_id, ret);
-			ath10k_ce_deinit(ce_state);
-			ce_state = NULL;
 			goto out;
 		}
 	}
 
 out:
 	ath10k_pci_sleep(ar);
-	return ce_state;
+	return ret;
 }
 
-void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
+static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
 {
-	struct ath10k *ar = ce_state->ar;
+	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
+	ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
+	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
+	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
+{
+	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
+	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
+	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
+{
+	int ret;
+
+	ret = ath10k_pci_wake(ar);
+	if (ret)
+		return;
+
+	ath10k_ce_deinit_src_ring(ar, ce_id);
+	ath10k_ce_deinit_dest_ring(ar, ce_id);
+
+	ath10k_pci_sleep(ar);
+}
+
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+			 const struct ce_attr *attr)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+	int ret;
+
+	if (attr->src_nentries) {
+		ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
+		if (IS_ERR(ce_state->src_ring)) {
+			ret = PTR_ERR(ce_state->src_ring);
+			ath10k_err("failed to allocate copy engine source ring %d: %d\n",
+				   ce_id, ret);
+			ce_state->src_ring = NULL;
+			return ret;
+		}
+	}
+
+	if (attr->dest_nentries) {
+		ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
+								attr);
+		if (IS_ERR(ce_state->dest_ring)) {
+			ret = PTR_ERR(ce_state->dest_ring);
+			ath10k_err("failed to allocate copy engine destination ring %d: %d\n",
+				   ce_id, ret);
+			ce_state->dest_ring = NULL;
+			return ret;
+		}
	}
+
+	return 0;
+}
+
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
 
 	if (ce_state->src_ring) {
 		kfree(ce_state->src_ring->shadow_base_unaligned);
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 8eb7f99ed992..fd0bc3561e42 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -104,7 +104,8 @@ struct ath10k_ce_ring {
 	void *shadow_base_unaligned;
 	struct ce_desc *shadow_base;
 
-	void **per_transfer_context;
+	/* keep last */
+	void *per_transfer_context[0];
 };
 
 struct ath10k_ce_pipe {
@@ -210,10 +211,12 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
 
 /*==================CE Engine Initialization=======================*/
 
-/* Initialize an instance of a CE */
-struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
-				      unsigned int ce_id,
-				      const struct ce_attr *attr);
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+			const struct ce_attr *attr);
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+			 const struct ce_attr *attr);
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
 
 /*==================CE Engine Shutdown=======================*/
 /*
@@ -236,8 +239,6 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
 			       unsigned int *nbytesp,
 			       unsigned int *transfer_idp);
 
-void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
-
 /*==================CE Interrupt Handlers====================*/
 void ath10k_ce_per_engine_service_any(struct ath10k *ar);
 void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 337af7e22b30..7995b0d23b1e 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1258,18 +1258,10 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
 
 static void ath10k_pci_ce_deinit(struct ath10k *ar)
 {
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ath10k_pci_pipe *pipe_info;
-	int pipe_num;
+	int i;
 
-	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
-		pipe_info = &ar_pci->pipe_info[pipe_num];
-		if (pipe_info->ce_hdl) {
-			ath10k_ce_deinit(pipe_info->ce_hdl);
-			pipe_info->ce_hdl = NULL;
-			pipe_info->buf_sz = 0;
-		}
-	}
+	for (i = 0; i < CE_COUNT; i++)
+		ath10k_ce_deinit_pipe(ar, i);
 }
 
 static void ath10k_pci_hif_stop(struct ath10k *ar)
@@ -1722,30 +1714,49 @@ static int ath10k_pci_init_config(struct ath10k *ar)
 	return 0;
 }
 
+static int ath10k_pci_alloc_ce(struct ath10k *ar)
+{
+	int i, ret;
+
+	for (i = 0; i < CE_COUNT; i++) {
+		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
+		if (ret) {
+			ath10k_err("failed to allocate copy engine pipe %d: %d\n",
+				   i, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void ath10k_pci_free_ce(struct ath10k *ar)
+{
+	int i;
 
+	for (i = 0; i < CE_COUNT; i++)
+		ath10k_ce_free_pipe(ar, i);
+}
 
 static int ath10k_pci_ce_init(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	struct ath10k_pci_pipe *pipe_info;
 	const struct ce_attr *attr;
-	int pipe_num;
+	int pipe_num, ret;
 
 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
 		pipe_info = &ar_pci->pipe_info[pipe_num];
+		pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
 		pipe_info->pipe_num = pipe_num;
 		pipe_info->hif_ce_state = ar;
 		attr = &host_ce_config_wlan[pipe_num];
 
-		pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
-		if (pipe_info->ce_hdl == NULL) {
-			ath10k_err("failed to initialize CE for pipe: %d\n",
-				   pipe_num);
-
-			/* It is safe to call it here. It checks if ce_hdl is
-			 * valid for each pipe */
-			ath10k_pci_ce_deinit(ar);
-			return -1;
+		ret = ath10k_ce_init_pipe(ar, pipe_num, attr);
+		if (ret) {
+			ath10k_err("failed to initialize copy engine pipe %d: %d\n",
+				   pipe_num, ret);
+			return ret;
 		}
 
 		if (pipe_num == CE_COUNT - 1) {
@@ -2648,16 +2659,24 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 
 	ath10k_do_pci_sleep(ar);
 
+	ret = ath10k_pci_alloc_ce(ar);
+	if (ret) {
+		ath10k_err("failed to allocate copy engine pipes: %d\n", ret);
+		goto err_iomap;
+	}
+
 	ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
 
 	ret = ath10k_core_register(ar, chip_id);
 	if (ret) {
 		ath10k_err("failed to register driver core: %d\n", ret);
-		goto err_iomap;
+		goto err_free_ce;
 	}
 
 	return 0;
 
+err_free_ce:
+	ath10k_pci_free_ce(ar);
 err_iomap:
 	pci_iounmap(pdev, mem);
 err_master:
@@ -2693,6 +2712,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
 	tasklet_kill(&ar_pci->msi_fw_err);
 
 	ath10k_core_unregister(ar);
+	ath10k_pci_free_ce(ar);
 
 	pci_iounmap(pdev, ar_pci->mem);
 	pci_release_region(pdev, BAR_NUM);