author     John W. Linville <linville@tuxdriver.com>    2014-04-22 15:02:03 -0400
committer  John W. Linville <linville@tuxdriver.com>    2014-04-22 15:02:03 -0400
commit     22b3b9578da730272d5ca2f3bbf718c7324ff27e (patch)
tree       dcdb3a13f5ceb89b124e0745097699575ccd5a9f /drivers/net/wireless/ath/ath10k/ce.c
parent     bf4c69f7dd8e0c3427262cc11652227ec7256a75 (diff)
parent     4bfee8e8c13fc9477eb9420efd5a5d12e32ac614 (diff)
Merge branch 'for-linville' of git://github.com/kvalo/ath
Diffstat (limited to 'drivers/net/wireless/ath/ath10k/ce.c')
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c  356
1 file changed, 201 insertions(+), 155 deletions(-)
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index a79499c82350..1e4cad8632b5 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -840,35 +840,17 @@ void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
 
 static int ath10k_ce_init_src_ring(struct ath10k *ar,
                                    unsigned int ce_id,
-                                   struct ath10k_ce_pipe *ce_state,
                                    const struct ce_attr *attr)
 {
         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-        struct ath10k_ce_ring *src_ring;
-        unsigned int nentries = attr->src_nentries;
-        unsigned int ce_nbytes;
-        u32 ctrl_addr = ath10k_ce_base_address(ce_id);
-        dma_addr_t base_addr;
-        char *ptr;
-
-        nentries = roundup_pow_of_two(nentries);
-
-        if (ce_state->src_ring) {
-                WARN_ON(ce_state->src_ring->nentries != nentries);
-                return 0;
-        }
+        struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+        struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+        u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
 
-        ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
-        ptr = kzalloc(ce_nbytes, GFP_KERNEL);
-        if (ptr == NULL)
-                return -ENOMEM;
+        nentries = roundup_pow_of_two(attr->src_nentries);
 
-        ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
-        src_ring = ce_state->src_ring;
-
-        ptr += sizeof(struct ath10k_ce_ring);
-        src_ring->nentries = nentries;
-        src_ring->nentries_mask = nentries - 1;
+        memset(src_ring->per_transfer_context, 0,
+               nentries * sizeof(*src_ring->per_transfer_context));
 
         src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
         src_ring->sw_index &= src_ring->nentries_mask;
@@ -878,21 +860,87 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
                 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
         src_ring->write_index &= src_ring->nentries_mask;
 
-        src_ring->per_transfer_context = (void **)ptr;
+        ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
+                                         src_ring->base_addr_ce_space);
+        ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
+        ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
+        ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
+        ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
+        ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
+
+        ath10k_dbg(ATH10K_DBG_BOOT,
+                   "boot init ce src ring id %d entries %d base_addr %p\n",
+                   ce_id, nentries, src_ring->base_addr_owner_space);
+
+        return 0;
+}
+
+static int ath10k_ce_init_dest_ring(struct ath10k *ar,
+                                    unsigned int ce_id,
+                                    const struct ce_attr *attr)
+{
+        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+        struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+        struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+        u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
+
+        nentries = roundup_pow_of_two(attr->dest_nentries);
+
+        memset(dest_ring->per_transfer_context, 0,
+               nentries * sizeof(*dest_ring->per_transfer_context));
+
+        dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
+        dest_ring->sw_index &= dest_ring->nentries_mask;
+        dest_ring->write_index =
+                ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+        dest_ring->write_index &= dest_ring->nentries_mask;
+
+        ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
+                                          dest_ring->base_addr_ce_space);
+        ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
+        ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
+        ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
+        ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
+
+        ath10k_dbg(ATH10K_DBG_BOOT,
+                   "boot ce dest ring id %d entries %d base_addr %p\n",
+                   ce_id, nentries, dest_ring->base_addr_owner_space);
+
+        return 0;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
+                         const struct ce_attr *attr)
+{
+        struct ath10k_ce_ring *src_ring;
+        u32 nentries = attr->src_nentries;
+        dma_addr_t base_addr;
+
+        nentries = roundup_pow_of_two(nentries);
+
+        src_ring = kzalloc(sizeof(*src_ring) +
+                           (nentries *
+                            sizeof(*src_ring->per_transfer_context)),
+                           GFP_KERNEL);
+        if (src_ring == NULL)
+                return ERR_PTR(-ENOMEM);
+
+        src_ring->nentries = nentries;
+        src_ring->nentries_mask = nentries - 1;
 
         /*
          * Legacy platforms that do not support cache
          * coherent DMA are unsupported
          */
         src_ring->base_addr_owner_space_unaligned =
-                pci_alloc_consistent(ar_pci->pdev,
+                dma_alloc_coherent(ar->dev,
                                    (nentries * sizeof(struct ce_desc) +
                                     CE_DESC_RING_ALIGN),
-                                   &base_addr);
+                                   &base_addr, GFP_KERNEL);
         if (!src_ring->base_addr_owner_space_unaligned) {
-                kfree(ce_state->src_ring);
-                ce_state->src_ring = NULL;
-                return -ENOMEM;
+                kfree(src_ring);
+                return ERR_PTR(-ENOMEM);
         }
 
         src_ring->base_addr_ce_space_unaligned = base_addr;
@@ -912,88 +960,54 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
                 kmalloc((nentries * sizeof(struct ce_desc) +
                          CE_DESC_RING_ALIGN), GFP_KERNEL);
         if (!src_ring->shadow_base_unaligned) {
-                pci_free_consistent(ar_pci->pdev,
+                dma_free_coherent(ar->dev,
                                   (nentries * sizeof(struct ce_desc) +
                                    CE_DESC_RING_ALIGN),
                                   src_ring->base_addr_owner_space,
                                   src_ring->base_addr_ce_space);
-                kfree(ce_state->src_ring);
-                ce_state->src_ring = NULL;
-                return -ENOMEM;
+                kfree(src_ring);
+                return ERR_PTR(-ENOMEM);
         }
 
         src_ring->shadow_base = PTR_ALIGN(
                         src_ring->shadow_base_unaligned,
                         CE_DESC_RING_ALIGN);
 
-        ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
-                                         src_ring->base_addr_ce_space);
-        ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
-        ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
-        ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
-        ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
-        ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
-
-        ath10k_dbg(ATH10K_DBG_BOOT,
-                   "boot ce src ring id %d entries %d base_addr %p\n",
-                   ce_id, nentries, src_ring->base_addr_owner_space);
-
-        return 0;
+        return src_ring;
 }
 
-static int ath10k_ce_init_dest_ring(struct ath10k *ar,
-                                    unsigned int ce_id,
-                                    struct ath10k_ce_pipe *ce_state,
-                                    const struct ce_attr *attr)
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
+                          const struct ce_attr *attr)
 {
-        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
         struct ath10k_ce_ring *dest_ring;
-        unsigned int nentries = attr->dest_nentries;
-        unsigned int ce_nbytes;
-        u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+        u32 nentries;
         dma_addr_t base_addr;
-        char *ptr;
 
-        nentries = roundup_pow_of_two(nentries);
+        nentries = roundup_pow_of_two(attr->dest_nentries);
 
-        if (ce_state->dest_ring) {
-                WARN_ON(ce_state->dest_ring->nentries != nentries);
-                return 0;
-        }
-
-        ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
-        ptr = kzalloc(ce_nbytes, GFP_KERNEL);
-        if (ptr == NULL)
-                return -ENOMEM;
-
-        ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
-        dest_ring = ce_state->dest_ring;
+        dest_ring = kzalloc(sizeof(*dest_ring) +
+                            (nentries *
+                             sizeof(*dest_ring->per_transfer_context)),
+                            GFP_KERNEL);
+        if (dest_ring == NULL)
+                return ERR_PTR(-ENOMEM);
 
-        ptr += sizeof(struct ath10k_ce_ring);
         dest_ring->nentries = nentries;
         dest_ring->nentries_mask = nentries - 1;
 
-        dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
-        dest_ring->sw_index &= dest_ring->nentries_mask;
-        dest_ring->write_index =
-                ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
-        dest_ring->write_index &= dest_ring->nentries_mask;
-
-        dest_ring->per_transfer_context = (void **)ptr;
-
         /*
          * Legacy platforms that do not support cache
          * coherent DMA are unsupported
         */
         dest_ring->base_addr_owner_space_unaligned =
-                pci_alloc_consistent(ar_pci->pdev,
+                dma_alloc_coherent(ar->dev,
                                    (nentries * sizeof(struct ce_desc) +
                                     CE_DESC_RING_ALIGN),
-                                   &base_addr);
+                                   &base_addr, GFP_KERNEL);
         if (!dest_ring->base_addr_owner_space_unaligned) {
-                kfree(ce_state->dest_ring);
-                ce_state->dest_ring = NULL;
-                return -ENOMEM;
+                kfree(dest_ring);
+                return ERR_PTR(-ENOMEM);
         }
 
         dest_ring->base_addr_ce_space_unaligned = base_addr;
@@ -1012,39 +1026,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
                         dest_ring->base_addr_ce_space_unaligned,
                         CE_DESC_RING_ALIGN);
 
-        ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
-                                          dest_ring->base_addr_ce_space);
-        ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
-        ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
-        ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
-        ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
-
-        ath10k_dbg(ATH10K_DBG_BOOT,
-                   "boot ce dest ring id %d entries %d base_addr %p\n",
-                   ce_id, nentries, dest_ring->base_addr_owner_space);
-
-        return 0;
-}
-
-static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
-                                                   unsigned int ce_id,
-                                                   const struct ce_attr *attr)
-{
-        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-        struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
-        u32 ctrl_addr = ath10k_ce_base_address(ce_id);
-
-        spin_lock_bh(&ar_pci->ce_lock);
-
-        ce_state->ar = ar;
-        ce_state->id = ce_id;
-        ce_state->ctrl_addr = ctrl_addr;
-        ce_state->attr_flags = attr->flags;
-        ce_state->src_sz_max = attr->src_sz_max;
-
-        spin_unlock_bh(&ar_pci->ce_lock);
-
-        return ce_state;
+        return dest_ring;
 }
 
 /*
@@ -1054,11 +1036,11 @@ static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
  * initialization. It may be that only one side or the other is
  * initialized by software/firmware.
  */
-struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
-                                      unsigned int ce_id,
-                                      const struct ce_attr *attr)
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+                        const struct ce_attr *attr)
 {
-        struct ath10k_ce_pipe *ce_state;
+        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+        struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
         int ret;
 
         /*
@@ -1074,64 +1056,128 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
 
         ret = ath10k_pci_wake(ar);
         if (ret)
-                return NULL;
+                return ret;
 
-        ce_state = ath10k_ce_init_state(ar, ce_id, attr);
-        if (!ce_state) {
-                ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
-                goto out;
-        }
+        spin_lock_bh(&ar_pci->ce_lock);
+        ce_state->ar = ar;
+        ce_state->id = ce_id;
+        ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
+        ce_state->attr_flags = attr->flags;
+        ce_state->src_sz_max = attr->src_sz_max;
+        spin_unlock_bh(&ar_pci->ce_lock);
 
         if (attr->src_nentries) {
-                ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
+                ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
                 if (ret) {
                         ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
                                    ce_id, ret);
-                        ath10k_ce_deinit(ce_state);
-                        ce_state = NULL;
                         goto out;
                 }
         }
 
         if (attr->dest_nentries) {
-                ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
+                ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
                 if (ret) {
                         ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
                                    ce_id, ret);
-                        ath10k_ce_deinit(ce_state);
-                        ce_state = NULL;
                         goto out;
                 }
         }
 
 out:
         ath10k_pci_sleep(ar);
-        return ce_state;
+        return ret;
 }
 
-void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
+static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
+{
+        u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+        ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
+        ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
+        ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
+        ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
+{
+        u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+        ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
+        ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
+        ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
+{
+        int ret;
+
+        ret = ath10k_pci_wake(ar);
+        if (ret)
+                return;
+
+        ath10k_ce_deinit_src_ring(ar, ce_id);
+        ath10k_ce_deinit_dest_ring(ar, ce_id);
+
+        ath10k_pci_sleep(ar);
+}
+
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+                         const struct ce_attr *attr)
+{
+        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+        struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+        int ret;
+
+        if (attr->src_nentries) {
+                ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
+                if (IS_ERR(ce_state->src_ring)) {
+                        ret = PTR_ERR(ce_state->src_ring);
+                        ath10k_err("failed to allocate copy engine source ring %d: %d\n",
+                                   ce_id, ret);
+                        ce_state->src_ring = NULL;
+                        return ret;
+                }
+        }
+
+        if (attr->dest_nentries) {
+                ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
+                                                                attr);
+                if (IS_ERR(ce_state->dest_ring)) {
+                        ret = PTR_ERR(ce_state->dest_ring);
+                        ath10k_err("failed to allocate copy engine destination ring %d: %d\n",
+                                   ce_id, ret);
+                        ce_state->dest_ring = NULL;
+                        return ret;
+                }
+        }
+
+        return 0;
+}
+
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
 {
-        struct ath10k *ar = ce_state->ar;
         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+        struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
 
         if (ce_state->src_ring) {
                 kfree(ce_state->src_ring->shadow_base_unaligned);
-                pci_free_consistent(ar_pci->pdev,
+                dma_free_coherent(ar->dev,
                                   (ce_state->src_ring->nentries *
                                    sizeof(struct ce_desc) +
                                    CE_DESC_RING_ALIGN),
                                   ce_state->src_ring->base_addr_owner_space,
                                   ce_state->src_ring->base_addr_ce_space);
                 kfree(ce_state->src_ring);
         }
 
         if (ce_state->dest_ring) {
-                pci_free_consistent(ar_pci->pdev,
+                dma_free_coherent(ar->dev,
                                   (ce_state->dest_ring->nentries *
                                    sizeof(struct ce_desc) +
                                    CE_DESC_RING_ALIGN),
                                   ce_state->dest_ring->base_addr_owner_space,
                                   ce_state->dest_ring->base_addr_ce_space);
                 kfree(ce_state->dest_ring);
         }
 
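
Taken together, these hunks replace the old all-in-one ath10k_ce_init()/ath10k_ce_deinit() pair with a split API: ath10k_ce_alloc_pipe()/ath10k_ce_free_pipe() handle the one-time host memory (DMA-coherent ring buffers), while ath10k_ce_init_pipe()/ath10k_ce_deinit_pipe() only program and quiesce the hardware rings, so they can be re-run across power cycles without reallocating memory. The sketch below is illustrative only and is not part of this commit; the loop structure, the example_* names, and the use of CE_COUNT/host_ce_config_wlan as the per-pipe attribute table are assumptions, since the matching caller changes live in pci.c, outside this diff.

```c
/*
 * Illustrative sketch only -- not part of this commit.  Shows the call
 * order implied by the split alloc/init vs. deinit/free copy-engine API.
 * CE_COUNT, host_ce_config_wlan and the example_* names are assumptions.
 */
static int example_ce_setup(struct ath10k *ar)
{
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		/* one-time host memory allocation (e.g. at probe) */
		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret)
			return ret;

		/* per-boot hardware ring programming (e.g. at power up) */
		ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static void example_ce_teardown(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++) {
		ath10k_ce_deinit_pipe(ar, i); /* quiesce hardware rings */
		ath10k_ce_free_pipe(ar, i);   /* release host memory */
	}
}
```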