author	Lennert Buytenhek <buytenh@wantstofly.org>	2009-10-22 14:20:40 -0400
committer	John W. Linville <linville@tuxdriver.com>	2009-11-04 18:44:47 -0500
commit	45eb400d50e1ad84a8e8f9e9a82cd8ae13d7d691 (patch)
tree	00107777c23ef9fc652aea12794060c1fc018145 /drivers/net/wireless/mwl8k.c
parent	a43c49a817f31ce1accc029239827b108319ecf9 (diff)
mwl8k: shorten receive/transmit state variable names
To conserve horizontal space.

Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
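The effect is easiest to see in mwl8k_rxq_init(): as a before/after sketch taken from the hunks below (illustration only, not part of the patch text), the descriptor-ring allocation that previously had to wrap now fits on a single 80-column line:

	/* before the rename: long field names force the call to wrap */
	rxq->rx_desc_area =
		pci_alloc_consistent(priv->pdev, size, &rxq->rx_desc_dma);

	/* after the rename: the same allocation fits on one line */
	rxq->rxd = pci_alloc_consistent(priv->pdev, size, &rxq->rxd_dma);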
Diffstat (limited to 'drivers/net/wireless/mwl8k.c')
-rw-r--r--	drivers/net/wireless/mwl8k.c	194
1 file changed, 93 insertions(+), 101 deletions(-)
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 94cbf93a8bf5..80df2ebbd602 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -88,30 +88,30 @@ MODULE_DEVICE_TABLE(pci, mwl8k_table);
 #define MWL8K_TX_QUEUES	4
 
 struct mwl8k_rx_queue {
-	int rx_desc_count;
+	int rxd_count;
 
 	/* hw receives here */
-	int rx_head;
+	int head;
 
 	/* refill descs here */
-	int rx_tail;
+	int tail;
 
-	struct mwl8k_rx_desc *rx_desc_area;
-	dma_addr_t rx_desc_dma;
-	struct sk_buff **rx_skb;
+	struct mwl8k_rx_desc *rxd;
+	dma_addr_t rxd_dma;
+	struct sk_buff **skb;
 };
 
 struct mwl8k_tx_queue {
 	/* hw transmits here */
-	int tx_head;
+	int head;
 
 	/* sw appends here */
-	int tx_tail;
+	int tail;
 
-	struct ieee80211_tx_queue_stats tx_stats;
-	struct mwl8k_tx_desc *tx_desc_area;
-	dma_addr_t tx_desc_dma;
-	struct sk_buff **tx_skb;
+	struct ieee80211_tx_queue_stats stats;
+	struct mwl8k_tx_desc *txd;
+	dma_addr_t txd_dma;
+	struct sk_buff **skb;
 };
 
 /* Pointers to the firmware data and meta information about it. */
@@ -738,7 +738,7 @@ struct mwl8k_rx_desc {
 	__u8 link_quality;
 	__u8 noise_level;
 	__le32 pkt_phys_addr;
-	__le32 next_rx_desc_phys_addr;
+	__le32 next_rxd_phys_addr;
 	__le16 qos_control;
 	__le16 rate_info;
 	__le32 pad0[4];
@@ -767,42 +767,38 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index)
 	int size;
 	int i;
 
-	rxq->rx_desc_count = 0;
-	rxq->rx_head = 0;
-	rxq->rx_tail = 0;
+	rxq->rxd_count = 0;
+	rxq->head = 0;
+	rxq->tail = 0;
 
 	size = MWL8K_RX_DESCS * sizeof(struct mwl8k_rx_desc);
 
-	rxq->rx_desc_area =
-		pci_alloc_consistent(priv->pdev, size, &rxq->rx_desc_dma);
-	if (rxq->rx_desc_area == NULL) {
+	rxq->rxd = pci_alloc_consistent(priv->pdev, size, &rxq->rxd_dma);
+	if (rxq->rxd == NULL) {
 		printk(KERN_ERR "%s: failed to alloc RX descriptors\n",
 		       wiphy_name(hw->wiphy));
 		return -ENOMEM;
 	}
-	memset(rxq->rx_desc_area, 0, size);
+	memset(rxq->rxd, 0, size);
 
-	rxq->rx_skb = kmalloc(MWL8K_RX_DESCS *
-				sizeof(*rxq->rx_skb), GFP_KERNEL);
-	if (rxq->rx_skb == NULL) {
+	rxq->skb = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->skb), GFP_KERNEL);
+	if (rxq->skb == NULL) {
 		printk(KERN_ERR "%s: failed to alloc RX skbuff list\n",
 		       wiphy_name(hw->wiphy));
-		pci_free_consistent(priv->pdev, size,
-				    rxq->rx_desc_area, rxq->rx_desc_dma);
+		pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma);
 		return -ENOMEM;
 	}
-	memset(rxq->rx_skb, 0, MWL8K_RX_DESCS * sizeof(*rxq->rx_skb));
+	memset(rxq->skb, 0, MWL8K_RX_DESCS * sizeof(*rxq->skb));
 
 	for (i = 0; i < MWL8K_RX_DESCS; i++) {
 		struct mwl8k_rx_desc *rx_desc;
 		int nexti;
 
-		rx_desc = rxq->rx_desc_area + i;
+		rx_desc = rxq->rxd + i;
 		nexti = (i + 1) % MWL8K_RX_DESCS;
 
-		rx_desc->next_rx_desc_phys_addr =
-			cpu_to_le32(rxq->rx_desc_dma
-				    + nexti * sizeof(*rx_desc));
+		rx_desc->next_rxd_phys_addr =
+			cpu_to_le32(rxq->rxd_dma + nexti * sizeof(*rx_desc));
 		rx_desc->rx_ctrl = MWL8K_RX_CTRL_OWNED_BY_HOST;
 	}
 
@@ -816,7 +812,7 @@ static int rxq_refill(struct ieee80211_hw *hw, int index, int limit)
 	int refilled;
 
 	refilled = 0;
-	while (rxq->rx_desc_count < MWL8K_RX_DESCS && limit--) {
+	while (rxq->rxd_count < MWL8K_RX_DESCS && limit--) {
 		struct sk_buff *skb;
 		int rx;
 
@@ -824,19 +820,19 @@ static int rxq_refill(struct ieee80211_hw *hw, int index, int limit)
 		if (skb == NULL)
 			break;
 
-		rxq->rx_desc_count++;
+		rxq->rxd_count++;
 
-		rx = rxq->rx_tail;
-		rxq->rx_tail = (rx + 1) % MWL8K_RX_DESCS;
+		rx = rxq->tail;
+		rxq->tail = (rx + 1) % MWL8K_RX_DESCS;
 
-		rxq->rx_desc_area[rx].pkt_phys_addr =
+		rxq->rxd[rx].pkt_phys_addr =
 			cpu_to_le32(pci_map_single(priv->pdev, skb->data,
 				    MWL8K_RX_MAXSZ, DMA_FROM_DEVICE));
 
-		rxq->rx_desc_area[rx].pkt_len = cpu_to_le16(MWL8K_RX_MAXSZ);
-		rxq->rx_skb[rx] = skb;
+		rxq->rxd[rx].pkt_len = cpu_to_le16(MWL8K_RX_MAXSZ);
+		rxq->skb[rx] = skb;
 		wmb();
-		rxq->rx_desc_area[rx].rx_ctrl = 0;
+		rxq->rxd[rx].rx_ctrl = 0;
 
 		refilled++;
 	}
@@ -852,24 +848,24 @@ static void mwl8k_rxq_deinit(struct ieee80211_hw *hw, int index)
 	int i;
 
 	for (i = 0; i < MWL8K_RX_DESCS; i++) {
-		if (rxq->rx_skb[i] != NULL) {
+		if (rxq->skb[i] != NULL) {
 			unsigned long addr;
 
-			addr = le32_to_cpu(rxq->rx_desc_area[i].pkt_phys_addr);
+			addr = le32_to_cpu(rxq->rxd[i].pkt_phys_addr);
 			pci_unmap_single(priv->pdev, addr, MWL8K_RX_MAXSZ,
 					 PCI_DMA_FROMDEVICE);
-			kfree_skb(rxq->rx_skb[i]);
-			rxq->rx_skb[i] = NULL;
+			kfree_skb(rxq->skb[i]);
+			rxq->skb[i] = NULL;
 		}
 	}
 
-	kfree(rxq->rx_skb);
-	rxq->rx_skb = NULL;
+	kfree(rxq->skb);
+	rxq->skb = NULL;
 
 	pci_free_consistent(priv->pdev,
 			    MWL8K_RX_DESCS * sizeof(struct mwl8k_rx_desc),
-			    rxq->rx_desc_area, rxq->rx_desc_dma);
-	rxq->rx_desc_area = NULL;
+			    rxq->rxd, rxq->rxd_dma);
+	rxq->rxd = NULL;
 }
 
 
@@ -910,7 +906,7 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
 	int processed;
 
 	processed = 0;
-	while (rxq->rx_desc_count && limit--) {
+	while (rxq->rxd_count && limit--) {
 		struct mwl8k_rx_desc *rx_desc;
 		struct sk_buff *skb;
 		struct ieee80211_rx_status status;
@@ -918,18 +914,18 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
 		struct ieee80211_hdr *wh;
 		u16 rate_info;
 
-		rx_desc = rxq->rx_desc_area + rxq->rx_head;
+		rx_desc = rxq->rxd + rxq->head;
 		if (!(rx_desc->rx_ctrl & MWL8K_RX_CTRL_OWNED_BY_HOST))
 			break;
 		rmb();
 
-		skb = rxq->rx_skb[rxq->rx_head];
+		skb = rxq->skb[rxq->head];
 		if (skb == NULL)
 			break;
-		rxq->rx_skb[rxq->rx_head] = NULL;
+		rxq->skb[rxq->head] = NULL;
 
-		rxq->rx_head = (rxq->rx_head + 1) % MWL8K_RX_DESCS;
-		rxq->rx_desc_count--;
+		rxq->head = (rxq->head + 1) % MWL8K_RX_DESCS;
+		rxq->rxd_count--;
 
 		addr = le32_to_cpu(rx_desc->pkt_phys_addr);
 		pci_unmap_single(priv->pdev, addr,
@@ -1000,7 +996,7 @@ struct mwl8k_tx_desc {
 	__le32 pkt_phys_addr;
 	__le16 pkt_len;
 	__u8 dest_MAC_addr[ETH_ALEN];
-	__le32 next_tx_desc_phys_addr;
+	__le32 next_txd_phys_addr;
 	__le32 reserved;
 	__le16 rate_info;
 	__u8 peer_id;
@@ -1016,44 +1012,40 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
 	int size;
 	int i;
 
-	memset(&txq->tx_stats, 0, sizeof(struct ieee80211_tx_queue_stats));
-	txq->tx_stats.limit = MWL8K_TX_DESCS;
-	txq->tx_head = 0;
-	txq->tx_tail = 0;
+	memset(&txq->stats, 0, sizeof(struct ieee80211_tx_queue_stats));
+	txq->stats.limit = MWL8K_TX_DESCS;
+	txq->head = 0;
+	txq->tail = 0;
 
 	size = MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc);
 
-	txq->tx_desc_area =
-		pci_alloc_consistent(priv->pdev, size, &txq->tx_desc_dma);
-	if (txq->tx_desc_area == NULL) {
+	txq->txd = pci_alloc_consistent(priv->pdev, size, &txq->txd_dma);
+	if (txq->txd == NULL) {
 		printk(KERN_ERR "%s: failed to alloc TX descriptors\n",
 		       wiphy_name(hw->wiphy));
 		return -ENOMEM;
 	}
-	memset(txq->tx_desc_area, 0, size);
+	memset(txq->txd, 0, size);
 
-	txq->tx_skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->tx_skb),
-			      GFP_KERNEL);
-	if (txq->tx_skb == NULL) {
+	txq->skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->skb), GFP_KERNEL);
+	if (txq->skb == NULL) {
 		printk(KERN_ERR "%s: failed to alloc TX skbuff list\n",
 		       wiphy_name(hw->wiphy));
-		pci_free_consistent(priv->pdev, size,
-				    txq->tx_desc_area, txq->tx_desc_dma);
+		pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma);
 		return -ENOMEM;
 	}
-	memset(txq->tx_skb, 0, MWL8K_TX_DESCS * sizeof(*txq->tx_skb));
+	memset(txq->skb, 0, MWL8K_TX_DESCS * sizeof(*txq->skb));
 
 	for (i = 0; i < MWL8K_TX_DESCS; i++) {
 		struct mwl8k_tx_desc *tx_desc;
 		int nexti;
 
-		tx_desc = txq->tx_desc_area + i;
+		tx_desc = txq->txd + i;
 		nexti = (i + 1) % MWL8K_TX_DESCS;
 
 		tx_desc->status = 0;
-		tx_desc->next_tx_desc_phys_addr =
-			cpu_to_le32(txq->tx_desc_dma +
-				    nexti * sizeof(*tx_desc));
+		tx_desc->next_txd_phys_addr =
+			cpu_to_le32(txq->txd_dma + nexti * sizeof(*tx_desc));
 	}
 
 	return 0;
@@ -1089,11 +1081,11 @@ static int mwl8k_scan_tx_ring(struct mwl8k_priv *priv,
 
 	for (count = 0; count < MWL8K_TX_QUEUES; count++) {
 		txq = priv->txq + count;
-		txinfo[count].len = txq->tx_stats.len;
-		txinfo[count].head = txq->tx_head;
-		txinfo[count].tail = txq->tx_tail;
+		txinfo[count].len = txq->stats.len;
+		txinfo[count].head = txq->head;
+		txinfo[count].tail = txq->tail;
 		for (desc = 0; desc < MWL8K_TX_DESCS; desc++) {
-			tx_desc = txq->tx_desc_area + desc;
+			tx_desc = txq->txd + desc;
 			status = le32_to_cpu(tx_desc->status);
 
 			if (status & MWL8K_TXD_STATUS_FW_OWNED)
@@ -1174,7 +1166,7 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
 	struct mwl8k_tx_queue *txq = priv->txq + index;
 	int wake = 0;
 
-	while (txq->tx_stats.len > 0) {
+	while (txq->stats.len > 0) {
 		int tx;
 		struct mwl8k_tx_desc *tx_desc;
 		unsigned long addr;
@@ -1183,8 +1175,8 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
 		struct ieee80211_tx_info *info;
 		u32 status;
 
-		tx = txq->tx_head;
-		tx_desc = txq->tx_desc_area + tx;
+		tx = txq->head;
+		tx_desc = txq->txd + tx;
 
 		status = le32_to_cpu(tx_desc->status);
 
@@ -1195,15 +1187,15 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
 				~cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED);
 		}
 
-		txq->tx_head = (tx + 1) % MWL8K_TX_DESCS;
-		BUG_ON(txq->tx_stats.len == 0);
-		txq->tx_stats.len--;
+		txq->head = (tx + 1) % MWL8K_TX_DESCS;
+		BUG_ON(txq->stats.len == 0);
+		txq->stats.len--;
 		priv->pending_tx_pkts--;
 
 		addr = le32_to_cpu(tx_desc->pkt_phys_addr);
 		size = le16_to_cpu(tx_desc->pkt_len);
-		skb = txq->tx_skb[tx];
-		txq->tx_skb[tx] = NULL;
+		skb = txq->skb[tx];
+		txq->skb[tx] = NULL;
 
 		BUG_ON(skb == NULL);
 		pci_unmap_single(priv->pdev, addr, size, PCI_DMA_TODEVICE);
@@ -1236,13 +1228,13 @@ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
 
 	mwl8k_txq_reclaim(hw, index, 1);
 
-	kfree(txq->tx_skb);
-	txq->tx_skb = NULL;
+	kfree(txq->skb);
+	txq->skb = NULL;
 
 	pci_free_consistent(priv->pdev,
 			    MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc),
-			    txq->tx_desc_area, txq->tx_desc_dma);
-	txq->tx_desc_area = NULL;
+			    txq->txd, txq->txd_dma);
+	txq->txd = NULL;
 }
 
 static int
@@ -1319,10 +1311,10 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
 
 	txq = priv->txq + index;
 
-	BUG_ON(txq->tx_skb[txq->tx_tail] != NULL);
-	txq->tx_skb[txq->tx_tail] = skb;
+	BUG_ON(txq->skb[txq->tail] != NULL);
+	txq->skb[txq->tail] = skb;
 
-	tx = txq->tx_desc_area + txq->tx_tail;
+	tx = txq->txd + txq->tail;
 	tx->data_rate = txdatarate;
 	tx->tx_priority = index;
 	tx->qos_control = cpu_to_le16(qos);
@@ -1333,15 +1325,15 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
 	wmb();
 	tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus);
 
-	txq->tx_stats.count++;
-	txq->tx_stats.len++;
+	txq->stats.count++;
+	txq->stats.len++;
 	priv->pending_tx_pkts++;
 
-	txq->tx_tail++;
-	if (txq->tx_tail == MWL8K_TX_DESCS)
-		txq->tx_tail = 0;
+	txq->tail++;
+	if (txq->tail == MWL8K_TX_DESCS)
+		txq->tail = 0;
 
-	if (txq->tx_head == txq->tx_tail)
+	if (txq->head == txq->tail)
 		ieee80211_stop_queue(hw, index);
 
 	mwl8k_tx_start(priv);
@@ -1492,7 +1484,7 @@ struct mwl8k_cmd_get_hw_spec {
 	__le32 tx_queue_ptrs[MWL8K_TX_QUEUES];
 	__le32 caps2;
 	__le32 num_tx_desc_per_queue;
-	__le32 total_rx_desc;
+	__le32 total_rxd;
 } __attribute__((packed));
 
 static int mwl8k_cmd_get_hw_spec(struct ieee80211_hw *hw)
@@ -1511,12 +1503,12 @@ static int mwl8k_cmd_get_hw_spec(struct ieee80211_hw *hw)
 
 	memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr));
 	cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
-	cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rx_desc_dma);
+	cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma);
 	cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
 	for (i = 0; i < MWL8K_TX_QUEUES; i++)
-		cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].tx_desc_dma);
+		cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma);
 	cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
-	cmd->total_rx_desc = cpu_to_le32(MWL8K_RX_DESCS);
+	cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
 
 	rc = mwl8k_post_cmd(hw, &cmd->header);
 
@@ -2888,7 +2880,7 @@ static int mwl8k_get_tx_stats(struct ieee80211_hw *hw,
 	spin_lock_bh(&priv->tx_lock);
 	for (index = 0; index < MWL8K_TX_QUEUES; index++) {
 		txq = priv->txq + index;
-		memcpy(&stats[index], &txq->tx_stats,
+		memcpy(&stats[index], &txq->stats,
 		       sizeof(struct ieee80211_tx_queue_stats));
 	}
 	spin_unlock_bh(&priv->tx_lock);