Diffstat (limited to 'drivers/net/wireless/ath/ath5k/base.c')
-rw-r--r-- | drivers/net/wireless/ath/ath5k/base.c | 3110
1 file changed, 3110 insertions, 0 deletions
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
new file mode 100644
index 000000000000..ff6d4f839734
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -0,0 +1,3110 @@
1 | /*- | ||
2 | * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting | ||
3 | * Copyright (c) 2004-2005 Atheros Communications, Inc. | ||
4 | * Copyright (c) 2006 Devicescape Software, Inc. | ||
5 | * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com> | ||
6 | * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu> | ||
7 | * | ||
8 | * All rights reserved. | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or without | ||
11 | * modification, are permitted provided that the following conditions | ||
12 | * are met: | ||
13 | * 1. Redistributions of source code must retain the above copyright | ||
14 | * notice, this list of conditions and the following disclaimer, | ||
15 | * without modification. | ||
16 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
17 | * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any | ||
18 | * redistribution must be conditioned upon including a substantially | ||
19 | * similar Disclaimer requirement for further binary redistribution. | ||
20 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
21 | * of any contributors may be used to endorse or promote products derived | ||
22 | * from this software without specific prior written permission. | ||
23 | * | ||
24 | * Alternatively, this software may be distributed under the terms of the | ||
25 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
26 | * Software Foundation. | ||
27 | * | ||
28 | * NO WARRANTY | ||
29 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
30 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
31 | * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY | ||
32 | * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL | ||
33 | * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, | ||
34 | * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
35 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | ||
36 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
37 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
38 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF | ||
39 | * THE POSSIBILITY OF SUCH DAMAGES. | ||
40 | * | ||
41 | */ | ||
42 | |||
43 | #include <linux/module.h> | ||
44 | #include <linux/delay.h> | ||
45 | #include <linux/hardirq.h> | ||
46 | #include <linux/if.h> | ||
47 | #include <linux/io.h> | ||
48 | #include <linux/netdevice.h> | ||
49 | #include <linux/cache.h> | ||
50 | #include <linux/pci.h> | ||
51 | #include <linux/ethtool.h> | ||
52 | #include <linux/uaccess.h> | ||
53 | |||
54 | #include <net/ieee80211_radiotap.h> | ||
55 | |||
56 | #include <asm/unaligned.h> | ||
57 | |||
58 | #include "base.h" | ||
59 | #include "reg.h" | ||
60 | #include "debug.h" | ||
61 | |||
62 | static int ath5k_calinterval = 10; /* Calibrate PHY every 10 secs (TODO: Fixme) */ | ||
63 | static int modparam_nohwcrypt; | ||
64 | module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444); | ||
65 | MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); | ||
66 | |||
67 | static int modparam_all_channels; | ||
68 | module_param_named(all_channels, modparam_all_channels, int, 0444); | ||
69 | MODULE_PARM_DESC(all_channels, "Expose all channels the device can use."); | ||
70 | |||
71 | |||
72 | /******************\ | ||
73 | * Internal defines * | ||
74 | \******************/ | ||
75 | |||
76 | /* Module info */ | ||
77 | MODULE_AUTHOR("Jiri Slaby"); | ||
78 | MODULE_AUTHOR("Nick Kossifidis"); | ||
79 | MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards."); | ||
80 | MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards"); | ||
81 | MODULE_LICENSE("Dual BSD/GPL"); | ||
82 | MODULE_VERSION("0.6.0 (EXPERIMENTAL)"); | ||
83 | |||
84 | |||
85 | /* Known PCI ids */ | ||
86 | static const struct pci_device_id ath5k_pci_id_table[] = { | ||
87 | { PCI_VDEVICE(ATHEROS, 0x0207), .driver_data = AR5K_AR5210 }, /* 5210 early */ | ||
88 | { PCI_VDEVICE(ATHEROS, 0x0007), .driver_data = AR5K_AR5210 }, /* 5210 */ | ||
89 | { PCI_VDEVICE(ATHEROS, 0x0011), .driver_data = AR5K_AR5211 }, /* 5311 - this is on AHB bus !*/ | ||
90 | { PCI_VDEVICE(ATHEROS, 0x0012), .driver_data = AR5K_AR5211 }, /* 5211 */ | ||
91 | { PCI_VDEVICE(ATHEROS, 0x0013), .driver_data = AR5K_AR5212 }, /* 5212 */ | ||
92 | { PCI_VDEVICE(3COM_2, 0x0013), .driver_data = AR5K_AR5212 }, /* 3com 5212 */ | ||
93 | { PCI_VDEVICE(3COM, 0x0013), .driver_data = AR5K_AR5212 }, /* 3com 3CRDAG675 5212 */ | ||
94 | { PCI_VDEVICE(ATHEROS, 0x1014), .driver_data = AR5K_AR5212 }, /* IBM minipci 5212 */ | ||
95 | { PCI_VDEVICE(ATHEROS, 0x0014), .driver_data = AR5K_AR5212 }, /* 5212 compatible */ | ||
96 | { PCI_VDEVICE(ATHEROS, 0x0015), .driver_data = AR5K_AR5212 }, /* 5212 compatible */ | ||
97 | { PCI_VDEVICE(ATHEROS, 0x0016), .driver_data = AR5K_AR5212 }, /* 5212 compatible */ | ||
98 | { PCI_VDEVICE(ATHEROS, 0x0017), .driver_data = AR5K_AR5212 }, /* 5212 compatible */ | ||
99 | { PCI_VDEVICE(ATHEROS, 0x0018), .driver_data = AR5K_AR5212 }, /* 5212 compatible */ | ||
100 | { PCI_VDEVICE(ATHEROS, 0x0019), .driver_data = AR5K_AR5212 }, /* 5212 compatible */ | ||
101 | { PCI_VDEVICE(ATHEROS, 0x001a), .driver_data = AR5K_AR5212 }, /* 2413 Griffin-lite */ | ||
102 | { PCI_VDEVICE(ATHEROS, 0x001b), .driver_data = AR5K_AR5212 }, /* 5413 Eagle */ | ||
103 | { PCI_VDEVICE(ATHEROS, 0x001c), .driver_data = AR5K_AR5212 }, /* PCI-E cards */ | ||
104 | { PCI_VDEVICE(ATHEROS, 0x001d), .driver_data = AR5K_AR5212 }, /* 2417 Nala */ | ||
105 | { 0 } | ||
106 | }; | ||
107 | MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table); | ||
108 | |||
109 | /* Known SREVs */ | ||
110 | static const struct ath5k_srev_name srev_names[] = { | ||
111 | { "5210", AR5K_VERSION_MAC, AR5K_SREV_AR5210 }, | ||
112 | { "5311", AR5K_VERSION_MAC, AR5K_SREV_AR5311 }, | ||
113 | { "5311A", AR5K_VERSION_MAC, AR5K_SREV_AR5311A }, | ||
114 | { "5311B", AR5K_VERSION_MAC, AR5K_SREV_AR5311B }, | ||
115 | { "5211", AR5K_VERSION_MAC, AR5K_SREV_AR5211 }, | ||
116 | { "5212", AR5K_VERSION_MAC, AR5K_SREV_AR5212 }, | ||
117 | { "5213", AR5K_VERSION_MAC, AR5K_SREV_AR5213 }, | ||
118 | { "5213A", AR5K_VERSION_MAC, AR5K_SREV_AR5213A }, | ||
119 | { "2413", AR5K_VERSION_MAC, AR5K_SREV_AR2413 }, | ||
120 | { "2414", AR5K_VERSION_MAC, AR5K_SREV_AR2414 }, | ||
121 | { "5424", AR5K_VERSION_MAC, AR5K_SREV_AR5424 }, | ||
122 | { "5413", AR5K_VERSION_MAC, AR5K_SREV_AR5413 }, | ||
123 | { "5414", AR5K_VERSION_MAC, AR5K_SREV_AR5414 }, | ||
124 | { "2415", AR5K_VERSION_MAC, AR5K_SREV_AR2415 }, | ||
125 | { "5416", AR5K_VERSION_MAC, AR5K_SREV_AR5416 }, | ||
126 | { "5418", AR5K_VERSION_MAC, AR5K_SREV_AR5418 }, | ||
127 | { "2425", AR5K_VERSION_MAC, AR5K_SREV_AR2425 }, | ||
128 | { "2417", AR5K_VERSION_MAC, AR5K_SREV_AR2417 }, | ||
129 | { "xxxxx", AR5K_VERSION_MAC, AR5K_SREV_UNKNOWN }, | ||
130 | { "5110", AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 }, | ||
131 | { "5111", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 }, | ||
132 | { "5111A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111A }, | ||
133 | { "2111", AR5K_VERSION_RAD, AR5K_SREV_RAD_2111 }, | ||
134 | { "5112", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112 }, | ||
135 | { "5112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112A }, | ||
136 | { "5112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112B }, | ||
137 | { "2112", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112 }, | ||
138 | { "2112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112A }, | ||
139 | { "2112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112B }, | ||
140 | { "2413", AR5K_VERSION_RAD, AR5K_SREV_RAD_2413 }, | ||
141 | { "5413", AR5K_VERSION_RAD, AR5K_SREV_RAD_5413 }, | ||
142 | { "2316", AR5K_VERSION_RAD, AR5K_SREV_RAD_2316 }, | ||
143 | { "2317", AR5K_VERSION_RAD, AR5K_SREV_RAD_2317 }, | ||
144 | { "5424", AR5K_VERSION_RAD, AR5K_SREV_RAD_5424 }, | ||
145 | { "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 }, | ||
146 | { "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN }, | ||
147 | }; | ||
148 | |||
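/*
 * Note: mac80211 expresses .bitrate in units of 100 kbit/s, so 10 below
 * means 1 Mbit/s and 540 means 54 Mbit/s. .hw_value carries the AR5K rate
 * code handed to the tx descriptor, and .hw_value_short for the 2, 5.5 and
 * 11 Mbit/s CCK rates adds AR5K_SET_SHORT_PREAMBLE for short-preamble
 * transmissions.
 */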
149 | static const struct ieee80211_rate ath5k_rates[] = { | ||
150 | { .bitrate = 10, | ||
151 | .hw_value = ATH5K_RATE_CODE_1M, }, | ||
152 | { .bitrate = 20, | ||
153 | .hw_value = ATH5K_RATE_CODE_2M, | ||
154 | .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE, | ||
155 | .flags = IEEE80211_RATE_SHORT_PREAMBLE }, | ||
156 | { .bitrate = 55, | ||
157 | .hw_value = ATH5K_RATE_CODE_5_5M, | ||
158 | .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE, | ||
159 | .flags = IEEE80211_RATE_SHORT_PREAMBLE }, | ||
160 | { .bitrate = 110, | ||
161 | .hw_value = ATH5K_RATE_CODE_11M, | ||
162 | .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE, | ||
163 | .flags = IEEE80211_RATE_SHORT_PREAMBLE }, | ||
164 | { .bitrate = 60, | ||
165 | .hw_value = ATH5K_RATE_CODE_6M, | ||
166 | .flags = 0 }, | ||
167 | { .bitrate = 90, | ||
168 | .hw_value = ATH5K_RATE_CODE_9M, | ||
169 | .flags = 0 }, | ||
170 | { .bitrate = 120, | ||
171 | .hw_value = ATH5K_RATE_CODE_12M, | ||
172 | .flags = 0 }, | ||
173 | { .bitrate = 180, | ||
174 | .hw_value = ATH5K_RATE_CODE_18M, | ||
175 | .flags = 0 }, | ||
176 | { .bitrate = 240, | ||
177 | .hw_value = ATH5K_RATE_CODE_24M, | ||
178 | .flags = 0 }, | ||
179 | { .bitrate = 360, | ||
180 | .hw_value = ATH5K_RATE_CODE_36M, | ||
181 | .flags = 0 }, | ||
182 | { .bitrate = 480, | ||
183 | .hw_value = ATH5K_RATE_CODE_48M, | ||
184 | .flags = 0 }, | ||
185 | { .bitrate = 540, | ||
186 | .hw_value = ATH5K_RATE_CODE_54M, | ||
187 | .flags = 0 }, | ||
188 | /* XR missing */ | ||
189 | }; | ||
190 | |||
191 | /* | ||
192 | * Prototypes - PCI stack related functions | ||
193 | */ | ||
194 | static int __devinit ath5k_pci_probe(struct pci_dev *pdev, | ||
195 | const struct pci_device_id *id); | ||
196 | static void __devexit ath5k_pci_remove(struct pci_dev *pdev); | ||
197 | #ifdef CONFIG_PM | ||
198 | static int ath5k_pci_suspend(struct pci_dev *pdev, | ||
199 | pm_message_t state); | ||
200 | static int ath5k_pci_resume(struct pci_dev *pdev); | ||
201 | #else | ||
202 | #define ath5k_pci_suspend NULL | ||
203 | #define ath5k_pci_resume NULL | ||
204 | #endif /* CONFIG_PM */ | ||
205 | |||
206 | static struct pci_driver ath5k_pci_driver = { | ||
207 | .name = KBUILD_MODNAME, | ||
208 | .id_table = ath5k_pci_id_table, | ||
209 | .probe = ath5k_pci_probe, | ||
210 | .remove = __devexit_p(ath5k_pci_remove), | ||
211 | .suspend = ath5k_pci_suspend, | ||
212 | .resume = ath5k_pci_resume, | ||
213 | }; | ||
214 | |||
215 | |||
216 | |||
217 | /* | ||
218 | * Prototypes - MAC 802.11 stack related functions | ||
219 | */ | ||
220 | static int ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb); | ||
221 | static int ath5k_reset(struct ath5k_softc *sc, bool stop, bool change_channel); | ||
222 | static int ath5k_reset_wake(struct ath5k_softc *sc); | ||
223 | static int ath5k_start(struct ieee80211_hw *hw); | ||
224 | static void ath5k_stop(struct ieee80211_hw *hw); | ||
225 | static int ath5k_add_interface(struct ieee80211_hw *hw, | ||
226 | struct ieee80211_if_init_conf *conf); | ||
227 | static void ath5k_remove_interface(struct ieee80211_hw *hw, | ||
228 | struct ieee80211_if_init_conf *conf); | ||
229 | static int ath5k_config(struct ieee80211_hw *hw, u32 changed); | ||
230 | static int ath5k_config_interface(struct ieee80211_hw *hw, | ||
231 | struct ieee80211_vif *vif, | ||
232 | struct ieee80211_if_conf *conf); | ||
233 | static void ath5k_configure_filter(struct ieee80211_hw *hw, | ||
234 | unsigned int changed_flags, | ||
235 | unsigned int *new_flags, | ||
236 | int mc_count, struct dev_mc_list *mclist); | ||
237 | static int ath5k_set_key(struct ieee80211_hw *hw, | ||
238 | enum set_key_cmd cmd, | ||
239 | struct ieee80211_vif *vif, struct ieee80211_sta *sta, | ||
240 | struct ieee80211_key_conf *key); | ||
241 | static int ath5k_get_stats(struct ieee80211_hw *hw, | ||
242 | struct ieee80211_low_level_stats *stats); | ||
243 | static int ath5k_get_tx_stats(struct ieee80211_hw *hw, | ||
244 | struct ieee80211_tx_queue_stats *stats); | ||
245 | static u64 ath5k_get_tsf(struct ieee80211_hw *hw); | ||
246 | static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf); | ||
247 | static void ath5k_reset_tsf(struct ieee80211_hw *hw); | ||
248 | static int ath5k_beacon_update(struct ath5k_softc *sc, | ||
249 | struct sk_buff *skb); | ||
250 | static void ath5k_bss_info_changed(struct ieee80211_hw *hw, | ||
251 | struct ieee80211_vif *vif, | ||
252 | struct ieee80211_bss_conf *bss_conf, | ||
253 | u32 changes); | ||
254 | |||
255 | static const struct ieee80211_ops ath5k_hw_ops = { | ||
256 | .tx = ath5k_tx, | ||
257 | .start = ath5k_start, | ||
258 | .stop = ath5k_stop, | ||
259 | .add_interface = ath5k_add_interface, | ||
260 | .remove_interface = ath5k_remove_interface, | ||
261 | .config = ath5k_config, | ||
262 | .config_interface = ath5k_config_interface, | ||
263 | .configure_filter = ath5k_configure_filter, | ||
264 | .set_key = ath5k_set_key, | ||
265 | .get_stats = ath5k_get_stats, | ||
266 | .conf_tx = NULL, | ||
267 | .get_tx_stats = ath5k_get_tx_stats, | ||
268 | .get_tsf = ath5k_get_tsf, | ||
269 | .set_tsf = ath5k_set_tsf, | ||
270 | .reset_tsf = ath5k_reset_tsf, | ||
271 | .bss_info_changed = ath5k_bss_info_changed, | ||
272 | }; | ||
273 | |||
274 | /* | ||
275 | * Prototypes - Internal functions | ||
276 | */ | ||
277 | /* Attach detach */ | ||
278 | static int ath5k_attach(struct pci_dev *pdev, | ||
279 | struct ieee80211_hw *hw); | ||
280 | static void ath5k_detach(struct pci_dev *pdev, | ||
281 | struct ieee80211_hw *hw); | ||
282 | /* Channel/mode setup */ | ||
283 | static inline short ath5k_ieee2mhz(short chan); | ||
284 | static unsigned int ath5k_copy_channels(struct ath5k_hw *ah, | ||
285 | struct ieee80211_channel *channels, | ||
286 | unsigned int mode, | ||
287 | unsigned int max); | ||
288 | static int ath5k_setup_bands(struct ieee80211_hw *hw); | ||
289 | static int ath5k_chan_set(struct ath5k_softc *sc, | ||
290 | struct ieee80211_channel *chan); | ||
291 | static void ath5k_setcurmode(struct ath5k_softc *sc, | ||
292 | unsigned int mode); | ||
293 | static void ath5k_mode_setup(struct ath5k_softc *sc); | ||
294 | |||
295 | /* Descriptor setup */ | ||
296 | static int ath5k_desc_alloc(struct ath5k_softc *sc, | ||
297 | struct pci_dev *pdev); | ||
298 | static void ath5k_desc_free(struct ath5k_softc *sc, | ||
299 | struct pci_dev *pdev); | ||
300 | /* Buffers setup */ | ||
301 | static int ath5k_rxbuf_setup(struct ath5k_softc *sc, | ||
302 | struct ath5k_buf *bf); | ||
303 | static int ath5k_txbuf_setup(struct ath5k_softc *sc, | ||
304 | struct ath5k_buf *bf); | ||
305 | static inline void ath5k_txbuf_free(struct ath5k_softc *sc, | ||
306 | struct ath5k_buf *bf) | ||
307 | { | ||
308 | BUG_ON(!bf); | ||
309 | if (!bf->skb) | ||
310 | return; | ||
311 | pci_unmap_single(sc->pdev, bf->skbaddr, bf->skb->len, | ||
312 | PCI_DMA_TODEVICE); | ||
313 | dev_kfree_skb_any(bf->skb); | ||
314 | bf->skb = NULL; | ||
315 | } | ||
316 | |||
317 | static inline void ath5k_rxbuf_free(struct ath5k_softc *sc, | ||
318 | struct ath5k_buf *bf) | ||
319 | { | ||
320 | BUG_ON(!bf); | ||
321 | if (!bf->skb) | ||
322 | return; | ||
323 | pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize, | ||
324 | PCI_DMA_FROMDEVICE); | ||
325 | dev_kfree_skb_any(bf->skb); | ||
326 | bf->skb = NULL; | ||
327 | } | ||
328 | |||
329 | |||
330 | /* Queues setup */ | ||
331 | static struct ath5k_txq *ath5k_txq_setup(struct ath5k_softc *sc, | ||
332 | int qtype, int subtype); | ||
333 | static int ath5k_beaconq_setup(struct ath5k_hw *ah); | ||
334 | static int ath5k_beaconq_config(struct ath5k_softc *sc); | ||
335 | static void ath5k_txq_drainq(struct ath5k_softc *sc, | ||
336 | struct ath5k_txq *txq); | ||
337 | static void ath5k_txq_cleanup(struct ath5k_softc *sc); | ||
338 | static void ath5k_txq_release(struct ath5k_softc *sc); | ||
339 | /* Rx handling */ | ||
340 | static int ath5k_rx_start(struct ath5k_softc *sc); | ||
341 | static void ath5k_rx_stop(struct ath5k_softc *sc); | ||
342 | static unsigned int ath5k_rx_decrypted(struct ath5k_softc *sc, | ||
343 | struct ath5k_desc *ds, | ||
344 | struct sk_buff *skb, | ||
345 | struct ath5k_rx_status *rs); | ||
346 | static void ath5k_tasklet_rx(unsigned long data); | ||
347 | /* Tx handling */ | ||
348 | static void ath5k_tx_processq(struct ath5k_softc *sc, | ||
349 | struct ath5k_txq *txq); | ||
350 | static void ath5k_tasklet_tx(unsigned long data); | ||
351 | /* Beacon handling */ | ||
352 | static int ath5k_beacon_setup(struct ath5k_softc *sc, | ||
353 | struct ath5k_buf *bf); | ||
354 | static void ath5k_beacon_send(struct ath5k_softc *sc); | ||
355 | static void ath5k_beacon_config(struct ath5k_softc *sc); | ||
356 | static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf); | ||
357 | static void ath5k_tasklet_beacon(unsigned long data); | ||
358 | |||
359 | static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp) | ||
360 | { | ||
361 | u64 tsf = ath5k_hw_get_tsf64(ah); | ||
362 | |||
363 | if ((tsf & 0x7fff) < rstamp) | ||
364 | tsf -= 0x8000; | ||
365 | |||
366 | return (tsf & ~0x7fff) | rstamp; | ||
367 | } | ||
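/*
 * Worked example (illustrative numbers): with rstamp = 0x7ff0 and a current
 * TSF of 0x120005, (tsf & 0x7fff) = 0x0005 is smaller than rstamp, meaning
 * the frame was received before the last 15-bit rollover. Subtracting
 * 0x8000 first gives 0x118005, so the result is 0x118000 | 0x7ff0 =
 * 0x11fff0, a timestamp slightly in the past instead of one in the future.
 */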
368 | |||
369 | /* Interrupt handling */ | ||
370 | static int ath5k_init(struct ath5k_softc *sc); | ||
371 | static int ath5k_stop_locked(struct ath5k_softc *sc); | ||
372 | static int ath5k_stop_hw(struct ath5k_softc *sc); | ||
373 | static irqreturn_t ath5k_intr(int irq, void *dev_id); | ||
374 | static void ath5k_tasklet_reset(unsigned long data); | ||
375 | |||
376 | static void ath5k_calibrate(unsigned long data); | ||
377 | |||
378 | /* | ||
379 | * Module init/exit functions | ||
380 | */ | ||
381 | static int __init | ||
382 | init_ath5k_pci(void) | ||
383 | { | ||
384 | int ret; | ||
385 | |||
386 | ath5k_debug_init(); | ||
387 | |||
388 | ret = pci_register_driver(&ath5k_pci_driver); | ||
389 | if (ret) { | ||
390 | printk(KERN_ERR "ath5k_pci: can't register pci driver\n"); | ||
391 | return ret; | ||
392 | } | ||
393 | |||
394 | return 0; | ||
395 | } | ||
396 | |||
397 | static void __exit | ||
398 | exit_ath5k_pci(void) | ||
399 | { | ||
400 | pci_unregister_driver(&ath5k_pci_driver); | ||
401 | |||
402 | ath5k_debug_finish(); | ||
403 | } | ||
404 | |||
405 | module_init(init_ath5k_pci); | ||
406 | module_exit(exit_ath5k_pci); | ||
407 | |||
408 | |||
409 | /********************\ | ||
410 | * PCI Initialization * | ||
411 | \********************/ | ||
412 | |||
413 | static const char * | ||
414 | ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val) | ||
415 | { | ||
416 | const char *name = "xxxxx"; | ||
417 | unsigned int i; | ||
418 | |||
419 | for (i = 0; i < ARRAY_SIZE(srev_names); i++) { | ||
420 | if (srev_names[i].sr_type != type) | ||
421 | continue; | ||
422 | |||
423 | if ((val & 0xf0) == srev_names[i].sr_val) | ||
424 | name = srev_names[i].sr_name; | ||
425 | |||
426 | if ((val & 0xff) == srev_names[i].sr_val) { | ||
427 | name = srev_names[i].sr_name; | ||
428 | break; | ||
429 | } | ||
430 | } | ||
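/*
 * The loop records a family match on the high revision nibble
 * (val & 0xf0) but keeps scanning, so an exact full-byte match
 * (val & 0xff) can take precedence and end the search.
 */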
431 | |||
432 | return name; | ||
433 | } | ||
434 | |||
435 | static int __devinit | ||
436 | ath5k_pci_probe(struct pci_dev *pdev, | ||
437 | const struct pci_device_id *id) | ||
438 | { | ||
439 | void __iomem *mem; | ||
440 | struct ath5k_softc *sc; | ||
441 | struct ieee80211_hw *hw; | ||
442 | int ret; | ||
443 | u8 csz; | ||
444 | |||
445 | ret = pci_enable_device(pdev); | ||
446 | if (ret) { | ||
447 | dev_err(&pdev->dev, "can't enable device\n"); | ||
448 | goto err; | ||
449 | } | ||
450 | |||
451 | /* XXX 32-bit addressing only */ | ||
452 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
453 | if (ret) { | ||
454 | dev_err(&pdev->dev, "32-bit DMA not available\n"); | ||
455 | goto err_dis; | ||
456 | } | ||
457 | |||
458 | /* | ||
459 | * Cache line size is used to size and align various | ||
460 | * structures used to communicate with the hardware. | ||
461 | */ | ||
462 | pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz); | ||
463 | if (csz == 0) { | ||
464 | /* | ||
465 | * Linux 2.4.18 (at least) writes the cache line size | ||
466 | * register as a 16-bit wide register which is wrong. | ||
467 | * We must have this setup properly for rx buffer | ||
468 | * DMA to work so force a reasonable value here if it | ||
469 | * comes up zero. | ||
470 | */ | ||
471 | csz = L1_CACHE_BYTES / sizeof(u32); | ||
472 | pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz); | ||
473 | } | ||
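/*
 * PCI_CACHE_LINE_SIZE is programmed in units of 32-bit words, hence the
 * division by sizeof(u32): with a 32-byte cache line the fallback writes
 * 8 here, and sc->cachelsz (set further down in this function) converts
 * it back to 32 bytes.
 */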
474 | /* | ||
475 | * The default setting of latency timer yields poor results, | ||
476 | * set it to the value used by other systems. It may be worth | ||
477 | * tweaking this setting more. | ||
478 | */ | ||
479 | pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8); | ||
480 | |||
481 | /* Enable bus mastering */ | ||
482 | pci_set_master(pdev); | ||
483 | |||
484 | /* | ||
485 | * Disable the RETRY_TIMEOUT register (0x41) to keep | ||
486 | * PCI Tx retries from interfering with C3 CPU state. | ||
487 | */ | ||
488 | pci_write_config_byte(pdev, 0x41, 0); | ||
489 | |||
490 | ret = pci_request_region(pdev, 0, "ath5k"); | ||
491 | if (ret) { | ||
492 | dev_err(&pdev->dev, "cannot reserve PCI memory region\n"); | ||
493 | goto err_dis; | ||
494 | } | ||
495 | |||
496 | mem = pci_iomap(pdev, 0, 0); | ||
497 | if (!mem) { | ||
498 | dev_err(&pdev->dev, "cannot remap PCI memory region\n"); | ||
499 | ret = -EIO; | ||
500 | goto err_reg; | ||
501 | } | ||
502 | |||
503 | /* | ||
504 | * Allocate hw (mac80211 main struct) | ||
505 | * and hw->priv (driver private data) | ||
506 | */ | ||
507 | hw = ieee80211_alloc_hw(sizeof(*sc), &ath5k_hw_ops); | ||
508 | if (hw == NULL) { | ||
509 | dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n"); | ||
510 | ret = -ENOMEM; | ||
511 | goto err_map; | ||
512 | } | ||
513 | |||
514 | dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy)); | ||
515 | |||
516 | /* Initialize driver private data */ | ||
517 | SET_IEEE80211_DEV(hw, &pdev->dev); | ||
518 | hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | | ||
519 | IEEE80211_HW_SIGNAL_DBM | | ||
520 | IEEE80211_HW_NOISE_DBM; | ||
521 | |||
522 | hw->wiphy->interface_modes = | ||
523 | BIT(NL80211_IFTYPE_STATION) | | ||
524 | BIT(NL80211_IFTYPE_ADHOC) | | ||
525 | BIT(NL80211_IFTYPE_MESH_POINT); | ||
526 | |||
527 | hw->extra_tx_headroom = 2; | ||
528 | hw->channel_change_time = 5000; | ||
529 | sc = hw->priv; | ||
530 | sc->hw = hw; | ||
531 | sc->pdev = pdev; | ||
532 | |||
533 | ath5k_debug_init_device(sc); | ||
534 | |||
535 | /* | ||
536 | * Mark the device as detached to avoid processing | ||
537 | * interrupts until setup is complete. | ||
538 | */ | ||
539 | __set_bit(ATH_STAT_INVALID, sc->status); | ||
540 | |||
541 | sc->iobase = mem; /* So we can unmap it on detach */ | ||
542 | sc->cachelsz = csz * sizeof(u32); /* convert to bytes */ | ||
543 | sc->opmode = NL80211_IFTYPE_STATION; | ||
544 | mutex_init(&sc->lock); | ||
545 | spin_lock_init(&sc->rxbuflock); | ||
546 | spin_lock_init(&sc->txbuflock); | ||
547 | spin_lock_init(&sc->block); | ||
548 | |||
549 | /* Set private data */ | ||
550 | pci_set_drvdata(pdev, hw); | ||
551 | |||
552 | /* Setup interrupt handler */ | ||
553 | ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc); | ||
554 | if (ret) { | ||
555 | ATH5K_ERR(sc, "request_irq failed\n"); | ||
556 | goto err_free; | ||
557 | } | ||
558 | |||
559 | /* Initialize device */ | ||
560 | sc->ah = ath5k_hw_attach(sc, id->driver_data); | ||
561 | if (IS_ERR(sc->ah)) { | ||
562 | ret = PTR_ERR(sc->ah); | ||
563 | goto err_irq; | ||
564 | } | ||
565 | |||
566 | /* set up multi-rate retry capabilities */ | ||
567 | if (sc->ah->ah_version == AR5K_AR5212) { | ||
568 | hw->max_rates = 4; | ||
569 | hw->max_rate_tries = 11; | ||
570 | } | ||
571 | |||
572 | /* Finish private driver data initialization */ | ||
573 | ret = ath5k_attach(pdev, hw); | ||
574 | if (ret) | ||
575 | goto err_ah; | ||
576 | |||
577 | ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n", | ||
578 | ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev), | ||
579 | sc->ah->ah_mac_srev, | ||
580 | sc->ah->ah_phy_revision); | ||
581 | |||
582 | if (!sc->ah->ah_single_chip) { | ||
583 | /* Single chip radio (!RF5111) */ | ||
584 | if (sc->ah->ah_radio_5ghz_revision && | ||
585 | !sc->ah->ah_radio_2ghz_revision) { | ||
586 | /* No 5GHz support -> report 2GHz radio */ | ||
587 | if (!test_bit(AR5K_MODE_11A, | ||
588 | sc->ah->ah_capabilities.cap_mode)) { | ||
589 | ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n", | ||
590 | ath5k_chip_name(AR5K_VERSION_RAD, | ||
591 | sc->ah->ah_radio_5ghz_revision), | ||
592 | sc->ah->ah_radio_5ghz_revision); | ||
593 | /* No 2GHz support (5110 and some | ||
594 | * 5GHz only cards) -> report 5GHz radio */ | ||
595 | } else if (!test_bit(AR5K_MODE_11B, | ||
596 | sc->ah->ah_capabilities.cap_mode)) { | ||
597 | ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n", | ||
598 | ath5k_chip_name(AR5K_VERSION_RAD, | ||
599 | sc->ah->ah_radio_5ghz_revision), | ||
600 | sc->ah->ah_radio_5ghz_revision); | ||
601 | /* Multiband radio */ | ||
602 | } else { | ||
603 | ATH5K_INFO(sc, "RF%s multiband radio found" | ||
604 | " (0x%x)\n", | ||
605 | ath5k_chip_name(AR5K_VERSION_RAD, | ||
606 | sc->ah->ah_radio_5ghz_revision), | ||
607 | sc->ah->ah_radio_5ghz_revision); | ||
608 | } | ||
609 | } | ||
610 | /* Multi chip radio (RF5111 - RF2111) -> | ||
611 | * report both 2GHz/5GHz radios */ | ||
612 | else if (sc->ah->ah_radio_5ghz_revision && | ||
613 | sc->ah->ah_radio_2ghz_revision) { | ||
614 | ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n", | ||
615 | ath5k_chip_name(AR5K_VERSION_RAD, | ||
616 | sc->ah->ah_radio_5ghz_revision), | ||
617 | sc->ah->ah_radio_5ghz_revision); | ||
618 | ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n", | ||
619 | ath5k_chip_name(AR5K_VERSION_RAD, | ||
620 | sc->ah->ah_radio_2ghz_revision), | ||
621 | sc->ah->ah_radio_2ghz_revision); | ||
622 | } | ||
623 | } | ||
624 | |||
625 | |||
626 | /* ready to process interrupts */ | ||
627 | __clear_bit(ATH_STAT_INVALID, sc->status); | ||
628 | |||
629 | return 0; | ||
630 | err_ah: | ||
631 | ath5k_hw_detach(sc->ah); | ||
632 | err_irq: | ||
633 | free_irq(pdev->irq, sc); | ||
634 | err_free: | ||
635 | ieee80211_free_hw(hw); | ||
636 | err_map: | ||
637 | pci_iounmap(pdev, mem); | ||
638 | err_reg: | ||
639 | pci_release_region(pdev, 0); | ||
640 | err_dis: | ||
641 | pci_disable_device(pdev); | ||
642 | err: | ||
643 | return ret; | ||
644 | } | ||
645 | |||
646 | static void __devexit | ||
647 | ath5k_pci_remove(struct pci_dev *pdev) | ||
648 | { | ||
649 | struct ieee80211_hw *hw = pci_get_drvdata(pdev); | ||
650 | struct ath5k_softc *sc = hw->priv; | ||
651 | |||
652 | ath5k_debug_finish_device(sc); | ||
653 | ath5k_detach(pdev, hw); | ||
654 | ath5k_hw_detach(sc->ah); | ||
655 | free_irq(pdev->irq, sc); | ||
656 | pci_iounmap(pdev, sc->iobase); | ||
657 | pci_release_region(pdev, 0); | ||
658 | pci_disable_device(pdev); | ||
659 | ieee80211_free_hw(hw); | ||
660 | } | ||
661 | |||
662 | #ifdef CONFIG_PM | ||
663 | static int | ||
664 | ath5k_pci_suspend(struct pci_dev *pdev, pm_message_t state) | ||
665 | { | ||
666 | struct ieee80211_hw *hw = pci_get_drvdata(pdev); | ||
667 | struct ath5k_softc *sc = hw->priv; | ||
668 | |||
669 | ath5k_led_off(sc); | ||
670 | |||
671 | free_irq(pdev->irq, sc); | ||
672 | pci_save_state(pdev); | ||
673 | pci_disable_device(pdev); | ||
674 | pci_set_power_state(pdev, PCI_D3hot); | ||
675 | |||
676 | return 0; | ||
677 | } | ||
678 | |||
679 | static int | ||
680 | ath5k_pci_resume(struct pci_dev *pdev) | ||
681 | { | ||
682 | struct ieee80211_hw *hw = pci_get_drvdata(pdev); | ||
683 | struct ath5k_softc *sc = hw->priv; | ||
684 | int err; | ||
685 | |||
686 | pci_restore_state(pdev); | ||
687 | |||
688 | err = pci_enable_device(pdev); | ||
689 | if (err) | ||
690 | return err; | ||
691 | |||
692 | err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc); | ||
693 | if (err) { | ||
694 | ATH5K_ERR(sc, "request_irq failed\n"); | ||
695 | goto err_no_irq; | ||
696 | } | ||
697 | |||
698 | ath5k_led_enable(sc); | ||
699 | return 0; | ||
700 | |||
701 | err_no_irq: | ||
702 | pci_disable_device(pdev); | ||
703 | return err; | ||
704 | } | ||
705 | #endif /* CONFIG_PM */ | ||
706 | |||
707 | |||
708 | /***********************\ | ||
709 | * Driver Initialization * | ||
710 | \***********************/ | ||
711 | |||
712 | static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) | ||
713 | { | ||
714 | struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); | ||
715 | struct ath5k_softc *sc = hw->priv; | ||
716 | struct ath_regulatory *reg = &sc->ah->ah_regulatory; | ||
717 | |||
718 | return ath_reg_notifier_apply(wiphy, request, reg); | ||
719 | } | ||
720 | |||
721 | static int | ||
722 | ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw) | ||
723 | { | ||
724 | struct ath5k_softc *sc = hw->priv; | ||
725 | struct ath5k_hw *ah = sc->ah; | ||
726 | u8 mac[ETH_ALEN] = {}; | ||
727 | int ret; | ||
728 | |||
729 | ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device); | ||
730 | |||
731 | /* | ||
732 | * Check if the MAC has multi-rate retry support. | ||
733 | * We do this by trying to setup a fake extended | ||
734 | * descriptor. MAC's that don't have support will | ||
735 | * return false w/o doing anything. MAC's that do | ||
736 | * support it will return true w/o doing anything. | ||
737 | */ | ||
738 | ret = ah->ah_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0); | ||
739 | if (ret < 0) | ||
740 | goto err; | ||
741 | if (ret > 0) | ||
742 | __set_bit(ATH_STAT_MRRETRY, sc->status); | ||
743 | |||
744 | /* | ||
745 | * Collect the channel list. The 802.11 layer | ||
746 | * is responsible for filtering this list based | ||
747 | * on settings like the phy mode and regulatory | ||
748 | * domain restrictions. | ||
749 | */ | ||
750 | ret = ath5k_setup_bands(hw); | ||
751 | if (ret) { | ||
752 | ATH5K_ERR(sc, "can't get channels\n"); | ||
753 | goto err; | ||
754 | } | ||
755 | |||
756 | /* NB: setup here so ath5k_rate_update is happy */ | ||
757 | if (test_bit(AR5K_MODE_11A, ah->ah_modes)) | ||
758 | ath5k_setcurmode(sc, AR5K_MODE_11A); | ||
759 | else | ||
760 | ath5k_setcurmode(sc, AR5K_MODE_11B); | ||
761 | |||
762 | /* | ||
763 | * Allocate tx+rx descriptors and populate the lists. | ||
764 | */ | ||
765 | ret = ath5k_desc_alloc(sc, pdev); | ||
766 | if (ret) { | ||
767 | ATH5K_ERR(sc, "can't allocate descriptors\n"); | ||
768 | goto err; | ||
769 | } | ||
770 | |||
771 | /* | ||
772 | * Allocate hardware transmit queues: one queue for | ||
773 | * beacon frames and one data queue for each QoS | ||
774 | * priority. Note that hw functions handle resetting | ||
775 | * these queues at the needed time. | ||
776 | */ | ||
777 | ret = ath5k_beaconq_setup(ah); | ||
778 | if (ret < 0) { | ||
779 | ATH5K_ERR(sc, "can't setup a beacon xmit queue\n"); | ||
780 | goto err_desc; | ||
781 | } | ||
782 | sc->bhalq = ret; | ||
783 | |||
784 | sc->txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK); | ||
785 | if (IS_ERR(sc->txq)) { | ||
786 | ATH5K_ERR(sc, "can't setup xmit queue\n"); | ||
787 | ret = PTR_ERR(sc->txq); | ||
788 | goto err_bhal; | ||
789 | } | ||
790 | |||
791 | tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc); | ||
792 | tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc); | ||
793 | tasklet_init(&sc->restq, ath5k_tasklet_reset, (unsigned long)sc); | ||
794 | tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc); | ||
795 | setup_timer(&sc->calib_tim, ath5k_calibrate, (unsigned long)sc); | ||
796 | |||
797 | ret = ath5k_eeprom_read_mac(ah, mac); | ||
798 | if (ret) { | ||
799 | ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n", | ||
800 | sc->pdev->device); | ||
801 | goto err_queues; | ||
802 | } | ||
803 | |||
804 | SET_IEEE80211_PERM_ADDR(hw, mac); | ||
805 | /* All MAC address bits matter for ACKs */ | ||
806 | memset(sc->bssidmask, 0xff, ETH_ALEN); | ||
807 | ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask); | ||
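/*
 * The BSSID mask controls which address bits the hardware compares when
 * deciding whether a frame is addressed to us (and thus should be ACKed);
 * the all-ones mask set here requires an exact match, which is appropriate
 * while only a single interface/MAC address is in use.
 */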
808 | |||
809 | ah->ah_regulatory.current_rd = | ||
810 | ah->ah_capabilities.cap_eeprom.ee_regdomain; | ||
811 | ret = ath_regd_init(&ah->ah_regulatory, hw->wiphy, ath5k_reg_notifier); | ||
812 | if (ret) { | ||
813 | ATH5K_ERR(sc, "can't initialize regulatory system\n"); | ||
814 | goto err_queues; | ||
815 | } | ||
816 | |||
817 | ret = ieee80211_register_hw(hw); | ||
818 | if (ret) { | ||
819 | ATH5K_ERR(sc, "can't register ieee80211 hw\n"); | ||
820 | goto err_queues; | ||
821 | } | ||
822 | |||
823 | if (!ath_is_world_regd(&sc->ah->ah_regulatory)) | ||
824 | regulatory_hint(hw->wiphy, sc->ah->ah_regulatory.alpha2); | ||
825 | |||
826 | ath5k_init_leds(sc); | ||
827 | |||
828 | return 0; | ||
829 | err_queues: | ||
830 | ath5k_txq_release(sc); | ||
831 | err_bhal: | ||
832 | ath5k_hw_release_tx_queue(ah, sc->bhalq); | ||
833 | err_desc: | ||
834 | ath5k_desc_free(sc, pdev); | ||
835 | err: | ||
836 | return ret; | ||
837 | } | ||
838 | |||
839 | static void | ||
840 | ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw) | ||
841 | { | ||
842 | struct ath5k_softc *sc = hw->priv; | ||
843 | |||
844 | /* | ||
845 | * NB: the order of these is important: | ||
846 | * o call the 802.11 layer before detaching ath5k_hw to | ||
847 | * insure callbacks into the driver to delete global | ||
848 | * ensure callbacks into the driver to delete global | ||
849 | * o reclaim the tx queue data structures after calling | ||
850 | * the 802.11 layer as we'll get called back to reclaim | ||
851 | * node state and potentially want to use them | ||
852 | * o to cleanup the tx queues the hal is called, so detach | ||
853 | * it last | ||
854 | * XXX: ??? detach ath5k_hw ??? | ||
855 | * Other than that, it's straightforward... | ||
856 | */ | ||
857 | ieee80211_unregister_hw(hw); | ||
858 | ath5k_desc_free(sc, pdev); | ||
859 | ath5k_txq_release(sc); | ||
860 | ath5k_hw_release_tx_queue(sc->ah, sc->bhalq); | ||
861 | ath5k_unregister_leds(sc); | ||
862 | |||
863 | /* | ||
864 | * NB: can't reclaim these until after ieee80211_ifdetach | ||
865 | * returns because we'll get called back to reclaim node | ||
866 | * state and potentially want to use them. | ||
867 | */ | ||
868 | } | ||
869 | |||
870 | |||
871 | |||
872 | |||
873 | /********************\ | ||
874 | * Channel/mode setup * | ||
875 | \********************/ | ||
876 | |||
877 | /* | ||
878 | * Convert IEEE channel number to MHz frequency. | ||
879 | */ | ||
880 | static inline short | ||
881 | ath5k_ieee2mhz(short chan) | ||
882 | { | ||
883 | if (chan <= 14 || chan >= 27) | ||
884 | return ieee80211chan2mhz(chan); | ||
885 | else | ||
886 | return 2212 + chan * 20; | ||
887 | } | ||
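/*
 * Examples: channel 6 maps to 2437 MHz and channel 36 to 5180 MHz via
 * ieee80211chan2mhz(); only the unusual 2GHz channels 15-26 take the
 * "2212 + chan * 20" branch (e.g. channel 15 -> 2512 MHz).
 */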
888 | |||
889 | /* | ||
890 | * Returns true for the channel numbers used without all_channels modparam. | ||
891 | */ | ||
892 | static bool ath5k_is_standard_channel(short chan) | ||
893 | { | ||
894 | return ((chan <= 14) || | ||
895 | /* UNII 1,2 */ | ||
896 | ((chan & 3) == 0 && chan >= 36 && chan <= 64) || | ||
897 | /* midband */ | ||
898 | ((chan & 3) == 0 && chan >= 100 && chan <= 140) || | ||
899 | /* UNII-3 */ | ||
900 | ((chan & 3) == 1 && chan >= 149 && chan <= 165)); | ||
901 | } | ||
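/*
 * The "(chan & 3) == 0" tests above simply select channel numbers that are
 * multiples of four (36, 40, ... 64 and 100, 104, ... 140), while
 * "(chan & 3) == 1" picks 149, 153, ... 165 in UNII-3.
 */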
902 | |||
903 | static unsigned int | ||
904 | ath5k_copy_channels(struct ath5k_hw *ah, | ||
905 | struct ieee80211_channel *channels, | ||
906 | unsigned int mode, | ||
907 | unsigned int max) | ||
908 | { | ||
909 | unsigned int i, count, size, chfreq, freq, ch; | ||
910 | |||
911 | if (!test_bit(mode, ah->ah_modes)) | ||
912 | return 0; | ||
913 | |||
914 | switch (mode) { | ||
915 | case AR5K_MODE_11A: | ||
916 | case AR5K_MODE_11A_TURBO: | ||
917 | /* 1..220, but 2GHz frequencies are filtered by ath5k_channel_ok() */ | ||
918 | size = 220; | ||
919 | chfreq = CHANNEL_5GHZ; | ||
920 | break; | ||
921 | case AR5K_MODE_11B: | ||
922 | case AR5K_MODE_11G: | ||
923 | case AR5K_MODE_11G_TURBO: | ||
924 | size = 26; | ||
925 | chfreq = CHANNEL_2GHZ; | ||
926 | break; | ||
927 | default: | ||
928 | ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n"); | ||
929 | return 0; | ||
930 | } | ||
931 | |||
932 | for (i = 0, count = 0; i < size && max > 0; i++) { | ||
933 | ch = i + 1; | ||
934 | freq = ath5k_ieee2mhz(ch); | ||
935 | |||
936 | /* Check if channel is supported by the chipset */ | ||
937 | if (!ath5k_channel_ok(ah, freq, chfreq)) | ||
938 | continue; | ||
939 | |||
940 | if (!modparam_all_channels && !ath5k_is_standard_channel(ch)) | ||
941 | continue; | ||
942 | |||
943 | /* Write channel info and increment counter */ | ||
944 | channels[count].center_freq = freq; | ||
945 | channels[count].band = (chfreq == CHANNEL_2GHZ) ? | ||
946 | IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; | ||
947 | switch (mode) { | ||
948 | case AR5K_MODE_11A: | ||
949 | case AR5K_MODE_11G: | ||
950 | channels[count].hw_value = chfreq | CHANNEL_OFDM; | ||
951 | break; | ||
952 | case AR5K_MODE_11A_TURBO: | ||
953 | case AR5K_MODE_11G_TURBO: | ||
954 | channels[count].hw_value = chfreq | | ||
955 | CHANNEL_OFDM | CHANNEL_TURBO; | ||
956 | break; | ||
957 | case AR5K_MODE_11B: | ||
958 | channels[count].hw_value = CHANNEL_B; | ||
959 | } | ||
960 | |||
961 | count++; | ||
962 | max--; | ||
963 | } | ||
964 | |||
965 | return count; | ||
966 | } | ||
967 | |||
968 | static void | ||
969 | ath5k_setup_rate_idx(struct ath5k_softc *sc, struct ieee80211_supported_band *b) | ||
970 | { | ||
971 | u8 i; | ||
972 | |||
973 | for (i = 0; i < AR5K_MAX_RATES; i++) | ||
974 | sc->rate_idx[b->band][i] = -1; | ||
975 | |||
976 | for (i = 0; i < b->n_bitrates; i++) { | ||
977 | sc->rate_idx[b->band][b->bitrates[i].hw_value] = i; | ||
978 | if (b->bitrates[i].hw_value_short) | ||
979 | sc->rate_idx[b->band][b->bitrates[i].hw_value_short] = i; | ||
980 | } | ||
981 | } | ||
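/*
 * rate_idx[] is the reverse of the bitrate table: indexed by the AR5K rate
 * code reported by the hardware, it yields the mac80211 rate index for that
 * band (or -1 for unknown codes), which ath5k_hw_to_driver_rix() below uses
 * to translate hardware rate codes back into driver indices.
 */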
982 | |||
983 | static int | ||
984 | ath5k_setup_bands(struct ieee80211_hw *hw) | ||
985 | { | ||
986 | struct ath5k_softc *sc = hw->priv; | ||
987 | struct ath5k_hw *ah = sc->ah; | ||
988 | struct ieee80211_supported_band *sband; | ||
989 | int max_c, count_c = 0; | ||
990 | int i; | ||
991 | |||
992 | BUILD_BUG_ON(ARRAY_SIZE(sc->sbands) < IEEE80211_NUM_BANDS); | ||
993 | max_c = ARRAY_SIZE(sc->channels); | ||
994 | |||
995 | /* 2GHz band */ | ||
996 | sband = &sc->sbands[IEEE80211_BAND_2GHZ]; | ||
997 | sband->band = IEEE80211_BAND_2GHZ; | ||
998 | sband->bitrates = &sc->rates[IEEE80211_BAND_2GHZ][0]; | ||
999 | |||
1000 | if (test_bit(AR5K_MODE_11G, sc->ah->ah_capabilities.cap_mode)) { | ||
1001 | /* G mode */ | ||
1002 | memcpy(sband->bitrates, &ath5k_rates[0], | ||
1003 | sizeof(struct ieee80211_rate) * 12); | ||
1004 | sband->n_bitrates = 12; | ||
1005 | |||
1006 | sband->channels = sc->channels; | ||
1007 | sband->n_channels = ath5k_copy_channels(ah, sband->channels, | ||
1008 | AR5K_MODE_11G, max_c); | ||
1009 | |||
1010 | hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; | ||
1011 | count_c = sband->n_channels; | ||
1012 | max_c -= count_c; | ||
1013 | } else if (test_bit(AR5K_MODE_11B, sc->ah->ah_capabilities.cap_mode)) { | ||
1014 | /* B mode */ | ||
1015 | memcpy(sband->bitrates, &ath5k_rates[0], | ||
1016 | sizeof(struct ieee80211_rate) * 4); | ||
1017 | sband->n_bitrates = 4; | ||
1018 | |||
1019 | /* 5211 only supports B rates and uses 4-bit rate codes | ||
1020 | * (e.g. normally we have 0x1B for 1M, but on 5211 we have 0x0B) | ||
1021 | * fix them up here: | ||
1022 | */ | ||
1023 | if (ah->ah_version == AR5K_AR5211) { | ||
1024 | for (i = 0; i < 4; i++) { | ||
1025 | sband->bitrates[i].hw_value = | ||
1026 | sband->bitrates[i].hw_value & 0xF; | ||
1027 | sband->bitrates[i].hw_value_short = | ||
1028 | sband->bitrates[i].hw_value_short & 0xF; | ||
1029 | } | ||
1030 | } | ||
1031 | |||
1032 | sband->channels = sc->channels; | ||
1033 | sband->n_channels = ath5k_copy_channels(ah, sband->channels, | ||
1034 | AR5K_MODE_11B, max_c); | ||
1035 | |||
1036 | hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; | ||
1037 | count_c = sband->n_channels; | ||
1038 | max_c -= count_c; | ||
1039 | } | ||
1040 | ath5k_setup_rate_idx(sc, sband); | ||
1041 | |||
1042 | /* 5GHz band, A mode */ | ||
1043 | if (test_bit(AR5K_MODE_11A, sc->ah->ah_capabilities.cap_mode)) { | ||
1044 | sband = &sc->sbands[IEEE80211_BAND_5GHZ]; | ||
1045 | sband->band = IEEE80211_BAND_5GHZ; | ||
1046 | sband->bitrates = &sc->rates[IEEE80211_BAND_5GHZ][0]; | ||
1047 | |||
1048 | memcpy(sband->bitrates, &ath5k_rates[4], | ||
1049 | sizeof(struct ieee80211_rate) * 8); | ||
1050 | sband->n_bitrates = 8; | ||
1051 | |||
1052 | sband->channels = &sc->channels[count_c]; | ||
1053 | sband->n_channels = ath5k_copy_channels(ah, sband->channels, | ||
1054 | AR5K_MODE_11A, max_c); | ||
1055 | |||
1056 | hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband; | ||
1057 | } | ||
1058 | ath5k_setup_rate_idx(sc, sband); | ||
1059 | |||
1060 | ath5k_debug_dump_bands(sc); | ||
1061 | |||
1062 | return 0; | ||
1063 | } | ||
1064 | |||
1065 | /* | ||
1066 | * Set/change channels. If the channel is really being changed, | ||
1067 | * it's done by resetting the chip. To accomplish this we must | ||
1068 | * first clean up any pending DMA, then restart things a la | ||
1069 | * ath5k_init. | ||
1070 | * | ||
1071 | * Called with sc->lock. | ||
1072 | */ | ||
1073 | static int | ||
1074 | ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan) | ||
1075 | { | ||
1076 | ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "(%u MHz) -> (%u MHz)\n", | ||
1077 | sc->curchan->center_freq, chan->center_freq); | ||
1078 | |||
1079 | if (chan->center_freq != sc->curchan->center_freq || | ||
1080 | chan->hw_value != sc->curchan->hw_value) { | ||
1081 | |||
1082 | sc->curchan = chan; | ||
1083 | sc->curband = &sc->sbands[chan->band]; | ||
1084 | |||
1085 | /* | ||
1086 | * To switch channels clear any pending DMA operations; | ||
1087 | * wait long enough for the RX fifo to drain, reset the | ||
1088 | * hardware at the new frequency, and then re-enable | ||
1089 | * the relevant bits of the h/w. | ||
1090 | */ | ||
1091 | return ath5k_reset(sc, true, true); | ||
1092 | } | ||
1093 | |||
1094 | return 0; | ||
1095 | } | ||
1096 | |||
1097 | static void | ||
1098 | ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode) | ||
1099 | { | ||
1100 | sc->curmode = mode; | ||
1101 | |||
1102 | if (mode == AR5K_MODE_11A) { | ||
1103 | sc->curband = &sc->sbands[IEEE80211_BAND_5GHZ]; | ||
1104 | } else { | ||
1105 | sc->curband = &sc->sbands[IEEE80211_BAND_2GHZ]; | ||
1106 | } | ||
1107 | } | ||
1108 | |||
1109 | static void | ||
1110 | ath5k_mode_setup(struct ath5k_softc *sc) | ||
1111 | { | ||
1112 | struct ath5k_hw *ah = sc->ah; | ||
1113 | u32 rfilt; | ||
1114 | |||
1115 | /* configure rx filter */ | ||
1116 | rfilt = sc->filter_flags; | ||
1117 | ath5k_hw_set_rx_filter(ah, rfilt); | ||
1118 | |||
1119 | if (ath5k_hw_hasbssidmask(ah)) | ||
1120 | ath5k_hw_set_bssid_mask(ah, sc->bssidmask); | ||
1121 | |||
1122 | /* configure operational mode */ | ||
1123 | ath5k_hw_set_opmode(ah); | ||
1124 | |||
1125 | ath5k_hw_set_mcast_filter(ah, 0, 0); | ||
1126 | ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt); | ||
1127 | } | ||
1128 | |||
1129 | static inline int | ||
1130 | ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix) | ||
1131 | { | ||
1132 | int rix; | ||
1133 | |||
1134 | /* return base rate on errors */ | ||
1135 | if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES, | ||
1136 | "hw_rix out of bounds: %x\n", hw_rix)) | ||
1137 | return 0; | ||
1138 | |||
1139 | rix = sc->rate_idx[sc->curband->band][hw_rix]; | ||
1140 | if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix)) | ||
1141 | rix = 0; | ||
1142 | |||
1143 | return rix; | ||
1144 | } | ||
1145 | |||
1146 | /***************\ | ||
1147 | * Buffers setup * | ||
1148 | \***************/ | ||
1149 | |||
1150 | static | ||
1151 | struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr) | ||
1152 | { | ||
1153 | struct sk_buff *skb; | ||
1154 | unsigned int off; | ||
1155 | |||
1156 | /* | ||
1157 | * Allocate buffer with headroom_needed space for the | ||
1158 | * fake physical layer header at the start. | ||
1159 | */ | ||
1160 | skb = dev_alloc_skb(sc->rxbufsize + sc->cachelsz - 1); | ||
1161 | |||
1162 | if (!skb) { | ||
1163 | ATH5K_ERR(sc, "can't alloc skbuff of size %u\n", | ||
1164 | sc->rxbufsize + sc->cachelsz - 1); | ||
1165 | return NULL; | ||
1166 | } | ||
1167 | /* | ||
1168 | * Cache-line-align. This is important (for the | ||
1169 | * 5210 at least) as not doing so causes bogus data | ||
1170 | * in rx'd frames. | ||
1171 | */ | ||
1172 | off = ((unsigned long)skb->data) % sc->cachelsz; | ||
1173 | if (off != 0) | ||
1174 | skb_reserve(skb, sc->cachelsz - off); | ||
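/*
 * Alignment example: with sc->cachelsz == 32 and skb->data ending in
 * 0x...14, off == 20, so 12 bytes are reserved and the payload starts on
 * the next 32-byte cache line boundary.
 */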
1175 | |||
1176 | *skb_addr = pci_map_single(sc->pdev, | ||
1177 | skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE); | ||
1178 | if (unlikely(pci_dma_mapping_error(sc->pdev, *skb_addr))) { | ||
1179 | ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__); | ||
1180 | dev_kfree_skb(skb); | ||
1181 | return NULL; | ||
1182 | } | ||
1183 | return skb; | ||
1184 | } | ||
1185 | |||
1186 | static int | ||
1187 | ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) | ||
1188 | { | ||
1189 | struct ath5k_hw *ah = sc->ah; | ||
1190 | struct sk_buff *skb = bf->skb; | ||
1191 | struct ath5k_desc *ds; | ||
1192 | |||
1193 | if (!skb) { | ||
1194 | skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr); | ||
1195 | if (!skb) | ||
1196 | return -ENOMEM; | ||
1197 | bf->skb = skb; | ||
1198 | } | ||
1199 | |||
1200 | /* | ||
1201 | * Setup descriptors. For receive we always terminate | ||
1202 | * the descriptor list with a self-linked entry so we'll | ||
1203 | * not get overrun under high load (as can happen with a | ||
1204 | * 5212 when ANI processing enables PHY error frames). | ||
1205 | * | ||
1206 | * To ensure the last descriptor is self-linked we create | ||
1207 | * each descriptor as self-linked and add it to the end. As | ||
1208 | * each additional descriptor is added the previous self-linked | ||
1209 | * entry is ``fixed'' naturally. This should be safe even | ||
1210 | * if DMA is happening. When processing RX interrupts we | ||
1211 | * never remove/process the last, self-linked, entry on the | ||
1212 | * descriptor list. This ensures the hardware always has | ||
1213 | * someplace to write a new frame. | ||
1214 | */ | ||
1215 | ds = bf->desc; | ||
1216 | ds->ds_link = bf->daddr; /* link to self */ | ||
1217 | ds->ds_data = bf->skbaddr; | ||
1218 | ah->ah_setup_rx_desc(ah, ds, | ||
1219 | skb_tailroom(skb), /* buffer size */ | ||
1220 | 0); | ||
1221 | |||
1222 | if (sc->rxlink != NULL) | ||
1223 | *sc->rxlink = bf->daddr; | ||
1224 | sc->rxlink = &ds->ds_link; | ||
1225 | return 0; | ||
1226 | } | ||
1227 | |||
1228 | static int | ||
1229 | ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) | ||
1230 | { | ||
1231 | struct ath5k_hw *ah = sc->ah; | ||
1232 | struct ath5k_txq *txq = sc->txq; | ||
1233 | struct ath5k_desc *ds = bf->desc; | ||
1234 | struct sk_buff *skb = bf->skb; | ||
1235 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
1236 | unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID; | ||
1237 | struct ieee80211_rate *rate; | ||
1238 | unsigned int mrr_rate[3], mrr_tries[3]; | ||
1239 | int i, ret; | ||
1240 | u16 hw_rate; | ||
1241 | u16 cts_rate = 0; | ||
1242 | u16 duration = 0; | ||
1243 | u8 rc_flags; | ||
1244 | |||
1245 | flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK; | ||
1246 | |||
1247 | /* XXX endianness */ | ||
1248 | bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len, | ||
1249 | PCI_DMA_TODEVICE); | ||
1250 | |||
1251 | rate = ieee80211_get_tx_rate(sc->hw, info); | ||
1252 | |||
1253 | if (info->flags & IEEE80211_TX_CTL_NO_ACK) | ||
1254 | flags |= AR5K_TXDESC_NOACK; | ||
1255 | |||
1256 | rc_flags = info->control.rates[0].flags; | ||
1257 | hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ? | ||
1258 | rate->hw_value_short : rate->hw_value; | ||
1259 | |||
1260 | pktlen = skb->len; | ||
1261 | |||
1262 | /* FIXME: If we are in g mode and rate is a CCK rate | ||
1263 | * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta | ||
1264 | * from tx power (value is in dB units already) */ | ||
1265 | if (info->control.hw_key) { | ||
1266 | keyidx = info->control.hw_key->hw_key_idx; | ||
1267 | pktlen += info->control.hw_key->icv_len; | ||
1268 | } | ||
1269 | if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) { | ||
1270 | flags |= AR5K_TXDESC_RTSENA; | ||
1271 | cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value; | ||
1272 | duration = le16_to_cpu(ieee80211_rts_duration(sc->hw, | ||
1273 | sc->vif, pktlen, info)); | ||
1274 | } | ||
1275 | if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { | ||
1276 | flags |= AR5K_TXDESC_CTSENA; | ||
1277 | cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value; | ||
1278 | duration = le16_to_cpu(ieee80211_ctstoself_duration(sc->hw, | ||
1279 | sc->vif, pktlen, info)); | ||
1280 | } | ||
1281 | ret = ah->ah_setup_tx_desc(ah, ds, pktlen, | ||
1282 | ieee80211_get_hdrlen_from_skb(skb), AR5K_PKT_TYPE_NORMAL, | ||
1283 | (sc->power_level * 2), | ||
1284 | hw_rate, | ||
1285 | info->control.rates[0].count, keyidx, 0, flags, | ||
1286 | cts_rate, duration); | ||
1287 | if (ret) | ||
1288 | goto err_unmap; | ||
1289 | |||
1290 | memset(mrr_rate, 0, sizeof(mrr_rate)); | ||
1291 | memset(mrr_tries, 0, sizeof(mrr_tries)); | ||
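/*
 * mac80211 hands the driver up to four rate/count pairs per frame
 * (hw->max_rates is set to 4 for AR5212 parts in the probe path above):
 * info->control.rates[0] was used as the primary rate above, and the loop
 * below copies rates[1]..[3] (via ieee80211_get_alt_retry_rate) into the
 * hardware's multi-rate-retry series.
 */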
1292 | for (i = 0; i < 3; i++) { | ||
1293 | rate = ieee80211_get_alt_retry_rate(sc->hw, info, i); | ||
1294 | if (!rate) | ||
1295 | break; | ||
1296 | |||
1297 | mrr_rate[i] = rate->hw_value; | ||
1298 | mrr_tries[i] = info->control.rates[i + 1].count; | ||
1299 | } | ||
1300 | |||
1301 | ah->ah_setup_mrr_tx_desc(ah, ds, | ||
1302 | mrr_rate[0], mrr_tries[0], | ||
1303 | mrr_rate[1], mrr_tries[1], | ||
1304 | mrr_rate[2], mrr_tries[2]); | ||
1305 | |||
1306 | ds->ds_link = 0; | ||
1307 | ds->ds_data = bf->skbaddr; | ||
1308 | |||
1309 | spin_lock_bh(&txq->lock); | ||
1310 | list_add_tail(&bf->list, &txq->q); | ||
1311 | sc->tx_stats[txq->qnum].len++; | ||
1312 | if (txq->link == NULL) /* is this first packet? */ | ||
1313 | ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr); | ||
1314 | else /* no, so only link it */ | ||
1315 | *txq->link = bf->daddr; | ||
1316 | |||
1317 | txq->link = &ds->ds_link; | ||
1318 | ath5k_hw_start_tx_dma(ah, txq->qnum); | ||
1319 | mmiowb(); | ||
1320 | spin_unlock_bh(&txq->lock); | ||
1321 | |||
1322 | return 0; | ||
1323 | err_unmap: | ||
1324 | pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE); | ||
1325 | return ret; | ||
1326 | } | ||
1327 | |||
1328 | /*******************\ | ||
1329 | * Descriptors setup * | ||
1330 | \*******************/ | ||
1331 | |||
1332 | static int | ||
1333 | ath5k_desc_alloc(struct ath5k_softc *sc, struct pci_dev *pdev) | ||
1334 | { | ||
1335 | struct ath5k_desc *ds; | ||
1336 | struct ath5k_buf *bf; | ||
1337 | dma_addr_t da; | ||
1338 | unsigned int i; | ||
1339 | int ret; | ||
1340 | |||
1341 | /* allocate descriptors */ | ||
1342 | sc->desc_len = sizeof(struct ath5k_desc) * | ||
1343 | (ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1); | ||
1344 | sc->desc = pci_alloc_consistent(pdev, sc->desc_len, &sc->desc_daddr); | ||
1345 | if (sc->desc == NULL) { | ||
1346 | ATH5K_ERR(sc, "can't allocate descriptors\n"); | ||
1347 | ret = -ENOMEM; | ||
1348 | goto err; | ||
1349 | } | ||
1350 | ds = sc->desc; | ||
1351 | da = sc->desc_daddr; | ||
1352 | ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n", | ||
1353 | ds, sc->desc_len, (unsigned long long)sc->desc_daddr); | ||
1354 | |||
1355 | bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF, | ||
1356 | sizeof(struct ath5k_buf), GFP_KERNEL); | ||
1357 | if (bf == NULL) { | ||
1358 | ATH5K_ERR(sc, "can't allocate bufptr\n"); | ||
1359 | ret = -ENOMEM; | ||
1360 | goto err_free; | ||
1361 | } | ||
1362 | sc->bufptr = bf; | ||
1363 | |||
1364 | INIT_LIST_HEAD(&sc->rxbuf); | ||
1365 | for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) { | ||
1366 | bf->desc = ds; | ||
1367 | bf->daddr = da; | ||
1368 | list_add_tail(&bf->list, &sc->rxbuf); | ||
1369 | } | ||
1370 | |||
1371 | INIT_LIST_HEAD(&sc->txbuf); | ||
1372 | sc->txbuf_len = ATH_TXBUF; | ||
1373 | for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, | ||
1374 | da += sizeof(*ds)) { | ||
1375 | bf->desc = ds; | ||
1376 | bf->daddr = da; | ||
1377 | list_add_tail(&bf->list, &sc->txbuf); | ||
1378 | } | ||
1379 | |||
1380 | /* beacon buffer */ | ||
1381 | bf->desc = ds; | ||
1382 | bf->daddr = da; | ||
1383 | sc->bbuf = bf; | ||
1384 | |||
1385 | return 0; | ||
1386 | err_free: | ||
1387 | pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr); | ||
1388 | err: | ||
1389 | sc->desc = NULL; | ||
1390 | return ret; | ||
1391 | } | ||
1392 | |||
1393 | static void | ||
1394 | ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev) | ||
1395 | { | ||
1396 | struct ath5k_buf *bf; | ||
1397 | |||
1398 | ath5k_txbuf_free(sc, sc->bbuf); | ||
1399 | list_for_each_entry(bf, &sc->txbuf, list) | ||
1400 | ath5k_txbuf_free(sc, bf); | ||
1401 | list_for_each_entry(bf, &sc->rxbuf, list) | ||
1402 | ath5k_rxbuf_free(sc, bf); | ||
1403 | |||
1404 | /* Free memory associated with all descriptors */ | ||
1405 | pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr); | ||
1406 | |||
1407 | kfree(sc->bufptr); | ||
1408 | sc->bufptr = NULL; | ||
1409 | } | ||
1410 | |||
1411 | |||
1412 | |||
1413 | |||
1414 | |||
1415 | /**************\ | ||
1416 | * Queues setup * | ||
1417 | \**************/ | ||
1418 | |||
1419 | static struct ath5k_txq * | ||
1420 | ath5k_txq_setup(struct ath5k_softc *sc, | ||
1421 | int qtype, int subtype) | ||
1422 | { | ||
1423 | struct ath5k_hw *ah = sc->ah; | ||
1424 | struct ath5k_txq *txq; | ||
1425 | struct ath5k_txq_info qi = { | ||
1426 | .tqi_subtype = subtype, | ||
1427 | .tqi_aifs = AR5K_TXQ_USEDEFAULT, | ||
1428 | .tqi_cw_min = AR5K_TXQ_USEDEFAULT, | ||
1429 | .tqi_cw_max = AR5K_TXQ_USEDEFAULT | ||
1430 | }; | ||
1431 | int qnum; | ||
1432 | |||
1433 | /* | ||
1434 | * Enable interrupts only for EOL and DESC conditions. | ||
1435 | * We mark tx descriptors to receive a DESC interrupt | ||
1436 | * when a tx queue gets deep; otherwise waiting for the | ||
1437 | * EOL to reap descriptors. Note that this is done to | ||
1438 | * reduce interrupt load and this only defers reaping | ||
1439 | * descriptors, never transmitting frames. Aside from | ||
1440 | * reducing interrupts this also permits more concurrency. | ||
1441 | * The only potential downside is if the tx queue backs | ||
1442 | * up in which case the top half of the kernel may back up | ||
1443 | * due to a lack of tx descriptors. | ||
1444 | */ | ||
1445 | qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE | | ||
1446 | AR5K_TXQ_FLAG_TXDESCINT_ENABLE; | ||
1447 | qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi); | ||
1448 | if (qnum < 0) { | ||
1449 | /* | ||
1450 | * NB: don't print a message, this happens | ||
1451 | * normally on parts with too few tx queues | ||
1452 | */ | ||
1453 | return ERR_PTR(qnum); | ||
1454 | } | ||
1455 | if (qnum >= ARRAY_SIZE(sc->txqs)) { | ||
1456 | ATH5K_ERR(sc, "hw qnum %u out of range, max %tu!\n", | ||
1457 | qnum, ARRAY_SIZE(sc->txqs)); | ||
1458 | ath5k_hw_release_tx_queue(ah, qnum); | ||
1459 | return ERR_PTR(-EINVAL); | ||
1460 | } | ||
1461 | txq = &sc->txqs[qnum]; | ||
1462 | if (!txq->setup) { | ||
1463 | txq->qnum = qnum; | ||
1464 | txq->link = NULL; | ||
1465 | INIT_LIST_HEAD(&txq->q); | ||
1466 | spin_lock_init(&txq->lock); | ||
1467 | txq->setup = true; | ||
1468 | } | ||
1469 | return &sc->txqs[qnum]; | ||
1470 | } | ||
1471 | |||
1472 | static int | ||
1473 | ath5k_beaconq_setup(struct ath5k_hw *ah) | ||
1474 | { | ||
1475 | struct ath5k_txq_info qi = { | ||
1476 | .tqi_aifs = AR5K_TXQ_USEDEFAULT, | ||
1477 | .tqi_cw_min = AR5K_TXQ_USEDEFAULT, | ||
1478 | .tqi_cw_max = AR5K_TXQ_USEDEFAULT, | ||
1479 | /* NB: for dynamic turbo, don't enable any other interrupts */ | ||
1480 | .tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE | ||
1481 | }; | ||
1482 | |||
1483 | return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi); | ||
1484 | } | ||
1485 | |||
1486 | static int | ||
1487 | ath5k_beaconq_config(struct ath5k_softc *sc) | ||
1488 | { | ||
1489 | struct ath5k_hw *ah = sc->ah; | ||
1490 | struct ath5k_txq_info qi; | ||
1491 | int ret; | ||
1492 | |||
1493 | ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi); | ||
1494 | if (ret) | ||
1495 | return ret; | ||
1496 | if (sc->opmode == NL80211_IFTYPE_AP || | ||
1497 | sc->opmode == NL80211_IFTYPE_MESH_POINT) { | ||
1498 | /* | ||
1499 | * Always burst out beacon and CAB traffic | ||
1500 | * (aifs = cwmin = cwmax = 0) | ||
1501 | */ | ||
1502 | qi.tqi_aifs = 0; | ||
1503 | qi.tqi_cw_min = 0; | ||
1504 | qi.tqi_cw_max = 0; | ||
1505 | } else if (sc->opmode == NL80211_IFTYPE_ADHOC) { | ||
1506 | /* | ||
1507 | * Adhoc mode; backoff between 0 and (2 * cw_min). | ||
1508 | */ | ||
1509 | qi.tqi_aifs = 0; | ||
1510 | qi.tqi_cw_min = 0; | ||
1511 | qi.tqi_cw_max = 2 * ah->ah_cw_min; | ||
1512 | } | ||
1513 | |||
1514 | ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, | ||
1515 | "beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n", | ||
1516 | qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max); | ||
1517 | |||
1518 | ret = ath5k_hw_set_tx_queueprops(ah, sc->bhalq, &qi); | ||
1519 | if (ret) { | ||
1520 | ATH5K_ERR(sc, "%s: unable to update parameters for beacon " | ||
1521 | "hardware queue!\n", __func__); | ||
1522 | return ret; | ||
1523 | } | ||
1524 | |||
1525 | return ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */ | ||
1526 | } | ||
1527 | |||
1528 | static void | ||
1529 | ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq) | ||
1530 | { | ||
1531 | struct ath5k_buf *bf, *bf0; | ||
1532 | |||
1533 | /* | ||
1534 | * NB: this assumes output has been stopped and | ||
1535 | * we do not need to block ath5k_tx_tasklet | ||
1536 | */ | ||
1537 | spin_lock_bh(&txq->lock); | ||
1538 | list_for_each_entry_safe(bf, bf0, &txq->q, list) { | ||
1539 | ath5k_debug_printtxbuf(sc, bf); | ||
1540 | |||
1541 | ath5k_txbuf_free(sc, bf); | ||
1542 | |||
1543 | spin_lock_bh(&sc->txbuflock); | ||
1544 | sc->tx_stats[txq->qnum].len--; | ||
1545 | list_move_tail(&bf->list, &sc->txbuf); | ||
1546 | sc->txbuf_len++; | ||
1547 | spin_unlock_bh(&sc->txbuflock); | ||
1548 | } | ||
1549 | txq->link = NULL; | ||
1550 | spin_unlock_bh(&txq->lock); | ||
1551 | } | ||
1552 | |||
1553 | /* | ||
1554 | * Drain the transmit queues and reclaim resources. | ||
1555 | */ | ||
1556 | static void | ||
1557 | ath5k_txq_cleanup(struct ath5k_softc *sc) | ||
1558 | { | ||
1559 | struct ath5k_hw *ah = sc->ah; | ||
1560 | unsigned int i; | ||
1561 | |||
1562 | /* XXX return value */ | ||
1563 | if (likely(!test_bit(ATH_STAT_INVALID, sc->status))) { | ||
1564 | /* don't touch the hardware if marked invalid */ | ||
1565 | ath5k_hw_stop_tx_dma(ah, sc->bhalq); | ||
1566 | ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "beacon queue %x\n", | ||
1567 | ath5k_hw_get_txdp(ah, sc->bhalq)); | ||
1568 | for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) | ||
1569 | if (sc->txqs[i].setup) { | ||
1570 | ath5k_hw_stop_tx_dma(ah, sc->txqs[i].qnum); | ||
1571 | ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "txq [%u] %x, " | ||
1572 | "link %p\n", | ||
1573 | sc->txqs[i].qnum, | ||
1574 | ath5k_hw_get_txdp(ah, | ||
1575 | sc->txqs[i].qnum), | ||
1576 | sc->txqs[i].link); | ||
1577 | } | ||
1578 | } | ||
1579 | ieee80211_wake_queues(sc->hw); /* XXX move to callers */ | ||
1580 | |||
1581 | for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) | ||
1582 | if (sc->txqs[i].setup) | ||
1583 | ath5k_txq_drainq(sc, &sc->txqs[i]); | ||
1584 | } | ||
1585 | |||
1586 | static void | ||
1587 | ath5k_txq_release(struct ath5k_softc *sc) | ||
1588 | { | ||
1589 | struct ath5k_txq *txq = sc->txqs; | ||
1590 | unsigned int i; | ||
1591 | |||
1592 | for (i = 0; i < ARRAY_SIZE(sc->txqs); i++, txq++) | ||
1593 | if (txq->setup) { | ||
1594 | ath5k_hw_release_tx_queue(sc->ah, txq->qnum); | ||
1595 | txq->setup = false; | ||
1596 | } | ||
1597 | } | ||
1598 | |||
1599 | |||
1600 | |||
1601 | |||
1602 | /*************\ | ||
1603 | * RX Handling * | ||
1604 | \*************/ | ||
1605 | |||
1606 | /* | ||
1607 | * Enable the receive h/w following a reset. | ||
1608 | */ | ||
1609 | static int | ||
1610 | ath5k_rx_start(struct ath5k_softc *sc) | ||
1611 | { | ||
1612 | struct ath5k_hw *ah = sc->ah; | ||
1613 | struct ath5k_buf *bf; | ||
1614 | int ret; | ||
1615 | |||
1616 | sc->rxbufsize = roundup(IEEE80211_MAX_LEN, sc->cachelsz); | ||
1617 | |||
1618 | ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rxbufsize %u\n", | ||
1619 | sc->cachelsz, sc->rxbufsize); | ||
1620 | |||
1621 | sc->rxlink = NULL; | ||
1622 | |||
1623 | spin_lock_bh(&sc->rxbuflock); | ||
1624 | list_for_each_entry(bf, &sc->rxbuf, list) { | ||
1625 | ret = ath5k_rxbuf_setup(sc, bf); | ||
1626 | if (ret != 0) { | ||
1627 | spin_unlock_bh(&sc->rxbuflock); | ||
1628 | goto err; | ||
1629 | } | ||
1630 | } | ||
1631 | bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list); | ||
1632 | spin_unlock_bh(&sc->rxbuflock); | ||
1633 | |||
1634 | ath5k_hw_set_rxdp(ah, bf->daddr); | ||
1635 | ath5k_hw_start_rx_dma(ah); /* enable recv descriptors */ | ||
1636 | ath5k_mode_setup(sc); /* set filters, etc. */ | ||
1637 | ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */ | ||
1638 | |||
1639 | return 0; | ||
1640 | err: | ||
1641 | return ret; | ||
1642 | } | ||
1643 | |||
1644 | /* | ||
1645 | * Disable the receive h/w in preparation for a reset. | ||
1646 | */ | ||
1647 | static void | ||
1648 | ath5k_rx_stop(struct ath5k_softc *sc) | ||
1649 | { | ||
1650 | struct ath5k_hw *ah = sc->ah; | ||
1651 | |||
1652 | ath5k_hw_stop_rx_pcu(ah); /* disable PCU */ | ||
1653 | ath5k_hw_set_rx_filter(ah, 0); /* clear recv filter */ | ||
1654 | ath5k_hw_stop_rx_dma(ah); /* disable DMA engine */ | ||
1655 | |||
1656 | ath5k_debug_printrxbuffs(sc, ah); | ||
1657 | |||
1658 | sc->rxlink = NULL; /* just in case */ | ||
1659 | } | ||
1660 | |||
1661 | static unsigned int | ||
1662 | ath5k_rx_decrypted(struct ath5k_softc *sc, struct ath5k_desc *ds, | ||
1663 | struct sk_buff *skb, struct ath5k_rx_status *rs) | ||
1664 | { | ||
1665 | struct ieee80211_hdr *hdr = (void *)skb->data; | ||
1666 | unsigned int keyix, hlen; | ||
1667 | |||
1668 | if (!(rs->rs_status & AR5K_RXERR_DECRYPT) && | ||
1669 | rs->rs_keyix != AR5K_RXKEYIX_INVALID) | ||
1670 | return RX_FLAG_DECRYPTED; | ||
1671 | |||
1672 | /* Apparently when a default key is used to decrypt the packet, | ||
1673 | the hw does not set the index used to decrypt. In such cases | ||
1674 | get the index from the packet. */ | ||
1675 | hlen = ieee80211_hdrlen(hdr->frame_control); | ||
1676 | if (ieee80211_has_protected(hdr->frame_control) && | ||
1677 | !(rs->rs_status & AR5K_RXERR_DECRYPT) && | ||
1678 | skb->len >= hlen + 4) { | ||
1679 | keyix = skb->data[hlen + 3] >> 6; | ||
1680 | |||
1681 | if (test_bit(keyix, sc->keymap)) | ||
1682 | return RX_FLAG_DECRYPTED; | ||
1683 | } | ||
1684 | |||
1685 | return 0; | ||
1686 | } | ||
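/*
* For illustration of the default-key fallback above: 802.11 carries the
* Key ID in the top two bits of the fourth IV octet that follows the MAC
* header, so for a protected frame with a 24-byte header the lookup is
* effectively
*
*	keyix = skb->data[24 + 3] >> 6;		(a value 0..3)
*
* and sc->keymap then tells us whether that default key was actually
* installed into the hardware key cache.
*/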
1687 | |||
1688 | |||
1689 | static void | ||
1690 | ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb, | ||
1691 | struct ieee80211_rx_status *rxs) | ||
1692 | { | ||
1693 | u64 tsf, bc_tstamp; | ||
1694 | u32 hw_tu; | ||
1695 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; | ||
1696 | |||
1697 | if (ieee80211_is_beacon(mgmt->frame_control) && | ||
1698 | le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS && | ||
1699 | memcmp(mgmt->bssid, sc->ah->ah_bssid, ETH_ALEN) == 0) { | ||
1700 | /* | ||
1701 | * Received an IBSS beacon with the same BSSID. Hardware *must* | ||
1702 | * have updated the local TSF. We have to work around various | ||
1703 | * hardware bugs, though... | ||
1704 | */ | ||
1705 | tsf = ath5k_hw_get_tsf64(sc->ah); | ||
1706 | bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp); | ||
1707 | hw_tu = TSF_TO_TU(tsf); | ||
1708 | |||
1709 | ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, | ||
1710 | "beacon %llx mactime %llx (diff %lld) tsf now %llx\n", | ||
1711 | (unsigned long long)bc_tstamp, | ||
1712 | (unsigned long long)rxs->mactime, | ||
1713 | (unsigned long long)(rxs->mactime - bc_tstamp), | ||
1714 | (unsigned long long)tsf); | ||
1715 | |||
1716 | /* | ||
1717 | * Sometimes the HW will give us a wrong tstamp in the rx | ||
1718 | * status, causing the timestamp extension to go wrong. | ||
1719 | * (This seems to happen especially with beacon frames bigger | ||
1720 | * than 78 bytes (incl. FCS)) | ||
1721 | * But we know that the receive timestamp must be later than the | ||
1722 | * timestamp of the beacon since HW must have synced to that. | ||
1723 | * | ||
1724 | * NOTE: here we assume mactime to be after the frame was | ||
1725 | * received, not like mac80211 which defines it at the start. | ||
1726 | */ | ||
1727 | if (bc_tstamp > rxs->mactime) { | ||
1728 | ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, | ||
1729 | "fixing mactime from %llx to %llx\n", | ||
1730 | (unsigned long long)rxs->mactime, | ||
1731 | (unsigned long long)tsf); | ||
1732 | rxs->mactime = tsf; | ||
1733 | } | ||
1734 | |||
1735 | /* | ||
1736 | * Local TSF might have moved higher than our beacon timers, | ||
1737 | * in that case we have to update them to continue sending | ||
1738 | * beacons. This also takes care of synchronizing beacon sending | ||
1739 | * times with other stations. | ||
1740 | */ | ||
1741 | if (hw_tu >= sc->nexttbtt) | ||
1742 | ath5k_beacon_update_timers(sc, bc_tstamp); | ||
1743 | } | ||
1744 | } | ||
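/*
* A note on units in the function above: the hardware TSF counts
* microseconds while the beacon timers work in TU (1 TU = 1024 usec), so
* TSF_TO_TU() is essentially a right shift by 10; e.g. a TSF of 2048000
* usec corresponds to 2000 TU. The hw_tu >= sc->nexttbtt test therefore
* asks whether the (possibly hardware-updated) TSF has already passed the
* beacon time we programmed earlier.
*/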
1745 | |||
1746 | static void ath5k_tasklet_beacon(unsigned long data) | ||
1747 | { | ||
1748 | struct ath5k_softc *sc = (struct ath5k_softc *) data; | ||
1749 | |||
1750 | /* | ||
1751 | * Software beacon alert--time to send a beacon. | ||
1752 | * | ||
1753 | * In IBSS mode we use this interrupt just to | ||
1754 | * keep track of the next TBTT (target beacon | ||
1755 | * transmission time) in order to detect whether | ||
1756 | * automatic TSF updates happened. | ||
1757 | */ | ||
1758 | if (sc->opmode == NL80211_IFTYPE_ADHOC) { | ||
1759 | /* XXX: only if VEOL supported */ | ||
1760 | u64 tsf = ath5k_hw_get_tsf64(sc->ah); | ||
1761 | sc->nexttbtt += sc->bintval; | ||
1762 | ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, | ||
1763 | "SWBA nexttbtt: %x hw_tu: %x " | ||
1764 | "TSF: %llx\n", | ||
1765 | sc->nexttbtt, | ||
1766 | TSF_TO_TU(tsf), | ||
1767 | (unsigned long long) tsf); | ||
1768 | } else { | ||
1769 | spin_lock(&sc->block); | ||
1770 | ath5k_beacon_send(sc); | ||
1771 | spin_unlock(&sc->block); | ||
1772 | } | ||
1773 | } | ||
1774 | |||
1775 | static void | ||
1776 | ath5k_tasklet_rx(unsigned long data) | ||
1777 | { | ||
1778 | struct ieee80211_rx_status rxs = {}; | ||
1779 | struct ath5k_rx_status rs = {}; | ||
1780 | struct sk_buff *skb, *next_skb; | ||
1781 | dma_addr_t next_skb_addr; | ||
1782 | struct ath5k_softc *sc = (void *)data; | ||
1783 | struct ath5k_buf *bf, *bf_last; | ||
1784 | struct ath5k_desc *ds; | ||
1785 | int ret; | ||
1786 | int hdrlen; | ||
1787 | int padsize; | ||
1788 | |||
1789 | spin_lock(&sc->rxbuflock); | ||
1790 | if (list_empty(&sc->rxbuf)) { | ||
1791 | ATH5K_WARN(sc, "empty rx buf pool\n"); | ||
1792 | goto unlock; | ||
1793 | } | ||
1794 | bf_last = list_entry(sc->rxbuf.prev, struct ath5k_buf, list); | ||
1795 | do { | ||
1796 | rxs.flag = 0; | ||
1797 | |||
1798 | bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list); | ||
1799 | BUG_ON(bf->skb == NULL); | ||
1800 | skb = bf->skb; | ||
1801 | ds = bf->desc; | ||
1802 | |||
1803 | /* | ||
1804 | * The last buffer must not be freed to ensure proper hardware | ||
1805 | * function. Once the hardware has also finished the packet next to | ||
1806 | * it, we can be sure it no longer uses this one and we can go on. | ||
1807 | */ | ||
1808 | if (bf_last == bf) | ||
1809 | bf->flags |= 1; | ||
1810 | if (bf->flags) { | ||
1811 | struct ath5k_buf *bf_next = list_entry(bf->list.next, | ||
1812 | struct ath5k_buf, list); | ||
1813 | ret = sc->ah->ah_proc_rx_desc(sc->ah, bf_next->desc, | ||
1814 | &rs); | ||
1815 | if (ret) | ||
1816 | break; | ||
1817 | bf->flags &= ~1; | ||
1818 | /* skip the overwritten one (even status is martian) */ | ||
1819 | goto next; | ||
1820 | } | ||
1821 | |||
1822 | ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs); | ||
1823 | if (unlikely(ret == -EINPROGRESS)) | ||
1824 | break; | ||
1825 | else if (unlikely(ret)) { | ||
1826 | ATH5K_ERR(sc, "error in processing rx descriptor\n"); | ||
1827 | spin_unlock(&sc->rxbuflock); | ||
1828 | return; | ||
1829 | } | ||
1830 | |||
1831 | if (unlikely(rs.rs_more)) { | ||
1832 | ATH5K_WARN(sc, "unsupported jumbo\n"); | ||
1833 | goto next; | ||
1834 | } | ||
1835 | |||
1836 | if (unlikely(rs.rs_status)) { | ||
1837 | if (rs.rs_status & AR5K_RXERR_PHY) | ||
1838 | goto next; | ||
1839 | if (rs.rs_status & AR5K_RXERR_DECRYPT) { | ||
1840 | /* | ||
1841 | * Decrypt error. If the error occurred | ||
1842 | * because there was no hardware key, then | ||
1843 | * let the frame through so the upper layers | ||
1844 | * can process it. This is necessary for 5210 | ||
1845 | * parts which have no way to setup a ``clear'' | ||
1846 | * key cache entry. | ||
1847 | * | ||
1848 | * XXX do key cache faulting | ||
1849 | */ | ||
1850 | if (rs.rs_keyix == AR5K_RXKEYIX_INVALID && | ||
1851 | !(rs.rs_status & AR5K_RXERR_CRC)) | ||
1852 | goto accept; | ||
1853 | } | ||
1854 | if (rs.rs_status & AR5K_RXERR_MIC) { | ||
1855 | rxs.flag |= RX_FLAG_MMIC_ERROR; | ||
1856 | goto accept; | ||
1857 | } | ||
1858 | |||
1859 | /* let crypto-error packets fall through in MNTR */ | ||
1860 | if ((rs.rs_status & | ||
1861 | ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) || | ||
1862 | sc->opmode != NL80211_IFTYPE_MONITOR) | ||
1863 | goto next; | ||
1864 | } | ||
1865 | accept: | ||
1866 | next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr); | ||
1867 | |||
1868 | /* | ||
1869 | * If we can't replace bf->skb with a new skb under memory | ||
1870 | * pressure, just skip this packet | ||
1871 | */ | ||
1872 | if (!next_skb) | ||
1873 | goto next; | ||
1874 | |||
1875 | pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize, | ||
1876 | PCI_DMA_FROMDEVICE); | ||
1877 | skb_put(skb, rs.rs_datalen); | ||
1878 | |||
1879 | /* The MAC header is padded to have 32-bit boundary if the | ||
1880 | * packet payload is non-zero. The general calculation for | ||
1881 | * padsize would take into account odd header lengths: | ||
1882 | * padsize = (4 - hdrlen % 4) % 4; However, since only | ||
1883 | * even-length headers are used, padding can only be 0 or 2 | ||
1884 | * bytes and we can optimize this a bit. In addition, we must | ||
1885 | * not try to remove padding from short control frames that do | ||
1886 | * not have payload. */ | ||
1887 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); | ||
1888 | padsize = ath5k_pad_size(hdrlen); | ||
1889 | if (padsize) { | ||
1890 | memmove(skb->data + padsize, skb->data, hdrlen); | ||
1891 | skb_pull(skb, padsize); | ||
1892 | } | ||
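/*
* Worked example of the padding rule above: a 3-address data frame has
* hdrlen 24, so (4 - 24 % 4) % 4 = 0 and nothing moves; a QoS data frame
* has hdrlen 26, so padsize is 2 and the header is shifted over the two
* pad bytes, which skb_pull() then drops, leaving header and payload
* contiguous again.
*/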
1893 | |||
1894 | /* | ||
1895 | * always extend the mac timestamp, since this information is | ||
1896 | * also needed for proper IBSS merging. | ||
1897 | * | ||
1898 | * XXX: it might be too late to do it here, since rs_tstamp is | ||
1899 | * only 15 bits wide. That means TSF extension has to be done within | ||
1900 | * 32768 usec (about 32 ms). It might be necessary to move this to | ||
1901 | * the interrupt handler, like it is done in madwifi. | ||
1902 | * | ||
1903 | * Unfortunately we don't know when the hardware takes the rx | ||
1904 | * timestamp (beginning of phy frame, data frame, end of rx?). | ||
1905 | * The only thing we know is that it is hardware specific... | ||
1906 | * On AR5213 it seems the rx timestamp is at the end of the | ||
1907 | * frame, but I'm not sure. | ||
1908 | * | ||
1909 | * NOTE: mac80211 defines mactime at the beginning of the first | ||
1910 | * data symbol. Since we don't have any time references it's | ||
1911 | * impossible to comply with that. This affects IBSS merge only | ||
1912 | * right now, so it's not too bad... | ||
1913 | */ | ||
1914 | rxs.mactime = ath5k_extend_tsf(sc->ah, rs.rs_tstamp); | ||
1915 | rxs.flag |= RX_FLAG_TSFT; | ||
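/*
* The extension presumably works along these lines (a sketch, not
* necessarily the exact ath5k_extend_tsf() body): take the current
* 64-bit TSF and, if its low 15 bits have already wrapped past
* rs_tstamp, step back one 0x8000 period before splicing the stamp in:
*
*	if ((tsf & 0x7fff) < rs_tstamp)
*		tsf -= 0x8000;
*	mactime = (tsf & ~0x7fff) | rs_tstamp;
*
* which is exactly why it has to run within 32768 usec of reception, as
* noted above.
*/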
1916 | |||
1917 | rxs.freq = sc->curchan->center_freq; | ||
1918 | rxs.band = sc->curband->band; | ||
1919 | |||
1920 | rxs.noise = sc->ah->ah_noise_floor; | ||
1921 | rxs.signal = rxs.noise + rs.rs_rssi; | ||
1922 | |||
1923 | /* An rssi of 35 indicates you should be able to use | ||
1924 | * 54 Mbps reliably. A more elaborate scheme can be used | ||
1925 | * here but it requires a map of SNR/throughput for each | ||
1926 | * possible mode used */ | ||
1927 | rxs.qual = rs.rs_rssi * 100 / 35; | ||
1928 | |||
1929 | /* rssi can be more than 35 though, anything above that | ||
1930 | * should be considered at 100% */ | ||
1931 | if (rxs.qual > 100) | ||
1932 | rxs.qual = 100; | ||
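/*
* E.g. rs_rssi 17 maps to qual 17 * 100 / 35 = 48 (integer division),
* while anything from 35 upwards saturates at 100%.
*/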
1933 | |||
1934 | rxs.antenna = rs.rs_antenna; | ||
1935 | rxs.rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate); | ||
1936 | rxs.flag |= ath5k_rx_decrypted(sc, ds, skb, &rs); | ||
1937 | |||
1938 | if (rxs.rate_idx >= 0 && rs.rs_rate == | ||
1939 | sc->curband->bitrates[rxs.rate_idx].hw_value_short) | ||
1940 | rxs.flag |= RX_FLAG_SHORTPRE; | ||
1941 | |||
1942 | ath5k_debug_dump_skb(sc, skb, "RX ", 0); | ||
1943 | |||
1944 | /* check beacons in IBSS mode */ | ||
1945 | if (sc->opmode == NL80211_IFTYPE_ADHOC) | ||
1946 | ath5k_check_ibss_tsf(sc, skb, &rxs); | ||
1947 | |||
1948 | __ieee80211_rx(sc->hw, skb, &rxs); | ||
1949 | |||
1950 | bf->skb = next_skb; | ||
1951 | bf->skbaddr = next_skb_addr; | ||
1952 | next: | ||
1953 | list_move_tail(&bf->list, &sc->rxbuf); | ||
1954 | } while (ath5k_rxbuf_setup(sc, bf) == 0); | ||
1955 | unlock: | ||
1956 | spin_unlock(&sc->rxbuflock); | ||
1957 | } | ||
1958 | |||
1959 | |||
1960 | |||
1961 | |||
1962 | /*************\ | ||
1963 | * TX Handling * | ||
1964 | \*************/ | ||
1965 | |||
1966 | static void | ||
1967 | ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq) | ||
1968 | { | ||
1969 | struct ath5k_tx_status ts = {}; | ||
1970 | struct ath5k_buf *bf, *bf0; | ||
1971 | struct ath5k_desc *ds; | ||
1972 | struct sk_buff *skb; | ||
1973 | struct ieee80211_tx_info *info; | ||
1974 | int i, ret; | ||
1975 | |||
1976 | spin_lock(&txq->lock); | ||
1977 | list_for_each_entry_safe(bf, bf0, &txq->q, list) { | ||
1978 | ds = bf->desc; | ||
1979 | |||
1980 | ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts); | ||
1981 | if (unlikely(ret == -EINPROGRESS)) | ||
1982 | break; | ||
1983 | else if (unlikely(ret)) { | ||
1984 | ATH5K_ERR(sc, "error %d while processing queue %u\n", | ||
1985 | ret, txq->qnum); | ||
1986 | break; | ||
1987 | } | ||
1988 | |||
1989 | skb = bf->skb; | ||
1990 | info = IEEE80211_SKB_CB(skb); | ||
1991 | bf->skb = NULL; | ||
1992 | |||
1993 | pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, | ||
1994 | PCI_DMA_TODEVICE); | ||
1995 | |||
1996 | ieee80211_tx_info_clear_status(info); | ||
1997 | for (i = 0; i < 4; i++) { | ||
1998 | struct ieee80211_tx_rate *r = | ||
1999 | &info->status.rates[i]; | ||
2000 | |||
2001 | if (ts.ts_rate[i]) { | ||
2002 | r->idx = ath5k_hw_to_driver_rix(sc, ts.ts_rate[i]); | ||
2003 | r->count = ts.ts_retry[i]; | ||
2004 | } else { | ||
2005 | r->idx = -1; | ||
2006 | r->count = 0; | ||
2007 | } | ||
2008 | } | ||
2009 | |||
2010 | /* count the successful attempt as well */ | ||
2011 | info->status.rates[ts.ts_final_idx].count++; | ||
2012 | |||
2013 | if (unlikely(ts.ts_status)) { | ||
2014 | sc->ll_stats.dot11ACKFailureCount++; | ||
2015 | if (ts.ts_status & AR5K_TXERR_FILT) | ||
2016 | info->flags |= IEEE80211_TX_STAT_TX_FILTERED; | ||
2017 | } else { | ||
2018 | info->flags |= IEEE80211_TX_STAT_ACK; | ||
2019 | info->status.ack_signal = ts.ts_rssi; | ||
2020 | } | ||
2021 | |||
2022 | ieee80211_tx_status(sc->hw, skb); | ||
2023 | sc->tx_stats[txq->qnum].count++; | ||
2024 | |||
2025 | spin_lock(&sc->txbuflock); | ||
2026 | sc->tx_stats[txq->qnum].len--; | ||
2027 | list_move_tail(&bf->list, &sc->txbuf); | ||
2028 | sc->txbuf_len++; | ||
2029 | spin_unlock(&sc->txbuflock); | ||
2030 | } | ||
2031 | if (likely(list_empty(&txq->q))) | ||
2032 | txq->link = NULL; | ||
2033 | spin_unlock(&txq->lock); | ||
2034 | if (sc->txbuf_len > ATH_TXBUF / 5) | ||
2035 | ieee80211_wake_queues(sc->hw); | ||
2036 | } | ||
2037 | |||
2038 | static void | ||
2039 | ath5k_tasklet_tx(unsigned long data) | ||
2040 | { | ||
2041 | struct ath5k_softc *sc = (void *)data; | ||
2042 | |||
2043 | ath5k_tx_processq(sc, sc->txq); | ||
2044 | } | ||
2045 | |||
2046 | |||
2047 | /*****************\ | ||
2048 | * Beacon handling * | ||
2049 | \*****************/ | ||
2050 | |||
2051 | /* | ||
2052 | * Setup the beacon frame for transmit. | ||
2053 | */ | ||
2054 | static int | ||
2055 | ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) | ||
2056 | { | ||
2057 | struct sk_buff *skb = bf->skb; | ||
2058 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
2059 | struct ath5k_hw *ah = sc->ah; | ||
2060 | struct ath5k_desc *ds; | ||
2061 | int ret, antenna = 0; | ||
2062 | u32 flags; | ||
2063 | |||
2064 | bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len, | ||
2065 | PCI_DMA_TODEVICE); | ||
2066 | ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] " | ||
2067 | "skbaddr %llx\n", skb, skb->data, skb->len, | ||
2068 | (unsigned long long)bf->skbaddr); | ||
2069 | if (pci_dma_mapping_error(sc->pdev, bf->skbaddr)) { | ||
2070 | ATH5K_ERR(sc, "beacon DMA mapping failed\n"); | ||
2071 | return -EIO; | ||
2072 | } | ||
2073 | |||
2074 | ds = bf->desc; | ||
2075 | |||
2076 | flags = AR5K_TXDESC_NOACK; | ||
2077 | if (sc->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) { | ||
2078 | ds->ds_link = bf->daddr; /* self-linked */ | ||
2079 | flags |= AR5K_TXDESC_VEOL; | ||
2080 | /* | ||
2081 | * Let hardware handle antenna switching if txantenna is not set | ||
2082 | */ | ||
2083 | } else { | ||
2084 | ds->ds_link = 0; | ||
2085 | /* | ||
2086 | * Switch antenna every 4 beacons if txantenna is not set | ||
2087 | * XXX assumes two antennas | ||
2088 | */ | ||
2089 | if (antenna == 0) | ||
2090 | antenna = sc->bsent & 4 ? 2 : 1; | ||
2091 | } | ||
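/*
* The "& 4" above alternates antennas in blocks of four beacons: bsent
* 0..3 go out on antenna 1, 4..7 on antenna 2, 8..11 on antenna 1 again,
* and so on (assuming the two-antenna setup the XXX note refers to).
*/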
2092 | |||
2093 | /* FIXME: If we are in g mode and rate is a CCK rate | ||
2094 | * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta | ||
2095 | * from tx power (value is in dB units already) */ | ||
2096 | ds->ds_data = bf->skbaddr; | ||
2097 | ret = ah->ah_setup_tx_desc(ah, ds, skb->len, | ||
2098 | ieee80211_get_hdrlen_from_skb(skb), | ||
2099 | AR5K_PKT_TYPE_BEACON, (sc->power_level * 2), | ||
2100 | ieee80211_get_tx_rate(sc->hw, info)->hw_value, | ||
2101 | 1, AR5K_TXKEYIX_INVALID, | ||
2102 | antenna, flags, 0, 0); | ||
2103 | if (ret) | ||
2104 | goto err_unmap; | ||
2105 | |||
2106 | return 0; | ||
2107 | err_unmap: | ||
2108 | pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE); | ||
2109 | return ret; | ||
2110 | } | ||
2111 | |||
2112 | /* | ||
2113 | * Transmit a beacon frame at SWBA. Dynamic updates to the | ||
2114 | * frame contents are done as needed and the slot time is | ||
2115 | * also adjusted based on current state. | ||
2116 | * | ||
2117 | * This is called from software irq context (beacontq or restq | ||
2118 | * tasklets) or user context from ath5k_beacon_config. | ||
2119 | */ | ||
2120 | static void | ||
2121 | ath5k_beacon_send(struct ath5k_softc *sc) | ||
2122 | { | ||
2123 | struct ath5k_buf *bf = sc->bbuf; | ||
2124 | struct ath5k_hw *ah = sc->ah; | ||
2125 | |||
2126 | ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "in beacon_send\n"); | ||
2127 | |||
2128 | if (unlikely(bf->skb == NULL || sc->opmode == NL80211_IFTYPE_STATION || | ||
2129 | sc->opmode == NL80211_IFTYPE_MONITOR)) { | ||
2130 | ATH5K_WARN(sc, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL); | ||
2131 | return; | ||
2132 | } | ||
2133 | /* | ||
2134 | * Check if the previous beacon has gone out. If | ||
2135 | * not, don't try to post another; skip this | ||
2136 | * period and wait for the next. Missed beacons | ||
2137 | * indicate a problem and should not occur. If we | ||
2138 | * miss too many consecutive beacons reset the device. | ||
2139 | */ | ||
2140 | if (unlikely(ath5k_hw_num_tx_pending(ah, sc->bhalq) != 0)) { | ||
2141 | sc->bmisscount++; | ||
2142 | ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, | ||
2143 | "missed %u consecutive beacons\n", sc->bmisscount); | ||
2144 | if (sc->bmisscount > 3) { /* NB: 3 is a guess */ | ||
2145 | ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, | ||
2146 | "stuck beacon time (%u missed)\n", | ||
2147 | sc->bmisscount); | ||
2148 | tasklet_schedule(&sc->restq); | ||
2149 | } | ||
2150 | return; | ||
2151 | } | ||
2152 | if (unlikely(sc->bmisscount != 0)) { | ||
2153 | ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, | ||
2154 | "resume beacon xmit after %u misses\n", | ||
2155 | sc->bmisscount); | ||
2156 | sc->bmisscount = 0; | ||
2157 | } | ||
2158 | |||
2159 | /* | ||
2160 | * Stop any current dma and put the new frame on the queue. | ||
2161 | * This should never fail since we check above that no frames | ||
2162 | * are still pending on the queue. | ||
2163 | */ | ||
2164 | if (unlikely(ath5k_hw_stop_tx_dma(ah, sc->bhalq))) { | ||
2165 | ATH5K_WARN(sc, "beacon queue %u didn't stop?\n", sc->bhalq); | ||
2166 | /* NB: hw still stops DMA, so proceed */ | ||
2167 | } | ||
2168 | |||
2169 | ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr); | ||
2170 | ath5k_hw_start_tx_dma(ah, sc->bhalq); | ||
2171 | ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n", | ||
2172 | sc->bhalq, (unsigned long long)bf->daddr, bf->desc); | ||
2173 | |||
2174 | sc->bsent++; | ||
2175 | } | ||
2176 | |||
2177 | |||
2178 | /** | ||
2179 | * ath5k_beacon_update_timers - update beacon timers | ||
2180 | * | ||
2181 | * @sc: struct ath5k_softc pointer we are operating on | ||
2182 | * @bc_tsf: the timestamp of the beacon. 0 to reset the TSF. -1 to perform a | ||
2183 | * beacon timer update based on the current HW TSF. | ||
2184 | * | ||
2185 | * Calculate the next target beacon transmit time (TBTT) based on the timestamp | ||
2186 | * of a received beacon or the current local hardware TSF and write it to the | ||
2187 | * beacon timer registers. | ||
2188 | * | ||
2189 | * This is called in a variety of situations, e.g. when a beacon is received, | ||
2190 | * when a TSF update has been detected, but also when a new IBSS is created or | ||
2191 | * when we otherwise know we have to update the timers, but we keep it in this | ||
2192 | * function to have it all together in one place. | ||
2193 | */ | ||
2194 | static void | ||
2195 | ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf) | ||
2196 | { | ||
2197 | struct ath5k_hw *ah = sc->ah; | ||
2198 | u32 nexttbtt, intval, hw_tu, bc_tu; | ||
2199 | u64 hw_tsf; | ||
2200 | |||
2201 | intval = sc->bintval & AR5K_BEACON_PERIOD; | ||
2202 | if (WARN_ON(!intval)) | ||
2203 | return; | ||
2204 | |||
2205 | /* beacon TSF converted to TU */ | ||
2206 | bc_tu = TSF_TO_TU(bc_tsf); | ||
2207 | |||
2208 | /* current TSF converted to TU */ | ||
2209 | hw_tsf = ath5k_hw_get_tsf64(ah); | ||
2210 | hw_tu = TSF_TO_TU(hw_tsf); | ||
2211 | |||
2212 | #define FUDGE 3 | ||
2213 | /* we use FUDGE to make sure the next TBTT is ahead of the current TU */ | ||
2214 | if (bc_tsf == -1) { | ||
2215 | /* | ||
2216 | * no beacons received, called internally. | ||
2217 | * just need to refresh timers based on HW TSF. | ||
2218 | */ | ||
2219 | nexttbtt = roundup(hw_tu + FUDGE, intval); | ||
2220 | } else if (bc_tsf == 0) { | ||
2221 | /* | ||
2222 | * no beacon received, probably called by ath5k_reset_tsf(). | ||
2223 | * reset TSF to start with 0. | ||
2224 | */ | ||
2225 | nexttbtt = intval; | ||
2226 | intval |= AR5K_BEACON_RESET_TSF; | ||
2227 | } else if (bc_tsf > hw_tsf) { | ||
2228 | /* | ||
2229 | * beacon received, SW merge happened but HW TSF not yet updated. | ||
2230 | * not possible to reconfigure timers yet, but next time we | ||
2231 | * receive a beacon with the same BSSID, the hardware will | ||
2232 | * automatically update the TSF and then we need to reconfigure | ||
2233 | * the timers. | ||
2234 | */ | ||
2235 | ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, | ||
2236 | "need to wait for HW TSF sync\n"); | ||
2237 | return; | ||
2238 | } else { | ||
2239 | /* | ||
2240 | * most important case for beacon synchronization between STA. | ||
2241 | * | ||
2242 | * beacon received and HW TSF has been already updated by HW. | ||
2243 | * update next TBTT based on the TSF of the beacon, but make | ||
2244 | * sure it is ahead of our local TSF timer. | ||
2245 | */ | ||
2246 | nexttbtt = bc_tu + roundup(hw_tu + FUDGE - bc_tu, intval); | ||
2247 | } | ||
2248 | #undef FUDGE | ||
2249 | |||
2250 | sc->nexttbtt = nexttbtt; | ||
2251 | |||
2252 | intval |= AR5K_BEACON_ENA; | ||
2253 | ath5k_hw_init_beacon(ah, nexttbtt, intval); | ||
2254 | |||
2255 | /* | ||
2256 | * debugging output last in order to preserve the time critical aspect | ||
2257 | * of this function | ||
2258 | */ | ||
2259 | if (bc_tsf == -1) | ||
2260 | ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, | ||
2261 | "reconfigured timers based on HW TSF\n"); | ||
2262 | else if (bc_tsf == 0) | ||
2263 | ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, | ||
2264 | "reset HW TSF and timers\n"); | ||
2265 | else | ||
2266 | ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, | ||
2267 | "updated timers based on beacon TSF\n"); | ||
2268 | |||
2269 | ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, | ||
2270 | "bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n", | ||
2271 | (unsigned long long) bc_tsf, | ||
2272 | (unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt); | ||
2273 | ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "intval %u %s %s\n", | ||
2274 | intval & AR5K_BEACON_PERIOD, | ||
2275 | intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "", | ||
2276 | intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : ""); | ||
2277 | } | ||
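/*
* Worked example for the nexttbtt cases above, using an illustrative
* beacon interval of 100 TU, hw_tu = 1234 and FUDGE = 3:
*
*  - bc_tsf == -1 (refresh from HW TSF):
*	nexttbtt = roundup(1234 + 3, 100) = 1300
*  - beacon heard at bc_tu = 1200 with the HW TSF already synced:
*	nexttbtt = 1200 + roundup(1234 + 3 - 1200, 100) = 1300
*
* so the next TBTT always lands on an interval boundary safely ahead of
* the local timer (aligned to the received beacon in the second case).
*/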
2278 | |||
2279 | |||
2280 | /** | ||
2281 | * ath5k_beacon_config - Configure the beacon queues and interrupts | ||
2282 | * | ||
2283 | * @sc: struct ath5k_softc pointer we are operating on | ||
2284 | * | ||
2285 | * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA | ||
2286 | * interrupts to detect TSF updates only. | ||
2287 | */ | ||
2288 | static void | ||
2289 | ath5k_beacon_config(struct ath5k_softc *sc) | ||
2290 | { | ||
2291 | struct ath5k_hw *ah = sc->ah; | ||
2292 | unsigned long flags; | ||
2293 | |||
2294 | ath5k_hw_set_imr(ah, 0); | ||
2295 | sc->bmisscount = 0; | ||
2296 | sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA); | ||
2297 | |||
2298 | if (sc->opmode == NL80211_IFTYPE_ADHOC || | ||
2299 | sc->opmode == NL80211_IFTYPE_MESH_POINT || | ||
2300 | sc->opmode == NL80211_IFTYPE_AP) { | ||
2301 | /* | ||
2302 | * In IBSS mode we use a self-linked tx descriptor and let the | ||
2303 | * hardware send the beacons automatically. We have to load it | ||
2304 | * only once here. | ||
2305 | * We use the SWBA interrupt only to keep track of the beacon | ||
2306 | * timers in order to detect automatic TSF updates. | ||
2307 | */ | ||
2308 | ath5k_beaconq_config(sc); | ||
2309 | |||
2310 | sc->imask |= AR5K_INT_SWBA; | ||
2311 | |||
2312 | if (sc->opmode == NL80211_IFTYPE_ADHOC) { | ||
2313 | if (ath5k_hw_hasveol(ah)) { | ||
2314 | spin_lock_irqsave(&sc->block, flags); | ||
2315 | ath5k_beacon_send(sc); | ||
2316 | spin_unlock_irqrestore(&sc->block, flags); | ||
2317 | } | ||
2318 | } else | ||
2319 | ath5k_beacon_update_timers(sc, -1); | ||
2320 | } | ||
2321 | |||
2322 | ath5k_hw_set_imr(ah, sc->imask); | ||
2323 | } | ||
2324 | |||
2325 | |||
2326 | /********************\ | ||
2327 | * Interrupt handling * | ||
2328 | \********************/ | ||
2329 | |||
2330 | static int | ||
2331 | ath5k_init(struct ath5k_softc *sc) | ||
2332 | { | ||
2333 | struct ath5k_hw *ah = sc->ah; | ||
2334 | int ret, i; | ||
2335 | |||
2336 | mutex_lock(&sc->lock); | ||
2337 | |||
2338 | ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode); | ||
2339 | |||
2340 | /* | ||
2341 | * Stop anything previously set up. This is safe | ||
2342 | * whether or not this is the first time through. | ||
2343 | */ | ||
2344 | ath5k_stop_locked(sc); | ||
2345 | |||
2346 | /* | ||
2347 | * The basic interface to setting the hardware in a good | ||
2348 | * state is ``reset''. On return the hardware is known to | ||
2349 | * be powered up and with interrupts disabled. This must | ||
2350 | * be followed by initialization of the appropriate bits | ||
2351 | * and then setup of the interrupt mask. | ||
2352 | */ | ||
2353 | sc->curchan = sc->hw->conf.channel; | ||
2354 | sc->curband = &sc->sbands[sc->curchan->band]; | ||
2355 | sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL | | ||
2356 | AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL | | ||
2357 | AR5K_INT_FATAL | AR5K_INT_GLOBAL; | ||
2358 | ret = ath5k_reset(sc, false, false); | ||
2359 | if (ret) | ||
2360 | goto done; | ||
2361 | |||
2362 | /* | ||
2363 | * Reset the key cache since some parts do not reset the | ||
2364 | * contents on initial power up or resume from suspend. | ||
2365 | */ | ||
2366 | for (i = 0; i < AR5K_KEYTABLE_SIZE; i++) | ||
2367 | ath5k_hw_reset_key(ah, i); | ||
2368 | |||
2369 | /* Set ack to be sent at low bit-rates */ | ||
2370 | ath5k_hw_set_ack_bitrate_high(ah, false); | ||
2371 | |||
2372 | mod_timer(&sc->calib_tim, round_jiffies(jiffies + | ||
2373 | msecs_to_jiffies(ath5k_calinterval * 1000))); | ||
2374 | |||
2375 | ret = 0; | ||
2376 | done: | ||
2377 | mmiowb(); | ||
2378 | mutex_unlock(&sc->lock); | ||
2379 | return ret; | ||
2380 | } | ||
2381 | |||
2382 | static int | ||
2383 | ath5k_stop_locked(struct ath5k_softc *sc) | ||
2384 | { | ||
2385 | struct ath5k_hw *ah = sc->ah; | ||
2386 | |||
2387 | ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "invalid %u\n", | ||
2388 | test_bit(ATH_STAT_INVALID, sc->status)); | ||
2389 | |||
2390 | /* | ||
2391 | * Shutdown the hardware and driver: | ||
2392 | * stop output from above | ||
2393 | * disable interrupts | ||
2394 | * turn off timers | ||
2395 | * turn off the radio | ||
2396 | * clear transmit machinery | ||
2397 | * clear receive machinery | ||
2398 | * drain and release tx queues | ||
2399 | * reclaim beacon resources | ||
2400 | * power down hardware | ||
2401 | * | ||
2402 | * Note that some of this work is not possible if the | ||
2403 | * hardware is gone (invalid). | ||
2404 | */ | ||
2405 | ieee80211_stop_queues(sc->hw); | ||
2406 | |||
2407 | if (!test_bit(ATH_STAT_INVALID, sc->status)) { | ||
2408 | ath5k_led_off(sc); | ||
2409 | ath5k_hw_set_imr(ah, 0); | ||
2410 | synchronize_irq(sc->pdev->irq); | ||
2411 | } | ||
2412 | ath5k_txq_cleanup(sc); | ||
2413 | if (!test_bit(ATH_STAT_INVALID, sc->status)) { | ||
2414 | ath5k_rx_stop(sc); | ||
2415 | ath5k_hw_phy_disable(ah); | ||
2416 | } else | ||
2417 | sc->rxlink = NULL; | ||
2418 | |||
2419 | return 0; | ||
2420 | } | ||
2421 | |||
2422 | /* | ||
2423 | * Stop the device, grabbing the top-level lock to protect | ||
2424 | * against concurrent entry through ath5k_init (which can happen | ||
2425 | * if another thread does a system call and the thread doing the | ||
2426 | * stop is preempted). | ||
2427 | */ | ||
2428 | static int | ||
2429 | ath5k_stop_hw(struct ath5k_softc *sc) | ||
2430 | { | ||
2431 | int ret; | ||
2432 | |||
2433 | mutex_lock(&sc->lock); | ||
2434 | ret = ath5k_stop_locked(sc); | ||
2435 | if (ret == 0 && !test_bit(ATH_STAT_INVALID, sc->status)) { | ||
2436 | /* | ||
2437 | * Set the chip in full sleep mode. Note that we are | ||
2438 | * careful to do this only when bringing the interface | ||
2439 | * completely to a stop. When the chip is in this state | ||
2440 | * it must be carefully woken up or references to | ||
2441 | * registers in the PCI clock domain may freeze the bus | ||
2442 | * (and system). This varies by chip and is mostly an | ||
2443 | * issue with newer parts that go to sleep more quickly. | ||
2444 | */ | ||
2445 | if (sc->ah->ah_mac_srev >= 0x78) { | ||
2446 | /* | ||
2447 | * XXX | ||
2448 | * don't put newer MAC revisions > 7.8 to sleep because | ||
2449 | * of the above-mentioned problems | ||
2450 | */ | ||
2451 | ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mac version > 7.8, " | ||
2452 | "not putting device to sleep\n"); | ||
2453 | } else { | ||
2454 | ATH5K_DBG(sc, ATH5K_DEBUG_RESET, | ||
2455 | "putting device to full sleep\n"); | ||
2456 | ath5k_hw_set_power(sc->ah, AR5K_PM_FULL_SLEEP, true, 0); | ||
2457 | } | ||
2458 | } | ||
2459 | ath5k_txbuf_free(sc, sc->bbuf); | ||
2460 | |||
2461 | mmiowb(); | ||
2462 | mutex_unlock(&sc->lock); | ||
2463 | |||
2464 | del_timer_sync(&sc->calib_tim); | ||
2465 | tasklet_kill(&sc->rxtq); | ||
2466 | tasklet_kill(&sc->txtq); | ||
2467 | tasklet_kill(&sc->restq); | ||
2468 | tasklet_kill(&sc->beacontq); | ||
2469 | |||
2470 | return ret; | ||
2471 | } | ||
2472 | |||
2473 | static irqreturn_t | ||
2474 | ath5k_intr(int irq, void *dev_id) | ||
2475 | { | ||
2476 | struct ath5k_softc *sc = dev_id; | ||
2477 | struct ath5k_hw *ah = sc->ah; | ||
2478 | enum ath5k_int status; | ||
2479 | unsigned int counter = 1000; | ||
2480 | |||
2481 | if (unlikely(test_bit(ATH_STAT_INVALID, sc->status) || | ||
2482 | !ath5k_hw_is_intr_pending(ah))) | ||
2483 | return IRQ_NONE; | ||
2484 | |||
2485 | do { | ||
2486 | ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */ | ||
2487 | ATH5K_DBG(sc, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n", | ||
2488 | status, sc->imask); | ||
2489 | if (unlikely(status & AR5K_INT_FATAL)) { | ||
2490 | /* | ||
2491 | * Fatal errors are unrecoverable. | ||
2492 | * Typically these are caused by DMA errors. | ||
2493 | */ | ||
2494 | tasklet_schedule(&sc->restq); | ||
2495 | } else if (unlikely(status & AR5K_INT_RXORN)) { | ||
2496 | tasklet_schedule(&sc->restq); | ||
2497 | } else { | ||
2498 | if (status & AR5K_INT_SWBA) { | ||
2499 | tasklet_schedule(&sc->beacontq); | ||
2500 | } | ||
2501 | if (status & AR5K_INT_RXEOL) { | ||
2502 | /* | ||
2503 | * NB: the hardware should re-read the link when | ||
2504 | * the RXE bit is written, but it doesn't work at | ||
2505 | * least on older hardware revs. | ||
2506 | */ | ||
2507 | sc->rxlink = NULL; | ||
2508 | } | ||
2509 | if (status & AR5K_INT_TXURN) { | ||
2510 | /* bump tx trigger level */ | ||
2511 | ath5k_hw_update_tx_triglevel(ah, true); | ||
2512 | } | ||
2513 | if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR)) | ||
2514 | tasklet_schedule(&sc->rxtq); | ||
2515 | if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC | ||
2516 | | AR5K_INT_TXERR | AR5K_INT_TXEOL)) | ||
2517 | tasklet_schedule(&sc->txtq); | ||
2518 | if (status & AR5K_INT_BMISS) { | ||
2519 | /* TODO */ | ||
2520 | } | ||
2521 | if (status & AR5K_INT_MIB) { | ||
2522 | /* | ||
2523 | * These stats are also used for ANI, I think, | ||
2524 | * so how about updating them more often? | ||
2525 | */ | ||
2526 | ath5k_hw_update_mib_counters(ah, &sc->ll_stats); | ||
2527 | } | ||
2528 | } | ||
2529 | } while (ath5k_hw_is_intr_pending(ah) && counter-- > 0); | ||
2530 | |||
2531 | if (unlikely(!counter)) | ||
2532 | ATH5K_WARN(sc, "too many interrupts, giving up for now\n"); | ||
2533 | |||
2534 | return IRQ_HANDLED; | ||
2535 | } | ||
2536 | |||
2537 | static void | ||
2538 | ath5k_tasklet_reset(unsigned long data) | ||
2539 | { | ||
2540 | struct ath5k_softc *sc = (void *)data; | ||
2541 | |||
2542 | ath5k_reset_wake(sc); | ||
2543 | } | ||
2544 | |||
2545 | /* | ||
2546 | * Periodically recalibrate the PHY to account | ||
2547 | * for temperature/environment changes. | ||
2548 | */ | ||
2549 | static void | ||
2550 | ath5k_calibrate(unsigned long data) | ||
2551 | { | ||
2552 | struct ath5k_softc *sc = (void *)data; | ||
2553 | struct ath5k_hw *ah = sc->ah; | ||
2554 | |||
2555 | ATH5K_DBG(sc, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n", | ||
2556 | ieee80211_frequency_to_channel(sc->curchan->center_freq), | ||
2557 | sc->curchan->hw_value); | ||
2558 | |||
2559 | if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) { | ||
2560 | /* | ||
2561 | * Rfgain is out of bounds, reset the chip | ||
2562 | * to load new gain values. | ||
2563 | */ | ||
2564 | ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n"); | ||
2565 | ath5k_reset_wake(sc); | ||
2566 | } | ||
2567 | if (ath5k_hw_phy_calibrate(ah, sc->curchan)) | ||
2568 | ATH5K_ERR(sc, "calibration of channel %u failed\n", | ||
2569 | ieee80211_frequency_to_channel( | ||
2570 | sc->curchan->center_freq)); | ||
2571 | |||
2572 | mod_timer(&sc->calib_tim, round_jiffies(jiffies + | ||
2573 | msecs_to_jiffies(ath5k_calinterval * 1000))); | ||
2574 | } | ||
2575 | |||
2576 | |||
2577 | /********************\ | ||
2578 | * Mac80211 functions * | ||
2579 | \********************/ | ||
2580 | |||
2581 | static int | ||
2582 | ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) | ||
2583 | { | ||
2584 | struct ath5k_softc *sc = hw->priv; | ||
2585 | struct ath5k_buf *bf; | ||
2586 | unsigned long flags; | ||
2587 | int hdrlen; | ||
2588 | int padsize; | ||
2589 | |||
2590 | ath5k_debug_dump_skb(sc, skb, "TX ", 1); | ||
2591 | |||
2592 | if (sc->opmode == NL80211_IFTYPE_MONITOR) | ||
2593 | ATH5K_DBG(sc, ATH5K_DEBUG_XMIT, "tx in monitor (scan?)\n"); | ||
2594 | |||
2595 | /* | ||
2596 | * the hardware expects the header padded to 4 byte boundaries | ||
2597 | * if this is not the case we add the padding after the header | ||
2598 | */ | ||
2599 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); | ||
2600 | padsize = ath5k_pad_size(hdrlen); | ||
2601 | if (padsize) { | ||
2602 | |||
2603 | if (skb_headroom(skb) < padsize) { | ||
2604 | ATH5K_ERR(sc, "tx hdrlen not %%4: %d not enough" | ||
2605 | " headroom to pad %d\n", hdrlen, padsize); | ||
2606 | goto drop_packet; | ||
2607 | } | ||
2608 | skb_push(skb, padsize); | ||
2609 | memmove(skb->data, skb->data+padsize, hdrlen); | ||
2610 | } | ||
2611 | |||
2612 | spin_lock_irqsave(&sc->txbuflock, flags); | ||
2613 | if (list_empty(&sc->txbuf)) { | ||
2614 | ATH5K_ERR(sc, "no further txbuf available, dropping packet\n"); | ||
2615 | spin_unlock_irqrestore(&sc->txbuflock, flags); | ||
2616 | ieee80211_stop_queue(hw, skb_get_queue_mapping(skb)); | ||
2617 | goto drop_packet; | ||
2618 | } | ||
2619 | bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list); | ||
2620 | list_del(&bf->list); | ||
2621 | sc->txbuf_len--; | ||
2622 | if (list_empty(&sc->txbuf)) | ||
2623 | ieee80211_stop_queues(hw); | ||
2624 | spin_unlock_irqrestore(&sc->txbuflock, flags); | ||
2625 | |||
2626 | bf->skb = skb; | ||
2627 | |||
2628 | if (ath5k_txbuf_setup(sc, bf)) { | ||
2629 | bf->skb = NULL; | ||
2630 | spin_lock_irqsave(&sc->txbuflock, flags); | ||
2631 | list_add_tail(&bf->list, &sc->txbuf); | ||
2632 | sc->txbuf_len++; | ||
2633 | spin_unlock_irqrestore(&sc->txbuflock, flags); | ||
2634 | goto drop_packet; | ||
2635 | } | ||
2636 | return NETDEV_TX_OK; | ||
2637 | |||
2638 | drop_packet: | ||
2639 | dev_kfree_skb_any(skb); | ||
2640 | return NETDEV_TX_OK; | ||
2641 | } | ||
2642 | |||
2643 | static int | ||
2644 | ath5k_reset(struct ath5k_softc *sc, bool stop, bool change_channel) | ||
2645 | { | ||
2646 | struct ath5k_hw *ah = sc->ah; | ||
2647 | int ret; | ||
2648 | |||
2649 | ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n"); | ||
2650 | |||
2651 | if (stop) { | ||
2652 | ath5k_hw_set_imr(ah, 0); | ||
2653 | ath5k_txq_cleanup(sc); | ||
2654 | ath5k_rx_stop(sc); | ||
2655 | } | ||
2656 | ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, true); | ||
2657 | if (ret) { | ||
2658 | ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret); | ||
2659 | goto err; | ||
2660 | } | ||
2661 | |||
2662 | ret = ath5k_rx_start(sc); | ||
2663 | if (ret) { | ||
2664 | ATH5K_ERR(sc, "can't start recv logic\n"); | ||
2665 | goto err; | ||
2666 | } | ||
2667 | |||
2668 | /* | ||
2669 | * Change channels and update the h/w rate map if we're switching; | ||
2670 | * e.g. 11a to 11b/g. | ||
2671 | * | ||
2672 | * We may be doing a reset in response to an ioctl that changes the | ||
2673 | * channel so update any state that might change as a result. | ||
2674 | * | ||
2675 | * XXX needed? | ||
2676 | */ | ||
2677 | /* ath5k_chan_change(sc, c); */ | ||
2678 | |||
2679 | ath5k_beacon_config(sc); | ||
2680 | /* intrs are enabled by ath5k_beacon_config */ | ||
2681 | |||
2682 | return 0; | ||
2683 | err: | ||
2684 | return ret; | ||
2685 | } | ||
2686 | |||
2687 | static int | ||
2688 | ath5k_reset_wake(struct ath5k_softc *sc) | ||
2689 | { | ||
2690 | int ret; | ||
2691 | |||
2692 | ret = ath5k_reset(sc, true, true); | ||
2693 | if (!ret) | ||
2694 | ieee80211_wake_queues(sc->hw); | ||
2695 | |||
2696 | return ret; | ||
2697 | } | ||
2698 | |||
2699 | static int ath5k_start(struct ieee80211_hw *hw) | ||
2700 | { | ||
2701 | return ath5k_init(hw->priv); | ||
2702 | } | ||
2703 | |||
2704 | static void ath5k_stop(struct ieee80211_hw *hw) | ||
2705 | { | ||
2706 | ath5k_stop_hw(hw->priv); | ||
2707 | } | ||
2708 | |||
2709 | static int ath5k_add_interface(struct ieee80211_hw *hw, | ||
2710 | struct ieee80211_if_init_conf *conf) | ||
2711 | { | ||
2712 | struct ath5k_softc *sc = hw->priv; | ||
2713 | int ret; | ||
2714 | |||
2715 | mutex_lock(&sc->lock); | ||
2716 | if (sc->vif) { | ||
2717 | ret = 0; | ||
2718 | goto end; | ||
2719 | } | ||
2720 | |||
2721 | sc->vif = conf->vif; | ||
2722 | |||
2723 | switch (conf->type) { | ||
2724 | case NL80211_IFTYPE_AP: | ||
2725 | case NL80211_IFTYPE_STATION: | ||
2726 | case NL80211_IFTYPE_ADHOC: | ||
2727 | case NL80211_IFTYPE_MESH_POINT: | ||
2728 | case NL80211_IFTYPE_MONITOR: | ||
2729 | sc->opmode = conf->type; | ||
2730 | break; | ||
2731 | default: | ||
2732 | ret = -EOPNOTSUPP; | ||
2733 | goto end; | ||
2734 | } | ||
2735 | |||
2736 | /* Set to a reasonable value. Note that this will | ||
2737 | * be set to mac80211's value at ath5k_config(). */ | ||
2738 | sc->bintval = 1000; | ||
2739 | ath5k_hw_set_lladdr(sc->ah, conf->mac_addr); | ||
2740 | |||
2741 | ret = 0; | ||
2742 | end: | ||
2743 | mutex_unlock(&sc->lock); | ||
2744 | return ret; | ||
2745 | } | ||
2746 | |||
2747 | static void | ||
2748 | ath5k_remove_interface(struct ieee80211_hw *hw, | ||
2749 | struct ieee80211_if_init_conf *conf) | ||
2750 | { | ||
2751 | struct ath5k_softc *sc = hw->priv; | ||
2752 | u8 mac[ETH_ALEN] = {}; | ||
2753 | |||
2754 | mutex_lock(&sc->lock); | ||
2755 | if (sc->vif != conf->vif) | ||
2756 | goto end; | ||
2757 | |||
2758 | ath5k_hw_set_lladdr(sc->ah, mac); | ||
2759 | sc->vif = NULL; | ||
2760 | end: | ||
2761 | mutex_unlock(&sc->lock); | ||
2762 | } | ||
2763 | |||
2764 | /* | ||
2765 | * TODO: Phy disable/diversity etc | ||
2766 | */ | ||
2767 | static int | ||
2768 | ath5k_config(struct ieee80211_hw *hw, u32 changed) | ||
2769 | { | ||
2770 | struct ath5k_softc *sc = hw->priv; | ||
2771 | struct ieee80211_conf *conf = &hw->conf; | ||
2772 | int ret; | ||
2773 | |||
2774 | mutex_lock(&sc->lock); | ||
2775 | |||
2776 | sc->bintval = conf->beacon_int; | ||
2777 | sc->power_level = conf->power_level; | ||
2778 | |||
2779 | ret = ath5k_chan_set(sc, conf->channel); | ||
2780 | |||
2781 | mutex_unlock(&sc->lock); | ||
2782 | return ret; | ||
2783 | } | ||
2784 | |||
2785 | static int | ||
2786 | ath5k_config_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | ||
2787 | struct ieee80211_if_conf *conf) | ||
2788 | { | ||
2789 | struct ath5k_softc *sc = hw->priv; | ||
2790 | struct ath5k_hw *ah = sc->ah; | ||
2791 | int ret = 0; | ||
2792 | |||
2793 | mutex_lock(&sc->lock); | ||
2794 | if (sc->vif != vif) { | ||
2795 | ret = -EIO; | ||
2796 | goto unlock; | ||
2797 | } | ||
2798 | if (conf->changed & IEEE80211_IFCC_BSSID && conf->bssid) { | ||
2799 | /* Cache for later use during resets */ | ||
2800 | memcpy(ah->ah_bssid, conf->bssid, ETH_ALEN); | ||
2801 | /* XXX: assoc id is set to 0 for now, mac80211 doesn't have | ||
2802 | * a clean way of letting us retrieve this yet. */ | ||
2803 | ath5k_hw_set_associd(ah, ah->ah_bssid, 0); | ||
2804 | mmiowb(); | ||
2805 | } | ||
2806 | if (conf->changed & IEEE80211_IFCC_BEACON && | ||
2807 | (vif->type == NL80211_IFTYPE_ADHOC || | ||
2808 | vif->type == NL80211_IFTYPE_MESH_POINT || | ||
2809 | vif->type == NL80211_IFTYPE_AP)) { | ||
2810 | struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); | ||
2811 | if (!beacon) { | ||
2812 | ret = -ENOMEM; | ||
2813 | goto unlock; | ||
2814 | } | ||
2815 | ath5k_beacon_update(sc, beacon); | ||
2816 | } | ||
2817 | |||
2818 | unlock: | ||
2819 | mutex_unlock(&sc->lock); | ||
2820 | return ret; | ||
2821 | } | ||
2822 | |||
2823 | #define SUPPORTED_FIF_FLAGS \ | ||
2824 | FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | \ | ||
2825 | FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | \ | ||
2826 | FIF_BCN_PRBRESP_PROMISC | ||
2827 | /* | ||
2828 | * o always accept unicast, broadcast, and multicast traffic | ||
2829 | * o multicast traffic for all BSSIDs will be enabled if mac80211 | ||
2830 | * says it should be | ||
2831 | * o maintain current state of phy ofdm or phy cck error reception. | ||
2832 | * If the hardware detects any of these types of errors then | ||
2833 | * ath5k_hw_get_rx_filter() will pass to us the respective | ||
2834 | * hardware filters to be able to receive these types of frames. | ||
2835 | * o probe request frames are accepted only when operating in | ||
2836 | * hostap, adhoc, or monitor modes | ||
2837 | * o enable promiscuous mode according to the interface state | ||
2838 | * o accept beacons: | ||
2839 | * - when operating in adhoc mode so the 802.11 layer creates | ||
2840 | * node table entries for peers, | ||
2841 | * - when operating in station mode for collecting rssi data when | ||
2842 | * the station is otherwise quiet, or | ||
2843 | * - when scanning | ||
2844 | */ | ||
2845 | static void ath5k_configure_filter(struct ieee80211_hw *hw, | ||
2846 | unsigned int changed_flags, | ||
2847 | unsigned int *new_flags, | ||
2848 | int mc_count, struct dev_mc_list *mclist) | ||
2849 | { | ||
2850 | struct ath5k_softc *sc = hw->priv; | ||
2851 | struct ath5k_hw *ah = sc->ah; | ||
2852 | u32 mfilt[2], val, rfilt; | ||
2853 | u8 pos; | ||
2854 | int i; | ||
2855 | |||
2856 | mfilt[0] = 0; | ||
2857 | mfilt[1] = 0; | ||
2858 | |||
2859 | /* Only deal with supported flags */ | ||
2860 | changed_flags &= SUPPORTED_FIF_FLAGS; | ||
2861 | *new_flags &= SUPPORTED_FIF_FLAGS; | ||
2862 | |||
2863 | /* If HW detects any phy or radar errors, leave those filters on. | ||
2864 | * Also, always enable Unicast, Broadcasts and Multicast | ||
2865 | * XXX: move unicast, bssid broadcasts and multicast to mac80211 */ | ||
2866 | rfilt = (ath5k_hw_get_rx_filter(ah) & (AR5K_RX_FILTER_PHYERR)) | | ||
2867 | (AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST | | ||
2868 | AR5K_RX_FILTER_MCAST); | ||
2869 | |||
2870 | if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) { | ||
2871 | if (*new_flags & FIF_PROMISC_IN_BSS) { | ||
2872 | rfilt |= AR5K_RX_FILTER_PROM; | ||
2873 | __set_bit(ATH_STAT_PROMISC, sc->status); | ||
2874 | } else { | ||
2875 | __clear_bit(ATH_STAT_PROMISC, sc->status); | ||
2876 | } | ||
2877 | } | ||
2878 | |||
2879 | /* Note, AR5K_RX_FILTER_MCAST is already enabled */ | ||
2880 | if (*new_flags & FIF_ALLMULTI) { | ||
2881 | mfilt[0] = ~0; | ||
2882 | mfilt[1] = ~0; | ||
2883 | } else { | ||
2884 | for (i = 0; i < mc_count; i++) { | ||
2885 | if (!mclist) | ||
2886 | break; | ||
2887 | /* calculate XOR of eight 6-bit values */ | ||
2888 | val = get_unaligned_le32(mclist->dmi_addr + 0); | ||
2889 | pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; | ||
2890 | val = get_unaligned_le32(mclist->dmi_addr + 3); | ||
2891 | pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; | ||
2892 | pos &= 0x3f; | ||
2893 | mfilt[pos / 32] |= (1 << (pos % 32)); | ||
2894 | /* XXX: we might be able to just do this instead, | ||
2895 | * but not sure, needs testing. If we do use this we'd | ||
2896 | * need to inform the code below not to reset the mcast filter */ | ||
2897 | /* ath5k_hw_set_mcast_filterindex(ah, | ||
2898 | * mclist->dmi_addr[5]); */ | ||
2899 | mclist = mclist->next; | ||
2900 | } | ||
2901 | } | ||
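/*
* The hash above folds each multicast address into a 6-bit value (the
* XOR of eight 6-bit chunks, as noted), which selects one bit of the
* 64-bit hardware multicast mask: pos 42, for example, sets bit 10 of
* mfilt[1]. False positives just mean the hardware passes a few extra
* frames for the stack to discard.
*/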
2902 | |||
2903 | /* This is the best we can do */ | ||
2904 | if (*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)) | ||
2905 | rfilt |= AR5K_RX_FILTER_PHYERR; | ||
2906 | |||
2907 | /* FIF_BCN_PRBRESP_PROMISC really means to enable beacons | ||
2908 | * and probes for any BSSID; this needs testing */ | ||
2909 | if (*new_flags & FIF_BCN_PRBRESP_PROMISC) | ||
2910 | rfilt |= AR5K_RX_FILTER_BEACON | AR5K_RX_FILTER_PROBEREQ; | ||
2911 | |||
2912 | /* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not | ||
2913 | * set we should only pass on control frames for this | ||
2914 | * station. This needs testing. I believe right now this | ||
2915 | * enables *all* control frames, which is OK, but | ||
2916 | * we should see if we can improve on granularity */ | ||
2917 | if (*new_flags & FIF_CONTROL) | ||
2918 | rfilt |= AR5K_RX_FILTER_CONTROL; | ||
2919 | |||
2920 | /* Additional settings per mode -- this is per ath5k */ | ||
2921 | |||
2922 | /* XXX move these to mac80211, and add a beacon IFF flag to mac80211 */ | ||
2923 | |||
2924 | if (sc->opmode == NL80211_IFTYPE_MONITOR) | ||
2925 | rfilt |= AR5K_RX_FILTER_CONTROL | AR5K_RX_FILTER_BEACON | | ||
2926 | AR5K_RX_FILTER_PROBEREQ | AR5K_RX_FILTER_PROM; | ||
2927 | if (sc->opmode != NL80211_IFTYPE_STATION) | ||
2928 | rfilt |= AR5K_RX_FILTER_PROBEREQ; | ||
2929 | if (sc->opmode != NL80211_IFTYPE_AP && | ||
2930 | sc->opmode != NL80211_IFTYPE_MESH_POINT && | ||
2931 | test_bit(ATH_STAT_PROMISC, sc->status)) | ||
2932 | rfilt |= AR5K_RX_FILTER_PROM; | ||
2933 | if ((sc->opmode == NL80211_IFTYPE_STATION && sc->assoc) || | ||
2934 | sc->opmode == NL80211_IFTYPE_ADHOC || | ||
2935 | sc->opmode == NL80211_IFTYPE_AP) | ||
2936 | rfilt |= AR5K_RX_FILTER_BEACON; | ||
2937 | if (sc->opmode == NL80211_IFTYPE_MESH_POINT) | ||
2938 | rfilt |= AR5K_RX_FILTER_CONTROL | AR5K_RX_FILTER_BEACON | | ||
2939 | AR5K_RX_FILTER_PROBEREQ | AR5K_RX_FILTER_PROM; | ||
2940 | |||
2941 | /* Set filters */ | ||
2942 | ath5k_hw_set_rx_filter(ah, rfilt); | ||
2943 | |||
2944 | /* Set multicast bits */ | ||
2945 | ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]); | ||
2946 | /* Set the cached hw filter flags, this will later actually | ||
2947 | * be set in HW */ | ||
2948 | sc->filter_flags = rfilt; | ||
2949 | } | ||
2950 | |||
2951 | static int | ||
2952 | ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, | ||
2953 | struct ieee80211_vif *vif, struct ieee80211_sta *sta, | ||
2954 | struct ieee80211_key_conf *key) | ||
2955 | { | ||
2956 | struct ath5k_softc *sc = hw->priv; | ||
2957 | int ret = 0; | ||
2958 | |||
2959 | if (modparam_nohwcrypt) | ||
2960 | return -EOPNOTSUPP; | ||
2961 | |||
2962 | switch (key->alg) { | ||
2963 | case ALG_WEP: | ||
2964 | case ALG_TKIP: | ||
2965 | break; | ||
2966 | case ALG_CCMP: | ||
2967 | return -EOPNOTSUPP; | ||
2968 | default: | ||
2969 | WARN_ON(1); | ||
2970 | return -EINVAL; | ||
2971 | } | ||
2972 | |||
2973 | mutex_lock(&sc->lock); | ||
2974 | |||
2975 | switch (cmd) { | ||
2976 | case SET_KEY: | ||
2977 | ret = ath5k_hw_set_key(sc->ah, key->keyidx, key, | ||
2978 | sta ? sta->addr : NULL); | ||
2979 | if (ret) { | ||
2980 | ATH5K_ERR(sc, "can't set the key\n"); | ||
2981 | goto unlock; | ||
2982 | } | ||
2983 | __set_bit(key->keyidx, sc->keymap); | ||
2984 | key->hw_key_idx = key->keyidx; | ||
2985 | key->flags |= (IEEE80211_KEY_FLAG_GENERATE_IV | | ||
2986 | IEEE80211_KEY_FLAG_GENERATE_MMIC); | ||
2987 | break; | ||
2988 | case DISABLE_KEY: | ||
2989 | ath5k_hw_reset_key(sc->ah, key->keyidx); | ||
2990 | __clear_bit(key->keyidx, sc->keymap); | ||
2991 | break; | ||
2992 | default: | ||
2993 | ret = -EINVAL; | ||
2994 | goto unlock; | ||
2995 | } | ||
2996 | |||
2997 | unlock: | ||
2998 | mmiowb(); | ||
2999 | mutex_unlock(&sc->lock); | ||
3000 | return ret; | ||
3001 | } | ||
3002 | |||
3003 | static int | ||
3004 | ath5k_get_stats(struct ieee80211_hw *hw, | ||
3005 | struct ieee80211_low_level_stats *stats) | ||
3006 | { | ||
3007 | struct ath5k_softc *sc = hw->priv; | ||
3008 | struct ath5k_hw *ah = sc->ah; | ||
3009 | |||
3010 | /* Force update */ | ||
3011 | ath5k_hw_update_mib_counters(ah, &sc->ll_stats); | ||
3012 | |||
3013 | memcpy(stats, &sc->ll_stats, sizeof(sc->ll_stats)); | ||
3014 | |||
3015 | return 0; | ||
3016 | } | ||
3017 | |||
3018 | static int | ||
3019 | ath5k_get_tx_stats(struct ieee80211_hw *hw, | ||
3020 | struct ieee80211_tx_queue_stats *stats) | ||
3021 | { | ||
3022 | struct ath5k_softc *sc = hw->priv; | ||
3023 | |||
3024 | memcpy(stats, &sc->tx_stats, sizeof(sc->tx_stats)); | ||
3025 | |||
3026 | return 0; | ||
3027 | } | ||
3028 | |||
3029 | static u64 | ||
3030 | ath5k_get_tsf(struct ieee80211_hw *hw) | ||
3031 | { | ||
3032 | struct ath5k_softc *sc = hw->priv; | ||
3033 | |||
3034 | return ath5k_hw_get_tsf64(sc->ah); | ||
3035 | } | ||
3036 | |||
3037 | static void | ||
3038 | ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf) | ||
3039 | { | ||
3040 | struct ath5k_softc *sc = hw->priv; | ||
3041 | |||
3042 | ath5k_hw_set_tsf64(sc->ah, tsf); | ||
3043 | } | ||
3044 | |||
3045 | static void | ||
3046 | ath5k_reset_tsf(struct ieee80211_hw *hw) | ||
3047 | { | ||
3048 | struct ath5k_softc *sc = hw->priv; | ||
3049 | |||
3050 | /* | ||
3051 | * in IBSS mode we need to update the beacon timers too. | ||
3052 | * this will also reset the TSF if we call it with 0 | ||
3053 | */ | ||
3054 | if (sc->opmode == NL80211_IFTYPE_ADHOC) | ||
3055 | ath5k_beacon_update_timers(sc, 0); | ||
3056 | else | ||
3057 | ath5k_hw_reset_tsf(sc->ah); | ||
3058 | } | ||
3059 | |||
3060 | static int | ||
3061 | ath5k_beacon_update(struct ath5k_softc *sc, struct sk_buff *skb) | ||
3062 | { | ||
3063 | unsigned long flags; | ||
3064 | int ret; | ||
3065 | |||
3066 | ath5k_debug_dump_skb(sc, skb, "BC ", 1); | ||
3067 | |||
3068 | spin_lock_irqsave(&sc->block, flags); | ||
3069 | ath5k_txbuf_free(sc, sc->bbuf); | ||
3070 | sc->bbuf->skb = skb; | ||
3071 | ret = ath5k_beacon_setup(sc, sc->bbuf); | ||
3072 | if (ret) | ||
3073 | sc->bbuf->skb = NULL; | ||
3074 | spin_unlock_irqrestore(&sc->block, flags); | ||
3075 | if (!ret) { | ||
3076 | ath5k_beacon_config(sc); | ||
3077 | mmiowb(); | ||
3078 | } | ||
3079 | |||
3080 | return ret; | ||
3081 | } | ||
3082 | static void | ||
3083 | set_beacon_filter(struct ieee80211_hw *hw, bool enable) | ||
3084 | { | ||
3085 | struct ath5k_softc *sc = hw->priv; | ||
3086 | struct ath5k_hw *ah = sc->ah; | ||
3087 | u32 rfilt; | ||
3088 | rfilt = ath5k_hw_get_rx_filter(ah); | ||
3089 | if (enable) | ||
3090 | rfilt |= AR5K_RX_FILTER_BEACON; | ||
3091 | else | ||
3092 | rfilt &= ~AR5K_RX_FILTER_BEACON; | ||
3093 | ath5k_hw_set_rx_filter(ah, rfilt); | ||
3094 | sc->filter_flags = rfilt; | ||
3095 | } | ||
3096 | |||
3097 | static void ath5k_bss_info_changed(struct ieee80211_hw *hw, | ||
3098 | struct ieee80211_vif *vif, | ||
3099 | struct ieee80211_bss_conf *bss_conf, | ||
3100 | u32 changes) | ||
3101 | { | ||
3102 | struct ath5k_softc *sc = hw->priv; | ||
3103 | if (changes & BSS_CHANGED_ASSOC) { | ||
3104 | mutex_lock(&sc->lock); | ||
3105 | sc->assoc = bss_conf->assoc; | ||
3106 | if (sc->opmode == NL80211_IFTYPE_STATION) | ||
3107 | set_beacon_filter(hw, sc->assoc); | ||
3108 | mutex_unlock(&sc->lock); | ||
3109 | } | ||
3110 | } | ||