Diffstat (limited to 'drivers/net/wireless/ath5k/base.c')
-rw-r--r-- | drivers/net/wireless/ath5k/base.c | 2817 |
1 files changed, 2817 insertions, 0 deletions
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
new file mode 100644
index 000000000000..d3d37282f3dc
--- /dev/null
+++ b/drivers/net/wireless/ath5k/base.c
@@ -0,0 +1,2817 @@
1 | /*- | ||
2 | * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting | ||
3 | * Copyright (c) 2004-2005 Atheros Communications, Inc. | ||
4 | * Copyright (c) 2006 Devicescape Software, Inc. | ||
5 | * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com> | ||
6 | * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu> | ||
7 | * | ||
8 | * All rights reserved. | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or without | ||
11 | * modification, are permitted provided that the following conditions | ||
12 | * are met: | ||
13 | * 1. Redistributions of source code must retain the above copyright | ||
14 | * notice, this list of conditions and the following disclaimer, | ||
15 | * without modification. | ||
16 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
17 | * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any | ||
18 | * redistribution must be conditioned upon including a substantially | ||
19 | * similar Disclaimer requirement for further binary redistribution. | ||
20 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
21 | * of any contributors may be used to endorse or promote products derived | ||
22 | * from this software without specific prior written permission. | ||
23 | * | ||
24 | * Alternatively, this software may be distributed under the terms of the | ||
25 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
26 | * Software Foundation. | ||
27 | * | ||
28 | * NO WARRANTY | ||
29 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
30 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
31 | * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY | ||
32 | * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL | ||
33 | * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, | ||
34 | * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
35 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | ||
36 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
37 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
38 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF | ||
39 | * THE POSSIBILITY OF SUCH DAMAGES. | ||
40 | * | ||
41 | */ | ||
42 | |||
43 | #include <linux/version.h> | ||
44 | #include <linux/module.h> | ||
45 | #include <linux/delay.h> | ||
46 | #include <linux/if.h> | ||
47 | #include <linux/netdevice.h> | ||
48 | #include <linux/cache.h> | ||
49 | #include <linux/pci.h> | ||
50 | #include <linux/ethtool.h> | ||
51 | #include <linux/uaccess.h> | ||
52 | |||
53 | #include <net/ieee80211_radiotap.h> | ||
54 | |||
55 | #include <asm/unaligned.h> | ||
56 | |||
57 | #include "base.h" | ||
58 | #include "reg.h" | ||
59 | #include "debug.h" | ||
60 | |||
61 | /* unaligned little endian access */ | ||
62 | #define LE_READ_2(_p) (le16_to_cpu(get_unaligned((__le16 *)(_p)))) | ||
63 | #define LE_READ_4(_p) (le32_to_cpu(get_unaligned((__le32 *)(_p)))) | ||
64 | |||
65 | enum { | ||
66 | ATH_LED_TX, | ||
67 | ATH_LED_RX, | ||
68 | }; | ||
69 | |||
70 | static int ath5k_calinterval = 10; /* Calibrate PHY every 10 secs (TODO: Fixme) */ | ||
71 | |||
72 | |||
73 | /******************\ | ||
74 | * Internal defines * | ||
75 | \******************/ | ||
76 | |||
77 | /* Module info */ | ||
78 | MODULE_AUTHOR("Jiri Slaby"); | ||
79 | MODULE_AUTHOR("Nick Kossifidis"); | ||
80 | MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards."); | ||
81 | MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards"); | ||
82 | MODULE_LICENSE("Dual BSD/GPL"); | ||
83 | MODULE_VERSION("0.1.1 (EXPERIMENTAL)"); | ||
84 | |||
85 | |||
86 | /* Known PCI ids */ | ||
87 | static struct pci_device_id ath5k_pci_id_table[] __devinitdata = { | ||
88 | { PCI_VDEVICE(ATHEROS, 0x0207), .driver_data = AR5K_AR5210 }, /* 5210 early */ | ||
89 | { PCI_VDEVICE(ATHEROS, 0x0007), .driver_data = AR5K_AR5210 }, /* 5210 */ | ||
90 | { PCI_VDEVICE(ATHEROS, 0x0011), .driver_data = AR5K_AR5211 }, /* 5311 - this is on AHB bus! */ | ||
91 | { PCI_VDEVICE(ATHEROS, 0x0012), .driver_data = AR5K_AR5211 }, /* 5211 */ | ||
92 | { PCI_VDEVICE(ATHEROS, 0x0013), .driver_data = AR5K_AR5212 }, /* 5212 */ | ||
93 | { PCI_VDEVICE(3COM_2, 0x0013), .driver_data = AR5K_AR5212 }, /* 3com 5212 */ | ||
94 | { PCI_VDEVICE(3COM, 0x0013), .driver_data = AR5K_AR5212 }, /* 3com 3CRDAG675 5212 */ | ||
95 | { PCI_VDEVICE(ATHEROS, 0x1014), .driver_data = AR5K_AR5212 }, /* IBM minipci 5212 */ | ||
96 | { PCI_VDEVICE(ATHEROS, 0x0014), .driver_data = AR5K_AR5212 }, /* 5212 compatible */ | ||
97 | { PCI_VDEVICE(ATHEROS, 0x0015), .driver_data = AR5K_AR5212 }, /* 5212 compatible */ | ||
98 | { PCI_VDEVICE(ATHEROS, 0x0016), .driver_data = AR5K_AR5212 }, /* 5212 compatible */ | ||
99 | { PCI_VDEVICE(ATHEROS, 0x0017), .driver_data = AR5K_AR5212 }, /* 5212 compatible */ | ||
100 | { PCI_VDEVICE(ATHEROS, 0x0018), .driver_data = AR5K_AR5212 }, /* 5212 compatible */ | ||
101 | { PCI_VDEVICE(ATHEROS, 0x0019), .driver_data = AR5K_AR5212 }, /* 5212 compatible */ | ||
102 | { PCI_VDEVICE(ATHEROS, 0x001a), .driver_data = AR5K_AR5212 }, /* 2413 Griffin-lite */ | ||
103 | { PCI_VDEVICE(ATHEROS, 0x001b), .driver_data = AR5K_AR5212 }, /* 5413 Eagle */ | ||
104 | { PCI_VDEVICE(ATHEROS, 0x001c), .driver_data = AR5K_AR5212 }, /* 5424 Condor (PCI-E)*/ | ||
105 | { PCI_VDEVICE(ATHEROS, 0x0023), .driver_data = AR5K_AR5212 }, /* 5416 */ | ||
106 | { PCI_VDEVICE(ATHEROS, 0x0024), .driver_data = AR5K_AR5212 }, /* 5418 */ | ||
107 | { 0 } | ||
108 | }; | ||
109 | MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table); | ||
110 | |||
111 | /* Known SREVs */ | ||
112 | static struct ath5k_srev_name srev_names[] = { | ||
113 | { "5210", AR5K_VERSION_VER, AR5K_SREV_VER_AR5210 }, | ||
114 | { "5311", AR5K_VERSION_VER, AR5K_SREV_VER_AR5311 }, | ||
115 | { "5311A", AR5K_VERSION_VER, AR5K_SREV_VER_AR5311A }, | ||
116 | { "5311B", AR5K_VERSION_VER, AR5K_SREV_VER_AR5311B }, | ||
117 | { "5211", AR5K_VERSION_VER, AR5K_SREV_VER_AR5211 }, | ||
118 | { "5212", AR5K_VERSION_VER, AR5K_SREV_VER_AR5212 }, | ||
119 | { "5213", AR5K_VERSION_VER, AR5K_SREV_VER_AR5213 }, | ||
120 | { "5213A", AR5K_VERSION_VER, AR5K_SREV_VER_AR5213A }, | ||
121 | { "2424", AR5K_VERSION_VER, AR5K_SREV_VER_AR2424 }, | ||
122 | { "5424", AR5K_VERSION_VER, AR5K_SREV_VER_AR5424 }, | ||
123 | { "5413", AR5K_VERSION_VER, AR5K_SREV_VER_AR5413 }, | ||
124 | { "5414", AR5K_VERSION_VER, AR5K_SREV_VER_AR5414 }, | ||
125 | { "5416", AR5K_VERSION_VER, AR5K_SREV_VER_AR5416 }, | ||
126 | { "5418", AR5K_VERSION_VER, AR5K_SREV_VER_AR5418 }, | ||
127 | { "xxxxx", AR5K_VERSION_VER, AR5K_SREV_UNKNOWN }, | ||
128 | { "5110", AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 }, | ||
129 | { "5111", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 }, | ||
130 | { "2111", AR5K_VERSION_RAD, AR5K_SREV_RAD_2111 }, | ||
131 | { "5112", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112 }, | ||
132 | { "5112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112A }, | ||
133 | { "2112", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112 }, | ||
134 | { "2112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112A }, | ||
135 | { "SChip", AR5K_VERSION_RAD, AR5K_SREV_RAD_SC1 }, | ||
136 | { "SChip", AR5K_VERSION_RAD, AR5K_SREV_RAD_SC2 }, | ||
137 | { "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 }, | ||
138 | { "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN }, | ||
139 | }; | ||
140 | |||
141 | /* | ||
142 | * Prototypes - PCI stack related functions | ||
143 | */ | ||
144 | static int __devinit ath5k_pci_probe(struct pci_dev *pdev, | ||
145 | const struct pci_device_id *id); | ||
146 | static void __devexit ath5k_pci_remove(struct pci_dev *pdev); | ||
147 | #ifdef CONFIG_PM | ||
148 | static int ath5k_pci_suspend(struct pci_dev *pdev, | ||
149 | pm_message_t state); | ||
150 | static int ath5k_pci_resume(struct pci_dev *pdev); | ||
151 | #else | ||
152 | #define ath5k_pci_suspend NULL | ||
153 | #define ath5k_pci_resume NULL | ||
154 | #endif /* CONFIG_PM */ | ||
155 | |||
156 | static struct pci_driver ath5k_pci_drv_id = { | ||
157 | .name = "ath5k_pci", | ||
158 | .id_table = ath5k_pci_id_table, | ||
159 | .probe = ath5k_pci_probe, | ||
160 | .remove = __devexit_p(ath5k_pci_remove), | ||
161 | .suspend = ath5k_pci_suspend, | ||
162 | .resume = ath5k_pci_resume, | ||
163 | }; | ||
164 | |||
165 | |||
166 | |||
167 | /* | ||
168 | * Prototypes - MAC 802.11 stack related functions | ||
169 | */ | ||
170 | static int ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb, | ||
171 | struct ieee80211_tx_control *ctl); | ||
172 | static int ath5k_reset(struct ieee80211_hw *hw); | ||
173 | static int ath5k_start(struct ieee80211_hw *hw); | ||
174 | static void ath5k_stop(struct ieee80211_hw *hw); | ||
175 | static int ath5k_add_interface(struct ieee80211_hw *hw, | ||
176 | struct ieee80211_if_init_conf *conf); | ||
177 | static void ath5k_remove_interface(struct ieee80211_hw *hw, | ||
178 | struct ieee80211_if_init_conf *conf); | ||
179 | static int ath5k_config(struct ieee80211_hw *hw, | ||
180 | struct ieee80211_conf *conf); | ||
181 | static int ath5k_config_interface(struct ieee80211_hw *hw, int if_id, | ||
182 | struct ieee80211_if_conf *conf); | ||
183 | static void ath5k_configure_filter(struct ieee80211_hw *hw, | ||
184 | unsigned int changed_flags, | ||
185 | unsigned int *new_flags, | ||
186 | int mc_count, struct dev_mc_list *mclist); | ||
187 | static int ath5k_set_key(struct ieee80211_hw *hw, | ||
188 | enum set_key_cmd cmd, | ||
189 | const u8 *local_addr, const u8 *addr, | ||
190 | struct ieee80211_key_conf *key); | ||
191 | static int ath5k_get_stats(struct ieee80211_hw *hw, | ||
192 | struct ieee80211_low_level_stats *stats); | ||
193 | static int ath5k_get_tx_stats(struct ieee80211_hw *hw, | ||
194 | struct ieee80211_tx_queue_stats *stats); | ||
195 | static u64 ath5k_get_tsf(struct ieee80211_hw *hw); | ||
196 | static void ath5k_reset_tsf(struct ieee80211_hw *hw); | ||
197 | static int ath5k_beacon_update(struct ieee80211_hw *hw, | ||
198 | struct sk_buff *skb, | ||
199 | struct ieee80211_tx_control *ctl); | ||
200 | |||
201 | static struct ieee80211_ops ath5k_hw_ops = { | ||
202 | .tx = ath5k_tx, | ||
203 | .start = ath5k_start, | ||
204 | .stop = ath5k_stop, | ||
205 | .add_interface = ath5k_add_interface, | ||
206 | .remove_interface = ath5k_remove_interface, | ||
207 | .config = ath5k_config, | ||
208 | .config_interface = ath5k_config_interface, | ||
209 | .configure_filter = ath5k_configure_filter, | ||
210 | .set_key = ath5k_set_key, | ||
211 | .get_stats = ath5k_get_stats, | ||
212 | .conf_tx = NULL, | ||
213 | .get_tx_stats = ath5k_get_tx_stats, | ||
214 | .get_tsf = ath5k_get_tsf, | ||
215 | .reset_tsf = ath5k_reset_tsf, | ||
216 | .beacon_update = ath5k_beacon_update, | ||
217 | }; | ||
218 | |||
219 | /* | ||
220 | * Prototypes - Internal functions | ||
221 | */ | ||
222 | /* Attach detach */ | ||
223 | static int ath5k_attach(struct pci_dev *pdev, | ||
224 | struct ieee80211_hw *hw); | ||
225 | static void ath5k_detach(struct pci_dev *pdev, | ||
226 | struct ieee80211_hw *hw); | ||
227 | /* Channel/mode setup */ | ||
228 | static inline short ath5k_ieee2mhz(short chan); | ||
229 | static unsigned int ath5k_copy_rates(struct ieee80211_rate *rates, | ||
230 | const struct ath5k_rate_table *rt, | ||
231 | unsigned int max); | ||
232 | static unsigned int ath5k_copy_channels(struct ath5k_hw *ah, | ||
233 | struct ieee80211_channel *channels, | ||
234 | unsigned int mode, | ||
235 | unsigned int max); | ||
236 | static int ath5k_getchannels(struct ieee80211_hw *hw); | ||
237 | static int ath5k_chan_set(struct ath5k_softc *sc, | ||
238 | struct ieee80211_channel *chan); | ||
239 | static void ath5k_setcurmode(struct ath5k_softc *sc, | ||
240 | unsigned int mode); | ||
241 | static void ath5k_mode_setup(struct ath5k_softc *sc); | ||
242 | /* Descriptor setup */ | ||
243 | static int ath5k_desc_alloc(struct ath5k_softc *sc, | ||
244 | struct pci_dev *pdev); | ||
245 | static void ath5k_desc_free(struct ath5k_softc *sc, | ||
246 | struct pci_dev *pdev); | ||
247 | /* Buffers setup */ | ||
248 | static int ath5k_rxbuf_setup(struct ath5k_softc *sc, | ||
249 | struct ath5k_buf *bf); | ||
250 | static int ath5k_txbuf_setup(struct ath5k_softc *sc, | ||
251 | struct ath5k_buf *bf, | ||
252 | struct ieee80211_tx_control *ctl); | ||
253 | |||
254 | static inline void ath5k_txbuf_free(struct ath5k_softc *sc, | ||
255 | struct ath5k_buf *bf) | ||
256 | { | ||
257 | BUG_ON(!bf); | ||
258 | if (!bf->skb) | ||
259 | return; | ||
260 | pci_unmap_single(sc->pdev, bf->skbaddr, bf->skb->len, | ||
261 | PCI_DMA_TODEVICE); | ||
262 | dev_kfree_skb(bf->skb); | ||
263 | bf->skb = NULL; | ||
264 | } | ||
265 | |||
266 | /* Queues setup */ | ||
267 | static struct ath5k_txq *ath5k_txq_setup(struct ath5k_softc *sc, | ||
268 | int qtype, int subtype); | ||
269 | static int ath5k_beaconq_setup(struct ath5k_hw *ah); | ||
270 | static int ath5k_beaconq_config(struct ath5k_softc *sc); | ||
271 | static void ath5k_txq_drainq(struct ath5k_softc *sc, | ||
272 | struct ath5k_txq *txq); | ||
273 | static void ath5k_txq_cleanup(struct ath5k_softc *sc); | ||
274 | static void ath5k_txq_release(struct ath5k_softc *sc); | ||
275 | /* Rx handling */ | ||
276 | static int ath5k_rx_start(struct ath5k_softc *sc); | ||
277 | static void ath5k_rx_stop(struct ath5k_softc *sc); | ||
278 | static unsigned int ath5k_rx_decrypted(struct ath5k_softc *sc, | ||
279 | struct ath5k_desc *ds, | ||
280 | struct sk_buff *skb); | ||
281 | static void ath5k_tasklet_rx(unsigned long data); | ||
282 | /* Tx handling */ | ||
283 | static void ath5k_tx_processq(struct ath5k_softc *sc, | ||
284 | struct ath5k_txq *txq); | ||
285 | static void ath5k_tasklet_tx(unsigned long data); | ||
286 | /* Beacon handling */ | ||
287 | static int ath5k_beacon_setup(struct ath5k_softc *sc, | ||
288 | struct ath5k_buf *bf, | ||
289 | struct ieee80211_tx_control *ctl); | ||
290 | static void ath5k_beacon_send(struct ath5k_softc *sc); | ||
291 | static void ath5k_beacon_config(struct ath5k_softc *sc); | ||
292 | |||
293 | static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp) | ||
294 | { | ||
295 | u64 tsf = ath5k_hw_get_tsf64(ah); | ||
296 | |||
297 | if ((tsf & 0x7fff) < rstamp) | ||
298 | tsf -= 0x8000; | ||
299 | |||
300 | return (tsf & ~0x7fff) | rstamp; | ||
301 | } | ||
302 | |||
303 | /* Interrupt handling */ | ||
304 | static int ath5k_init(struct ath5k_softc *sc); | ||
305 | static int ath5k_stop_locked(struct ath5k_softc *sc); | ||
306 | static int ath5k_stop_hw(struct ath5k_softc *sc); | ||
307 | static irqreturn_t ath5k_intr(int irq, void *dev_id); | ||
308 | static void ath5k_tasklet_reset(unsigned long data); | ||
309 | |||
310 | static void ath5k_calibrate(unsigned long data); | ||
311 | /* LED functions */ | ||
312 | static void ath5k_led_off(unsigned long data); | ||
313 | static void ath5k_led_blink(struct ath5k_softc *sc, | ||
314 | unsigned int on, | ||
315 | unsigned int off); | ||
316 | static void ath5k_led_event(struct ath5k_softc *sc, | ||
317 | int event); | ||
318 | |||
319 | |||
320 | /* | ||
321 | * Module init/exit functions | ||
322 | */ | ||
323 | static int __init | ||
324 | init_ath5k_pci(void) | ||
325 | { | ||
326 | int ret; | ||
327 | |||
328 | ath5k_debug_init(); | ||
329 | |||
330 | ret = pci_register_driver(&ath5k_pci_drv_id); | ||
331 | if (ret) { | ||
332 | printk(KERN_ERR "ath5k_pci: can't register pci driver\n"); | ||
333 | return ret; | ||
334 | } | ||
335 | |||
336 | return 0; | ||
337 | } | ||
338 | |||
339 | static void __exit | ||
340 | exit_ath5k_pci(void) | ||
341 | { | ||
342 | pci_unregister_driver(&ath5k_pci_drv_id); | ||
343 | |||
344 | ath5k_debug_finish(); | ||
345 | } | ||
346 | |||
347 | module_init(init_ath5k_pci); | ||
348 | module_exit(exit_ath5k_pci); | ||
349 | |||
350 | |||
351 | /********************\ | ||
352 | * PCI Initialization * | ||
353 | \********************/ | ||
354 | |||
355 | static const char * | ||
356 | ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val) | ||
357 | { | ||
358 | const char *name = "xxxxx"; | ||
359 | unsigned int i; | ||
360 | |||
361 | for (i = 0; i < ARRAY_SIZE(srev_names); i++) { | ||
362 | if (srev_names[i].sr_type != type) | ||
363 | continue; | ||
364 | if ((val & 0xff) < srev_names[i + 1].sr_val) { | ||
365 | name = srev_names[i].sr_name; | ||
366 | break; | ||
367 | } | ||
368 | } | ||
369 | |||
370 | return name; | ||
371 | } | ||
372 | |||
373 | static int __devinit | ||
374 | ath5k_pci_probe(struct pci_dev *pdev, | ||
375 | const struct pci_device_id *id) | ||
376 | { | ||
377 | void __iomem *mem; | ||
378 | struct ath5k_softc *sc; | ||
379 | struct ieee80211_hw *hw; | ||
380 | int ret; | ||
381 | u8 csz; | ||
382 | |||
383 | ret = pci_enable_device(pdev); | ||
384 | if (ret) { | ||
385 | dev_err(&pdev->dev, "can't enable device\n"); | ||
386 | goto err; | ||
387 | } | ||
388 | |||
389 | /* XXX 32-bit addressing only */ | ||
390 | ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | ||
391 | if (ret) { | ||
392 | dev_err(&pdev->dev, "32-bit DMA not available\n"); | ||
393 | goto err_dis; | ||
394 | } | ||
395 | |||
396 | /* | ||
397 | * Cache line size is used to size and align various | ||
398 | * structures used to communicate with the hardware. | ||
399 | */ | ||
400 | pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz); | ||
401 | if (csz == 0) { | ||
402 | /* | ||
403 | * Linux 2.4.18 (at least) writes the cache line size | ||
404 | * register as a 16-bit wide register which is wrong. | ||
405 | * We must have this setup properly for rx buffer | ||
406 | * DMA to work so force a reasonable value here if it | ||
407 | * comes up zero. | ||
408 | */ | ||
409 | csz = L1_CACHE_BYTES / sizeof(u32); | ||
410 | pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz); | ||
411 | } | ||
412 | /* | ||
413 | * The default setting of latency timer yields poor results, | ||
414 | * set it to the value used by other systems. It may be worth | ||
415 | * tweaking this setting more. | ||
416 | */ | ||
417 | pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8); | ||
418 | |||
419 | /* Enable bus mastering */ | ||
420 | pci_set_master(pdev); | ||
421 | |||
422 | /* | ||
423 | * Disable the RETRY_TIMEOUT register (0x41) to keep | ||
424 | * PCI Tx retries from interfering with C3 CPU state. | ||
425 | */ | ||
426 | pci_write_config_byte(pdev, 0x41, 0); | ||
427 | |||
428 | ret = pci_request_region(pdev, 0, "ath5k"); | ||
429 | if (ret) { | ||
430 | dev_err(&pdev->dev, "cannot reserve PCI memory region\n"); | ||
431 | goto err_dis; | ||
432 | } | ||
433 | |||
434 | mem = pci_iomap(pdev, 0, 0); | ||
435 | if (!mem) { | ||
436 | dev_err(&pdev->dev, "cannot remap PCI memory region\n"); | ||
437 | ret = -EIO; | ||
438 | goto err_reg; | ||
439 | } | ||
440 | |||
441 | /* | ||
442 | * Allocate hw (mac80211 main struct) | ||
443 | * and hw->priv (driver private data) | ||
444 | */ | ||
445 | hw = ieee80211_alloc_hw(sizeof(*sc), &ath5k_hw_ops); | ||
446 | if (hw == NULL) { | ||
447 | dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n"); | ||
448 | ret = -ENOMEM; | ||
449 | goto err_map; | ||
450 | } | ||
451 | |||
452 | dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy)); | ||
453 | |||
454 | /* Initialize driver private data */ | ||
455 | SET_IEEE80211_DEV(hw, &pdev->dev); | ||
456 | hw->flags = IEEE80211_HW_RX_INCLUDES_FCS; | ||
457 | hw->extra_tx_headroom = 2; | ||
458 | hw->channel_change_time = 5000; | ||
459 | /* these names are misleading */ | ||
460 | hw->max_rssi = -110; /* signal in dBm */ | ||
461 | hw->max_noise = -110; /* noise in dBm */ | ||
462 | hw->max_signal = 100; /* we will provide a percentage based on rssi */ | ||
463 | sc = hw->priv; | ||
464 | sc->hw = hw; | ||
465 | sc->pdev = pdev; | ||
466 | |||
467 | ath5k_debug_init_device(sc); | ||
468 | |||
469 | /* | ||
470 | * Mark the device as detached to avoid processing | ||
471 | * interrupts until setup is complete. | ||
472 | */ | ||
473 | __set_bit(ATH_STAT_INVALID, sc->status); | ||
474 | |||
475 | sc->iobase = mem; /* So we can unmap it on detach */ | ||
476 | sc->cachelsz = csz * sizeof(u32); /* convert to bytes */ | ||
477 | sc->opmode = IEEE80211_IF_TYPE_STA; | ||
478 | mutex_init(&sc->lock); | ||
479 | spin_lock_init(&sc->rxbuflock); | ||
480 | spin_lock_init(&sc->txbuflock); | ||
481 | |||
482 | /* Set private data */ | ||
483 | pci_set_drvdata(pdev, hw); | ||
484 | |||
485 | /* Enable msi for devices that support it */ | ||
486 | pci_enable_msi(pdev); | ||
487 | |||
488 | /* Setup interrupt handler */ | ||
489 | ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc); | ||
490 | if (ret) { | ||
491 | ATH5K_ERR(sc, "request_irq failed\n"); | ||
492 | goto err_free; | ||
493 | } | ||
494 | |||
495 | /* Initialize device */ | ||
496 | sc->ah = ath5k_hw_attach(sc, id->driver_data); | ||
497 | if (IS_ERR(sc->ah)) { | ||
498 | ret = PTR_ERR(sc->ah); | ||
499 | goto err_irq; | ||
500 | } | ||
501 | |||
502 | /* Finish private driver data initialization */ | ||
503 | ret = ath5k_attach(pdev, hw); | ||
504 | if (ret) | ||
505 | goto err_ah; | ||
506 | |||
507 | ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n", | ||
508 | ath5k_chip_name(AR5K_VERSION_VER,sc->ah->ah_mac_srev), | ||
509 | sc->ah->ah_mac_srev, | ||
510 | sc->ah->ah_phy_revision); | ||
511 | |||
512 | if(!sc->ah->ah_single_chip){ | ||
513 | /* Single chip radio (!RF5111) */ | ||
514 | if(sc->ah->ah_radio_5ghz_revision && !sc->ah->ah_radio_2ghz_revision) { | ||
515 | /* No 5GHz support -> report 2GHz radio */ | ||
516 | if(!test_bit(MODE_IEEE80211A, sc->ah->ah_capabilities.cap_mode)){ | ||
517 | ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n", | ||
518 | ath5k_chip_name(AR5K_VERSION_RAD,sc->ah->ah_radio_5ghz_revision), | ||
519 | sc->ah->ah_radio_5ghz_revision); | ||
520 | /* No 2GHz support (5110 and some 5GHz-only cards) -> report 5GHz radio */ | ||
521 | } else if(!test_bit(MODE_IEEE80211B, sc->ah->ah_capabilities.cap_mode)){ | ||
522 | ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n", | ||
523 | ath5k_chip_name(AR5K_VERSION_RAD,sc->ah->ah_radio_5ghz_revision), | ||
524 | sc->ah->ah_radio_5ghz_revision); | ||
525 | /* Multiband radio */ | ||
526 | } else { | ||
527 | ATH5K_INFO(sc, "RF%s multiband radio found" | ||
528 | " (0x%x)\n", | ||
529 | ath5k_chip_name(AR5K_VERSION_RAD,sc->ah->ah_radio_5ghz_revision), | ||
530 | sc->ah->ah_radio_5ghz_revision); | ||
531 | } | ||
532 | } | ||
533 | /* Multi chip radio (RF5111 - RF2111) -> report both 2GHz/5GHz radios */ | ||
534 | else if(sc->ah->ah_radio_5ghz_revision && sc->ah->ah_radio_2ghz_revision){ | ||
535 | ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n", | ||
536 | ath5k_chip_name(AR5K_VERSION_RAD,sc->ah->ah_radio_5ghz_revision), | ||
537 | sc->ah->ah_radio_5ghz_revision); | ||
538 | ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n", | ||
539 | ath5k_chip_name(AR5K_VERSION_RAD,sc->ah->ah_radio_2ghz_revision), | ||
540 | sc->ah->ah_radio_2ghz_revision); | ||
541 | } | ||
542 | } | ||
543 | |||
544 | |||
545 | /* ready to process interrupts */ | ||
546 | __clear_bit(ATH_STAT_INVALID, sc->status); | ||
547 | |||
548 | return 0; | ||
549 | err_ah: | ||
550 | ath5k_hw_detach(sc->ah); | ||
551 | err_irq: | ||
552 | free_irq(pdev->irq, sc); | ||
553 | err_free: | ||
554 | pci_disable_msi(pdev); | ||
555 | ieee80211_free_hw(hw); | ||
556 | err_map: | ||
557 | pci_iounmap(pdev, mem); | ||
558 | err_reg: | ||
559 | pci_release_region(pdev, 0); | ||
560 | err_dis: | ||
561 | pci_disable_device(pdev); | ||
562 | err: | ||
563 | return ret; | ||
564 | } | ||
565 | |||
566 | static void __devexit | ||
567 | ath5k_pci_remove(struct pci_dev *pdev) | ||
568 | { | ||
569 | struct ieee80211_hw *hw = pci_get_drvdata(pdev); | ||
570 | struct ath5k_softc *sc = hw->priv; | ||
571 | |||
572 | ath5k_debug_finish_device(sc); | ||
573 | ath5k_detach(pdev, hw); | ||
574 | ath5k_hw_detach(sc->ah); | ||
575 | free_irq(pdev->irq, sc); | ||
576 | pci_disable_msi(pdev); | ||
577 | pci_iounmap(pdev, sc->iobase); | ||
578 | pci_release_region(pdev, 0); | ||
579 | pci_disable_device(pdev); | ||
580 | ieee80211_free_hw(hw); | ||
581 | } | ||
582 | |||
583 | #ifdef CONFIG_PM | ||
584 | static int | ||
585 | ath5k_pci_suspend(struct pci_dev *pdev, pm_message_t state) | ||
586 | { | ||
587 | struct ieee80211_hw *hw = pci_get_drvdata(pdev); | ||
588 | struct ath5k_softc *sc = hw->priv; | ||
589 | |||
590 | if (test_bit(ATH_STAT_LEDSOFT, sc->status)) | ||
591 | ath5k_hw_set_gpio(sc->ah, sc->led_pin, 1); | ||
592 | |||
593 | ath5k_stop_hw(sc); | ||
594 | pci_save_state(pdev); | ||
595 | pci_disable_device(pdev); | ||
596 | pci_set_power_state(pdev, PCI_D3hot); | ||
597 | |||
598 | return 0; | ||
599 | } | ||
600 | |||
601 | static int | ||
602 | ath5k_pci_resume(struct pci_dev *pdev) | ||
603 | { | ||
604 | struct ieee80211_hw *hw = pci_get_drvdata(pdev); | ||
605 | struct ath5k_softc *sc = hw->priv; | ||
606 | int err; | ||
607 | |||
608 | err = pci_set_power_state(pdev, PCI_D0); | ||
609 | if (err) | ||
610 | return err; | ||
611 | |||
612 | err = pci_enable_device(pdev); | ||
613 | if (err) | ||
614 | return err; | ||
615 | |||
616 | pci_restore_state(pdev); | ||
617 | /* | ||
618 | * Suspend/Resume resets the PCI configuration space, so we have to | ||
619 | * re-disable the RETRY_TIMEOUT register (0x41) to keep | ||
620 | * PCI Tx retries from interfering with C3 CPU state | ||
621 | */ | ||
622 | pci_write_config_byte(pdev, 0x41, 0); | ||
623 | |||
624 | ath5k_init(sc); | ||
625 | if (test_bit(ATH_STAT_LEDSOFT, sc->status)) { | ||
626 | ath5k_hw_set_gpio_output(sc->ah, sc->led_pin); | ||
627 | ath5k_hw_set_gpio(sc->ah, sc->led_pin, 0); | ||
628 | } | ||
629 | |||
630 | return 0; | ||
631 | } | ||
632 | #endif /* CONFIG_PM */ | ||
633 | |||
634 | |||
635 | |||
636 | /***********************\ | ||
637 | * Driver Initialization * | ||
638 | \***********************/ | ||
639 | |||
640 | static int | ||
641 | ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw) | ||
642 | { | ||
643 | struct ath5k_softc *sc = hw->priv; | ||
644 | struct ath5k_hw *ah = sc->ah; | ||
645 | u8 mac[ETH_ALEN]; | ||
646 | unsigned int i; | ||
647 | int ret; | ||
648 | |||
649 | ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device); | ||
650 | |||
651 | /* | ||
652 | * Check if the MAC has multi-rate retry support. | ||
653 | * We do this by trying to setup a fake extended | ||
654 | * descriptor. MAC's that don't have support will | ||
655 | * return false w/o doing anything. MAC's that do | ||
656 | * support it will return true w/o doing anything. | ||
657 | */ | ||
658 | if (ah->ah_setup_xtx_desc(ah, NULL, 0, 0, 0, 0, 0, 0)) | ||
659 | __set_bit(ATH_STAT_MRRETRY, sc->status); | ||
660 | |||
661 | /* | ||
662 | * Reset the key cache since some parts do not | ||
663 | * reset the contents on initial power up. | ||
664 | */ | ||
665 | for (i = 0; i < AR5K_KEYCACHE_SIZE; i++) | ||
666 | ath5k_hw_reset_key(ah, i); | ||
667 | |||
668 | /* | ||
669 | * Collect the channel list. The 802.11 layer | ||
670 | * is responsible for filtering this list based | ||
671 | * on settings like the phy mode and regulatory | ||
672 | * domain restrictions. | ||
673 | */ | ||
674 | ret = ath5k_getchannels(hw); | ||
675 | if (ret) { | ||
676 | ATH5K_ERR(sc, "can't get channels\n"); | ||
677 | goto err; | ||
678 | } | ||
679 | |||
680 | /* NB: setup here so ath5k_rate_update is happy */ | ||
681 | if (test_bit(MODE_IEEE80211A, ah->ah_modes)) | ||
682 | ath5k_setcurmode(sc, MODE_IEEE80211A); | ||
683 | else | ||
684 | ath5k_setcurmode(sc, MODE_IEEE80211B); | ||
685 | |||
686 | /* | ||
687 | * Allocate tx+rx descriptors and populate the lists. | ||
688 | */ | ||
689 | ret = ath5k_desc_alloc(sc, pdev); | ||
690 | if (ret) { | ||
691 | ATH5K_ERR(sc, "can't allocate descriptors\n"); | ||
692 | goto err; | ||
693 | } | ||
694 | |||
695 | /* | ||
696 | * Allocate hardware transmit queues: one queue for | ||
697 | * beacon frames and one data queue for each QoS | ||
698 | * priority. Note that hw functions handle resetting | ||
699 | * these queues at the needed time. | ||
700 | */ | ||
701 | ret = ath5k_beaconq_setup(ah); | ||
702 | if (ret < 0) { | ||
703 | ATH5K_ERR(sc, "can't setup a beacon xmit queue\n"); | ||
704 | goto err_desc; | ||
705 | } | ||
706 | sc->bhalq = ret; | ||
707 | |||
708 | sc->txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK); | ||
709 | if (IS_ERR(sc->txq)) { | ||
710 | ATH5K_ERR(sc, "can't setup xmit queue\n"); | ||
711 | ret = PTR_ERR(sc->txq); | ||
712 | goto err_bhal; | ||
713 | } | ||
714 | |||
715 | tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc); | ||
716 | tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc); | ||
717 | tasklet_init(&sc->restq, ath5k_tasklet_reset, (unsigned long)sc); | ||
718 | setup_timer(&sc->calib_tim, ath5k_calibrate, (unsigned long)sc); | ||
719 | setup_timer(&sc->led_tim, ath5k_led_off, (unsigned long)sc); | ||
720 | |||
721 | sc->led_on = 0; /* low true */ | ||
722 | /* | ||
723 | * Auto-enable soft led processing for IBM cards and for | ||
724 | * 5211 minipci cards. | ||
725 | */ | ||
726 | if (pdev->device == PCI_DEVICE_ID_ATHEROS_AR5212_IBM || | ||
727 | pdev->device == PCI_DEVICE_ID_ATHEROS_AR5211) { | ||
728 | __set_bit(ATH_STAT_LEDSOFT, sc->status); | ||
729 | sc->led_pin = 0; | ||
730 | } | ||
731 | /* Enable softled on PIN1 on HP Compaq nc6xx, nc4000 & nx5000 laptops */ | ||
732 | if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ) { | ||
733 | __set_bit(ATH_STAT_LEDSOFT, sc->status); | ||
734 | sc->led_pin = 0; | ||
735 | } | ||
736 | if (test_bit(ATH_STAT_LEDSOFT, sc->status)) { | ||
737 | ath5k_hw_set_gpio_output(ah, sc->led_pin); | ||
738 | ath5k_hw_set_gpio(ah, sc->led_pin, !sc->led_on); | ||
739 | } | ||
740 | |||
741 | ath5k_hw_get_lladdr(ah, mac); | ||
742 | SET_IEEE80211_PERM_ADDR(hw, mac); | ||
743 | /* All MAC address bits matter for ACKs */ | ||
744 | memset(sc->bssidmask, 0xff, ETH_ALEN); | ||
745 | ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask); | ||
746 | |||
747 | ret = ieee80211_register_hw(hw); | ||
748 | if (ret) { | ||
749 | ATH5K_ERR(sc, "can't register ieee80211 hw\n"); | ||
750 | goto err_queues; | ||
751 | } | ||
752 | |||
753 | return 0; | ||
754 | err_queues: | ||
755 | ath5k_txq_release(sc); | ||
756 | err_bhal: | ||
757 | ath5k_hw_release_tx_queue(ah, sc->bhalq); | ||
758 | err_desc: | ||
759 | ath5k_desc_free(sc, pdev); | ||
760 | err: | ||
761 | return ret; | ||
762 | } | ||
763 | |||
764 | static void | ||
765 | ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw) | ||
766 | { | ||
767 | struct ath5k_softc *sc = hw->priv; | ||
768 | |||
769 | /* | ||
770 | * NB: the order of these is important: | ||
771 | * o call the 802.11 layer before detaching ath5k_hw to | ||
772 | * insure callbacks into the driver to delete global | ||
773 | * key cache entries can be handled | ||
774 | * o reclaim the tx queue data structures after calling | ||
775 | * the 802.11 layer as we'll get called back to reclaim | ||
776 | * node state and potentially want to use them | ||
777 | * o to cleanup the tx queues the hal is called, so detach | ||
778 | * it last | ||
779 | * XXX: ??? detach ath5k_hw ??? | ||
780 | * Other than that, it's straightforward... | ||
781 | */ | ||
782 | ieee80211_unregister_hw(hw); | ||
783 | ath5k_desc_free(sc, pdev); | ||
784 | ath5k_txq_release(sc); | ||
785 | ath5k_hw_release_tx_queue(sc->ah, sc->bhalq); | ||
786 | |||
787 | /* | ||
788 | * NB: can't reclaim these until after ieee80211_ifdetach | ||
789 | * returns because we'll get called back to reclaim node | ||
790 | * state and potentially want to use them. | ||
791 | */ | ||
792 | } | ||
793 | |||
794 | |||
795 | |||
796 | |||
797 | /********************\ | ||
798 | * Channel/mode setup * | ||
799 | \********************/ | ||
800 | |||
801 | /* | ||
802 | * Convert IEEE channel number to MHz frequency. | ||
803 | */ | ||
804 | static inline short | ||
805 | ath5k_ieee2mhz(short chan) | ||
806 | { | ||
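/*
 * Channels 15-26 are not covered by ieee80211chan2mhz(); map them
 * linearly onto 2512-2732 MHz (2212 + 20 MHz per channel number).
 */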
807 | if (chan <= 14 || chan >= 27) | ||
808 | return ieee80211chan2mhz(chan); | ||
809 | else | ||
810 | return 2212 + chan * 20; | ||
811 | } | ||
812 | |||
813 | static unsigned int | ||
814 | ath5k_copy_rates(struct ieee80211_rate *rates, | ||
815 | const struct ath5k_rate_table *rt, | ||
816 | unsigned int max) | ||
817 | { | ||
818 | unsigned int i, count; | ||
819 | |||
820 | if (rt == NULL) | ||
821 | return 0; | ||
822 | |||
823 | for (i = 0, count = 0; i < rt->rate_count && max > 0; i++) { | ||
824 | if (!rt->rates[i].valid) | ||
825 | continue; | ||
826 | rates->rate = rt->rates[i].rate_kbps / 100; | ||
827 | rates->val = rt->rates[i].rate_code; | ||
828 | rates->flags = rt->rates[i].modulation; | ||
829 | rates++; | ||
830 | count++; | ||
831 | max--; | ||
832 | } | ||
833 | |||
834 | return count; | ||
835 | } | ||
836 | |||
837 | static unsigned int | ||
838 | ath5k_copy_channels(struct ath5k_hw *ah, | ||
839 | struct ieee80211_channel *channels, | ||
840 | unsigned int mode, | ||
841 | unsigned int max) | ||
842 | { | ||
843 | static const struct { unsigned int mode, mask, chan; } map[] = { | ||
844 | [MODE_IEEE80211A] = { CHANNEL_OFDM, CHANNEL_OFDM | CHANNEL_TURBO, CHANNEL_A }, | ||
845 | [MODE_ATHEROS_TURBO] = { CHANNEL_OFDM|CHANNEL_TURBO, CHANNEL_OFDM | CHANNEL_TURBO, CHANNEL_T }, | ||
846 | [MODE_IEEE80211B] = { CHANNEL_CCK, CHANNEL_CCK, CHANNEL_B }, | ||
847 | [MODE_IEEE80211G] = { CHANNEL_OFDM, CHANNEL_OFDM, CHANNEL_G }, | ||
848 | [MODE_ATHEROS_TURBOG] = { CHANNEL_OFDM | CHANNEL_TURBO, CHANNEL_OFDM | CHANNEL_TURBO, CHANNEL_TG }, | ||
849 | }; | ||
850 | static const struct ath5k_regchannel chans_2ghz[] = | ||
851 | IEEE80211_CHANNELS_2GHZ; | ||
852 | static const struct ath5k_regchannel chans_5ghz[] = | ||
853 | IEEE80211_CHANNELS_5GHZ; | ||
854 | const struct ath5k_regchannel *chans; | ||
855 | enum ath5k_regdom dmn; | ||
856 | unsigned int i, count, size, chfreq, all, f, ch; | ||
857 | |||
858 | if (!test_bit(mode, ah->ah_modes)) | ||
859 | return 0; | ||
860 | |||
861 | all = ah->ah_regdomain == DMN_DEFAULT || CHAN_DEBUG == 1; | ||
862 | |||
863 | switch (mode) { | ||
864 | case MODE_IEEE80211A: | ||
865 | case MODE_ATHEROS_TURBO: | ||
866 | /* 1..220, but 2GHz frequencies are filtered by check_channel */ | ||
867 | size = all ? 220 : ARRAY_SIZE(chans_5ghz); | ||
868 | chans = chans_5ghz; | ||
869 | dmn = ath5k_regdom2flag(ah->ah_regdomain, | ||
870 | IEEE80211_CHANNELS_5GHZ_MIN); | ||
871 | chfreq = CHANNEL_5GHZ; | ||
872 | break; | ||
873 | case MODE_IEEE80211B: | ||
874 | case MODE_IEEE80211G: | ||
875 | case MODE_ATHEROS_TURBOG: | ||
876 | size = all ? 26 : ARRAY_SIZE(chans_2ghz); | ||
877 | chans = chans_2ghz; | ||
878 | dmn = ath5k_regdom2flag(ah->ah_regdomain, | ||
879 | IEEE80211_CHANNELS_2GHZ_MIN); | ||
880 | chfreq = CHANNEL_2GHZ; | ||
881 | break; | ||
882 | default: | ||
883 | ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n"); | ||
884 | return 0; | ||
885 | } | ||
886 | |||
887 | for (i = 0, count = 0; i < size && max > 0; i++) { | ||
888 | ch = all ? i + 1 : chans[i].chan; | ||
889 | f = ath5k_ieee2mhz(ch); | ||
890 | /* Check if channel is supported by the chipset */ | ||
891 | if (!ath5k_channel_ok(ah, f, chfreq)) | ||
892 | continue; | ||
893 | |||
894 | /* Match regulation domain */ | ||
895 | if (!all && !(IEEE80211_DMN(chans[i].domain) & | ||
896 | IEEE80211_DMN(dmn))) | ||
897 | continue; | ||
898 | |||
899 | if (!all && (chans[i].mode & map[mode].mask) != map[mode].mode) | ||
900 | continue; | ||
901 | |||
902 | /* Write channel and increment counter */ | ||
903 | channels->chan = ch; | ||
904 | channels->freq = f; | ||
905 | channels->val = map[mode].chan; | ||
906 | channels++; | ||
907 | count++; | ||
908 | max--; | ||
909 | } | ||
910 | |||
911 | return count; | ||
912 | } | ||
913 | |||
914 | /* Only tries to register modes our EEPROM says it can support */ | ||
915 | #define REGISTER_MODE(m) do { \ | ||
916 | ret = ath5k_register_mode(hw, m); \ | ||
917 | if (ret) \ | ||
918 | return ret; \ | ||
919 | } while (0) | ||
920 | |||
921 | static inline int | ||
922 | ath5k_register_mode(struct ieee80211_hw *hw, u8 m) | ||
923 | { | ||
924 | struct ath5k_softc *sc = hw->priv; | ||
925 | struct ieee80211_hw_mode *modes = sc->modes; | ||
926 | unsigned int i; | ||
927 | int ret; | ||
928 | |||
929 | if (!test_bit(m, sc->ah->ah_capabilities.cap_mode)) | ||
930 | return 0; | ||
931 | |||
932 | for (i = 0; i < NUM_DRIVER_MODES; i++) { | ||
933 | if (modes[i].mode != m || !modes[i].num_channels) | ||
934 | continue; | ||
935 | ret = ieee80211_register_hwmode(hw, &modes[i]); | ||
936 | if (ret) { | ||
937 | ATH5K_ERR(sc, "can't register hwmode %u\n", m); | ||
938 | return ret; | ||
939 | } | ||
940 | return 0; | ||
941 | } | ||
942 | BUG(); | ||
943 | } | ||
944 | |||
945 | static int | ||
946 | ath5k_getchannels(struct ieee80211_hw *hw) | ||
947 | { | ||
948 | struct ath5k_softc *sc = hw->priv; | ||
949 | struct ath5k_hw *ah = sc->ah; | ||
950 | struct ieee80211_hw_mode *modes = sc->modes; | ||
951 | unsigned int i, max_r, max_c; | ||
952 | int ret; | ||
953 | |||
954 | BUILD_BUG_ON(ARRAY_SIZE(sc->modes) < 3); | ||
955 | |||
956 | /* The order here does not matter */ | ||
957 | modes[0].mode = MODE_IEEE80211G; | ||
958 | modes[1].mode = MODE_IEEE80211B; | ||
959 | modes[2].mode = MODE_IEEE80211A; | ||
960 | |||
961 | max_r = ARRAY_SIZE(sc->rates); | ||
962 | max_c = ARRAY_SIZE(sc->channels); | ||
963 | |||
964 | for (i = 0; i < NUM_DRIVER_MODES; i++) { | ||
965 | struct ieee80211_hw_mode *mode = &modes[i]; | ||
966 | const struct ath5k_rate_table *hw_rates; | ||
967 | |||
968 | if (i == 0) { | ||
969 | modes[0].rates = sc->rates; | ||
970 | modes->channels = sc->channels; | ||
971 | } else { | ||
972 | struct ieee80211_hw_mode *prev_mode = &modes[i-1]; | ||
973 | int prev_num_r = prev_mode->num_rates; | ||
974 | int prev_num_c = prev_mode->num_channels; | ||
975 | mode->rates = &prev_mode->rates[prev_num_r]; | ||
976 | mode->channels = &prev_mode->channels[prev_num_c]; | ||
977 | } | ||
978 | |||
979 | hw_rates = ath5k_hw_get_rate_table(ah, mode->mode); | ||
980 | mode->num_rates = ath5k_copy_rates(mode->rates, hw_rates, | ||
981 | max_r); | ||
982 | mode->num_channels = ath5k_copy_channels(ah, mode->channels, | ||
983 | mode->mode, max_c); | ||
984 | max_r -= mode->num_rates; | ||
985 | max_c -= mode->num_channels; | ||
986 | } | ||
987 | |||
988 | /* We try to register all modes this driver supports. We don't bother | ||
989 | * with MODE_IEEE80211B for AR5212 as MODE_IEEE80211G already accounts | ||
990 | * for that as per mac80211. Then, REGISTER_MODE() will actually | ||
991 | * check the eeprom reading for more reliable capability information. | ||
992 | * Order matters here as per mac80211's latest preference. This will | ||
993 | * all hopefully soon go away. */ | ||
994 | |||
995 | REGISTER_MODE(MODE_IEEE80211G); | ||
996 | if (ah->ah_version != AR5K_AR5212) | ||
997 | REGISTER_MODE(MODE_IEEE80211B); | ||
998 | REGISTER_MODE(MODE_IEEE80211A); | ||
999 | |||
1000 | ath5k_debug_dump_modes(sc, modes); | ||
1001 | |||
1002 | return ret; | ||
1003 | } | ||
1004 | |||
1005 | /* | ||
1006 | * Set/change channels. If the channel is really being changed, | ||
1007 | * it's done by resetting the chip. To accomplish this we must | ||
1008 | * first clean up any pending DMA, then restart things, a la | ||
1009 | * ath5k_init. | ||
1010 | */ | ||
1011 | static int | ||
1012 | ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan) | ||
1013 | { | ||
1014 | struct ath5k_hw *ah = sc->ah; | ||
1015 | int ret; | ||
1016 | |||
1017 | ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "%u (%u MHz) -> %u (%u MHz)\n", | ||
1018 | sc->curchan->chan, sc->curchan->freq, | ||
1019 | chan->chan, chan->freq); | ||
1020 | |||
1021 | if (chan->freq != sc->curchan->freq || chan->val != sc->curchan->val) { | ||
1022 | /* | ||
1023 | * To switch channels clear any pending DMA operations; | ||
1024 | * wait long enough for the RX fifo to drain, reset the | ||
1025 | * hardware at the new frequency, and then re-enable | ||
1026 | * the relevant bits of the h/w. | ||
1027 | */ | ||
1028 | ath5k_hw_set_intr(ah, 0); /* disable interrupts */ | ||
1029 | ath5k_txq_cleanup(sc); /* clear pending tx frames */ | ||
1030 | ath5k_rx_stop(sc); /* turn off frame recv */ | ||
1031 | ret = ath5k_hw_reset(ah, sc->opmode, chan, true); | ||
1032 | if (ret) { | ||
1033 | ATH5K_ERR(sc, "%s: unable to reset channel %u " | ||
1034 | "(%u MHz)\n", __func__, chan->chan, chan->freq); | ||
1035 | return ret; | ||
1036 | } | ||
1037 | sc->curchan = chan; | ||
1038 | ath5k_hw_set_txpower_limit(sc->ah, 0); | ||
1039 | |||
1040 | /* | ||
1041 | * Re-enable rx framework. | ||
1042 | */ | ||
1043 | ret = ath5k_rx_start(sc); | ||
1044 | if (ret) { | ||
1045 | ATH5K_ERR(sc, "%s: unable to restart recv logic\n", | ||
1046 | __func__); | ||
1047 | return ret; | ||
1048 | } | ||
1049 | |||
1050 | /* | ||
1051 | * Change channels and update the h/w rate map | ||
1052 | * if we're switching; e.g. 11a to 11b/g. | ||
1053 | * | ||
1054 | * XXX needed? | ||
1055 | */ | ||
1056 | /* ath5k_chan_change(sc, chan); */ | ||
1057 | |||
1058 | ath5k_beacon_config(sc); | ||
1059 | /* | ||
1060 | * Re-enable interrupts. | ||
1061 | */ | ||
1062 | ath5k_hw_set_intr(ah, sc->imask); | ||
1063 | } | ||
1064 | |||
1065 | return 0; | ||
1066 | } | ||
1067 | |||
1068 | static void | ||
1069 | ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode) | ||
1070 | { | ||
1071 | if (unlikely(test_bit(ATH_STAT_LEDSOFT, sc->status))) { | ||
1072 | /* from Atheros NDIS driver, w/ permission */ | ||
1073 | static const struct { | ||
1074 | u16 rate; /* tx/rx 802.11 rate */ | ||
1075 | u16 timeOn; /* LED on time (ms) */ | ||
1076 | u16 timeOff; /* LED off time (ms) */ | ||
1077 | } blinkrates[] = { | ||
1078 | { 108, 40, 10 }, | ||
1079 | { 96, 44, 11 }, | ||
1080 | { 72, 50, 13 }, | ||
1081 | { 48, 57, 14 }, | ||
1082 | { 36, 67, 16 }, | ||
1083 | { 24, 80, 20 }, | ||
1084 | { 22, 100, 25 }, | ||
1085 | { 18, 133, 34 }, | ||
1086 | { 12, 160, 40 }, | ||
1087 | { 10, 200, 50 }, | ||
1088 | { 6, 240, 58 }, | ||
1089 | { 4, 267, 66 }, | ||
1090 | { 2, 400, 100 }, | ||
1091 | { 0, 500, 130 } | ||
1092 | }; | ||
1093 | const struct ath5k_rate_table *rt = | ||
1094 | ath5k_hw_get_rate_table(sc->ah, mode); | ||
1095 | unsigned int i, j; | ||
1096 | |||
1097 | BUG_ON(rt == NULL); | ||
1098 | |||
1099 | memset(sc->hwmap, 0, sizeof(sc->hwmap)); | ||
1100 | for (i = 0; i < 32; i++) { | ||
1101 | u8 ix = rt->rate_code_to_index[i]; | ||
1102 | if (ix == 0xff) { | ||
1103 | sc->hwmap[i].ledon = msecs_to_jiffies(500); | ||
1104 | sc->hwmap[i].ledoff = msecs_to_jiffies(130); | ||
1105 | continue; | ||
1106 | } | ||
1107 | sc->hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD; | ||
1108 | if (SHPREAMBLE_FLAG(ix) || rt->rates[ix].modulation == | ||
1109 | IEEE80211_RATE_OFDM) | ||
1110 | sc->hwmap[i].txflags |= | ||
1111 | IEEE80211_RADIOTAP_F_SHORTPRE; | ||
1112 | /* receive frames include FCS */ | ||
1113 | sc->hwmap[i].rxflags = sc->hwmap[i].txflags | | ||
1114 | IEEE80211_RADIOTAP_F_FCS; | ||
1115 | /* setup blink rate table to avoid per-packet lookup */ | ||
1116 | for (j = 0; j < ARRAY_SIZE(blinkrates) - 1; j++) | ||
1117 | if (blinkrates[j].rate == /* XXX why 7f? */ | ||
1118 | (rt->rates[ix].dot11_rate&0x7f)) | ||
1119 | break; | ||
1120 | |||
1121 | sc->hwmap[i].ledon = msecs_to_jiffies(blinkrates[j]. | ||
1122 | timeOn); | ||
1123 | sc->hwmap[i].ledoff = msecs_to_jiffies(blinkrates[j]. | ||
1124 | timeOff); | ||
1125 | } | ||
1126 | } | ||
1127 | |||
1128 | sc->curmode = mode; | ||
1129 | } | ||
1130 | |||
1131 | static void | ||
1132 | ath5k_mode_setup(struct ath5k_softc *sc) | ||
1133 | { | ||
1134 | struct ath5k_hw *ah = sc->ah; | ||
1135 | u32 rfilt; | ||
1136 | |||
1137 | /* configure rx filter */ | ||
1138 | rfilt = sc->filter_flags; | ||
1139 | ath5k_hw_set_rx_filter(ah, rfilt); | ||
1140 | |||
1141 | if (ath5k_hw_hasbssidmask(ah)) | ||
1142 | ath5k_hw_set_bssid_mask(ah, sc->bssidmask); | ||
1143 | |||
1144 | /* configure operational mode */ | ||
1145 | ath5k_hw_set_opmode(ah); | ||
1146 | |||
1147 | ath5k_hw_set_mcast_filter(ah, 0, 0); | ||
1148 | ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt); | ||
1149 | } | ||
1150 | |||
1151 | |||
1152 | |||
1153 | |||
1154 | /***************\ | ||
1155 | * Buffers setup * | ||
1156 | \***************/ | ||
1157 | |||
1158 | static int | ||
1159 | ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) | ||
1160 | { | ||
1161 | struct ath5k_hw *ah = sc->ah; | ||
1162 | struct sk_buff *skb = bf->skb; | ||
1163 | struct ath5k_desc *ds; | ||
1164 | |||
1165 | if (likely(skb == NULL)) { | ||
1166 | unsigned int off; | ||
1167 | |||
1168 | /* | ||
1169 | * Allocate buffer with headroom_needed space for the | ||
1170 | * fake physical layer header at the start. | ||
1171 | */ | ||
1172 | skb = dev_alloc_skb(sc->rxbufsize + sc->cachelsz - 1); | ||
1173 | if (unlikely(skb == NULL)) { | ||
1174 | ATH5K_ERR(sc, "can't alloc skbuff of size %u\n", | ||
1175 | sc->rxbufsize + sc->cachelsz - 1); | ||
1176 | return -ENOMEM; | ||
1177 | } | ||
1178 | /* | ||
1179 | * Cache-line-align. This is important (for the | ||
1180 | * 5210 at least) as not doing so causes bogus data | ||
1181 | * in rx'd frames. | ||
1182 | */ | ||
1183 | off = ((unsigned long)skb->data) % sc->cachelsz; | ||
1184 | if (off != 0) | ||
1185 | skb_reserve(skb, sc->cachelsz - off); | ||
1186 | |||
1187 | bf->skb = skb; | ||
1188 | bf->skbaddr = pci_map_single(sc->pdev, | ||
1189 | skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE); | ||
1190 | if (unlikely(pci_dma_mapping_error(bf->skbaddr))) { | ||
1191 | ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__); | ||
1192 | dev_kfree_skb(skb); | ||
1193 | bf->skb = NULL; | ||
1194 | return -ENOMEM; | ||
1195 | } | ||
1196 | } | ||
1197 | |||
1198 | /* | ||
1199 | * Setup descriptors. For receive we always terminate | ||
1200 | * the descriptor list with a self-linked entry so we'll | ||
1201 | * not get overrun under high load (as can happen with a | ||
1202 | * 5212 when ANI processing enables PHY error frames). | ||
1203 | * | ||
1204 | * To insure the last descriptor is self-linked we create | ||
1205 | * each descriptor as self-linked and add it to the end. As | ||
1206 | * each additional descriptor is added the previous self-linked | ||
1207 | * entry is ``fixed'' naturally. This should be safe even | ||
1208 | * if DMA is happening. When processing RX interrupts we | ||
1209 | * never remove/process the last, self-linked, entry on the | ||
1210 | * descriptor list. This insures the hardware always has | ||
1211 | * someplace to write a new frame. | ||
1212 | */ | ||
1213 | ds = bf->desc; | ||
1214 | ds->ds_link = bf->daddr; /* link to self */ | ||
1215 | ds->ds_data = bf->skbaddr; | ||
1216 | ath5k_hw_setup_rx_desc(ah, ds, | ||
1217 | skb_tailroom(skb), /* buffer size */ | ||
1218 | 0); | ||
1219 | |||
1220 | if (sc->rxlink != NULL) | ||
1221 | *sc->rxlink = bf->daddr; | ||
1222 | sc->rxlink = &ds->ds_link; | ||
1223 | return 0; | ||
1224 | } | ||
1225 | |||
1226 | static int | ||
1227 | ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf, | ||
1228 | struct ieee80211_tx_control *ctl) | ||
1229 | { | ||
1230 | struct ath5k_hw *ah = sc->ah; | ||
1231 | struct ath5k_txq *txq = sc->txq; | ||
1232 | struct ath5k_desc *ds = bf->desc; | ||
1233 | struct sk_buff *skb = bf->skb; | ||
1234 | unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID; | ||
1235 | int ret; | ||
1236 | |||
1237 | flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK; | ||
1238 | bf->ctl = *ctl; | ||
1239 | /* XXX endianness */ | ||
1240 | bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len, | ||
1241 | PCI_DMA_TODEVICE); | ||
1242 | |||
1243 | if (ctl->flags & IEEE80211_TXCTL_NO_ACK) | ||
1244 | flags |= AR5K_TXDESC_NOACK; | ||
1245 | |||
1246 | pktlen = skb->len + FCS_LEN; | ||
1247 | |||
1248 | if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) { | ||
1249 | keyidx = ctl->key_idx; | ||
1250 | pktlen += ctl->icv_len; | ||
1251 | } | ||
1252 | |||
1253 | ret = ah->ah_setup_tx_desc(ah, ds, pktlen, | ||
1254 | ieee80211_get_hdrlen_from_skb(skb), AR5K_PKT_TYPE_NORMAL, | ||
1255 | (ctl->power_level * 2), ctl->tx_rate, ctl->retry_limit, keyidx, 0, flags, 0, 0); | ||
1256 | if (ret) | ||
1257 | goto err_unmap; | ||
1258 | |||
1259 | ds->ds_link = 0; | ||
1260 | ds->ds_data = bf->skbaddr; | ||
1261 | |||
1262 | spin_lock_bh(&txq->lock); | ||
1263 | list_add_tail(&bf->list, &txq->q); | ||
1264 | sc->tx_stats.data[txq->qnum].len++; | ||
1265 | if (txq->link == NULL) /* is this first packet? */ | ||
1266 | ath5k_hw_put_tx_buf(ah, txq->qnum, bf->daddr); | ||
1267 | else /* no, so only link it */ | ||
1268 | *txq->link = bf->daddr; | ||
1269 | |||
1270 | txq->link = &ds->ds_link; | ||
1271 | ath5k_hw_tx_start(ah, txq->qnum); | ||
1272 | spin_unlock_bh(&txq->lock); | ||
1273 | |||
1274 | return 0; | ||
1275 | err_unmap: | ||
1276 | pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE); | ||
1277 | return ret; | ||
1278 | } | ||
1279 | |||
1280 | /*******************\ | ||
1281 | * Descriptors setup * | ||
1282 | \*******************/ | ||
1283 | |||
1284 | static int | ||
1285 | ath5k_desc_alloc(struct ath5k_softc *sc, struct pci_dev *pdev) | ||
1286 | { | ||
1287 | struct ath5k_desc *ds; | ||
1288 | struct ath5k_buf *bf; | ||
1289 | dma_addr_t da; | ||
1290 | unsigned int i; | ||
1291 | int ret; | ||
1292 | |||
1293 | /* allocate descriptors */ | ||
1294 | sc->desc_len = sizeof(struct ath5k_desc) * | ||
1295 | (ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1); | ||
1296 | sc->desc = pci_alloc_consistent(pdev, sc->desc_len, &sc->desc_daddr); | ||
1297 | if (sc->desc == NULL) { | ||
1298 | ATH5K_ERR(sc, "can't allocate descriptors\n"); | ||
1299 | ret = -ENOMEM; | ||
1300 | goto err; | ||
1301 | } | ||
1302 | ds = sc->desc; | ||
1303 | da = sc->desc_daddr; | ||
1304 | ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n", | ||
1305 | ds, sc->desc_len, (unsigned long long)sc->desc_daddr); | ||
1306 | |||
1307 | bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF, | ||
1308 | sizeof(struct ath5k_buf), GFP_KERNEL); | ||
1309 | if (bf == NULL) { | ||
1310 | ATH5K_ERR(sc, "can't allocate bufptr\n"); | ||
1311 | ret = -ENOMEM; | ||
1312 | goto err_free; | ||
1313 | } | ||
1314 | sc->bufptr = bf; | ||
1315 | |||
1316 | INIT_LIST_HEAD(&sc->rxbuf); | ||
1317 | for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) { | ||
1318 | bf->desc = ds; | ||
1319 | bf->daddr = da; | ||
1320 | list_add_tail(&bf->list, &sc->rxbuf); | ||
1321 | } | ||
1322 | |||
1323 | INIT_LIST_HEAD(&sc->txbuf); | ||
1324 | sc->txbuf_len = ATH_TXBUF; | ||
1325 | for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, | ||
1326 | da += sizeof(*ds)) { | ||
1327 | bf->desc = ds; | ||
1328 | bf->daddr = da; | ||
1329 | list_add_tail(&bf->list, &sc->txbuf); | ||
1330 | } | ||
1331 | |||
1332 | /* beacon buffer */ | ||
1333 | bf->desc = ds; | ||
1334 | bf->daddr = da; | ||
1335 | sc->bbuf = bf; | ||
1336 | |||
1337 | return 0; | ||
1338 | err_free: | ||
1339 | pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr); | ||
1340 | err: | ||
1341 | sc->desc = NULL; | ||
1342 | return ret; | ||
1343 | } | ||
1344 | |||
1345 | static void | ||
1346 | ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev) | ||
1347 | { | ||
1348 | struct ath5k_buf *bf; | ||
1349 | |||
1350 | ath5k_txbuf_free(sc, sc->bbuf); | ||
1351 | list_for_each_entry(bf, &sc->txbuf, list) | ||
1352 | ath5k_txbuf_free(sc, bf); | ||
1353 | list_for_each_entry(bf, &sc->rxbuf, list) | ||
1354 | ath5k_txbuf_free(sc, bf); | ||
1355 | |||
1356 | /* Free memory associated with all descriptors */ | ||
1357 | pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr); | ||
1358 | |||
1359 | kfree(sc->bufptr); | ||
1360 | sc->bufptr = NULL; | ||
1361 | } | ||
1362 | |||
1363 | |||
1364 | |||
1365 | |||
1366 | |||
1367 | /**************\ | ||
1368 | * Queues setup * | ||
1369 | \**************/ | ||
1370 | |||
1371 | static struct ath5k_txq * | ||
1372 | ath5k_txq_setup(struct ath5k_softc *sc, | ||
1373 | int qtype, int subtype) | ||
1374 | { | ||
1375 | struct ath5k_hw *ah = sc->ah; | ||
1376 | struct ath5k_txq *txq; | ||
1377 | struct ath5k_txq_info qi = { | ||
1378 | .tqi_subtype = subtype, | ||
1379 | .tqi_aifs = AR5K_TXQ_USEDEFAULT, | ||
1380 | .tqi_cw_min = AR5K_TXQ_USEDEFAULT, | ||
1381 | .tqi_cw_max = AR5K_TXQ_USEDEFAULT | ||
1382 | }; | ||
1383 | int qnum; | ||
1384 | |||
1385 | /* | ||
1386 | * Enable interrupts only for EOL and DESC conditions. | ||
1387 | * We mark tx descriptors to receive a DESC interrupt | ||
1388 | * when a tx queue gets deep; otherwise waiting for the | ||
1389 | * EOL to reap descriptors. Note that this is done to | ||
1390 | * reduce interrupt load and this only defers reaping | ||
1391 | * descriptors, never transmitting frames. Aside from | ||
1392 | * reducing interrupts this also permits more concurrency. | ||
1393 | * The only potential downside is if the tx queue backs | ||
1394 | * up in which case the top half of the kernel may back up | ||
1395 | * due to a lack of tx descriptors. | ||
1396 | */ | ||
1397 | qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE | | ||
1398 | AR5K_TXQ_FLAG_TXDESCINT_ENABLE; | ||
1399 | qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi); | ||
1400 | if (qnum < 0) { | ||
1401 | /* | ||
1402 | * NB: don't print a message, this happens | ||
1403 | * normally on parts with too few tx queues | ||
1404 | */ | ||
1405 | return ERR_PTR(qnum); | ||
1406 | } | ||
1407 | if (qnum >= ARRAY_SIZE(sc->txqs)) { | ||
1408 | ATH5K_ERR(sc, "hw qnum %u out of range, max %tu!\n", | ||
1409 | qnum, ARRAY_SIZE(sc->txqs)); | ||
1410 | ath5k_hw_release_tx_queue(ah, qnum); | ||
1411 | return ERR_PTR(-EINVAL); | ||
1412 | } | ||
1413 | txq = &sc->txqs[qnum]; | ||
1414 | if (!txq->setup) { | ||
1415 | txq->qnum = qnum; | ||
1416 | txq->link = NULL; | ||
1417 | INIT_LIST_HEAD(&txq->q); | ||
1418 | spin_lock_init(&txq->lock); | ||
1419 | txq->setup = true; | ||
1420 | } | ||
1421 | return &sc->txqs[qnum]; | ||
1422 | } | ||
1423 | |||
1424 | static int | ||
1425 | ath5k_beaconq_setup(struct ath5k_hw *ah) | ||
1426 | { | ||
1427 | struct ath5k_txq_info qi = { | ||
1428 | .tqi_aifs = AR5K_TXQ_USEDEFAULT, | ||
1429 | .tqi_cw_min = AR5K_TXQ_USEDEFAULT, | ||
1430 | .tqi_cw_max = AR5K_TXQ_USEDEFAULT, | ||
1431 | /* NB: for dynamic turbo, don't enable any other interrupts */ | ||
1432 | .tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE | ||
1433 | }; | ||
1434 | |||
1435 | return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi); | ||
1436 | } | ||
1437 | |||
1438 | static int | ||
1439 | ath5k_beaconq_config(struct ath5k_softc *sc) | ||
1440 | { | ||
1441 | struct ath5k_hw *ah = sc->ah; | ||
1442 | struct ath5k_txq_info qi; | ||
1443 | int ret; | ||
1444 | |||
1445 | ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi); | ||
1446 | if (ret) | ||
1447 | return ret; | ||
1448 | if (sc->opmode == IEEE80211_IF_TYPE_AP || | ||
1449 | sc->opmode == IEEE80211_IF_TYPE_IBSS) { | ||
1450 | /* | ||
1451 | * Always burst out beacon and CAB traffic | ||
1452 | * (aifs = cwmin = cwmax = 0) | ||
1453 | */ | ||
1454 | qi.tqi_aifs = 0; | ||
1455 | qi.tqi_cw_min = 0; | ||
1456 | qi.tqi_cw_max = 0; | ||
1457 | } | ||
1458 | |||
1459 | ret = ath5k_hw_setup_tx_queueprops(ah, sc->bhalq, &qi); | ||
1460 | if (ret) { | ||
1461 | ATH5K_ERR(sc, "%s: unable to update parameters for beacon " | ||
1462 | "hardware queue!\n", __func__); | ||
1463 | return ret; | ||
1464 | } | ||
1465 | |||
1466 | return ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */ | ||
1467 | } | ||
1468 | |||
1469 | static void | ||
1470 | ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq) | ||
1471 | { | ||
1472 | struct ath5k_buf *bf, *bf0; | ||
1473 | |||
1474 | /* | ||
1475 | * NB: this assumes output has been stopped and | ||
1476 | * we do not need to block ath5k_tx_tasklet | ||
1477 | */ | ||
1478 | spin_lock_bh(&txq->lock); | ||
1479 | list_for_each_entry_safe(bf, bf0, &txq->q, list) { | ||
1480 | ath5k_debug_printtxbuf(sc, bf, !sc->ah->ah_proc_tx_desc(sc->ah, | ||
1481 | bf->desc)); | ||
1482 | |||
1483 | ath5k_txbuf_free(sc, bf); | ||
1484 | |||
1485 | spin_lock_bh(&sc->txbuflock); | ||
1486 | sc->tx_stats.data[txq->qnum].len--; | ||
1487 | list_move_tail(&bf->list, &sc->txbuf); | ||
1488 | sc->txbuf_len++; | ||
1489 | spin_unlock_bh(&sc->txbuflock); | ||
1490 | } | ||
1491 | txq->link = NULL; | ||
1492 | spin_unlock_bh(&txq->lock); | ||
1493 | } | ||
1494 | |||
1495 | /* | ||
1496 | * Drain the transmit queues and reclaim resources. | ||
1497 | */ | ||
1498 | static void | ||
1499 | ath5k_txq_cleanup(struct ath5k_softc *sc) | ||
1500 | { | ||
1501 | struct ath5k_hw *ah = sc->ah; | ||
1502 | unsigned int i; | ||
1503 | |||
1504 | /* XXX return value */ | ||
1505 | if (likely(!test_bit(ATH_STAT_INVALID, sc->status))) { | ||
1506 | /* don't touch the hardware if marked invalid */ | ||
1507 | ath5k_hw_stop_tx_dma(ah, sc->bhalq); | ||
1508 | ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "beacon queue %x\n", | ||
1509 | ath5k_hw_get_tx_buf(ah, sc->bhalq)); | ||
1510 | for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) | ||
1511 | if (sc->txqs[i].setup) { | ||
1512 | ath5k_hw_stop_tx_dma(ah, sc->txqs[i].qnum); | ||
1513 | ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "txq [%u] %x, " | ||
1514 | "link %p\n", | ||
1515 | sc->txqs[i].qnum, | ||
1516 | ath5k_hw_get_tx_buf(ah, | ||
1517 | sc->txqs[i].qnum), | ||
1518 | sc->txqs[i].link); | ||
1519 | } | ||
1520 | } | ||
1521 | ieee80211_start_queues(sc->hw); /* XXX move to callers */ | ||
1522 | |||
1523 | for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) | ||
1524 | if (sc->txqs[i].setup) | ||
1525 | ath5k_txq_drainq(sc, &sc->txqs[i]); | ||
1526 | } | ||
1527 | |||
1528 | static void | ||
1529 | ath5k_txq_release(struct ath5k_softc *sc) | ||
1530 | { | ||
1531 | struct ath5k_txq *txq = sc->txqs; | ||
1532 | unsigned int i; | ||
1533 | |||
1534 | for (i = 0; i < ARRAY_SIZE(sc->txqs); i++, txq++) | ||
1535 | if (txq->setup) { | ||
1536 | ath5k_hw_release_tx_queue(sc->ah, txq->qnum); | ||
1537 | txq->setup = false; | ||
1538 | } | ||
1539 | } | ||
1540 | |||
1541 | |||
1542 | |||
1543 | |||
1544 | /*************\ | ||
1545 | * RX Handling * | ||
1546 | \*************/ | ||
1547 | |||
1548 | /* | ||
1549 | * Enable the receive h/w following a reset. | ||
1550 | */ | ||
1551 | static int | ||
1552 | ath5k_rx_start(struct ath5k_softc *sc) | ||
1553 | { | ||
1554 | struct ath5k_hw *ah = sc->ah; | ||
1555 | struct ath5k_buf *bf; | ||
1556 | int ret; | ||
1557 | |||
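| /* NB: the rx buffer is sized to a multiple of the cache line | ||
|  * size, presumably so that rx DMA does not end up sharing a | ||
|  * cache line with unrelated data */ | ||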
1558 | sc->rxbufsize = roundup(IEEE80211_MAX_LEN, sc->cachelsz); | ||
1559 | |||
1560 | ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rxbufsize %u\n", | ||
1561 | sc->cachelsz, sc->rxbufsize); | ||
1562 | |||
1563 | sc->rxlink = NULL; | ||
1564 | |||
1565 | spin_lock_bh(&sc->rxbuflock); | ||
1566 | list_for_each_entry(bf, &sc->rxbuf, list) { | ||
1567 | ret = ath5k_rxbuf_setup(sc, bf); | ||
1568 | if (ret != 0) { | ||
1569 | spin_unlock_bh(&sc->rxbuflock); | ||
1570 | goto err; | ||
1571 | } | ||
1572 | } | ||
1573 | bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list); | ||
1574 | spin_unlock_bh(&sc->rxbuflock); | ||
1575 | |||
1576 | ath5k_hw_put_rx_buf(ah, bf->daddr); | ||
1577 | ath5k_hw_start_rx(ah); /* enable recv descriptors */ | ||
1578 | ath5k_mode_setup(sc); /* set filters, etc. */ | ||
1579 | ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */ | ||
1580 | |||
1581 | return 0; | ||
1582 | err: | ||
1583 | return ret; | ||
1584 | } | ||
1585 | |||
1586 | /* | ||
1587 | * Disable the receive h/w in preparation for a reset. | ||
1588 | */ | ||
1589 | static void | ||
1590 | ath5k_rx_stop(struct ath5k_softc *sc) | ||
1591 | { | ||
1592 | struct ath5k_hw *ah = sc->ah; | ||
1593 | |||
1594 | ath5k_hw_stop_pcu_recv(ah); /* disable PCU */ | ||
1595 | ath5k_hw_set_rx_filter(ah, 0); /* clear recv filter */ | ||
1596 | ath5k_hw_stop_rx_dma(ah); /* disable DMA engine */ | ||
1597 | mdelay(3); /* 3ms is long enough for 1 frame */ | ||
1598 | |||
1599 | ath5k_debug_printrxbuffs(sc, ah); | ||
1600 | |||
1601 | sc->rxlink = NULL; /* just in case */ | ||
1602 | } | ||
1603 | |||
1604 | static unsigned int | ||
1605 | ath5k_rx_decrypted(struct ath5k_softc *sc, struct ath5k_desc *ds, | ||
1606 | struct sk_buff *skb) | ||
1607 | { | ||
1608 | struct ieee80211_hdr *hdr = (void *)skb->data; | ||
1609 | unsigned int keyix, hlen = ieee80211_get_hdrlen_from_skb(skb); | ||
1610 | |||
1611 | if (!(ds->ds_rxstat.rs_status & AR5K_RXERR_DECRYPT) && | ||
1612 | ds->ds_rxstat.rs_keyix != AR5K_RXKEYIX_INVALID) | ||
1613 | return RX_FLAG_DECRYPTED; | ||
1614 | |||
1615 | /* Apparently when a default key is used to decrypt the packet | ||
1616 | the hw does not set the index used to decrypt. In such cases | ||
1617 | get the index from the packet. */ | ||
1618 | if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_PROTECTED) && | ||
1619 | !(ds->ds_rxstat.rs_status & AR5K_RXERR_DECRYPT) && | ||
1620 | skb->len >= hlen + 4) { | ||
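| /* the IV follows the 802.11 header; the key index is in | ||
|  * the upper two bits of its fourth byte (the KeyID octet) */ | ||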
1621 | keyix = skb->data[hlen + 3] >> 6; | ||
1622 | |||
1623 | if (test_bit(keyix, sc->keymap)) | ||
1624 | return RX_FLAG_DECRYPTED; | ||
1625 | } | ||
1626 | |||
1627 | return 0; | ||
1628 | } | ||
1629 | |||
1630 | static void | ||
1631 | ath5k_tasklet_rx(unsigned long data) | ||
1632 | { | ||
1633 | struct ieee80211_rx_status rxs = {}; | ||
1634 | struct sk_buff *skb; | ||
1635 | struct ath5k_softc *sc = (void *)data; | ||
1636 | struct ath5k_buf *bf; | ||
1637 | struct ath5k_desc *ds; | ||
1638 | u16 len; | ||
1639 | u8 stat; | ||
1640 | int ret; | ||
1641 | int hdrlen; | ||
1642 | int pad; | ||
1643 | |||
1644 | spin_lock(&sc->rxbuflock); | ||
1645 | do { | ||
1646 | if (unlikely(list_empty(&sc->rxbuf))) { | ||
1647 | ATH5K_WARN(sc, "empty rx buf pool\n"); | ||
1648 | break; | ||
1649 | } | ||
1650 | bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list); | ||
1651 | BUG_ON(bf->skb == NULL); | ||
1652 | skb = bf->skb; | ||
1653 | ds = bf->desc; | ||
1654 | |||
1655 | /* TODO only one segment */ | ||
1656 | pci_dma_sync_single_for_cpu(sc->pdev, sc->desc_daddr, | ||
1657 | sc->desc_len, PCI_DMA_FROMDEVICE); | ||
1658 | |||
1659 | if (unlikely(ds->ds_link == bf->daddr)) /* this is the end */ | ||
1660 | break; | ||
1661 | |||
1662 | ret = sc->ah->ah_proc_rx_desc(sc->ah, ds); | ||
1663 | if (unlikely(ret == -EINPROGRESS)) | ||
1664 | break; | ||
1665 | else if (unlikely(ret)) { | ||
1666 | ATH5K_ERR(sc, "error in processing rx descriptor\n"); | ||
1667 | break; /* don't return with rxbuflock held */ | ||
1668 | } | ||
1669 | |||
1670 | if (unlikely(ds->ds_rxstat.rs_more)) { | ||
1671 | ATH5K_WARN(sc, "unsupported jumbo\n"); | ||
1672 | goto next; | ||
1673 | } | ||
1674 | |||
1675 | stat = ds->ds_rxstat.rs_status; | ||
1676 | if (unlikely(stat)) { | ||
1677 | if (stat & AR5K_RXERR_PHY) | ||
1678 | goto next; | ||
1679 | if (stat & AR5K_RXERR_DECRYPT) { | ||
1680 | /* | ||
1681 | * Decrypt error. If the error occurred | ||
1682 | * because there was no hardware key, then | ||
1683 | * let the frame through so the upper layers | ||
1684 | * can process it. This is necessary for 5210 | ||
1685 | * parts which have no way to setup a ``clear'' | ||
1686 | * key cache entry. | ||
1687 | * | ||
1688 | * XXX do key cache faulting | ||
1689 | */ | ||
1690 | if (ds->ds_rxstat.rs_keyix == | ||
1691 | AR5K_RXKEYIX_INVALID && | ||
1692 | !(stat & AR5K_RXERR_CRC)) | ||
1693 | goto accept; | ||
1694 | } | ||
1695 | if (stat & AR5K_RXERR_MIC) { | ||
1696 | rxs.flag |= RX_FLAG_MMIC_ERROR; | ||
1697 | goto accept; | ||
1698 | } | ||
1699 | |||
1700 | /* let crypto-error packets fall through in MNTR */ | ||
1701 | if ((stat & ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) || | ||
1702 | sc->opmode != IEEE80211_IF_TYPE_MNTR) | ||
1703 | goto next; | ||
1704 | } | ||
1705 | accept: | ||
1706 | len = ds->ds_rxstat.rs_datalen; | ||
1707 | pci_dma_sync_single_for_cpu(sc->pdev, bf->skbaddr, len, | ||
1708 | PCI_DMA_FROMDEVICE); | ||
1709 | pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize, | ||
1710 | PCI_DMA_FROMDEVICE); | ||
1711 | bf->skb = NULL; | ||
1712 | |||
1713 | skb_put(skb, len); | ||
1714 | |||
1715 | /* | ||
1716 | * the hardware pads the frame so that the payload starts on | ||
1717 | * a 4 byte boundary when the header length is not a multiple | ||
1718 | * of 4 - remove that padding here | ||
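| * (e.g. a 26 byte QoS data header is followed by 2 pad | ||
| * bytes; moving the header up by those 2 bytes rejoins | ||
| * header and payload) | ||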
1719 | */ | ||
1720 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); | ||
1721 | if (hdrlen & 3) { | ||
1722 | pad = hdrlen % 4; | ||
1723 | memmove(skb->data + pad, skb->data, hdrlen); | ||
1724 | skb_pull(skb, pad); | ||
1725 | } | ||
1726 | |||
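| /* the rx status only carries the low-order bits of the TSF; | ||
|  * for monitor mode (radiotap) extend it to the full 64 bits */ | ||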
1727 | if (sc->opmode == IEEE80211_IF_TYPE_MNTR) | ||
1728 | rxs.mactime = ath5k_extend_tsf(sc->ah, | ||
1729 | ds->ds_rxstat.rs_tstamp); | ||
1730 | else | ||
1731 | rxs.mactime = ds->ds_rxstat.rs_tstamp; | ||
1732 | rxs.freq = sc->curchan->freq; | ||
1733 | rxs.channel = sc->curchan->chan; | ||
1734 | rxs.phymode = sc->curmode; | ||
1735 | |||
1736 | /* | ||
1737 | * signal quality: | ||
1738 | * the names here are misleading and the usage of these | ||
1739 | * values by iwconfig makes it even worse | ||
1740 | */ | ||
1741 | /* noise floor in dBm, from the last noise calibration */ | ||
1742 | rxs.noise = sc->ah->ah_noise_floor; | ||
1743 | /* signal level in dBm */ | ||
1744 | rxs.ssi = rxs.noise + ds->ds_rxstat.rs_rssi; | ||
1745 | /* | ||
1746 | * "signal" is actually displayed as Link Quality by iwconfig | ||
1747 | * we provide a percentage based on rssi (assuming max rssi 64) | ||
1748 | */ | ||
1749 | rxs.signal = ds->ds_rxstat.rs_rssi * 100 / 64; | ||
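| /* e.g. an rssi of 32 is reported as 50% */ | ||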
1750 | |||
1751 | rxs.antenna = ds->ds_rxstat.rs_antenna; | ||
1752 | rxs.rate = ds->ds_rxstat.rs_rate; | ||
1753 | rxs.flag |= ath5k_rx_decrypted(sc, ds, skb); | ||
1754 | |||
1755 | ath5k_debug_dump_skb(sc, skb, "RX ", 0); | ||
1756 | |||
1757 | __ieee80211_rx(sc->hw, skb, &rxs); | ||
1758 | sc->led_rxrate = ds->ds_rxstat.rs_rate; | ||
1759 | ath5k_led_event(sc, ATH_LED_RX); | ||
1760 | next: | ||
1761 | list_move_tail(&bf->list, &sc->rxbuf); | ||
1762 | } while (ath5k_rxbuf_setup(sc, bf) == 0); | ||
1763 | spin_unlock(&sc->rxbuflock); | ||
1764 | } | ||
1765 | |||
1766 | |||
1767 | |||
1768 | |||
1769 | /*************\ | ||
1770 | * TX Handling * | ||
1771 | \*************/ | ||
1772 | |||
1773 | static void | ||
1774 | ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq) | ||
1775 | { | ||
1776 | struct ieee80211_tx_status txs = {}; | ||
1777 | struct ath5k_buf *bf, *bf0; | ||
1778 | struct ath5k_desc *ds; | ||
1779 | struct sk_buff *skb; | ||
1780 | int ret; | ||
1781 | |||
1782 | spin_lock(&txq->lock); | ||
1783 | list_for_each_entry_safe(bf, bf0, &txq->q, list) { | ||
1784 | ds = bf->desc; | ||
1785 | |||
1786 | /* TODO only one segment */ | ||
1787 | pci_dma_sync_single_for_cpu(sc->pdev, sc->desc_daddr, | ||
1788 | sc->desc_len, PCI_DMA_FROMDEVICE); | ||
1789 | ret = sc->ah->ah_proc_tx_desc(sc->ah, ds); | ||
1790 | if (unlikely(ret == -EINPROGRESS)) | ||
1791 | break; | ||
1792 | else if (unlikely(ret)) { | ||
1793 | ATH5K_ERR(sc, "error %d while processing queue %u\n", | ||
1794 | ret, txq->qnum); | ||
1795 | break; | ||
1796 | } | ||
1797 | |||
1798 | skb = bf->skb; | ||
1799 | bf->skb = NULL; | ||
1800 | pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, | ||
1801 | PCI_DMA_TODEVICE); | ||
1802 | |||
1803 | txs.control = bf->ctl; | ||
1804 | txs.retry_count = ds->ds_txstat.ts_shortretry + | ||
1805 | ds->ds_txstat.ts_longretry / 6; | ||
1806 | if (unlikely(ds->ds_txstat.ts_status)) { | ||
1807 | sc->ll_stats.dot11ACKFailureCount++; | ||
1808 | if (ds->ds_txstat.ts_status & AR5K_TXERR_XRETRY) | ||
1809 | txs.excessive_retries = 1; | ||
1810 | else if (ds->ds_txstat.ts_status & AR5K_TXERR_FILT) | ||
1811 | txs.flags |= IEEE80211_TX_STATUS_TX_FILTERED; | ||
1812 | } else { | ||
1813 | txs.flags |= IEEE80211_TX_STATUS_ACK; | ||
1814 | txs.ack_signal = ds->ds_txstat.ts_rssi; | ||
1815 | } | ||
1816 | |||
1817 | ieee80211_tx_status(sc->hw, skb, &txs); | ||
1818 | sc->tx_stats.data[txq->qnum].count++; | ||
1819 | |||
1820 | spin_lock(&sc->txbuflock); | ||
1821 | sc->tx_stats.data[txq->qnum].len--; | ||
1822 | list_move_tail(&bf->list, &sc->txbuf); | ||
1823 | sc->txbuf_len++; | ||
1824 | spin_unlock(&sc->txbuflock); | ||
1825 | } | ||
1826 | if (likely(list_empty(&txq->q))) | ||
1827 | txq->link = NULL; | ||
1828 | spin_unlock(&txq->lock); | ||
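| /* wake the mac80211 queues once more than a fifth of the | ||
|  * tx buffers are available again */ | ||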
1829 | if (sc->txbuf_len > ATH_TXBUF / 5) | ||
1830 | ieee80211_wake_queues(sc->hw); | ||
1831 | } | ||
1832 | |||
1833 | static void | ||
1834 | ath5k_tasklet_tx(unsigned long data) | ||
1835 | { | ||
1836 | struct ath5k_softc *sc = (void *)data; | ||
1837 | |||
1838 | ath5k_tx_processq(sc, sc->txq); | ||
1839 | |||
1840 | ath5k_led_event(sc, ATH_LED_TX); | ||
1841 | } | ||
1842 | |||
1843 | |||
1844 | |||
1845 | |||
1846 | /*****************\ | ||
1847 | * Beacon handling * | ||
1848 | \*****************/ | ||
1849 | |||
1850 | /* | ||
1851 | * Setup the beacon frame for transmit. | ||
1852 | */ | ||
1853 | static int | ||
1854 | ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf, | ||
1855 | struct ieee80211_tx_control *ctl) | ||
1856 | { | ||
1857 | struct sk_buff *skb = bf->skb; | ||
1858 | struct ath5k_hw *ah = sc->ah; | ||
1859 | struct ath5k_desc *ds; | ||
1860 | int ret, antenna = 0; | ||
1861 | u32 flags; | ||
1862 | |||
1863 | bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len, | ||
1864 | PCI_DMA_TODEVICE); | ||
1865 | ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] " | ||
1866 | "skbaddr %llx\n", skb, skb->data, skb->len, | ||
1867 | (unsigned long long)bf->skbaddr); | ||
1868 | if (pci_dma_mapping_error(bf->skbaddr)) { | ||
1869 | ATH5K_ERR(sc, "beacon DMA mapping failed\n"); | ||
1870 | return -EIO; | ||
1871 | } | ||
1872 | |||
1873 | ds = bf->desc; | ||
1874 | |||
1875 | flags = AR5K_TXDESC_NOACK; | ||
1876 | if (sc->opmode == IEEE80211_IF_TYPE_IBSS && ath5k_hw_hasveol(ah)) { | ||
1877 | ds->ds_link = bf->daddr; /* self-linked */ | ||
1878 | flags |= AR5K_TXDESC_VEOL; | ||
1879 | /* | ||
1880 | * Let hardware handle antenna switching if txantenna is not set | ||
1881 | */ | ||
1882 | } else { | ||
1883 | ds->ds_link = 0; | ||
1884 | /* | ||
1885 | * Switch antenna every 4 beacons if txantenna is not set | ||
1886 | * XXX assumes two antennas | ||
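| * (bit 2 of the beacon counter flips every 4 beacons, so this | ||
| * alternates between antenna 1 and antenna 2 in groups of 4) | ||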
1887 | */ | ||
1888 | if (antenna == 0) | ||
1889 | antenna = sc->bsent & 4 ? 2 : 1; | ||
1890 | } | ||
1891 | |||
1892 | ds->ds_data = bf->skbaddr; | ||
1893 | ret = ah->ah_setup_tx_desc(ah, ds, skb->len + FCS_LEN, | ||
1894 | ieee80211_get_hdrlen_from_skb(skb), | ||
1895 | AR5K_PKT_TYPE_BEACON, (ctl->power_level * 2), ctl->tx_rate, 1, | ||
1896 | AR5K_TXKEYIX_INVALID, antenna, flags, 0, 0); | ||
1897 | if (ret) | ||
1898 | goto err_unmap; | ||
1899 | |||
1900 | return 0; | ||
1901 | err_unmap: | ||
1902 | pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE); | ||
1903 | return ret; | ||
1904 | } | ||
1905 | |||
1906 | /* | ||
1907 | * Transmit a beacon frame at SWBA. Dynamic updates to the | ||
1908 | * frame contents are done as needed and the slot time is | ||
1909 | * also adjusted based on current state. | ||
1910 | * | ||
1911 | * this is usually called from interrupt context (ath5k_intr()) | ||
1912 | * but also from ath5k_beacon_config() in IBSS mode which in turn | ||
1913 | * can be called from a tasklet and user context | ||
1914 | */ | ||
1915 | static void | ||
1916 | ath5k_beacon_send(struct ath5k_softc *sc) | ||
1917 | { | ||
1918 | struct ath5k_buf *bf = sc->bbuf; | ||
1919 | struct ath5k_hw *ah = sc->ah; | ||
1920 | |||
1921 | ATH5K_DBG(sc, ATH5K_DEBUG_BEACON_PROC, "in beacon_send\n"); | ||
1922 | |||
1923 | if (unlikely(bf->skb == NULL || sc->opmode == IEEE80211_IF_TYPE_STA || | ||
1924 | sc->opmode == IEEE80211_IF_TYPE_MNTR)) { | ||
1925 | ATH5K_WARN(sc, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL); | ||
1926 | return; | ||
1927 | } | ||
1928 | /* | ||
1929 | * Check if the previous beacon has gone out. If | ||
1930 | * not, don't try to post another; skip this | ||
1931 | * period and wait for the next. Missed beacons | ||
1932 | * indicate a problem and should not occur. If we | ||
1933 | * miss too many consecutive beacons reset the device. | ||
1934 | */ | ||
1935 | if (unlikely(ath5k_hw_num_tx_pending(ah, sc->bhalq) != 0)) { | ||
1936 | sc->bmisscount++; | ||
1937 | ATH5K_DBG(sc, ATH5K_DEBUG_BEACON_PROC, | ||
1938 | "missed %u consecutive beacons\n", sc->bmisscount); | ||
1939 | if (sc->bmisscount > 3) { /* NB: 3 is a guess */ | ||
1940 | ATH5K_DBG(sc, ATH5K_DEBUG_BEACON_PROC, | ||
1941 | "stuck beacon time (%u missed)\n", | ||
1942 | sc->bmisscount); | ||
1943 | tasklet_schedule(&sc->restq); | ||
1944 | } | ||
1945 | return; | ||
1946 | } | ||
1947 | if (unlikely(sc->bmisscount != 0)) { | ||
1948 | ATH5K_DBG(sc, ATH5K_DEBUG_BEACON_PROC, | ||
1949 | "resume beacon xmit after %u misses\n", | ||
1950 | sc->bmisscount); | ||
1951 | sc->bmisscount = 0; | ||
1952 | } | ||
1953 | |||
1954 | /* | ||
1955 | * Stop any current dma and put the new frame on the queue. | ||
1956 | * This should never fail since we check above that no frames | ||
1957 | * are still pending on the queue. | ||
1958 | */ | ||
1959 | if (unlikely(ath5k_hw_stop_tx_dma(ah, sc->bhalq))) { | ||
1960 | ATH5K_WARN(sc, "beacon queue %u didn't stop?\n", sc->bhalq); | ||
1961 | /* NB: hw still stops DMA, so proceed */ | ||
1962 | } | ||
1963 | pci_dma_sync_single_for_cpu(sc->pdev, bf->skbaddr, bf->skb->len, | ||
1964 | PCI_DMA_TODEVICE); | ||
1965 | |||
1966 | ath5k_hw_put_tx_buf(ah, sc->bhalq, bf->daddr); | ||
1967 | ath5k_hw_tx_start(ah, sc->bhalq); | ||
1968 | ATH5K_DBG(sc, ATH5K_DEBUG_BEACON_PROC, "TXDP[%u] = %llx (%p)\n", | ||
1969 | sc->bhalq, (unsigned long long)bf->daddr, bf->desc); | ||
1970 | |||
1971 | sc->bsent++; | ||
1972 | } | ||
1973 | |||
1974 | |||
1975 | static void | ||
1976 | ath5k_beacon_update_timers(struct ath5k_softc *sc) | ||
1977 | { | ||
1978 | struct ath5k_hw *ah = sc->ah; | ||
1979 | u32 uninitialized_var(nexttbtt), intval, tsftu; | ||
1980 | u64 tsf; | ||
1981 | |||
1982 | intval = sc->bintval & AR5K_BEACON_PERIOD; | ||
1983 | if (WARN_ON(!intval)) | ||
1984 | return; | ||
1985 | |||
1986 | /* current TSF converted to TU */ | ||
1987 | tsf = ath5k_hw_get_tsf64(ah); | ||
1988 | tsftu = TSF_TO_TU(tsf); | ||
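| /* (a TU is 1024us, so this is simply the TSF divided by 1024) */ | ||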
1989 | |||
1990 | /* | ||
1991 | * Pull nexttbtt forward to reflect the current | ||
1992 | * TSF and add one extra intval of slack; otherwise the | ||
1993 | * timespan can be too short for ibss merges. | ||
1994 | */ | ||
1995 | nexttbtt = tsftu + 2 * intval; | ||
1996 | |||
1997 | ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, | ||
1998 | "hw tsftu %u nexttbtt %u intval %u\n", tsftu, nexttbtt, intval); | ||
1999 | |||
2000 | intval |= AR5K_BEACON_ENA; | ||
2001 | |||
2002 | ath5k_hw_init_beacon(ah, nexttbtt, intval); | ||
2003 | } | ||
2004 | |||
2005 | |||
2006 | /* | ||
2007 | * Configure the beacon timers and interrupts based on the operating mode | ||
2008 | * | ||
2009 | * When operating in station mode we want to receive a BMISS interrupt when we | ||
2010 | * stop seeing beacons from the AP we've associated with so we can look for | ||
2011 | * another AP to associate with. | ||
2012 | * | ||
2013 | * In IBSS mode we need to configure the beacon timers and use a self-linked tx | ||
2014 | * descriptor if possible. If the hardware cannot deal with that we enable SWBA | ||
2015 | * interrupts to send the beacons from the interrupt handler. | ||
2016 | */ | ||
2017 | static void | ||
2018 | ath5k_beacon_config(struct ath5k_softc *sc) | ||
2019 | { | ||
2020 | struct ath5k_hw *ah = sc->ah; | ||
2021 | |||
2022 | ath5k_hw_set_intr(ah, 0); | ||
2023 | sc->bmisscount = 0; | ||
2024 | |||
2025 | if (sc->opmode == IEEE80211_IF_TYPE_STA) { | ||
2026 | sc->imask |= AR5K_INT_BMISS; | ||
2027 | } else if (sc->opmode == IEEE80211_IF_TYPE_IBSS) { | ||
2028 | /* | ||
2029 | * In IBSS mode enable the beacon timers but only enable SWBA | ||
2030 | * interrupts if we need to manually prepare beacon frames. | ||
2031 | * Otherwise we use a self-linked tx descriptor and let the | ||
2032 | * hardware deal with things. In that case we have to load it | ||
2033 | * only once here. | ||
2034 | */ | ||
2035 | ath5k_beaconq_config(sc); | ||
2036 | ath5k_beacon_update_timers(sc); | ||
2037 | |||
2038 | if (!ath5k_hw_hasveol(ah)) | ||
2039 | sc->imask |= AR5K_INT_SWBA; | ||
2040 | else | ||
2041 | ath5k_beacon_send(sc); | ||
2042 | } | ||
2043 | /* TODO else AP */ | ||
2044 | |||
2045 | ath5k_hw_set_intr(ah, sc->imask); | ||
2046 | } | ||
2047 | |||
2048 | |||
2049 | /********************\ | ||
2050 | * Interrupt handling * | ||
2051 | \********************/ | ||
2052 | |||
2053 | static int | ||
2054 | ath5k_init(struct ath5k_softc *sc) | ||
2055 | { | ||
2056 | int ret; | ||
2057 | |||
2058 | mutex_lock(&sc->lock); | ||
2059 | |||
2060 | ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode); | ||
2061 | |||
2062 | /* | ||
2063 | * Stop anything previously set up. This is safe | ||
2064 | * whether or not this is the first time through. | ||
2065 | */ | ||
2066 | ath5k_stop_locked(sc); | ||
2067 | |||
2068 | /* | ||
2069 | * The basic interface to setting the hardware in a good | ||
2070 | * state is ``reset''. On return the hardware is known to | ||
2071 | * be powered up and with interrupts disabled. This must | ||
2072 | * be followed by initialization of the appropriate bits | ||
2073 | * and then setup of the interrupt mask. | ||
2074 | */ | ||
2075 | sc->curchan = sc->hw->conf.chan; | ||
2076 | ret = ath5k_hw_reset(sc->ah, sc->opmode, sc->curchan, false); | ||
2077 | if (ret) { | ||
2078 | ATH5K_ERR(sc, "unable to reset hardware: %d\n", ret); | ||
2079 | goto done; | ||
2080 | } | ||
2081 | /* | ||
2082 | * This is needed only to setup initial state | ||
2083 | * but it's best done after a reset. | ||
2084 | */ | ||
2085 | ath5k_hw_set_txpower_limit(sc->ah, 0); | ||
2086 | |||
2087 | /* | ||
2088 | * Setup the hardware after reset: the key cache | ||
2089 | * is filled as needed and the receive engine is | ||
2090 | * set going. Frame transmit is handled entirely | ||
2091 | * in the frame output path; there's nothing to do | ||
2092 | * here except setup the interrupt mask. | ||
2093 | */ | ||
2094 | ret = ath5k_rx_start(sc); | ||
2095 | if (ret) | ||
2096 | goto done; | ||
2097 | |||
2098 | /* | ||
2099 | * Enable interrupts. | ||
2100 | */ | ||
2101 | sc->imask = AR5K_INT_RX | AR5K_INT_TX | AR5K_INT_RXEOL | | ||
2102 | AR5K_INT_RXORN | AR5K_INT_FATAL | AR5K_INT_GLOBAL; | ||
2103 | |||
2104 | ath5k_hw_set_intr(sc->ah, sc->imask); | ||
2105 | /* Set ack to be sent at low bit-rates */ | ||
2106 | ath5k_hw_set_ack_bitrate_high(sc->ah, false); | ||
2107 | |||
2108 | mod_timer(&sc->calib_tim, round_jiffies(jiffies + | ||
2109 | msecs_to_jiffies(ath5k_calinterval * 1000))); | ||
2110 | |||
2111 | ret = 0; | ||
2112 | done: | ||
2113 | mutex_unlock(&sc->lock); | ||
2114 | return ret; | ||
2115 | } | ||
2116 | |||
2117 | static int | ||
2118 | ath5k_stop_locked(struct ath5k_softc *sc) | ||
2119 | { | ||
2120 | struct ath5k_hw *ah = sc->ah; | ||
2121 | |||
2122 | ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "invalid %u\n", | ||
2123 | test_bit(ATH_STAT_INVALID, sc->status)); | ||
2124 | |||
2125 | /* | ||
2126 | * Shutdown the hardware and driver: | ||
2127 | * stop output from above | ||
2128 | * disable interrupts | ||
2129 | * turn off timers | ||
2130 | * turn off the radio | ||
2131 | * clear transmit machinery | ||
2132 | * clear receive machinery | ||
2133 | * drain and release tx queues | ||
2134 | * reclaim beacon resources | ||
2135 | * power down hardware | ||
2136 | * | ||
2137 | * Note that some of this work is not possible if the | ||
2138 | * hardware is gone (invalid). | ||
2139 | */ | ||
2140 | ieee80211_stop_queues(sc->hw); | ||
2141 | |||
2142 | if (!test_bit(ATH_STAT_INVALID, sc->status)) { | ||
2143 | if (test_bit(ATH_STAT_LEDSOFT, sc->status)) { | ||
2144 | del_timer_sync(&sc->led_tim); | ||
2145 | ath5k_hw_set_gpio(ah, sc->led_pin, !sc->led_on); | ||
2146 | __clear_bit(ATH_STAT_LEDBLINKING, sc->status); | ||
2147 | } | ||
2148 | ath5k_hw_set_intr(ah, 0); | ||
2149 | } | ||
2150 | ath5k_txq_cleanup(sc); | ||
2151 | if (!test_bit(ATH_STAT_INVALID, sc->status)) { | ||
2152 | ath5k_rx_stop(sc); | ||
2153 | ath5k_hw_phy_disable(ah); | ||
2154 | } else | ||
2155 | sc->rxlink = NULL; | ||
2156 | |||
2157 | return 0; | ||
2158 | } | ||
2159 | |||
2160 | /* | ||
2161 | * Stop the device, grabbing the top-level lock to protect | ||
2162 | * against concurrent entry through ath5k_init (which can happen | ||
2163 | * if another thread does a system call and the thread doing the | ||
2164 | * stop is preempted). | ||
2165 | */ | ||
2166 | static int | ||
2167 | ath5k_stop_hw(struct ath5k_softc *sc) | ||
2168 | { | ||
2169 | int ret; | ||
2170 | |||
2171 | mutex_lock(&sc->lock); | ||
2172 | ret = ath5k_stop_locked(sc); | ||
2173 | if (ret == 0 && !test_bit(ATH_STAT_INVALID, sc->status)) { | ||
2174 | /* | ||
2175 | * Set the chip in full sleep mode. Note that we are | ||
2176 | * careful to do this only when bringing the interface | ||
2177 | * completely to a stop. When the chip is in this state | ||
2178 | * it must be carefully woken up or references to | ||
2179 | * registers in the PCI clock domain may freeze the bus | ||
2180 | * (and system). This varies by chip and is mostly an | ||
2181 | * issue with newer parts that go to sleep more quickly. | ||
2182 | */ | ||
2183 | if (sc->ah->ah_mac_srev >= 0x78) { | ||
2184 | /* | ||
2185 | * XXX | ||
2186 | * don't put newer MAC revisions (>= 7.8) to sleep because | ||
2187 | * of the above mentioned problems | ||
2188 | */ | ||
2189 | ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mac version >= 7.8, " | ||
2190 | "not putting device to sleep\n"); | ||
2191 | } else { | ||
2192 | ATH5K_DBG(sc, ATH5K_DEBUG_RESET, | ||
2193 | "putting device to full sleep\n"); | ||
2194 | ath5k_hw_set_power(sc->ah, AR5K_PM_FULL_SLEEP, true, 0); | ||
2195 | } | ||
2196 | } | ||
2197 | ath5k_txbuf_free(sc, sc->bbuf); | ||
2198 | mutex_unlock(&sc->lock); | ||
2199 | |||
2200 | del_timer_sync(&sc->calib_tim); | ||
2201 | |||
2202 | return ret; | ||
2203 | } | ||
2204 | |||
2205 | static irqreturn_t | ||
2206 | ath5k_intr(int irq, void *dev_id) | ||
2207 | { | ||
2208 | struct ath5k_softc *sc = dev_id; | ||
2209 | struct ath5k_hw *ah = sc->ah; | ||
2210 | enum ath5k_int status; | ||
2211 | unsigned int counter = 1000; | ||
2212 | |||
2213 | if (unlikely(test_bit(ATH_STAT_INVALID, sc->status) || | ||
2214 | !ath5k_hw_is_intr_pending(ah))) | ||
2215 | return IRQ_NONE; | ||
2216 | |||
2217 | do { | ||
2218 | /* | ||
2219 | * Figure out the reason(s) for the interrupt. Note | ||
2220 | * that get_isr returns a pseudo-ISR that may include | ||
2221 | * bits we haven't explicitly enabled so we mask the | ||
2222 | * value to ensure we only process bits we requested. | ||
2223 | */ | ||
2224 | ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */ | ||
2225 | ATH5K_DBG(sc, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n", | ||
2226 | status, sc->imask); | ||
2227 | status &= sc->imask; /* discard unasked for bits */ | ||
2228 | if (unlikely(status & AR5K_INT_FATAL)) { | ||
2229 | /* | ||
2230 | * Fatal errors are unrecoverable. | ||
2231 | * Typically these are caused by DMA errors. | ||
2232 | */ | ||
2233 | tasklet_schedule(&sc->restq); | ||
2234 | } else if (unlikely(status & AR5K_INT_RXORN)) { | ||
2235 | tasklet_schedule(&sc->restq); | ||
2236 | } else { | ||
2237 | if (status & AR5K_INT_SWBA) { | ||
2238 | /* | ||
2239 | * Software beacon alert--time to send a beacon. | ||
2240 | * Handle beacon transmission directly; deferring | ||
2241 | * this is too slow to meet timing constraints | ||
2242 | * under load. | ||
2243 | */ | ||
2244 | ath5k_beacon_send(sc); | ||
2245 | } | ||
2246 | if (status & AR5K_INT_RXEOL) { | ||
2247 | /* | ||
2248 | * NB: the hardware should re-read the link when | ||
2249 | * RXE bit is written, but it doesn't work at | ||
2250 | * least on older hardware revs. | ||
2251 | */ | ||
2252 | sc->rxlink = NULL; | ||
2253 | } | ||
2254 | if (status & AR5K_INT_TXURN) { | ||
2255 | /* bump tx trigger level */ | ||
2256 | ath5k_hw_update_tx_triglevel(ah, true); | ||
2257 | } | ||
2258 | if (status & AR5K_INT_RX) | ||
2259 | tasklet_schedule(&sc->rxtq); | ||
2260 | if (status & AR5K_INT_TX) | ||
2261 | tasklet_schedule(&sc->txtq); | ||
2262 | if (status & AR5K_INT_BMISS) { | ||
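| /* TODO: handle beacon misses (see ath5k_beacon_config) */ | ||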
2263 | } | ||
2264 | if (status & AR5K_INT_MIB) { | ||
2265 | /* TODO */ | ||
2266 | } | ||
2267 | } | ||
2268 | } while (ath5k_hw_is_intr_pending(ah) && counter-- > 0); | ||
2269 | |||
2270 | if (unlikely(!counter)) | ||
2271 | ATH5K_WARN(sc, "too many interrupts, giving up for now\n"); | ||
2272 | |||
2273 | return IRQ_HANDLED; | ||
2274 | } | ||
2275 | |||
2276 | static void | ||
2277 | ath5k_tasklet_reset(unsigned long data) | ||
2278 | { | ||
2279 | struct ath5k_softc *sc = (void *)data; | ||
2280 | |||
2281 | ath5k_reset(sc->hw); | ||
2282 | } | ||
2283 | |||
2284 | /* | ||
2285 | * Periodically recalibrate the PHY to account | ||
2286 | * for temperature/environment changes. | ||
2287 | */ | ||
2288 | static void | ||
2289 | ath5k_calibrate(unsigned long data) | ||
2290 | { | ||
2291 | struct ath5k_softc *sc = (void *)data; | ||
2292 | struct ath5k_hw *ah = sc->ah; | ||
2293 | |||
2294 | ATH5K_DBG(sc, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n", | ||
2295 | sc->curchan->chan, sc->curchan->val); | ||
2296 | |||
2297 | if (ath5k_hw_get_rf_gain(ah) == AR5K_RFGAIN_NEED_CHANGE) { | ||
2298 | /* | ||
2299 | * Rfgain is out of bounds, reset the chip | ||
2300 | * to load new gain values. | ||
2301 | */ | ||
2302 | ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n"); | ||
2303 | ath5k_reset(sc->hw); | ||
2304 | } | ||
2305 | if (ath5k_hw_phy_calibrate(ah, sc->curchan)) | ||
2306 | ATH5K_ERR(sc, "calibration of channel %u failed\n", | ||
2307 | sc->curchan->chan); | ||
2308 | |||
2309 | mod_timer(&sc->calib_tim, round_jiffies(jiffies + | ||
2310 | msecs_to_jiffies(ath5k_calinterval * 1000))); | ||
2311 | } | ||
2312 | |||
2313 | |||
2314 | |||
2315 | /***************\ | ||
2316 | * LED functions * | ||
2317 | \***************/ | ||
2318 | |||
2319 | static void | ||
2320 | ath5k_led_off(unsigned long data) | ||
2321 | { | ||
2322 | struct ath5k_softc *sc = (void *)data; | ||
2323 | |||
2324 | if (test_bit(ATH_STAT_LEDENDBLINK, sc->status)) | ||
2325 | __clear_bit(ATH_STAT_LEDBLINKING, sc->status); | ||
2326 | else { | ||
2327 | __set_bit(ATH_STAT_LEDENDBLINK, sc->status); | ||
2328 | ath5k_hw_set_gpio(sc->ah, sc->led_pin, !sc->led_on); | ||
2329 | mod_timer(&sc->led_tim, jiffies + sc->led_off); | ||
2330 | } | ||
2331 | } | ||
2332 | |||
2333 | /* | ||
2334 | * Blink the LED according to the specified on/off times. | ||
2335 | */ | ||
2336 | static void | ||
2337 | ath5k_led_blink(struct ath5k_softc *sc, unsigned int on, | ||
2338 | unsigned int off) | ||
2339 | { | ||
2340 | ATH5K_DBG(sc, ATH5K_DEBUG_LED, "on %u off %u\n", on, off); | ||
2341 | ath5k_hw_set_gpio(sc->ah, sc->led_pin, sc->led_on); | ||
2342 | __set_bit(ATH_STAT_LEDBLINKING, sc->status); | ||
2343 | __clear_bit(ATH_STAT_LEDENDBLINK, sc->status); | ||
2344 | sc->led_off = off; | ||
2345 | mod_timer(&sc->led_tim, jiffies + on); | ||
2346 | } | ||
2347 | |||
2348 | static void | ||
2349 | ath5k_led_event(struct ath5k_softc *sc, int event) | ||
2350 | { | ||
2351 | if (likely(!test_bit(ATH_STAT_LEDSOFT, sc->status))) | ||
2352 | return; | ||
2353 | if (unlikely(test_bit(ATH_STAT_LEDBLINKING, sc->status))) | ||
2354 | return; /* don't interrupt active blink */ | ||
2355 | switch (event) { | ||
2356 | case ATH_LED_TX: | ||
2357 | ath5k_led_blink(sc, sc->hwmap[sc->led_txrate].ledon, | ||
2358 | sc->hwmap[sc->led_txrate].ledoff); | ||
2359 | break; | ||
2360 | case ATH_LED_RX: | ||
2361 | ath5k_led_blink(sc, sc->hwmap[sc->led_rxrate].ledon, | ||
2362 | sc->hwmap[sc->led_rxrate].ledoff); | ||
2363 | break; | ||
2364 | } | ||
2365 | } | ||
2366 | |||
2367 | |||
2368 | |||
2369 | |||
2370 | /********************\ | ||
2371 | * Mac80211 functions * | ||
2372 | \********************/ | ||
2373 | |||
2374 | static int | ||
2375 | ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb, | ||
2376 | struct ieee80211_tx_control *ctl) | ||
2377 | { | ||
2378 | struct ath5k_softc *sc = hw->priv; | ||
2379 | struct ath5k_buf *bf; | ||
2380 | unsigned long flags; | ||
2381 | int hdrlen; | ||
2382 | int pad; | ||
2383 | |||
2384 | ath5k_debug_dump_skb(sc, skb, "TX ", 1); | ||
2385 | |||
2386 | if (sc->opmode == IEEE80211_IF_TYPE_MNTR) | ||
2387 | ATH5K_DBG(sc, ATH5K_DEBUG_XMIT, "tx in monitor (scan?)\n"); | ||
2388 | |||
2389 | /* | ||
2390 | * the hardware expects the header to be padded to a 4 byte | ||
2391 | * boundary; if it is not, add the padding after the header | ||
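| * (e.g. a 26 byte QoS header gets 2 bytes of headroom pushed | ||
| * in front and is moved down, leaving 2 pad bytes between | ||
| * header and payload so the payload stays 4 byte aligned) | ||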
2392 | */ | ||
2393 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); | ||
2394 | if (hdrlen & 3) { | ||
2395 | pad = hdrlen % 4; | ||
2396 | if (skb_headroom(skb) < pad) { | ||
2397 | ATH5K_ERR(sc, "tx hdrlen %d not a multiple of 4 and not" | ||
2398 | " enough headroom to pad %d bytes\n", hdrlen, pad); | ||
2399 | return -1; | ||
2400 | } | ||
2401 | skb_push(skb, pad); | ||
2402 | memmove(skb->data, skb->data+pad, hdrlen); | ||
2403 | } | ||
2404 | |||
2405 | sc->led_txrate = ctl->tx_rate; | ||
2406 | |||
2407 | spin_lock_irqsave(&sc->txbuflock, flags); | ||
2408 | if (list_empty(&sc->txbuf)) { | ||
2409 | ATH5K_ERR(sc, "no further txbuf available, dropping packet\n"); | ||
2410 | spin_unlock_irqrestore(&sc->txbuflock, flags); | ||
2411 | ieee80211_stop_queue(hw, ctl->queue); | ||
2412 | return -1; | ||
2413 | } | ||
2414 | bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list); | ||
2415 | list_del(&bf->list); | ||
2416 | sc->txbuf_len--; | ||
2417 | if (list_empty(&sc->txbuf)) | ||
2418 | ieee80211_stop_queues(hw); | ||
2419 | spin_unlock_irqrestore(&sc->txbuflock, flags); | ||
2420 | |||
2421 | bf->skb = skb; | ||
2422 | |||
2423 | if (ath5k_txbuf_setup(sc, bf, ctl)) { | ||
2424 | bf->skb = NULL; | ||
2425 | spin_lock_irqsave(&sc->txbuflock, flags); | ||
2426 | list_add_tail(&bf->list, &sc->txbuf); | ||
2427 | sc->txbuf_len++; | ||
2428 | spin_unlock_irqrestore(&sc->txbuflock, flags); | ||
2429 | dev_kfree_skb_any(skb); | ||
2430 | return 0; | ||
2431 | } | ||
2432 | |||
2433 | return 0; | ||
2434 | } | ||
2435 | |||
2436 | static int | ||
2437 | ath5k_reset(struct ieee80211_hw *hw) | ||
2438 | { | ||
2439 | struct ath5k_softc *sc = hw->priv; | ||
2440 | struct ath5k_hw *ah = sc->ah; | ||
2441 | int ret; | ||
2442 | |||
2443 | ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n"); | ||
2444 | /* | ||
2445 | * Convert to a hw channel description with the flags | ||
2446 | * constrained to reflect the current operating mode. | ||
2447 | */ | ||
2448 | sc->curchan = hw->conf.chan; | ||
2449 | |||
2450 | ath5k_hw_set_intr(ah, 0); | ||
2451 | ath5k_txq_cleanup(sc); | ||
2452 | ath5k_rx_stop(sc); | ||
2453 | |||
2454 | ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, true); | ||
2455 | if (unlikely(ret)) { | ||
2456 | ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret); | ||
2457 | goto err; | ||
2458 | } | ||
2459 | ath5k_hw_set_txpower_limit(sc->ah, 0); | ||
2460 | |||
2461 | ret = ath5k_rx_start(sc); | ||
2462 | if (unlikely(ret)) { | ||
2463 | ATH5K_ERR(sc, "can't start recv logic\n"); | ||
2464 | goto err; | ||
2465 | } | ||
2466 | /* | ||
2467 | * We may be doing a reset in response to an ioctl | ||
2468 | * that changes the channel so update any state that | ||
2469 | * might change as a result. | ||
2470 | * | ||
2471 | * XXX needed? | ||
2472 | */ | ||
2473 | /* ath5k_chan_change(sc, c); */ | ||
2474 | ath5k_beacon_config(sc); | ||
2475 | /* intrs are started by ath5k_beacon_config */ | ||
2476 | |||
2477 | ieee80211_wake_queues(hw); | ||
2478 | |||
2479 | return 0; | ||
2480 | err: | ||
2481 | return ret; | ||
2482 | } | ||
2483 | |||
2484 | static int ath5k_start(struct ieee80211_hw *hw) | ||
2485 | { | ||
2486 | return ath5k_init(hw->priv); | ||
2487 | } | ||
2488 | |||
2489 | static void ath5k_stop(struct ieee80211_hw *hw) | ||
2490 | { | ||
2491 | ath5k_stop_hw(hw->priv); | ||
2492 | } | ||
2493 | |||
2494 | static int ath5k_add_interface(struct ieee80211_hw *hw, | ||
2495 | struct ieee80211_if_init_conf *conf) | ||
2496 | { | ||
2497 | struct ath5k_softc *sc = hw->priv; | ||
2498 | int ret; | ||
2499 | |||
2500 | mutex_lock(&sc->lock); | ||
2501 | if (sc->iface_id) { | ||
2502 | ret = 0; | ||
2503 | goto end; | ||
2504 | } | ||
2505 | |||
2506 | sc->iface_id = conf->if_id; | ||
2507 | |||
2508 | switch (conf->type) { | ||
2509 | case IEEE80211_IF_TYPE_STA: | ||
2510 | case IEEE80211_IF_TYPE_IBSS: | ||
2511 | case IEEE80211_IF_TYPE_MNTR: | ||
2512 | sc->opmode = conf->type; | ||
2513 | break; | ||
2514 | default: | ||
2515 | ret = -EOPNOTSUPP; | ||
2516 | goto end; | ||
2517 | } | ||
2518 | ret = 0; | ||
2519 | end: | ||
2520 | mutex_unlock(&sc->lock); | ||
2521 | return ret; | ||
2522 | } | ||
2523 | |||
2524 | static void | ||
2525 | ath5k_remove_interface(struct ieee80211_hw *hw, | ||
2526 | struct ieee80211_if_init_conf *conf) | ||
2527 | { | ||
2528 | struct ath5k_softc *sc = hw->priv; | ||
2529 | |||
2530 | mutex_lock(&sc->lock); | ||
2531 | if (sc->iface_id != conf->if_id) | ||
2532 | goto end; | ||
2533 | |||
2534 | sc->iface_id = 0; | ||
2535 | end: | ||
2536 | mutex_unlock(&sc->lock); | ||
2537 | } | ||
2538 | |||
2539 | static int | ||
2540 | ath5k_config(struct ieee80211_hw *hw, | ||
2541 | struct ieee80211_conf *conf) | ||
2542 | { | ||
2543 | struct ath5k_softc *sc = hw->priv; | ||
2544 | |||
2545 | sc->bintval = conf->beacon_int * 1000 / 1024; | ||
2546 | ath5k_setcurmode(sc, conf->phymode); | ||
2547 | |||
2548 | return ath5k_chan_set(sc, conf->chan); | ||
2549 | } | ||
2550 | |||
2551 | static int | ||
2552 | ath5k_config_interface(struct ieee80211_hw *hw, int if_id, | ||
2553 | struct ieee80211_if_conf *conf) | ||
2554 | { | ||
2555 | struct ath5k_softc *sc = hw->priv; | ||
2556 | struct ath5k_hw *ah = sc->ah; | ||
2557 | int ret; | ||
2558 | |||
2559 | /* Set to a reasonable value. Note that this will | ||
2560 | * be set to mac80211's value at ath5k_config(). */ | ||
2561 | sc->bintval = 1000 * 1000 / 1024; | ||
2562 | mutex_lock(&sc->lock); | ||
2563 | if (sc->iface_id != if_id) { | ||
2564 | ret = -EIO; | ||
2565 | goto unlock; | ||
2566 | } | ||
2567 | if (conf->bssid) { | ||
2568 | /* Cache for later use during resets */ | ||
2569 | memcpy(ah->ah_bssid, conf->bssid, ETH_ALEN); | ||
2570 | /* XXX: assoc id is set to 0 for now, mac80211 doesn't have | ||
2571 | * a clean way of letting us retrieve this yet. */ | ||
2572 | ath5k_hw_set_associd(ah, ah->ah_bssid, 0); | ||
2573 | } | ||
2574 | mutex_unlock(&sc->lock); | ||
2575 | |||
2576 | return ath5k_reset(hw); | ||
2577 | unlock: | ||
2578 | mutex_unlock(&sc->lock); | ||
2579 | return ret; | ||
2580 | } | ||
2581 | |||
2582 | #define SUPPORTED_FIF_FLAGS \ | ||
2583 | FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | \ | ||
2584 | FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | \ | ||
2585 | FIF_BCN_PRBRESP_PROMISC | ||
2586 | /* | ||
2587 | * o always accept unicast, broadcast, and multicast traffic | ||
2588 | * o multicast traffic for all BSSIDs will be enabled if mac80211 | ||
2589 | * says it should be | ||
2590 | * o maintain current state of phy ofdm or phy cck error reception. | ||
2591 | * If the hardware detects any of these type of errors then | ||
2592 | * ath5k_hw_get_rx_filter() will pass to us the respective | ||
2593 | * hardware filters to be able to receive these type of frames. | ||
2594 | * o probe request frames are accepted only when operating in | ||
2595 | * hostap, adhoc, or monitor modes | ||
2596 | * o enable promiscuous mode according to the interface state | ||
2597 | * o accept beacons: | ||
2598 | * - when operating in adhoc mode so the 802.11 layer creates | ||
2599 | * node table entries for peers, | ||
2600 | * - when operating in station mode for collecting rssi data when | ||
2601 | * the station is otherwise quiet, or | ||
2602 | * - when scanning | ||
2603 | */ | ||
2604 | static void ath5k_configure_filter(struct ieee80211_hw *hw, | ||
2605 | unsigned int changed_flags, | ||
2606 | unsigned int *new_flags, | ||
2607 | int mc_count, struct dev_mc_list *mclist) | ||
2608 | { | ||
2609 | struct ath5k_softc *sc = hw->priv; | ||
2610 | struct ath5k_hw *ah = sc->ah; | ||
2611 | u32 mfilt[2], val, rfilt; | ||
2612 | u8 pos; | ||
2613 | int i; | ||
2614 | |||
2615 | mfilt[0] = 0; | ||
2616 | mfilt[1] = 0; | ||
2617 | |||
2618 | /* Only deal with supported flags */ | ||
2619 | changed_flags &= SUPPORTED_FIF_FLAGS; | ||
2620 | *new_flags &= SUPPORTED_FIF_FLAGS; | ||
2621 | |||
2622 | /* If HW detects any phy or radar errors, leave those filters on. | ||
2623 | * Also, always enable Unicast, Broadcasts and Multicast | ||
2624 | * XXX: move unicast, bssid broadcasts and multicast to mac80211 */ | ||
2625 | rfilt = (ath5k_hw_get_rx_filter(ah) & (AR5K_RX_FILTER_PHYERR)) | | ||
2626 | (AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST | | ||
2627 | AR5K_RX_FILTER_MCAST); | ||
2628 | |||
2629 | if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) { | ||
2630 | if (*new_flags & FIF_PROMISC_IN_BSS) { | ||
2631 | rfilt |= AR5K_RX_FILTER_PROM; | ||
2632 | __set_bit(ATH_STAT_PROMISC, sc->status); | ||
2633 | } else | ||
2635 | __clear_bit(ATH_STAT_PROMISC, sc->status); | ||
2636 | } | ||
2637 | |||
2638 | /* Note, AR5K_RX_FILTER_MCAST is already enabled */ | ||
2639 | if (*new_flags & FIF_ALLMULTI) { | ||
2640 | mfilt[0] = ~0; | ||
2641 | mfilt[1] = ~0; | ||
2642 | } else { | ||
2643 | for (i = 0; i < mc_count; i++) { | ||
2644 | if (!mclist) | ||
2645 | break; | ||
2646 | /* calculate XOR of eight 6-bit values */ | ||
2647 | val = LE_READ_4(mclist->dmi_addr + 0); | ||
2648 | pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; | ||
2649 | val = LE_READ_4(mclist->dmi_addr + 3); | ||
2650 | pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; | ||
2651 | pos &= 0x3f; | ||
2652 | mfilt[pos / 32] |= (1 << (pos % 32)); | ||
2653 | /* XXX: we might be able to just do this instead, | ||
2654 | * but not sure, needs testing; if we do use this we'd | ||
2655 | * need to inform the code below not to reset the mcast filter */ | ||
2656 | /* ath5k_hw_set_mcast_filterindex(ah, | ||
2657 | * mclist->dmi_addr[5]); */ | ||
2658 | mclist = mclist->next; | ||
2659 | } | ||
2660 | } | ||
2661 | |||
2662 | /* This is the best we can do */ | ||
2663 | if (*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)) | ||
2664 | rfilt |= AR5K_RX_FILTER_PHYERR; | ||
2665 | |||
2666 | /* FIF_BCN_PRBRESP_PROMISC really means to enable beacons | ||
2667 | * and probes for any BSSID, this needs testing */ | ||
2668 | if (*new_flags & FIF_BCN_PRBRESP_PROMISC) | ||
2669 | rfilt |= AR5K_RX_FILTER_BEACON | AR5K_RX_FILTER_PROBEREQ; | ||
2670 | |||
2671 | /* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not | ||
2672 | * set we should only pass on control frames for this | ||
2673 | * station. This needs testing. I believe right now this | ||
2674 | * enables *all* control frames, which is OK, but | ||
2675 | * we should see if we can improve on granularity */ | ||
2676 | if (*new_flags & FIF_CONTROL) | ||
2677 | rfilt |= AR5K_RX_FILTER_CONTROL; | ||
2678 | |||
2679 | /* Additional settings per mode -- this is per ath5k */ | ||
2680 | |||
2681 | /* XXX move these to mac80211, and add a beacon IFF flag to mac80211 */ | ||
2682 | |||
2683 | if (sc->opmode == IEEE80211_IF_TYPE_MNTR) | ||
2684 | rfilt |= AR5K_RX_FILTER_CONTROL | AR5K_RX_FILTER_BEACON | | ||
2685 | AR5K_RX_FILTER_PROBEREQ | AR5K_RX_FILTER_PROM; | ||
2686 | if (sc->opmode != IEEE80211_IF_TYPE_STA) | ||
2687 | rfilt |= AR5K_RX_FILTER_PROBEREQ; | ||
2688 | if (sc->opmode != IEEE80211_IF_TYPE_AP && | ||
2689 | test_bit(ATH_STAT_PROMISC, sc->status)) | ||
2690 | rfilt |= AR5K_RX_FILTER_PROM; | ||
2691 | if (sc->opmode == IEEE80211_IF_TYPE_STA || | ||
2692 | sc->opmode == IEEE80211_IF_TYPE_IBSS) { | ||
2693 | rfilt |= AR5K_RX_FILTER_BEACON; | ||
2694 | } | ||
2695 | |||
2696 | /* Set filters */ | ||
2697 | ath5k_hw_set_rx_filter(ah, rfilt); | ||
2698 | |||
2699 | /* Set multicast bits */ | ||
2700 | ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]); | ||
2701 | /* Cache the hw filter flags; these are what will actually | ||
2702 | * be set in HW */ | ||
2703 | sc->filter_flags = rfilt; | ||
2704 | } | ||
2705 | |||
2706 | static int | ||
2707 | ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, | ||
2708 | const u8 *local_addr, const u8 *addr, | ||
2709 | struct ieee80211_key_conf *key) | ||
2710 | { | ||
2711 | struct ath5k_softc *sc = hw->priv; | ||
2712 | int ret = 0; | ||
2713 | |||
2714 | switch (key->alg) { | ||
2715 | case ALG_WEP: | ||
2716 | break; | ||
2717 | case ALG_TKIP: | ||
2718 | case ALG_CCMP: | ||
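| /* hardware crypto is not supported for these (yet); | ||
|  * returning an error makes mac80211 fall back to | ||
|  * software encryption */ | ||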
2719 | return -EOPNOTSUPP; | ||
2720 | default: | ||
2721 | WARN_ON(1); | ||
2722 | return -EINVAL; | ||
2723 | } | ||
2724 | |||
2725 | mutex_lock(&sc->lock); | ||
2726 | |||
2727 | switch (cmd) { | ||
2728 | case SET_KEY: | ||
2729 | ret = ath5k_hw_set_key(sc->ah, key->keyidx, key, addr); | ||
2730 | if (ret) { | ||
2731 | ATH5K_ERR(sc, "can't set the key\n"); | ||
2732 | goto unlock; | ||
2733 | } | ||
2734 | __set_bit(key->keyidx, sc->keymap); | ||
2735 | key->hw_key_idx = key->keyidx; | ||
2736 | break; | ||
2737 | case DISABLE_KEY: | ||
2738 | ath5k_hw_reset_key(sc->ah, key->keyidx); | ||
2739 | __clear_bit(key->keyidx, sc->keymap); | ||
2740 | break; | ||
2741 | default: | ||
2742 | ret = -EINVAL; | ||
2743 | goto unlock; | ||
2744 | } | ||
2745 | |||
2746 | unlock: | ||
2747 | mutex_unlock(&sc->lock); | ||
2748 | return ret; | ||
2749 | } | ||
2750 | |||
2751 | static int | ||
2752 | ath5k_get_stats(struct ieee80211_hw *hw, | ||
2753 | struct ieee80211_low_level_stats *stats) | ||
2754 | { | ||
2755 | struct ath5k_softc *sc = hw->priv; | ||
2756 | |||
2757 | memcpy(stats, &sc->ll_stats, sizeof(sc->ll_stats)); | ||
2758 | |||
2759 | return 0; | ||
2760 | } | ||
2761 | |||
2762 | static int | ||
2763 | ath5k_get_tx_stats(struct ieee80211_hw *hw, | ||
2764 | struct ieee80211_tx_queue_stats *stats) | ||
2765 | { | ||
2766 | struct ath5k_softc *sc = hw->priv; | ||
2767 | |||
2768 | memcpy(stats, &sc->tx_stats, sizeof(sc->tx_stats)); | ||
2769 | |||
2770 | return 0; | ||
2771 | } | ||
2772 | |||
2773 | static u64 | ||
2774 | ath5k_get_tsf(struct ieee80211_hw *hw) | ||
2775 | { | ||
2776 | struct ath5k_softc *sc = hw->priv; | ||
2777 | |||
2778 | return ath5k_hw_get_tsf64(sc->ah); | ||
2779 | } | ||
2780 | |||
2781 | static void | ||
2782 | ath5k_reset_tsf(struct ieee80211_hw *hw) | ||
2783 | { | ||
2784 | struct ath5k_softc *sc = hw->priv; | ||
2785 | |||
2786 | ath5k_hw_reset_tsf(sc->ah); | ||
2787 | } | ||
2788 | |||
2789 | static int | ||
2790 | ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, | ||
2791 | struct ieee80211_tx_control *ctl) | ||
2792 | { | ||
2793 | struct ath5k_softc *sc = hw->priv; | ||
2794 | int ret; | ||
2795 | |||
2796 | ath5k_debug_dump_skb(sc, skb, "BC ", 1); | ||
2797 | |||
2798 | mutex_lock(&sc->lock); | ||
2799 | |||
2800 | if (sc->opmode != IEEE80211_IF_TYPE_IBSS) { | ||
2801 | ret = -EIO; | ||
2802 | goto end; | ||
2803 | } | ||
2804 | |||
2805 | ath5k_txbuf_free(sc, sc->bbuf); | ||
2806 | sc->bbuf->skb = skb; | ||
2807 | ret = ath5k_beacon_setup(sc, sc->bbuf, ctl); | ||
2808 | if (ret) | ||
2809 | sc->bbuf->skb = NULL; | ||
2810 | else | ||
2811 | ath5k_beacon_config(sc); | ||
2812 | |||
2813 | end: | ||
2814 | mutex_unlock(&sc->lock); | ||
2815 | return ret; | ||
2816 | } | ||
2817 | |||