author	Bob Copeland <me@bobcopeland.com>	2010-09-16 23:45:07 -0400
committer	John W. Linville <linville@tuxdriver.com>	2010-09-21 11:05:06 -0400
commit	8a63facc376a7b8bb2b64c0ffbdb0949d1d6c71c (patch)
tree	ba5d13d26296c2dbe258dfcec7f8a6102ff4a549 /drivers
parent	ce2220d1da0bad9583af38a03ad508968d554c0f (diff)
ath5k: reorder base.c to remove fwd decls
This change reorganizes the main ath5k file in order to re-group related functions and remove most of the forward declarations (from 61 down to 3). This is, unfortunately, a lot of churn, but there should be no functional changes.

Signed-off-by: Bob Copeland <me@bobcopeland.com>
Signed-off-by: Bruno Randolf <br1@einfach.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/wireless/ath/ath5k/base.c	1662
1 files changed, 764 insertions, 898 deletions
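The idea behind the reordering can be illustrated with a minimal, hypothetical C sketch (the names below are illustrative, not taken from base.c): once a function is defined before its callers, its separate forward declaration becomes unnecessary, so grouping definitions ahead of their users lets most of the prototypes at the top of the file be deleted.

/* Hypothetical example -- not code from this patch.
 *
 * Before the reordering, a layout like this needs a prototype, because
 * caller() refers to helper() before helper() is defined:
 *
 *	static int helper(int x);			// forward declaration
 *	static int caller(void) { return helper(42); }
 *	static int helper(int x) { return x * 2; }
 *
 * After moving the definition above its user, the prototype can be dropped:
 */
static int helper(int x)
{
	return x * 2;
}

static int caller(void)
{
	return helper(42);
}

/* Only functions that must be referenced before any ordering allows
 * (e.g. a callback named in an ops table defined above it) still need
 * a forward declaration, which is why a few remain in base.c. */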
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 0709bb091b2..4a07fb89021 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -70,11 +70,6 @@ static int modparam_all_channels;
module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");

73
74/******************\
75* Internal defines *
76\******************/
77
/* Module info */
MODULE_AUTHOR("Jiri Slaby");
MODULE_AUTHOR("Nick Kossifidis");
@@ -83,6 +78,10 @@ MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION("0.6.0 (EXPERIMENTAL)");

81static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan);
82static int ath5k_beacon_update(struct ieee80211_hw *hw,
83 struct ieee80211_vif *vif);
84static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);

/* Known PCI ids */
static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = {
@@ -190,129 +189,6 @@ static const struct ieee80211_rate ath5k_rates[] = {
	/* XR missing */
};

193/*
194 * Prototypes - PCI stack related functions
195 */
196static int __devinit ath5k_pci_probe(struct pci_dev *pdev,
197 const struct pci_device_id *id);
198static void __devexit ath5k_pci_remove(struct pci_dev *pdev);
199#ifdef CONFIG_PM_SLEEP
200static int ath5k_pci_suspend(struct device *dev);
201static int ath5k_pci_resume(struct device *dev);
202
203static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
204#define ATH5K_PM_OPS (&ath5k_pm_ops)
205#else
206#define ATH5K_PM_OPS NULL
207#endif /* CONFIG_PM_SLEEP */
208
209static struct pci_driver ath5k_pci_driver = {
210 .name = KBUILD_MODNAME,
211 .id_table = ath5k_pci_id_table,
212 .probe = ath5k_pci_probe,
213 .remove = __devexit_p(ath5k_pci_remove),
214 .driver.pm = ATH5K_PM_OPS,
215};
216
217
218
219/*
220 * Prototypes - MAC 802.11 stack related functions
221 */
222static int ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
223static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
224 struct ath5k_txq *txq);
225static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan);
226static int ath5k_start(struct ieee80211_hw *hw);
227static void ath5k_stop(struct ieee80211_hw *hw);
228static int ath5k_add_interface(struct ieee80211_hw *hw,
229 struct ieee80211_vif *vif);
230static void ath5k_remove_interface(struct ieee80211_hw *hw,
231 struct ieee80211_vif *vif);
232static int ath5k_config(struct ieee80211_hw *hw, u32 changed);
233static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
234 struct netdev_hw_addr_list *mc_list);
235static void ath5k_configure_filter(struct ieee80211_hw *hw,
236 unsigned int changed_flags,
237 unsigned int *new_flags,
238 u64 multicast);
239static int ath5k_set_key(struct ieee80211_hw *hw,
240 enum set_key_cmd cmd,
241 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
242 struct ieee80211_key_conf *key);
243static int ath5k_get_stats(struct ieee80211_hw *hw,
244 struct ieee80211_low_level_stats *stats);
245static int ath5k_get_survey(struct ieee80211_hw *hw,
246 int idx, struct survey_info *survey);
247static u64 ath5k_get_tsf(struct ieee80211_hw *hw);
248static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf);
249static void ath5k_reset_tsf(struct ieee80211_hw *hw);
250static int ath5k_beacon_update(struct ieee80211_hw *hw,
251 struct ieee80211_vif *vif);
252static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
253 struct ieee80211_vif *vif,
254 struct ieee80211_bss_conf *bss_conf,
255 u32 changes);
256static void ath5k_sw_scan_start(struct ieee80211_hw *hw);
257static void ath5k_sw_scan_complete(struct ieee80211_hw *hw);
258static void ath5k_set_coverage_class(struct ieee80211_hw *hw,
259 u8 coverage_class);
260
261static const struct ieee80211_ops ath5k_hw_ops = {
262 .tx = ath5k_tx,
263 .start = ath5k_start,
264 .stop = ath5k_stop,
265 .add_interface = ath5k_add_interface,
266 .remove_interface = ath5k_remove_interface,
267 .config = ath5k_config,
268 .prepare_multicast = ath5k_prepare_multicast,
269 .configure_filter = ath5k_configure_filter,
270 .set_key = ath5k_set_key,
271 .get_stats = ath5k_get_stats,
272 .get_survey = ath5k_get_survey,
273 .conf_tx = NULL,
274 .get_tsf = ath5k_get_tsf,
275 .set_tsf = ath5k_set_tsf,
276 .reset_tsf = ath5k_reset_tsf,
277 .bss_info_changed = ath5k_bss_info_changed,
278 .sw_scan_start = ath5k_sw_scan_start,
279 .sw_scan_complete = ath5k_sw_scan_complete,
280 .set_coverage_class = ath5k_set_coverage_class,
281};
282
283/*
284 * Prototypes - Internal functions
285 */
286/* Attach detach */
287static int ath5k_attach(struct pci_dev *pdev,
288 struct ieee80211_hw *hw);
289static void ath5k_detach(struct pci_dev *pdev,
290 struct ieee80211_hw *hw);
291/* Channel/mode setup */
292static inline short ath5k_ieee2mhz(short chan);
293static unsigned int ath5k_copy_channels(struct ath5k_hw *ah,
294 struct ieee80211_channel *channels,
295 unsigned int mode,
296 unsigned int max);
297static int ath5k_setup_bands(struct ieee80211_hw *hw);
298static int ath5k_chan_set(struct ath5k_softc *sc,
299 struct ieee80211_channel *chan);
300static void ath5k_setcurmode(struct ath5k_softc *sc,
301 unsigned int mode);
302static void ath5k_mode_setup(struct ath5k_softc *sc);
303
304/* Descriptor setup */
305static int ath5k_desc_alloc(struct ath5k_softc *sc,
306 struct pci_dev *pdev);
307static void ath5k_desc_free(struct ath5k_softc *sc,
308 struct pci_dev *pdev);
309/* Buffers setup */
310static int ath5k_rxbuf_setup(struct ath5k_softc *sc,
311 struct ath5k_buf *bf);
312static int ath5k_txbuf_setup(struct ath5k_softc *sc,
313 struct ath5k_buf *bf,
314 struct ath5k_txq *txq, int padsize);
315
static inline void ath5k_txbuf_free_skb(struct ath5k_softc *sc,
					struct ath5k_buf *bf)
{
@@ -345,35 +221,6 @@ static inline void ath5k_rxbuf_free_skb(struct ath5k_softc *sc,
}


348/* Queues setup */
349static struct ath5k_txq *ath5k_txq_setup(struct ath5k_softc *sc,
350 int qtype, int subtype);
351static int ath5k_beaconq_setup(struct ath5k_hw *ah);
352static int ath5k_beaconq_config(struct ath5k_softc *sc);
353static void ath5k_txq_drainq(struct ath5k_softc *sc,
354 struct ath5k_txq *txq);
355static void ath5k_txq_cleanup(struct ath5k_softc *sc);
356static void ath5k_txq_release(struct ath5k_softc *sc);
357/* Rx handling */
358static int ath5k_rx_start(struct ath5k_softc *sc);
359static void ath5k_rx_stop(struct ath5k_softc *sc);
360static unsigned int ath5k_rx_decrypted(struct ath5k_softc *sc,
361 struct sk_buff *skb,
362 struct ath5k_rx_status *rs);
363static void ath5k_tasklet_rx(unsigned long data);
364/* Tx handling */
365static void ath5k_tx_processq(struct ath5k_softc *sc,
366 struct ath5k_txq *txq);
367static void ath5k_tasklet_tx(unsigned long data);
368/* Beacon handling */
369static int ath5k_beacon_setup(struct ath5k_softc *sc,
370 struct ath5k_buf *bf);
371static void ath5k_beacon_send(struct ath5k_softc *sc);
372static void ath5k_beacon_config(struct ath5k_softc *sc);
373static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
374static void ath5k_tasklet_beacon(unsigned long data);
375static void ath5k_tasklet_ani(unsigned long data);
376
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
{
	u64 tsf = ath5k_hw_get_tsf64(ah);
@@ -384,50 +231,6 @@ static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
	return (tsf & ~0x7fff) | rstamp;
}

387/* Interrupt handling */
388static int ath5k_init(struct ath5k_softc *sc);
389static int ath5k_stop_locked(struct ath5k_softc *sc);
390static int ath5k_stop_hw(struct ath5k_softc *sc);
391static irqreturn_t ath5k_intr(int irq, void *dev_id);
392static void ath5k_reset_work(struct work_struct *work);
393
394static void ath5k_tasklet_calibrate(unsigned long data);
395
396/*
397 * Module init/exit functions
398 */
399static int __init
400init_ath5k_pci(void)
401{
402 int ret;
403
404 ath5k_debug_init();
405
406 ret = pci_register_driver(&ath5k_pci_driver);
407 if (ret) {
408 printk(KERN_ERR "ath5k_pci: can't register pci driver\n");
409 return ret;
410 }
411
412 return 0;
413}
414
415static void __exit
416exit_ath5k_pci(void)
417{
418 pci_unregister_driver(&ath5k_pci_driver);
419
420 ath5k_debug_finish();
421}
422
423module_init(init_ath5k_pci);
424module_exit(exit_ath5k_pci);
425
426
427/********************\
428* PCI Initialization *
429\********************/
430
431static const char * 234static const char *
432ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val) 235ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
433{ 236{
@@ -466,299 +269,6 @@ static const struct ath_ops ath5k_common_ops = {
	.write = ath5k_iowrite32,
};

469static int __devinit
470ath5k_pci_probe(struct pci_dev *pdev,
471 const struct pci_device_id *id)
472{
473 void __iomem *mem;
474 struct ath5k_softc *sc;
475 struct ath_common *common;
476 struct ieee80211_hw *hw;
477 int ret;
478 u8 csz;
479
480 /*
481 * L0s needs to be disabled on all ath5k cards.
482 *
483 * For distributions shipping with CONFIG_PCIEASPM (this will be enabled
484 * by default in the future in 2.6.36) this will also mean both L1 and
485 * L0s will be disabled when a pre 1.1 PCIe device is detected. We do
486 * know L1 works correctly even for all ath5k pre 1.1 PCIe devices
487 * though but cannot currently undue the effect of a blacklist, for
488 * details you can read pcie_aspm_sanity_check() and see how it adjusts
489 * the device link capability.
490 *
491 * It may be possible in the future to implement some PCI API to allow
492 * drivers to override blacklists for pre 1.1 PCIe but for now it is
493 * best to accept that both L0s and L1 will be disabled completely for
494 * distributions shipping with CONFIG_PCIEASPM rather than having this
495 * issue present. Motivation for adding this new API will be to help
496 * with power consumption for some of these devices.
497 */
498 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
499
500 ret = pci_enable_device(pdev);
501 if (ret) {
502 dev_err(&pdev->dev, "can't enable device\n");
503 goto err;
504 }
505
506 /* XXX 32-bit addressing only */
507 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
508 if (ret) {
509 dev_err(&pdev->dev, "32-bit DMA not available\n");
510 goto err_dis;
511 }
512
513 /*
514 * Cache line size is used to size and align various
515 * structures used to communicate with the hardware.
516 */
517 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
518 if (csz == 0) {
519 /*
520 * Linux 2.4.18 (at least) writes the cache line size
521 * register as a 16-bit wide register which is wrong.
522 * We must have this setup properly for rx buffer
523 * DMA to work so force a reasonable value here if it
524 * comes up zero.
525 */
526 csz = L1_CACHE_BYTES >> 2;
527 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
528 }
529 /*
530 * The default setting of latency timer yields poor results,
531 * set it to the value used by other systems. It may be worth
532 * tweaking this setting more.
533 */
534 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
535
536 /* Enable bus mastering */
537 pci_set_master(pdev);
538
539 /*
540 * Disable the RETRY_TIMEOUT register (0x41) to keep
541 * PCI Tx retries from interfering with C3 CPU state.
542 */
543 pci_write_config_byte(pdev, 0x41, 0);
544
545 ret = pci_request_region(pdev, 0, "ath5k");
546 if (ret) {
547 dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
548 goto err_dis;
549 }
550
551 mem = pci_iomap(pdev, 0, 0);
552 if (!mem) {
553 dev_err(&pdev->dev, "cannot remap PCI memory region\n") ;
554 ret = -EIO;
555 goto err_reg;
556 }
557
558 /*
559 * Allocate hw (mac80211 main struct)
560 * and hw->priv (driver private data)
561 */
562 hw = ieee80211_alloc_hw(sizeof(*sc), &ath5k_hw_ops);
563 if (hw == NULL) {
564 dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n");
565 ret = -ENOMEM;
566 goto err_map;
567 }
568
569 dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy));
570
571 /* Initialize driver private data */
572 SET_IEEE80211_DEV(hw, &pdev->dev);
573 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
574 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
575 IEEE80211_HW_SIGNAL_DBM;
576
577 hw->wiphy->interface_modes =
578 BIT(NL80211_IFTYPE_AP) |
579 BIT(NL80211_IFTYPE_STATION) |
580 BIT(NL80211_IFTYPE_ADHOC) |
581 BIT(NL80211_IFTYPE_MESH_POINT);
582
583 hw->extra_tx_headroom = 2;
584 hw->channel_change_time = 5000;
585 sc = hw->priv;
586 sc->hw = hw;
587 sc->pdev = pdev;
588
589 ath5k_debug_init_device(sc);
590
591 /*
592 * Mark the device as detached to avoid processing
593 * interrupts until setup is complete.
594 */
595 __set_bit(ATH_STAT_INVALID, sc->status);
596
597 sc->iobase = mem; /* So we can unmap it on detach */
598 sc->opmode = NL80211_IFTYPE_STATION;
599 sc->bintval = 1000;
600 mutex_init(&sc->lock);
601 spin_lock_init(&sc->rxbuflock);
602 spin_lock_init(&sc->txbuflock);
603 spin_lock_init(&sc->block);
604
605 /* Set private data */
606 pci_set_drvdata(pdev, sc);
607
608 /* Setup interrupt handler */
609 ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
610 if (ret) {
611 ATH5K_ERR(sc, "request_irq failed\n");
612 goto err_free;
613 }
614
615 /* If we passed the test, malloc an ath5k_hw struct */
616 sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
617 if (!sc->ah) {
618 ret = -ENOMEM;
619 ATH5K_ERR(sc, "out of memory\n");
620 goto err_irq;
621 }
622
623 sc->ah->ah_sc = sc;
624 sc->ah->ah_iobase = sc->iobase;
625 common = ath5k_hw_common(sc->ah);
626 common->ops = &ath5k_common_ops;
627 common->ah = sc->ah;
628 common->hw = hw;
629 common->cachelsz = csz << 2; /* convert to bytes */
630
631 /* Initialize device */
632 ret = ath5k_hw_attach(sc);
633 if (ret) {
634 goto err_free_ah;
635 }
636
637 /* set up multi-rate retry capabilities */
638 if (sc->ah->ah_version == AR5K_AR5212) {
639 hw->max_rates = 4;
640 hw->max_rate_tries = 11;
641 }
642
643 /* Finish private driver data initialization */
644 ret = ath5k_attach(pdev, hw);
645 if (ret)
646 goto err_ah;
647
648 ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
649 ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
650 sc->ah->ah_mac_srev,
651 sc->ah->ah_phy_revision);
652
653 if (!sc->ah->ah_single_chip) {
654 /* Single chip radio (!RF5111) */
655 if (sc->ah->ah_radio_5ghz_revision &&
656 !sc->ah->ah_radio_2ghz_revision) {
657 /* No 5GHz support -> report 2GHz radio */
658 if (!test_bit(AR5K_MODE_11A,
659 sc->ah->ah_capabilities.cap_mode)) {
660 ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
661 ath5k_chip_name(AR5K_VERSION_RAD,
662 sc->ah->ah_radio_5ghz_revision),
663 sc->ah->ah_radio_5ghz_revision);
664 /* No 2GHz support (5110 and some
665 * 5Ghz only cards) -> report 5Ghz radio */
666 } else if (!test_bit(AR5K_MODE_11B,
667 sc->ah->ah_capabilities.cap_mode)) {
668 ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
669 ath5k_chip_name(AR5K_VERSION_RAD,
670 sc->ah->ah_radio_5ghz_revision),
671 sc->ah->ah_radio_5ghz_revision);
672 /* Multiband radio */
673 } else {
674 ATH5K_INFO(sc, "RF%s multiband radio found"
675 " (0x%x)\n",
676 ath5k_chip_name(AR5K_VERSION_RAD,
677 sc->ah->ah_radio_5ghz_revision),
678 sc->ah->ah_radio_5ghz_revision);
679 }
680 }
681 /* Multi chip radio (RF5111 - RF2111) ->
682 * report both 2GHz/5GHz radios */
683 else if (sc->ah->ah_radio_5ghz_revision &&
684 sc->ah->ah_radio_2ghz_revision){
685 ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
686 ath5k_chip_name(AR5K_VERSION_RAD,
687 sc->ah->ah_radio_5ghz_revision),
688 sc->ah->ah_radio_5ghz_revision);
689 ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
690 ath5k_chip_name(AR5K_VERSION_RAD,
691 sc->ah->ah_radio_2ghz_revision),
692 sc->ah->ah_radio_2ghz_revision);
693 }
694 }
695
696
697 /* ready to process interrupts */
698 __clear_bit(ATH_STAT_INVALID, sc->status);
699
700 return 0;
701err_ah:
702 ath5k_hw_detach(sc->ah);
703err_free_ah:
704 kfree(sc->ah);
705err_irq:
706 free_irq(pdev->irq, sc);
707err_free:
708 ieee80211_free_hw(hw);
709err_map:
710 pci_iounmap(pdev, mem);
711err_reg:
712 pci_release_region(pdev, 0);
713err_dis:
714 pci_disable_device(pdev);
715err:
716 return ret;
717}
718
719static void __devexit
720ath5k_pci_remove(struct pci_dev *pdev)
721{
722 struct ath5k_softc *sc = pci_get_drvdata(pdev);
723
724 ath5k_debug_finish_device(sc);
725 ath5k_detach(pdev, sc->hw);
726 ath5k_hw_detach(sc->ah);
727 kfree(sc->ah);
728 free_irq(pdev->irq, sc);
729 pci_iounmap(pdev, sc->iobase);
730 pci_release_region(pdev, 0);
731 pci_disable_device(pdev);
732 ieee80211_free_hw(sc->hw);
733}
734
735#ifdef CONFIG_PM_SLEEP
736static int ath5k_pci_suspend(struct device *dev)
737{
738 struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev));
739
740 ath5k_led_off(sc);
741 return 0;
742}
743
744static int ath5k_pci_resume(struct device *dev)
745{
746 struct pci_dev *pdev = to_pci_dev(dev);
747 struct ath5k_softc *sc = pci_get_drvdata(pdev);
748
749 /*
750 * Suspend/Resume resets the PCI configuration space, so we have to
751 * re-disable the RETRY_TIMEOUT register (0x41) to keep
752 * PCI Tx retries from interfering with C3 CPU state
753 */
754 pci_write_config_byte(pdev, 0x41, 0);
755
756 ath5k_led_enable(sc);
757 return 0;
758}
759#endif /* CONFIG_PM_SLEEP */
760
761
/***********************\
* Driver Initialization *
\***********************/
@@ -772,170 +282,6 @@ static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *re
	return ath_reg_notifier_apply(wiphy, request, regulatory);
}

775static int
776ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
777{
778 struct ath5k_softc *sc = hw->priv;
779 struct ath5k_hw *ah = sc->ah;
780 struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
781 u8 mac[ETH_ALEN] = {};
782 int ret;
783
784 ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device);
785
786 /*
787 * Check if the MAC has multi-rate retry support.
788 * We do this by trying to setup a fake extended
789 * descriptor. MACs that don't have support will
790 * return false w/o doing anything. MACs that do
791 * support it will return true w/o doing anything.
792 */
793 ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
794
795 if (ret < 0)
796 goto err;
797 if (ret > 0)
798 __set_bit(ATH_STAT_MRRETRY, sc->status);
799
800 /*
801 * Collect the channel list. The 802.11 layer
802 * is resposible for filtering this list based
803 * on settings like the phy mode and regulatory
804 * domain restrictions.
805 */
806 ret = ath5k_setup_bands(hw);
807 if (ret) {
808 ATH5K_ERR(sc, "can't get channels\n");
809 goto err;
810 }
811
812 /* NB: setup here so ath5k_rate_update is happy */
813 if (test_bit(AR5K_MODE_11A, ah->ah_modes))
814 ath5k_setcurmode(sc, AR5K_MODE_11A);
815 else
816 ath5k_setcurmode(sc, AR5K_MODE_11B);
817
818 /*
819 * Allocate tx+rx descriptors and populate the lists.
820 */
821 ret = ath5k_desc_alloc(sc, pdev);
822 if (ret) {
823 ATH5K_ERR(sc, "can't allocate descriptors\n");
824 goto err;
825 }
826
827 /*
828 * Allocate hardware transmit queues: one queue for
829 * beacon frames and one data queue for each QoS
830 * priority. Note that hw functions handle resetting
831 * these queues at the needed time.
832 */
833 ret = ath5k_beaconq_setup(ah);
834 if (ret < 0) {
835 ATH5K_ERR(sc, "can't setup a beacon xmit queue\n");
836 goto err_desc;
837 }
838 sc->bhalq = ret;
839 sc->cabq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_CAB, 0);
840 if (IS_ERR(sc->cabq)) {
841 ATH5K_ERR(sc, "can't setup cab queue\n");
842 ret = PTR_ERR(sc->cabq);
843 goto err_bhal;
844 }
845
846 sc->txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
847 if (IS_ERR(sc->txq)) {
848 ATH5K_ERR(sc, "can't setup xmit queue\n");
849 ret = PTR_ERR(sc->txq);
850 goto err_queues;
851 }
852
853 tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc);
854 tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc);
855 tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
856 tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
857 tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);
858
859 INIT_WORK(&sc->reset_work, ath5k_reset_work);
860
861 ret = ath5k_eeprom_read_mac(ah, mac);
862 if (ret) {
863 ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n",
864 sc->pdev->device);
865 goto err_queues;
866 }
867
868 SET_IEEE80211_PERM_ADDR(hw, mac);
869 /* All MAC address bits matter for ACKs */
870 memcpy(sc->bssidmask, ath_bcast_mac, ETH_ALEN);
871 ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
872
873 regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
874 ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier);
875 if (ret) {
876 ATH5K_ERR(sc, "can't initialize regulatory system\n");
877 goto err_queues;
878 }
879
880 ret = ieee80211_register_hw(hw);
881 if (ret) {
882 ATH5K_ERR(sc, "can't register ieee80211 hw\n");
883 goto err_queues;
884 }
885
886 if (!ath_is_world_regd(regulatory))
887 regulatory_hint(hw->wiphy, regulatory->alpha2);
888
889 ath5k_init_leds(sc);
890
891 ath5k_sysfs_register(sc);
892
893 return 0;
894err_queues:
895 ath5k_txq_release(sc);
896err_bhal:
897 ath5k_hw_release_tx_queue(ah, sc->bhalq);
898err_desc:
899 ath5k_desc_free(sc, pdev);
900err:
901 return ret;
902}
903
904static void
905ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw)
906{
907 struct ath5k_softc *sc = hw->priv;
908
909 /*
910 * NB: the order of these is important:
911 * o call the 802.11 layer before detaching ath5k_hw to
912 * ensure callbacks into the driver to delete global
913 * key cache entries can be handled
914 * o reclaim the tx queue data structures after calling
915 * the 802.11 layer as we'll get called back to reclaim
916 * node state and potentially want to use them
917 * o to cleanup the tx queues the hal is called, so detach
918 * it last
919 * XXX: ??? detach ath5k_hw ???
920 * Other than that, it's straightforward...
921 */
922 ieee80211_unregister_hw(hw);
923 ath5k_desc_free(sc, pdev);
924 ath5k_txq_release(sc);
925 ath5k_hw_release_tx_queue(sc->ah, sc->bhalq);
926 ath5k_unregister_leds(sc);
927
928 ath5k_sysfs_unregister(sc);
929 /*
930 * NB: can't reclaim these until after ieee80211_ifdetach
931 * returns because we'll get called back to reclaim node
932 * state and potentially want to use them.
933 */
934}
935
936
937
938
/********************\
* Channel/mode setup *
\********************/
@@ -1490,9 +836,6 @@ ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev)
}

1492 838
1493
1494
1495
/**************\
* Queues setup *
\**************/
@@ -1692,8 +1035,6 @@ ath5k_txq_release(struct ath5k_softc *sc)
}


1695
1696
/*************\
* RX Handling *
\*************/
@@ -2117,6 +1458,59 @@ unlock:
* TX Handling *
\*************/

1461static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
1462 struct ath5k_txq *txq)
1463{
1464 struct ath5k_softc *sc = hw->priv;
1465 struct ath5k_buf *bf;
1466 unsigned long flags;
1467 int padsize;
1468
1469 ath5k_debug_dump_skb(sc, skb, "TX ", 1);
1470
1471 /*
1472 * The hardware expects the header padded to 4 byte boundaries.
1473 * If this is not the case, we add the padding after the header.
1474 */
1475 padsize = ath5k_add_padding(skb);
1476 if (padsize < 0) {
1477 ATH5K_ERR(sc, "tx hdrlen not %%4: not enough"
1478 " headroom to pad");
1479 goto drop_packet;
1480 }
1481
1482 spin_lock_irqsave(&sc->txbuflock, flags);
1483 if (list_empty(&sc->txbuf)) {
1484 ATH5K_ERR(sc, "no further txbuf available, dropping packet\n");
1485 spin_unlock_irqrestore(&sc->txbuflock, flags);
1486 ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
1487 goto drop_packet;
1488 }
1489 bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list);
1490 list_del(&bf->list);
1491 sc->txbuf_len--;
1492 if (list_empty(&sc->txbuf))
1493 ieee80211_stop_queues(hw);
1494 spin_unlock_irqrestore(&sc->txbuflock, flags);
1495
1496 bf->skb = skb;
1497
1498 if (ath5k_txbuf_setup(sc, bf, txq, padsize)) {
1499 bf->skb = NULL;
1500 spin_lock_irqsave(&sc->txbuflock, flags);
1501 list_add_tail(&bf->list, &sc->txbuf);
1502 sc->txbuf_len++;
1503 spin_unlock_irqrestore(&sc->txbuflock, flags);
1504 goto drop_packet;
1505 }
1506 return NETDEV_TX_OK;
1507
1508drop_packet:
1509 dev_kfree_skb_any(skb);
1510 return NETDEV_TX_OK;
1511}
1512
1513
static void
ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
{
@@ -2309,6 +1703,43 @@ err_unmap:
}

/*
1706 * Updates the beacon that is sent by ath5k_beacon_send. For adhoc,
1707 * this is called only once at config_bss time, for AP we do it every
1708 * SWBA interrupt so that the TIM will reflect buffered frames.
1709 *
1710 * Called with the beacon lock.
1711 */
1712static int
1713ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1714{
1715 int ret;
1716 struct ath5k_softc *sc = hw->priv;
1717 struct sk_buff *skb;
1718
1719 if (WARN_ON(!vif)) {
1720 ret = -EINVAL;
1721 goto out;
1722 }
1723
1724 skb = ieee80211_beacon_get(hw, vif);
1725
1726 if (!skb) {
1727 ret = -ENOMEM;
1728 goto out;
1729 }
1730
1731 ath5k_debug_dump_skb(sc, skb, "BC ", 1);
1732
1733 ath5k_txbuf_free_skb(sc, sc->bbuf);
1734 sc->bbuf->skb = skb;
1735 ret = ath5k_beacon_setup(sc, sc->bbuf);
1736 if (ret)
1737 sc->bbuf->skb = NULL;
1738out:
1739 return ret;
1740}
1741
1742/*
 * Transmit a beacon frame at SWBA. Dynamic updates to the
 * frame contents are done as needed and the slot time is
 * also adjusted based on current state.
@@ -2385,7 +1816,6 @@ ath5k_beacon_send(struct ath5k_softc *sc)
	sc->bsent++;
}

2388
/**
 * ath5k_beacon_update_timers - update beacon timers
 *
@@ -2487,7 +1917,6 @@ ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
		intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : "");
}

2490
/**
 * ath5k_beacon_config - Configure the beacon queues and interrupts
 *
@@ -2566,156 +1995,6 @@ static void ath5k_tasklet_beacon(unsigned long data)
* Interrupt handling *
\********************/

2569static int
2570ath5k_init(struct ath5k_softc *sc)
2571{
2572 struct ath5k_hw *ah = sc->ah;
2573 struct ath_common *common = ath5k_hw_common(ah);
2574 int ret, i;
2575
2576 mutex_lock(&sc->lock);
2577
2578 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode);
2579
2580 /*
2581 * Stop anything previously setup. This is safe
2582 * no matter this is the first time through or not.
2583 */
2584 ath5k_stop_locked(sc);
2585
2586 /*
2587 * The basic interface to setting the hardware in a good
2588 * state is ``reset''. On return the hardware is known to
2589 * be powered up and with interrupts disabled. This must
2590 * be followed by initialization of the appropriate bits
2591 * and then setup of the interrupt mask.
2592 */
2593 sc->curchan = sc->hw->conf.channel;
2594 sc->curband = &sc->sbands[sc->curchan->band];
2595 sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
2596 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
2597 AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
2598
2599 ret = ath5k_reset(sc, NULL);
2600 if (ret)
2601 goto done;
2602
2603 ath5k_rfkill_hw_start(ah);
2604
2605 /*
2606 * Reset the key cache since some parts do not reset the
2607 * contents on initial power up or resume from suspend.
2608 */
2609 for (i = 0; i < common->keymax; i++)
2610 ath_hw_keyreset(common, (u16)i);
2611
2612 ath5k_hw_set_ack_bitrate_high(ah, true);
2613 ret = 0;
2614done:
2615 mmiowb();
2616 mutex_unlock(&sc->lock);
2617 return ret;
2618}
2619
2620static int
2621ath5k_stop_locked(struct ath5k_softc *sc)
2622{
2623 struct ath5k_hw *ah = sc->ah;
2624
2625 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "invalid %u\n",
2626 test_bit(ATH_STAT_INVALID, sc->status));
2627
2628 /*
2629 * Shutdown the hardware and driver:
2630 * stop output from above
2631 * disable interrupts
2632 * turn off timers
2633 * turn off the radio
2634 * clear transmit machinery
2635 * clear receive machinery
2636 * drain and release tx queues
2637 * reclaim beacon resources
2638 * power down hardware
2639 *
2640 * Note that some of this work is not possible if the
2641 * hardware is gone (invalid).
2642 */
2643 ieee80211_stop_queues(sc->hw);
2644
2645 if (!test_bit(ATH_STAT_INVALID, sc->status)) {
2646 ath5k_led_off(sc);
2647 ath5k_hw_set_imr(ah, 0);
2648 synchronize_irq(sc->pdev->irq);
2649 }
2650 ath5k_txq_cleanup(sc);
2651 if (!test_bit(ATH_STAT_INVALID, sc->status)) {
2652 ath5k_rx_stop(sc);
2653 ath5k_hw_phy_disable(ah);
2654 }
2655
2656 return 0;
2657}
2658
2659static void stop_tasklets(struct ath5k_softc *sc)
2660{
2661 tasklet_kill(&sc->rxtq);
2662 tasklet_kill(&sc->txtq);
2663 tasklet_kill(&sc->calib);
2664 tasklet_kill(&sc->beacontq);
2665 tasklet_kill(&sc->ani_tasklet);
2666}
2667
2668/*
2669 * Stop the device, grabbing the top-level lock to protect
2670 * against concurrent entry through ath5k_init (which can happen
2671 * if another thread does a system call and the thread doing the
2672 * stop is preempted).
2673 */
2674static int
2675ath5k_stop_hw(struct ath5k_softc *sc)
2676{
2677 int ret;
2678
2679 mutex_lock(&sc->lock);
2680 ret = ath5k_stop_locked(sc);
2681 if (ret == 0 && !test_bit(ATH_STAT_INVALID, sc->status)) {
2682 /*
2683 * Don't set the card in full sleep mode!
2684 *
2685 * a) When the device is in this state it must be carefully
2686 * woken up or references to registers in the PCI clock
2687 * domain may freeze the bus (and system). This varies
2688 * by chip and is mostly an issue with newer parts
2689 * (madwifi sources mentioned srev >= 0x78) that go to
2690 * sleep more quickly.
2691 *
2692 * b) On older chips full sleep results a weird behaviour
2693 * during wakeup. I tested various cards with srev < 0x78
2694 * and they don't wake up after module reload, a second
2695 * module reload is needed to bring the card up again.
2696 *
2697 * Until we figure out what's going on don't enable
2698 * full chip reset on any chip (this is what Legacy HAL
2699 * and Sam's HAL do anyway). Instead Perform a full reset
2700 * on the device (same as initial state after attach) and
2701 * leave it idle (keep MAC/BB on warm reset) */
2702 ret = ath5k_hw_on_hold(sc->ah);
2703
2704 ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
2705 "putting device to sleep\n");
2706 }
2707 ath5k_txbuf_free_skb(sc, sc->bbuf);
2708
2709 mmiowb();
2710 mutex_unlock(&sc->lock);
2711
2712 stop_tasklets(sc);
2713
2714 ath5k_rfkill_hw_stop(sc->ah);
2715
2716 return ret;
2717}
2718
static void
ath5k_intr_calibration_poll(struct ath5k_hw *ah)
{
@@ -2878,68 +2157,158 @@ ath5k_tasklet_ani(unsigned long data)
}


-/********************\
-* Mac80211 functions *
-\********************/
-
-static int
-ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
-	struct ath5k_softc *sc = hw->priv;
-
-	return ath5k_tx_queue(hw, skb, sc->txq);
-}
-
-static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
-			  struct ath5k_txq *txq)
-{
-	struct ath5k_softc *sc = hw->priv;
-	struct ath5k_buf *bf;
-	unsigned long flags;
-	int padsize;
-
-	ath5k_debug_dump_skb(sc, skb, "TX ", 1);
-
-	/*
-	 * The hardware expects the header padded to 4 byte boundaries.
-	 * If this is not the case, we add the padding after the header.
-	 */
-	padsize = ath5k_add_padding(skb);
-	if (padsize < 0) {
-		ATH5K_ERR(sc, "tx hdrlen not %%4: not enough"
-			  " headroom to pad");
-		goto drop_packet;
-	}
-
-	spin_lock_irqsave(&sc->txbuflock, flags);
-	if (list_empty(&sc->txbuf)) {
-		ATH5K_ERR(sc, "no further txbuf available, dropping packet\n");
-		spin_unlock_irqrestore(&sc->txbuflock, flags);
-		ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
-		goto drop_packet;
-	}
-	bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list);
-	list_del(&bf->list);
-	sc->txbuf_len--;
-	if (list_empty(&sc->txbuf))
-		ieee80211_stop_queues(hw);
-	spin_unlock_irqrestore(&sc->txbuflock, flags);
-
-	bf->skb = skb;
-
-	if (ath5k_txbuf_setup(sc, bf, txq, padsize)) {
-		bf->skb = NULL;
-		spin_lock_irqsave(&sc->txbuflock, flags);
-		list_add_tail(&bf->list, &sc->txbuf);
-		sc->txbuf_len++;
-		spin_unlock_irqrestore(&sc->txbuflock, flags);
-		goto drop_packet;
-	}
-	return NETDEV_TX_OK;
-
-drop_packet:
-	dev_kfree_skb_any(skb);
-	return NETDEV_TX_OK;
+/*************************\
+* Initialization routines *
+\*************************/
+
+static int
+ath5k_stop_locked(struct ath5k_softc *sc)
+{
+	struct ath5k_hw *ah = sc->ah;
+
+	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "invalid %u\n",
+			test_bit(ATH_STAT_INVALID, sc->status));
+
+	/*
+	 * Shutdown the hardware and driver:
+	 *    stop output from above
+	 *    disable interrupts
+	 *    turn off timers
+	 *    turn off the radio
+	 *    clear transmit machinery
+	 *    clear receive machinery
+	 *    drain and release tx queues
+	 *    reclaim beacon resources
+	 *    power down hardware
+	 *
+	 * Note that some of this work is not possible if the
+	 * hardware is gone (invalid).
+	 */
+	ieee80211_stop_queues(sc->hw);
+
+	if (!test_bit(ATH_STAT_INVALID, sc->status)) {
+		ath5k_led_off(sc);
+		ath5k_hw_set_imr(ah, 0);
+		synchronize_irq(sc->pdev->irq);
+	}
+	ath5k_txq_cleanup(sc);
+	if (!test_bit(ATH_STAT_INVALID, sc->status)) {
+		ath5k_rx_stop(sc);
+		ath5k_hw_phy_disable(ah);
+	}
+
+	return 0;
+}
+
+static int
+ath5k_init(struct ath5k_softc *sc)
+{
+	struct ath5k_hw *ah = sc->ah;
+	struct ath_common *common = ath5k_hw_common(ah);
+	int ret, i;
+
+	mutex_lock(&sc->lock);
+
+	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode);
+
+	/*
+	 * Stop anything previously setup. This is safe
+	 * no matter this is the first time through or not.
+	 */
+	ath5k_stop_locked(sc);
+
+	/*
+	 * The basic interface to setting the hardware in a good
+	 * state is ``reset''. On return the hardware is known to
+	 * be powered up and with interrupts disabled. This must
+	 * be followed by initialization of the appropriate bits
+	 * and then setup of the interrupt mask.
+	 */
+	sc->curchan = sc->hw->conf.channel;
+	sc->curband = &sc->sbands[sc->curchan->band];
+	sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
+		AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
+		AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
+
+	ret = ath5k_reset(sc, NULL);
+	if (ret)
+		goto done;
+
+	ath5k_rfkill_hw_start(ah);
+
+	/*
+	 * Reset the key cache since some parts do not reset the
+	 * contents on initial power up or resume from suspend.
+	 */
+	for (i = 0; i < common->keymax; i++)
+		ath_hw_keyreset(common, (u16) i);
+
+	ath5k_hw_set_ack_bitrate_high(ah, true);
+	ret = 0;
+done:
+	mmiowb();
+	mutex_unlock(&sc->lock);
+	return ret;
+}
+
+static void stop_tasklets(struct ath5k_softc *sc)
+{
+	tasklet_kill(&sc->rxtq);
+	tasklet_kill(&sc->txtq);
+	tasklet_kill(&sc->calib);
+	tasklet_kill(&sc->beacontq);
+	tasklet_kill(&sc->ani_tasklet);
+}
+
+/*
+ * Stop the device, grabbing the top-level lock to protect
+ * against concurrent entry through ath5k_init (which can happen
+ * if another thread does a system call and the thread doing the
+ * stop is preempted).
+ */
+static int
+ath5k_stop_hw(struct ath5k_softc *sc)
+{
+	int ret;
+
+	mutex_lock(&sc->lock);
+	ret = ath5k_stop_locked(sc);
+	if (ret == 0 && !test_bit(ATH_STAT_INVALID, sc->status)) {
+		/*
+		 * Don't set the card in full sleep mode!
+		 *
+		 * a) When the device is in this state it must be carefully
+		 * woken up or references to registers in the PCI clock
+		 * domain may freeze the bus (and system). This varies
+		 * by chip and is mostly an issue with newer parts
+		 * (madwifi sources mentioned srev >= 0x78) that go to
+		 * sleep more quickly.
+		 *
+		 * b) On older chips full sleep results a weird behaviour
+		 * during wakeup. I tested various cards with srev < 0x78
+		 * and they don't wake up after module reload, a second
+		 * module reload is needed to bring the card up again.
+		 *
+		 * Until we figure out what's going on don't enable
+		 * full chip reset on any chip (this is what Legacy HAL
+		 * and Sam's HAL do anyway). Instead Perform a full reset
+		 * on the device (same as initial state after attach) and
+		 * leave it idle (keep MAC/BB on warm reset) */
+		ret = ath5k_hw_on_hold(sc->ah);
+
+		ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+				"putting device to sleep\n");
+	}
+	ath5k_txbuf_free_skb(sc, sc->bbuf);
+
+	mmiowb();
+	mutex_unlock(&sc->lock);
+
+	stop_tasklets(sc);
+
+	ath5k_rfkill_hw_stop(sc->ah);
+
+	return ret;
}

/*
@@ -3016,6 +2385,179 @@ static void ath5k_reset_work(struct work_struct *work)
	mutex_unlock(&sc->lock);
}

2388static int
2389ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
2390{
2391 struct ath5k_softc *sc = hw->priv;
2392 struct ath5k_hw *ah = sc->ah;
2393 struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
2394 u8 mac[ETH_ALEN] = {};
2395 int ret;
2396
2397 ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device);
2398
2399 /*
2400 * Check if the MAC has multi-rate retry support.
2401 * We do this by trying to setup a fake extended
2402 * descriptor. MACs that don't have support will
2403 * return false w/o doing anything. MACs that do
2404 * support it will return true w/o doing anything.
2405 */
2406 ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
2407
2408 if (ret < 0)
2409 goto err;
2410 if (ret > 0)
2411 __set_bit(ATH_STAT_MRRETRY, sc->status);
2412
2413 /*
2414 * Collect the channel list. The 802.11 layer
2415 * is resposible for filtering this list based
2416 * on settings like the phy mode and regulatory
2417 * domain restrictions.
2418 */
2419 ret = ath5k_setup_bands(hw);
2420 if (ret) {
2421 ATH5K_ERR(sc, "can't get channels\n");
2422 goto err;
2423 }
2424
2425 /* NB: setup here so ath5k_rate_update is happy */
2426 if (test_bit(AR5K_MODE_11A, ah->ah_modes))
2427 ath5k_setcurmode(sc, AR5K_MODE_11A);
2428 else
2429 ath5k_setcurmode(sc, AR5K_MODE_11B);
2430
2431 /*
2432 * Allocate tx+rx descriptors and populate the lists.
2433 */
2434 ret = ath5k_desc_alloc(sc, pdev);
2435 if (ret) {
2436 ATH5K_ERR(sc, "can't allocate descriptors\n");
2437 goto err;
2438 }
2439
2440 /*
2441 * Allocate hardware transmit queues: one queue for
2442 * beacon frames and one data queue for each QoS
2443 * priority. Note that hw functions handle resetting
2444 * these queues at the needed time.
2445 */
2446 ret = ath5k_beaconq_setup(ah);
2447 if (ret < 0) {
2448 ATH5K_ERR(sc, "can't setup a beacon xmit queue\n");
2449 goto err_desc;
2450 }
2451 sc->bhalq = ret;
2452 sc->cabq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_CAB, 0);
2453 if (IS_ERR(sc->cabq)) {
2454 ATH5K_ERR(sc, "can't setup cab queue\n");
2455 ret = PTR_ERR(sc->cabq);
2456 goto err_bhal;
2457 }
2458
2459 sc->txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
2460 if (IS_ERR(sc->txq)) {
2461 ATH5K_ERR(sc, "can't setup xmit queue\n");
2462 ret = PTR_ERR(sc->txq);
2463 goto err_queues;
2464 }
2465
2466 tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc);
2467 tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc);
2468 tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
2469 tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
2470 tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);
2471
2472 INIT_WORK(&sc->reset_work, ath5k_reset_work);
2473
2474 ret = ath5k_eeprom_read_mac(ah, mac);
2475 if (ret) {
2476 ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n",
2477 sc->pdev->device);
2478 goto err_queues;
2479 }
2480
2481 SET_IEEE80211_PERM_ADDR(hw, mac);
2482 /* All MAC address bits matter for ACKs */
2483 memcpy(sc->bssidmask, ath_bcast_mac, ETH_ALEN);
2484 ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
2485
2486 regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
2487 ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier);
2488 if (ret) {
2489 ATH5K_ERR(sc, "can't initialize regulatory system\n");
2490 goto err_queues;
2491 }
2492
2493 ret = ieee80211_register_hw(hw);
2494 if (ret) {
2495 ATH5K_ERR(sc, "can't register ieee80211 hw\n");
2496 goto err_queues;
2497 }
2498
2499 if (!ath_is_world_regd(regulatory))
2500 regulatory_hint(hw->wiphy, regulatory->alpha2);
2501
2502 ath5k_init_leds(sc);
2503
2504 ath5k_sysfs_register(sc);
2505
2506 return 0;
2507err_queues:
2508 ath5k_txq_release(sc);
2509err_bhal:
2510 ath5k_hw_release_tx_queue(ah, sc->bhalq);
2511err_desc:
2512 ath5k_desc_free(sc, pdev);
2513err:
2514 return ret;
2515}
2516
2517static void
2518ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw)
2519{
2520 struct ath5k_softc *sc = hw->priv;
2521
2522 /*
2523 * NB: the order of these is important:
2524 * o call the 802.11 layer before detaching ath5k_hw to
2525 * ensure callbacks into the driver to delete global
2526 * key cache entries can be handled
2527 * o reclaim the tx queue data structures after calling
2528 * the 802.11 layer as we'll get called back to reclaim
2529 * node state and potentially want to use them
2530 * o to cleanup the tx queues the hal is called, so detach
2531 * it last
2532 * XXX: ??? detach ath5k_hw ???
2533 * Other than that, it's straightforward...
2534 */
2535 ieee80211_unregister_hw(hw);
2536 ath5k_desc_free(sc, pdev);
2537 ath5k_txq_release(sc);
2538 ath5k_hw_release_tx_queue(sc->ah, sc->bhalq);
2539 ath5k_unregister_leds(sc);
2540
2541 ath5k_sysfs_unregister(sc);
2542 /*
2543 * NB: can't reclaim these until after ieee80211_ifdetach
2544 * returns because we'll get called back to reclaim node
2545 * state and potentially want to use them.
2546 */
2547}
2548
2549/********************\
2550* Mac80211 functions *
2551\********************/
2552
2553static int
2554ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2555{
2556 struct ath5k_softc *sc = hw->priv;
2557
2558 return ath5k_tx_queue(hw, skb, sc->txq);
2559}
2560
static int ath5k_start(struct ieee80211_hw *hw)
{
	return ath5k_init(hw->priv);
@@ -3394,43 +2936,6 @@ ath5k_reset_tsf(struct ieee80211_hw *hw)
	ath5k_hw_reset_tsf(sc->ah);
}

3397/*
3398 * Updates the beacon that is sent by ath5k_beacon_send. For adhoc,
3399 * this is called only once at config_bss time, for AP we do it every
3400 * SWBA interrupt so that the TIM will reflect buffered frames.
3401 *
3402 * Called with the beacon lock.
3403 */
3404static int
3405ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
3406{
3407 int ret;
3408 struct ath5k_softc *sc = hw->priv;
3409 struct sk_buff *skb;
3410
3411 if (WARN_ON(!vif)) {
3412 ret = -EINVAL;
3413 goto out;
3414 }
3415
3416 skb = ieee80211_beacon_get(hw, vif);
3417
3418 if (!skb) {
3419 ret = -ENOMEM;
3420 goto out;
3421 }
3422
3423 ath5k_debug_dump_skb(sc, skb, "BC ", 1);
3424
3425 ath5k_txbuf_free_skb(sc, sc->bbuf);
3426 sc->bbuf->skb = skb;
3427 ret = ath5k_beacon_setup(sc, sc->bbuf);
3428 if (ret)
3429 sc->bbuf->skb = NULL;
3430out:
3431 return ret;
3432}
3433
static void
set_beacon_filter(struct ieee80211_hw *hw, bool enable)
{
@@ -3536,3 +3041,364 @@ static void ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
	ath5k_hw_set_coverage_class(sc->ah, coverage_class);
	mutex_unlock(&sc->lock);
}
3044
3045static const struct ieee80211_ops ath5k_hw_ops = {
3046 .tx = ath5k_tx,
3047 .start = ath5k_start,
3048 .stop = ath5k_stop,
3049 .add_interface = ath5k_add_interface,
3050 .remove_interface = ath5k_remove_interface,
3051 .config = ath5k_config,
3052 .prepare_multicast = ath5k_prepare_multicast,
3053 .configure_filter = ath5k_configure_filter,
3054 .set_key = ath5k_set_key,
3055 .get_stats = ath5k_get_stats,
3056 .get_survey = ath5k_get_survey,
3057 .conf_tx = NULL,
3058 .get_tsf = ath5k_get_tsf,
3059 .set_tsf = ath5k_set_tsf,
3060 .reset_tsf = ath5k_reset_tsf,
3061 .bss_info_changed = ath5k_bss_info_changed,
3062 .sw_scan_start = ath5k_sw_scan_start,
3063 .sw_scan_complete = ath5k_sw_scan_complete,
3064 .set_coverage_class = ath5k_set_coverage_class,
3065};
3066
3067/********************\
3068* PCI Initialization *
3069\********************/
3070
3071static int __devinit
3072ath5k_pci_probe(struct pci_dev *pdev,
3073 const struct pci_device_id *id)
3074{
3075 void __iomem *mem;
3076 struct ath5k_softc *sc;
3077 struct ath_common *common;
3078 struct ieee80211_hw *hw;
3079 int ret;
3080 u8 csz;
3081
3082 /*
3083 * L0s needs to be disabled on all ath5k cards.
3084 *
3085 * For distributions shipping with CONFIG_PCIEASPM (this will be enabled
3086 * by default in the future in 2.6.36) this will also mean both L1 and
3087 * L0s will be disabled when a pre 1.1 PCIe device is detected. We do
3088 * know L1 works correctly even for all ath5k pre 1.1 PCIe devices
3089 * though but cannot currently undue the effect of a blacklist, for
3090 * details you can read pcie_aspm_sanity_check() and see how it adjusts
3091 * the device link capability.
3092 *
3093 * It may be possible in the future to implement some PCI API to allow
3094 * drivers to override blacklists for pre 1.1 PCIe but for now it is
3095 * best to accept that both L0s and L1 will be disabled completely for
3096 * distributions shipping with CONFIG_PCIEASPM rather than having this
3097 * issue present. Motivation for adding this new API will be to help
3098 * with power consumption for some of these devices.
3099 */
3100 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
3101
3102 ret = pci_enable_device(pdev);
3103 if (ret) {
3104 dev_err(&pdev->dev, "can't enable device\n");
3105 goto err;
3106 }
3107
3108 /* XXX 32-bit addressing only */
3109 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3110 if (ret) {
3111 dev_err(&pdev->dev, "32-bit DMA not available\n");
3112 goto err_dis;
3113 }
3114
3115 /*
3116 * Cache line size is used to size and align various
3117 * structures used to communicate with the hardware.
3118 */
3119 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
3120 if (csz == 0) {
3121 /*
3122 * Linux 2.4.18 (at least) writes the cache line size
3123 * register as a 16-bit wide register which is wrong.
3124 * We must have this setup properly for rx buffer
3125 * DMA to work so force a reasonable value here if it
3126 * comes up zero.
3127 */
3128 csz = L1_CACHE_BYTES >> 2;
3129 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
3130 }
3131 /*
3132 * The default setting of latency timer yields poor results,
3133 * set it to the value used by other systems. It may be worth
3134 * tweaking this setting more.
3135 */
3136 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
3137
3138 /* Enable bus mastering */
3139 pci_set_master(pdev);
3140
3141 /*
3142 * Disable the RETRY_TIMEOUT register (0x41) to keep
3143 * PCI Tx retries from interfering with C3 CPU state.
3144 */
3145 pci_write_config_byte(pdev, 0x41, 0);
3146
3147 ret = pci_request_region(pdev, 0, "ath5k");
3148 if (ret) {
3149 dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
3150 goto err_dis;
3151 }
3152
3153 mem = pci_iomap(pdev, 0, 0);
3154 if (!mem) {
3155 dev_err(&pdev->dev, "cannot remap PCI memory region\n") ;
3156 ret = -EIO;
3157 goto err_reg;
3158 }
3159
3160 /*
3161 * Allocate hw (mac80211 main struct)
3162 * and hw->priv (driver private data)
3163 */
3164 hw = ieee80211_alloc_hw(sizeof(*sc), &ath5k_hw_ops);
3165 if (hw == NULL) {
3166 dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n");
3167 ret = -ENOMEM;
3168 goto err_map;
3169 }
3170
3171 dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy));
3172
3173 /* Initialize driver private data */
3174 SET_IEEE80211_DEV(hw, &pdev->dev);
3175 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
3176 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
3177 IEEE80211_HW_SIGNAL_DBM;
3178
3179 hw->wiphy->interface_modes =
3180 BIT(NL80211_IFTYPE_AP) |
3181 BIT(NL80211_IFTYPE_STATION) |
3182 BIT(NL80211_IFTYPE_ADHOC) |
3183 BIT(NL80211_IFTYPE_MESH_POINT);
3184
3185 hw->extra_tx_headroom = 2;
3186 hw->channel_change_time = 5000;
3187 sc = hw->priv;
3188 sc->hw = hw;
3189 sc->pdev = pdev;
3190
3191 ath5k_debug_init_device(sc);
3192
3193 /*
3194 * Mark the device as detached to avoid processing
3195 * interrupts until setup is complete.
3196 */
3197 __set_bit(ATH_STAT_INVALID, sc->status);
3198
3199 sc->iobase = mem; /* So we can unmap it on detach */
3200 sc->opmode = NL80211_IFTYPE_STATION;
3201 sc->bintval = 1000;
3202 mutex_init(&sc->lock);
3203 spin_lock_init(&sc->rxbuflock);
3204 spin_lock_init(&sc->txbuflock);
3205 spin_lock_init(&sc->block);
3206
3207 /* Set private data */
3208 pci_set_drvdata(pdev, sc);
3209
3210 /* Setup interrupt handler */
3211 ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
3212 if (ret) {
3213 ATH5K_ERR(sc, "request_irq failed\n");
3214 goto err_free;
3215 }
3216
3217 /* If we passed the test, malloc an ath5k_hw struct */
3218 sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
3219 if (!sc->ah) {
3220 ret = -ENOMEM;
3221 ATH5K_ERR(sc, "out of memory\n");
3222 goto err_irq;
3223 }
3224
3225 sc->ah->ah_sc = sc;
3226 sc->ah->ah_iobase = sc->iobase;
3227 common = ath5k_hw_common(sc->ah);
3228 common->ops = &ath5k_common_ops;
3229 common->ah = sc->ah;
3230 common->hw = hw;
3231 common->cachelsz = csz << 2; /* convert to bytes */
3232
3233 /* Initialize device */
3234 ret = ath5k_hw_attach(sc);
3235 if (ret) {
3236 goto err_free_ah;
3237 }
3238
3239 /* set up multi-rate retry capabilities */
3240 if (sc->ah->ah_version == AR5K_AR5212) {
3241 hw->max_rates = 4;
3242 hw->max_rate_tries = 11;
3243 }
3244
3245 /* Finish private driver data initialization */
3246 ret = ath5k_attach(pdev, hw);
3247 if (ret)
3248 goto err_ah;
3249
3250 ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
3251 ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
3252 sc->ah->ah_mac_srev,
3253 sc->ah->ah_phy_revision);
3254
3255 if (!sc->ah->ah_single_chip) {
3256 /* Single chip radio (!RF5111) */
3257 if (sc->ah->ah_radio_5ghz_revision &&
3258 !sc->ah->ah_radio_2ghz_revision) {
3259 /* No 5GHz support -> report 2GHz radio */
3260 if (!test_bit(AR5K_MODE_11A,
3261 sc->ah->ah_capabilities.cap_mode)) {
3262 ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
3263 ath5k_chip_name(AR5K_VERSION_RAD,
3264 sc->ah->ah_radio_5ghz_revision),
3265 sc->ah->ah_radio_5ghz_revision);
3266 /* No 2GHz support (5110 and some
3267 * 5Ghz only cards) -> report 5Ghz radio */
3268 } else if (!test_bit(AR5K_MODE_11B,
3269 sc->ah->ah_capabilities.cap_mode)) {
3270 ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
3271 ath5k_chip_name(AR5K_VERSION_RAD,
3272 sc->ah->ah_radio_5ghz_revision),
3273 sc->ah->ah_radio_5ghz_revision);
3274 /* Multiband radio */
3275 } else {
3276 ATH5K_INFO(sc, "RF%s multiband radio found"
3277 " (0x%x)\n",
3278 ath5k_chip_name(AR5K_VERSION_RAD,
3279 sc->ah->ah_radio_5ghz_revision),
3280 sc->ah->ah_radio_5ghz_revision);
3281 }
3282 }
3283 /* Multi chip radio (RF5111 - RF2111) ->
3284 * report both 2GHz/5GHz radios */
3285 else if (sc->ah->ah_radio_5ghz_revision &&
3286 sc->ah->ah_radio_2ghz_revision){
3287 ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
3288 ath5k_chip_name(AR5K_VERSION_RAD,
3289 sc->ah->ah_radio_5ghz_revision),
3290 sc->ah->ah_radio_5ghz_revision);
3291 ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
3292 ath5k_chip_name(AR5K_VERSION_RAD,
3293 sc->ah->ah_radio_2ghz_revision),
3294 sc->ah->ah_radio_2ghz_revision);
3295 }
3296 }
3297
3298
3299 /* ready to process interrupts */
3300 __clear_bit(ATH_STAT_INVALID, sc->status);
3301
3302 return 0;
3303err_ah:
3304 ath5k_hw_detach(sc->ah);
3305err_free_ah:
3306 kfree(sc->ah);
3307err_irq:
3308 free_irq(pdev->irq, sc);
3309err_free:
3310 ieee80211_free_hw(hw);
3311err_map:
3312 pci_iounmap(pdev, mem);
3313err_reg:
3314 pci_release_region(pdev, 0);
3315err_dis:
3316 pci_disable_device(pdev);
3317err:
3318 return ret;
3319}
3320
3321static void __devexit
3322ath5k_pci_remove(struct pci_dev *pdev)
3323{
3324 struct ath5k_softc *sc = pci_get_drvdata(pdev);
3325
3326 ath5k_debug_finish_device(sc);
3327 ath5k_detach(pdev, sc->hw);
3328 ath5k_hw_detach(sc->ah);
3329 kfree(sc->ah);
3330 free_irq(pdev->irq, sc);
3331 pci_iounmap(pdev, sc->iobase);
3332 pci_release_region(pdev, 0);
3333 pci_disable_device(pdev);
3334 ieee80211_free_hw(sc->hw);
3335}
3336
3337#ifdef CONFIG_PM_SLEEP
3338static int ath5k_pci_suspend(struct device *dev)
3339{
3340 struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev));
3341
3342 ath5k_led_off(sc);
3343 return 0;
3344}
3345
3346static int ath5k_pci_resume(struct device *dev)
3347{
3348 struct pci_dev *pdev = to_pci_dev(dev);
3349 struct ath5k_softc *sc = pci_get_drvdata(pdev);
3350
3351 /*
3352 * Suspend/Resume resets the PCI configuration space, so we have to
3353 * re-disable the RETRY_TIMEOUT register (0x41) to keep
3354 * PCI Tx retries from interfering with C3 CPU state
3355 */
3356 pci_write_config_byte(pdev, 0x41, 0);
3357
3358 ath5k_led_enable(sc);
3359 return 0;
3360}
3361
3362static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
3363#define ATH5K_PM_OPS (&ath5k_pm_ops)
3364#else
3365#define ATH5K_PM_OPS NULL
3366#endif /* CONFIG_PM_SLEEP */
3367
3368static struct pci_driver ath5k_pci_driver = {
3369 .name = KBUILD_MODNAME,
3370 .id_table = ath5k_pci_id_table,
3371 .probe = ath5k_pci_probe,
3372 .remove = __devexit_p(ath5k_pci_remove),
3373 .driver.pm = ATH5K_PM_OPS,
3374};
3375
3376/*
3377 * Module init/exit functions
3378 */
3379static int __init
3380init_ath5k_pci(void)
3381{
3382 int ret;
3383
3384 ath5k_debug_init();
3385
3386 ret = pci_register_driver(&ath5k_pci_driver);
3387 if (ret) {
3388 printk(KERN_ERR "ath5k_pci: can't register pci driver\n");
3389 return ret;
3390 }
3391
3392 return 0;
3393}
3394
3395static void __exit
3396exit_ath5k_pci(void)
3397{
3398 pci_unregister_driver(&ath5k_pci_driver);
3399
3400 ath5k_debug_finish();
3401}
3402
3403module_init(init_ath5k_pci);
3404module_exit(exit_ath5k_pci);