author	Dave Jones <davej@redhat.com>	2009-07-23 21:11:12 -0400
committer	David S. Miller <davem@davemloft.net>	2009-07-23 21:11:12 -0400
commit	2cf71d2e388cb0076b03f40f2fadfc590c228461 (patch)
tree	472f6569c6b7a7abf34bffe3af2a3004d189b238 /drivers/net/via-velocity.c
parent	c40674001b162f9218ba2a6f26188177c6a4e763 (diff)
Remove unnecessary forward declarations from velocity NIC driver.
By moving functions to before their first call, we eliminate the need
to define forward references.

Signed-off-by: Dave Jones <davej@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
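The pattern behind the whole patch, sketched as a minimal hypothetical example (the `helper`/`caller` names are illustrative only, not code from this driver): once a static function is defined above its first caller, the separate prototype no longer serves any purpose and can simply be deleted.

	/* Before: helper() is called before it is defined, so a
	 * forward declaration is required for the file to compile. */
	static int helper(int x);

	static int caller(void)
	{
		return helper(2);	/* resolved via the forward declaration */
	}

	static int helper(int x)
	{
		return x * x;
	}

	/* After: the definition is moved above its first call and the
	 * forward declaration is removed. */
	static int helper(int x)
	{
		return x * x;
	}

	static int caller(void)
	{
		return helper(2);	/* sees the full definition */
	}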
Diffstat (limited to 'drivers/net/via-velocity.c')
-rw-r--r--	drivers/net/via-velocity.c	3261
1 file changed, 1580 insertions, 1681 deletions
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index d6a92b794f35..47be41a39d35 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -92,7 +92,6 @@ static int msglevel = MSG_LEVEL_INFO;
  *	Fetch the mask bits of the selected CAM and store them into the
  *	provided mask buffer.
  */
-
 static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
 {
 	int i;
@@ -121,7 +120,6 @@ static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
  *
  *	Store a new mask into a CAM
  */
-
 static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
 {
 	int i;
@@ -166,7 +164,6 @@ static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
  *
  *	Load an address or vlan tag into a CAM
  */
-
 static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr)
 {
 	int i;
@@ -222,7 +219,6 @@ static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx,
  *	reset the Wake on lan features. This function doesn't restore
  *	the rest of the logic from the result of sleep/wakeup
  */
-
 static void mac_wol_reset(struct mac_regs __iomem *regs)
 {
 
@@ -241,7 +237,6 @@ static void mac_wol_reset(struct mac_regs __iomem *regs)
 	writew(0xFFFF, &regs->WOLSRClr);
 }
 
-static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
 static const struct ethtool_ops velocity_ethtool_ops;
 
 /*
@@ -369,76 +364,14 @@ static int rx_copybreak = 200;
 module_param(rx_copybreak, int, 0644);
 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
 
-static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr,
-			       const struct velocity_info_tbl *info);
-static int velocity_get_pci_info(struct velocity_info *, struct pci_dev *pdev);
-static void velocity_print_info(struct velocity_info *vptr);
-static int velocity_open(struct net_device *dev);
-static int velocity_change_mtu(struct net_device *dev, int mtu);
-static int velocity_xmit(struct sk_buff *skb, struct net_device *dev);
-static irqreturn_t velocity_intr(int irq, void *dev_instance);
-static void velocity_set_multi(struct net_device *dev);
-static struct net_device_stats *velocity_get_stats(struct net_device *dev);
-static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static int velocity_close(struct net_device *dev);
-static int velocity_receive_frame(struct velocity_info *, int idx);
-static int velocity_alloc_rx_buf(struct velocity_info *, int idx);
-static void velocity_free_rd_ring(struct velocity_info *vptr);
-static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *);
-static int velocity_soft_reset(struct velocity_info *vptr);
-static void mii_init(struct velocity_info *vptr, u32 mii_status);
-static u32 velocity_get_link(struct net_device *dev);
-static u32 velocity_get_opt_media_mode(struct velocity_info *vptr);
-static void velocity_print_link_status(struct velocity_info *vptr);
-static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs);
-static void velocity_shutdown(struct velocity_info *vptr);
-static void enable_flow_control_ability(struct velocity_info *vptr);
-static void enable_mii_autopoll(struct mac_regs __iomem *regs);
-static int velocity_mii_read(struct mac_regs __iomem *, u8 byIdx, u16 *pdata);
-static int velocity_mii_write(struct mac_regs __iomem *, u8 byMiiAddr, u16 data);
-static u32 mii_check_media_mode(struct mac_regs __iomem *regs);
-static u32 check_connection_type(struct mac_regs __iomem *regs);
-static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status);
-
 #ifdef CONFIG_PM
-
-static int velocity_suspend(struct pci_dev *pdev, pm_message_t state);
-static int velocity_resume(struct pci_dev *pdev);
-
 static DEFINE_SPINLOCK(velocity_dev_list_lock);
 static LIST_HEAD(velocity_dev_list);
-
-#endif
-
-#if defined(CONFIG_PM) && defined(CONFIG_INET)
-
-static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr);
-
-static struct notifier_block velocity_inetaddr_notifier = {
-	.notifier_call = velocity_netdev_event,
-};
-
-static void velocity_register_notifier(void)
-{
-	register_inetaddr_notifier(&velocity_inetaddr_notifier);
-}
-
-static void velocity_unregister_notifier(void)
-{
-	unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
-}
-
-#else
-
-#define velocity_register_notifier()	do {} while (0)
-#define velocity_unregister_notifier()	do {} while (0)
-
 #endif
 
 /*
  *	Internal board variants. At the moment we have only one
  */
-
 static struct velocity_info_tbl chip_info_table[] = {
 	{CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
 	{ }
@@ -448,7 +381,6 @@ static struct velocity_info_tbl chip_info_table[] = {
  *	Describe the PCI device identifiers that we support in this
  *	device driver. Used for hotplug autoloading.
  */
-
 static const struct pci_device_id velocity_id_table[] __devinitdata = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
 	{ }
@@ -463,7 +395,6 @@ MODULE_DEVICE_TABLE(pci, velocity_id_table);
  *	Given a chip identifier return a suitable description. Returns
  *	a pointer a static string valid while the driver is loaded.
  */
-
 static const char __devinit *get_chip_name(enum chip_type chip_id)
 {
 	int i;
@@ -481,7 +412,6 @@ static const char __devinit *get_chip_name(enum chip_type chip_id)
  *	unload for each active device that is present. Disconnects
  *	the device from the network layer and frees all the resources
  */
-
 static void __devexit velocity_remove1(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
@@ -519,7 +449,6 @@ static void __devexit velocity_remove1(struct pci_dev *pdev)
  *	all the verification and checking as well as reporting so that
  *	we don't duplicate code for each option.
  */
-
 static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, const char *devname)
 {
 	if (val == -1)
@@ -548,7 +477,6 @@ static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max,
  *	all the verification and checking as well as reporting so that
  *	we don't duplicate code for each option.
  */
-
 static void __devinit velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag, char *name, const char *devname)
 {
 	(*opt) &= (~flag);
@@ -574,7 +502,6 @@ static void __devinit velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag
  *	Turn the module and command options into a single structure
  *	for the current device
  */
-
 static void __devinit velocity_get_options(struct velocity_opt *opts, int index, const char *devname)
 {
 
@@ -600,7 +527,6 @@ static void __devinit velocity_get_options(struct velocity_opt *opts, int index,
  *	Initialize the content addressable memory used for filters. Load
  *	appropriately according to the presence of VLAN
  */
-
 static void velocity_init_cam_filter(struct velocity_info *vptr)
 {
 	struct mac_regs __iomem *regs = vptr->mac_regs;
@@ -673,7 +599,6 @@ static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
  *	Reset the ownership and status for the receive ring side.
  *	Hand all the receive queue to the NIC.
  */
-
 static void velocity_rx_reset(struct velocity_info *vptr)
 {
 
@@ -695,6 +620,647 @@ static void velocity_rx_reset(struct velocity_info *vptr)
 }
 
 /**
+ *	velocity_get_opt_media_mode	-	get media selection
+ *	@vptr: velocity adapter
+ *
+ *	Get the media mode stored in EEPROM or module options and load
+ *	mii_status accordingly. The requested link state information
+ *	is also returned.
+ */
+static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
+{
+	u32 status = 0;
+
+	switch (vptr->options.spd_dpx) {
+	case SPD_DPX_AUTO:
+		status = VELOCITY_AUTONEG_ENABLE;
+		break;
+	case SPD_DPX_100_FULL:
+		status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
+		break;
+	case SPD_DPX_10_FULL:
+		status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
+		break;
+	case SPD_DPX_100_HALF:
+		status = VELOCITY_SPEED_100;
+		break;
+	case SPD_DPX_10_HALF:
+		status = VELOCITY_SPEED_10;
+		break;
+	}
+	vptr->mii_status = status;
+	return status;
+}
+
+/**
+ *	safe_disable_mii_autopoll	-	autopoll off
+ *	@regs: velocity registers
+ *
+ *	Turn off the autopoll and wait for it to disable on the chip
+ */
+static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
+{
+	u16 ww;
+
+	/*  turn off MAUTO */
+	writeb(0, &regs->MIICR);
+	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
+		udelay(1);
+		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
+			break;
+	}
+}
+
+/**
+ *	enable_mii_autopoll	-	turn on autopolling
+ *	@regs: velocity registers
+ *
+ *	Enable the MII link status autopoll feature on the Velocity
+ *	hardware. Wait for it to enable.
+ */
+static void enable_mii_autopoll(struct mac_regs __iomem *regs)
+{
+	int ii;
+
+	writeb(0, &(regs->MIICR));
+	writeb(MIIADR_SWMPL, &regs->MIIADR);
+
+	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
+		udelay(1);
+		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
+			break;
+	}
+
+	writeb(MIICR_MAUTO, &regs->MIICR);
+
+	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
+		udelay(1);
+		if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
+			break;
+	}
+
+}
+
+/**
+ *	velocity_mii_read	-	read MII data
+ *	@regs: velocity registers
+ *	@index: MII register index
+ *	@data: buffer for received data
+ *
+ *	Perform a single read of an MII 16bit register. Returns zero
+ *	on success or -ETIMEDOUT if the PHY did not respond.
+ */
+static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
+{
+	u16 ww;
+
+	/*
+	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
+	 */
+	safe_disable_mii_autopoll(regs);
+
+	writeb(index, &regs->MIIADR);
+
+	BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);
+
+	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
+		if (!(readb(&regs->MIICR) & MIICR_RCMD))
+			break;
+	}
+
+	*data = readw(&regs->MIIDATA);
+
+	enable_mii_autopoll(regs);
+	if (ww == W_MAX_TIMEOUT)
+		return -ETIMEDOUT;
+	return 0;
+}
+
+
+/**
+ *	mii_check_media_mode	-	check media state
+ *	@regs: velocity registers
+ *
+ *	Check the current MII status and determine the link status
+ *	accordingly
+ */
+static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
+{
+	u32 status = 0;
+	u16 ANAR;
+
+	if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs))
+		status |= VELOCITY_LINK_FAIL;
+
+	if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs))
+		status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
+	else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs))
+		status |= (VELOCITY_SPEED_1000);
+	else {
+		velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
+		if (ANAR & ANAR_TXFD)
+			status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
+		else if (ANAR & ANAR_TX)
+			status |= VELOCITY_SPEED_100;
+		else if (ANAR & ANAR_10FD)
+			status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
+		else
+			status |= (VELOCITY_SPEED_10);
+	}
+
+	if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
+		velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
+		if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
+		    == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
+			if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
+				status |= VELOCITY_AUTONEG_ENABLE;
+		}
+	}
+
+	return status;
+}
+
+/**
+ *	velocity_mii_write	-	write MII data
+ *	@regs: velocity registers
+ *	@index: MII register index
+ *	@data: 16bit data for the MII register
+ *
+ *	Perform a single write to an MII 16bit register. Returns zero
+ *	on success or -ETIMEDOUT if the PHY did not respond.
+ */
+static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
+{
+	u16 ww;
+
+	/*
+	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
+	 */
+	safe_disable_mii_autopoll(regs);
+
+	/* MII reg offset */
+	writeb(mii_addr, &regs->MIIADR);
+	/* set MII data */
+	writew(data, &regs->MIIDATA);
+
+	/* turn on MIICR_WCMD */
+	BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);
+
+	/* W_MAX_TIMEOUT is the timeout period */
+	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
+		udelay(5);
+		if (!(readb(&regs->MIICR) & MIICR_WCMD))
+			break;
+	}
+	enable_mii_autopoll(regs);
+
+	if (ww == W_MAX_TIMEOUT)
+		return -ETIMEDOUT;
+	return 0;
+}
+
+/**
+ *	set_mii_flow_control	-	flow control setup
+ *	@vptr: velocity interface
+ *
+ *	Set up the flow control on this interface according to
+ *	the supplied user/eeprom options.
+ */
+static void set_mii_flow_control(struct velocity_info *vptr)
+{
+	/*Enable or Disable PAUSE in ANAR */
+	switch (vptr->options.flow_cntl) {
+	case FLOW_CNTL_TX:
+		MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
+		MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+		break;
+
+	case FLOW_CNTL_RX:
+		MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
+		MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+		break;
+
+	case FLOW_CNTL_TX_RX:
+		MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
+		MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+		break;
+
+	case FLOW_CNTL_DISABLE:
+		MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
+		MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ *	mii_set_auto_on		-	autonegotiate on
+ *	@vptr: velocity
+ *
+ *	Enable autonegotation on this interface
+ */
+static void mii_set_auto_on(struct velocity_info *vptr)
+{
+	if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs))
+		MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
+	else
+		MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
+}
+
+static u32 check_connection_type(struct mac_regs __iomem *regs)
+{
+	u32 status = 0;
+	u8 PHYSR0;
+	u16 ANAR;
+	PHYSR0 = readb(&regs->PHYSR0);
+
+	/*
+	   if (!(PHYSR0 & PHYSR0_LINKGD))
+	   status|=VELOCITY_LINK_FAIL;
+	 */
+
+	if (PHYSR0 & PHYSR0_FDPX)
+		status |= VELOCITY_DUPLEX_FULL;
+
+	if (PHYSR0 & PHYSR0_SPDG)
+		status |= VELOCITY_SPEED_1000;
+	else if (PHYSR0 & PHYSR0_SPD10)
+		status |= VELOCITY_SPEED_10;
+	else
+		status |= VELOCITY_SPEED_100;
+
+	if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
+		velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
+		if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
+		    == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
+			if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
+				status |= VELOCITY_AUTONEG_ENABLE;
+		}
+	}
+
+	return status;
+}
+
+
+
+/**
+ *	velocity_set_media_mode		-	set media mode
+ *	@mii_status: old MII link state
+ *
+ *	Check the media link state and configure the flow control
+ *	PHY and also velocity hardware setup accordingly. In particular
+ *	we need to set up CD polling and frame bursting.
+ */
+static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
+{
+	u32 curr_status;
+	struct mac_regs __iomem *regs = vptr->mac_regs;
+
+	vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
+	curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);
+
+	/* Set mii link status */
+	set_mii_flow_control(vptr);
+
+	/*
+	   Check if new status is consisent with current status
+	   if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE)
+	   || (mii_status==curr_status)) {
+	   vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
+	   vptr->mii_status=check_connection_type(vptr->mac_regs);
+	   VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
+	   return 0;
+	   }
+	 */
+
+	if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
+		MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
+
+	/*
+	 *	If connection type is AUTO
+	 */
+	if (mii_status & VELOCITY_AUTONEG_ENABLE) {
+		VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n");
+		/* clear force MAC mode bit */
+		BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
+		/* set duplex mode of MAC according to duplex mode of MII */
+		MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs);
+		MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
+		MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs);
+
+		/* enable AUTO-NEGO mode */
+		mii_set_auto_on(vptr);
+	} else {
+		u16 ANAR;
+		u8 CHIPGCR;
+
+		/*
+		 * 1. if it's 3119, disable frame bursting in halfduplex mode
+		 *    and enable it in fullduplex mode
+		 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
+		 * 3. only enable CD heart beat counter in 10HD mode
+		 */
+
+		/* set force MAC mode bit */
+		BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
+
+		CHIPGCR = readb(&regs->CHIPGCR);
+		CHIPGCR &= ~CHIPGCR_FCGMII;
+
+		if (mii_status & VELOCITY_DUPLEX_FULL) {
+			CHIPGCR |= CHIPGCR_FCFDX;
+			writeb(CHIPGCR, &regs->CHIPGCR);
+			VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n");
+			if (vptr->rev_id < REV_ID_VT3216_A0)
+				BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
+		} else {
+			CHIPGCR &= ~CHIPGCR_FCFDX;
+			VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n");
+			writeb(CHIPGCR, &regs->CHIPGCR);
+			if (vptr->rev_id < REV_ID_VT3216_A0)
+				BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
+		}
+
+		MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
+
+		if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
+			BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
+		else
+			BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
+
+		/* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */
+		velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR);
+		ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10));
+		if (mii_status & VELOCITY_SPEED_100) {
+			if (mii_status & VELOCITY_DUPLEX_FULL)
+				ANAR |= ANAR_TXFD;
+			else
+				ANAR |= ANAR_TX;
+		} else {
+			if (mii_status & VELOCITY_DUPLEX_FULL)
+				ANAR |= ANAR_10FD;
+			else
+				ANAR |= ANAR_10;
+		}
+		velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR);
+		/* enable AUTO-NEGO mode */
+		mii_set_auto_on(vptr);
+		/* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */
+	}
+	/* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
+	/* vptr->mii_status=check_connection_type(vptr->mac_regs); */
+	return VELOCITY_LINK_CHANGE;
+}
+
+/**
+ *	velocity_print_link_status	-	link status reporting
+ *	@vptr: velocity to report on
+ *
+ *	Turn the link status of the velocity card into a kernel log
+ *	description of the new link state, detailing speed and duplex
+ *	status
+ */
+static void velocity_print_link_status(struct velocity_info *vptr)
+{
+
+	if (vptr->mii_status & VELOCITY_LINK_FAIL) {
+		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name);
+	} else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
+		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name);
+
+		if (vptr->mii_status & VELOCITY_SPEED_1000)
+			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
+		else if (vptr->mii_status & VELOCITY_SPEED_100)
+			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
+		else
+			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");
+
+		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
+			VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
+		else
+			VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
+	} else {
+		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
+		switch (vptr->options.spd_dpx) {
+		case SPD_DPX_100_HALF:
+			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
+			break;
+		case SPD_DPX_100_FULL:
+			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n");
+			break;
+		case SPD_DPX_10_HALF:
+			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n");
+			break;
+		case SPD_DPX_10_FULL:
+			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n");
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/**
+ *	enable_flow_control_ability	-	flow control
+ *	@vptr: veloity to configure
+ *
+ *	Set up flow control according to the flow control options
+ *	determined by the eeprom/configuration.
+ */
+static void enable_flow_control_ability(struct velocity_info *vptr)
+{
+
+	struct mac_regs __iomem *regs = vptr->mac_regs;
+
+	switch (vptr->options.flow_cntl) {
+
+	case FLOW_CNTL_DEFAULT:
+		if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
+			writel(CR0_FDXRFCEN, &regs->CR0Set);
+		else
+			writel(CR0_FDXRFCEN, &regs->CR0Clr);
+
+		if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
+			writel(CR0_FDXTFCEN, &regs->CR0Set);
+		else
+			writel(CR0_FDXTFCEN, &regs->CR0Clr);
+		break;
+
+	case FLOW_CNTL_TX:
+		writel(CR0_FDXTFCEN, &regs->CR0Set);
+		writel(CR0_FDXRFCEN, &regs->CR0Clr);
+		break;
+
+	case FLOW_CNTL_RX:
+		writel(CR0_FDXRFCEN, &regs->CR0Set);
+		writel(CR0_FDXTFCEN, &regs->CR0Clr);
+		break;
+
+	case FLOW_CNTL_TX_RX:
+		writel(CR0_FDXTFCEN, &regs->CR0Set);
+		writel(CR0_FDXRFCEN, &regs->CR0Set);
+		break;
+
+	case FLOW_CNTL_DISABLE:
+		writel(CR0_FDXRFCEN, &regs->CR0Clr);
+		writel(CR0_FDXTFCEN, &regs->CR0Clr);
+		break;
+
+	default:
+		break;
+	}
+
+}
+
+/**
+ *	velocity_soft_reset	-	soft reset
+ *	@vptr: velocity to reset
+ *
+ *	Kick off a soft reset of the velocity adapter and then poll
+ *	until the reset sequence has completed before returning.
+ */
+static int velocity_soft_reset(struct velocity_info *vptr)
+{
+	struct mac_regs __iomem *regs = vptr->mac_regs;
+	int i = 0;
+
+	writel(CR0_SFRST, &regs->CR0Set);
+
+	for (i = 0; i < W_MAX_TIMEOUT; i++) {
+		udelay(5);
+		if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
+			break;
+	}
+
+	if (i == W_MAX_TIMEOUT) {
+		writel(CR0_FORSRST, &regs->CR0Set);
+		/* FIXME: PCI POSTING */
+		/* delay 2ms */
+		mdelay(2);
+	}
+	return 0;
+}
+
+/**
+ *	velocity_set_multi	-	filter list change callback
+ *	@dev: network device
+ *
+ *	Called by the network layer when the filter lists need to change
+ *	for a velocity adapter. Reload the CAMs with the new address
+ *	filter ruleset.
+ */
+static void velocity_set_multi(struct net_device *dev)
+{
+	struct velocity_info *vptr = netdev_priv(dev);
+	struct mac_regs __iomem *regs = vptr->mac_regs;
+	u8 rx_mode;
+	int i;
+	struct dev_mc_list *mclist;
+
+	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
+		writel(0xffffffff, &regs->MARCAM[0]);
+		writel(0xffffffff, &regs->MARCAM[4]);
+		rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
+	} else if ((dev->mc_count > vptr->multicast_limit)
+		   || (dev->flags & IFF_ALLMULTI)) {
+		writel(0xffffffff, &regs->MARCAM[0]);
+		writel(0xffffffff, &regs->MARCAM[4]);
+		rx_mode = (RCR_AM | RCR_AB);
+	} else {
+		int offset = MCAM_SIZE - vptr->multicast_limit;
+		mac_get_cam_mask(regs, vptr->mCAMmask);
+
+		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+			mac_set_cam(regs, i + offset, mclist->dmi_addr);
+			vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
+		}
+
+		mac_set_cam_mask(regs, vptr->mCAMmask);
+		rx_mode = RCR_AM | RCR_AB | RCR_AP;
+	}
+	if (dev->mtu > 1500)
+		rx_mode |= RCR_AL;
+
+	BYTE_REG_BITS_ON(rx_mode, &regs->RCR);
+
+}
+
+/*
+ *	MII access , media link mode setting functions
+ */
+
+/**
+ *	mii_init	-	set up MII
+ *	@vptr: velocity adapter
+ *	@mii_status:  links tatus
+ *
+ *	Set up the PHY for the current link state.
+ */
+static void mii_init(struct velocity_info *vptr, u32 mii_status)
+{
+	u16 BMCR;
+
+	switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
+	case PHYID_CICADA_CS8201:
+		/*
+		 *	Reset to hardware default
+		 */
+		MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
+		/*
+		 *	Turn on ECHODIS bit in NWay-forced full mode and turn it
+		 *	off it in NWay-forced half mode for NWay-forced v.s.
+		 *	legacy-forced issue.
+		 */
+		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
+			MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+		else
+			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+		/*
+		 *	Turn on Link/Activity LED enable bit for CIS8201
+		 */
+		MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs);
+		break;
+	case PHYID_VT3216_32BIT:
+	case PHYID_VT3216_64BIT:
+		/*
+		 *	Reset to hardware default
+		 */
+		MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
+		/*
+		 *	Turn on ECHODIS bit in NWay-forced full mode and turn it
+		 *	off it in NWay-forced half mode for NWay-forced v.s.
+		 *	legacy-forced issue
+		 */
+		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
+			MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+		else
+			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+		break;
+
+	case PHYID_MARVELL_1000:
+	case PHYID_MARVELL_1000S:
+		/*
+		 *	Assert CRS on Transmit
+		 */
+		MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
+		/*
+		 *	Reset to hardware default
+		 */
+		MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
+		break;
+	default:
+		;
+	}
+	velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR);
+	if (BMCR & BMCR_ISO) {
+		BMCR &= ~BMCR_ISO;
+		velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR);
+	}
+}
+
+
+/**
  *	velocity_init_registers	-	initialise MAC registers
  *	@vptr: velocity to init
  *	@type: type of initialisation (hot or cold)
@@ -702,7 +1268,6 @@ static void velocity_rx_reset(struct velocity_info *vptr)
  *	Initialise the MAC on a reset or on first set up on the
  *	hardware.
  */
-
 static void velocity_init_registers(struct velocity_info *vptr,
 				    enum velocity_init_type type)
 {
@@ -818,288 +1383,29 @@ static void velocity_init_registers(struct velocity_info *vptr,
 	}
 }
 
-/**
- *	velocity_soft_reset	-	soft reset
- *	@vptr: velocity to reset
- *
- *	Kick off a soft reset of the velocity adapter and then poll
- *	until the reset sequence has completed before returning.
- */
-
-static int velocity_soft_reset(struct velocity_info *vptr)
+static void velocity_give_many_rx_descs(struct velocity_info *vptr)
 {
 	struct mac_regs __iomem *regs = vptr->mac_regs;
-	int i = 0;
-
-	writel(CR0_SFRST, &regs->CR0Set);
-
-	for (i = 0; i < W_MAX_TIMEOUT; i++) {
-		udelay(5);
-		if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
-			break;
-	}
-
-	if (i == W_MAX_TIMEOUT) {
-		writel(CR0_FORSRST, &regs->CR0Set);
-		/* FIXME: PCI POSTING */
-		/* delay 2ms */
-		mdelay(2);
-	}
-	return 0;
-}
-
-static const struct net_device_ops velocity_netdev_ops = {
-	.ndo_open		= velocity_open,
-	.ndo_stop		= velocity_close,
-	.ndo_start_xmit		= velocity_xmit,
-	.ndo_get_stats		= velocity_get_stats,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_address	= eth_mac_addr,
-	.ndo_set_multicast_list	= velocity_set_multi,
-	.ndo_change_mtu		= velocity_change_mtu,
-	.ndo_do_ioctl		= velocity_ioctl,
-	.ndo_vlan_rx_add_vid	= velocity_vlan_rx_add_vid,
-	.ndo_vlan_rx_kill_vid	= velocity_vlan_rx_kill_vid,
-	.ndo_vlan_rx_register	= velocity_vlan_rx_register,
-};
-
-/**
- *	velocity_found1		-	set up discovered velocity card
- *	@pdev: PCI device
- *	@ent: PCI device table entry that matched
- *
- *	Configure a discovered adapter from scratch. Return a negative
- *	errno error code on failure paths.
- */
-
-static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-	static int first = 1;
-	struct net_device *dev;
-	int i;
-	const char *drv_string;
-	const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data];
-	struct velocity_info *vptr;
-	struct mac_regs __iomem *regs;
-	int ret = -ENOMEM;
-
-	/* FIXME: this driver, like almost all other ethernet drivers,
-	 * can support more than MAX_UNITS.
-	 */
-	if (velocity_nics >= MAX_UNITS) {
-		dev_notice(&pdev->dev, "already found %d NICs.\n",
-			   velocity_nics);
-		return -ENODEV;
-	}
-
-	dev = alloc_etherdev(sizeof(struct velocity_info));
-	if (!dev) {
-		dev_err(&pdev->dev, "allocate net device failed.\n");
-		goto out;
-	}
-
-	/* Chain it all together */
-
-	SET_NETDEV_DEV(dev, &pdev->dev);
-	vptr = netdev_priv(dev);
-
-
-	if (first) {
-		printk(KERN_INFO "%s Ver. %s\n",
-			VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
-		printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
-		printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
-		first = 0;
-	}
-
-	velocity_init_info(pdev, vptr, info);
-
-	vptr->dev = dev;
-
-	dev->irq = pdev->irq;
-
-	ret = pci_enable_device(pdev);
-	if (ret < 0)
-		goto err_free_dev;
-
-	ret = velocity_get_pci_info(vptr, pdev);
-	if (ret < 0) {
-		/* error message already printed */
-		goto err_disable;
-	}
-
-	ret = pci_request_regions(pdev, VELOCITY_NAME);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "No PCI resources.\n");
-		goto err_disable;
-	}
-
-	regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
-	if (regs == NULL) {
-		ret = -EIO;
-		goto err_release_res;
-	}
-
-	vptr->mac_regs = regs;
-
-	mac_wol_reset(regs);
-
-	dev->base_addr = vptr->ioaddr;
-
-	for (i = 0; i < 6; i++)
-		dev->dev_addr[i] = readb(&regs->PAR[i]);
-
-
-	drv_string = dev_driver_string(&pdev->dev);
-
-	velocity_get_options(&vptr->options, velocity_nics, drv_string);
-
-	/*
-	 *	Mask out the options cannot be set to the chip
-	 */
-
-	vptr->options.flags &= info->flags;
+	int avail, dirty, unusable;
 
 	/*
-	 *	Enable the chip specified capbilities
+	 * RD number must be equal to 4X per hardware spec
+	 * (programming guide rev 1.20, p.13)
 	 */
+	if (vptr->rx.filled < 4)
+		return;
 
-	vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
-
-	vptr->wol_opts = vptr->options.wol_opts;
-	vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
-
-	vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
-
-	dev->irq = pdev->irq;
-	dev->netdev_ops = &velocity_netdev_ops;
-	dev->ethtool_ops = &velocity_ethtool_ops;
-
-	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
-		NETIF_F_HW_VLAN_RX;
-
-	if (vptr->flags & VELOCITY_FLAGS_TX_CSUM)
-		dev->features |= NETIF_F_IP_CSUM;
-
-	ret = register_netdev(dev);
-	if (ret < 0)
-		goto err_iounmap;
-
-	if (!velocity_get_link(dev)) {
-		netif_carrier_off(dev);
-		vptr->mii_status |= VELOCITY_LINK_FAIL;
-	}
-
-	velocity_print_info(vptr);
-	pci_set_drvdata(pdev, dev);
-
-	/* and leave the chip powered down */
-
-	pci_set_power_state(pdev, PCI_D3hot);
-#ifdef CONFIG_PM
-	{
-		unsigned long flags;
-
-		spin_lock_irqsave(&velocity_dev_list_lock, flags);
-		list_add(&vptr->list, &velocity_dev_list);
-		spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
-	}
-#endif
-	velocity_nics++;
-out:
-	return ret;
-
-err_iounmap:
-	iounmap(regs);
-err_release_res:
-	pci_release_regions(pdev);
-err_disable:
-	pci_disable_device(pdev);
-err_free_dev:
-	free_netdev(dev);
-	goto out;
-}
-
-/**
- *	velocity_print_info	-	per driver data
- *	@vptr: velocity
- *
- *	Print per driver data as the kernel driver finds Velocity
- *	hardware
- */
-
-static void __devinit velocity_print_info(struct velocity_info *vptr)
-{
-	struct net_device *dev = vptr->dev;
-
-	printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
-	printk(KERN_INFO "%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
-		dev->name,
-		dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
-		dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
-}
-
-/**
- *	velocity_init_info	-	init private data
- *	@pdev: PCI device
- *	@vptr: Velocity info
- *	@info: Board type
- *
- *	Set up the initial velocity_info struct for the device that has been
- *	discovered.
- */
-
-static void __devinit velocity_init_info(struct pci_dev *pdev,
-					 struct velocity_info *vptr,
-					 const struct velocity_info_tbl *info)
-{
-	memset(vptr, 0, sizeof(struct velocity_info));
-
-	vptr->pdev = pdev;
-	vptr->chip_id = info->chip_id;
-	vptr->tx.numq = info->txqueue;
-	vptr->multicast_limit = MCAM_SIZE;
-	spin_lock_init(&vptr->lock);
-	INIT_LIST_HEAD(&vptr->list);
-}
-
-/**
- *	velocity_get_pci_info	-	retrieve PCI info for device
- *	@vptr: velocity device
- *	@pdev: PCI device it matches
- *
- *	Retrieve the PCI configuration space data that interests us from
- *	the kernel PCI layer
- */
-
-static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev)
-{
-	vptr->rev_id = pdev->revision;
-
-	pci_set_master(pdev);
-
-	vptr->ioaddr = pci_resource_start(pdev, 0);
-	vptr->memaddr = pci_resource_start(pdev, 1);
-
-	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
-		dev_err(&pdev->dev,
-			"region #0 is not an I/O resource, aborting.\n");
-		return -EINVAL;
-	}
-
-	if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
-		dev_err(&pdev->dev,
-			"region #1 is an I/O resource, aborting.\n");
-		return -EINVAL;
-	}
+	wmb();
 
-	if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
-		dev_err(&pdev->dev, "region #1 is too small.\n");
-		return -EINVAL;
+	unusable = vptr->rx.filled & 0x0003;
+	dirty = vptr->rx.dirty - unusable;
+	for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
+		dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
+		vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
 	}
-	vptr->pdev = pdev;
 
-	return 0;
+	writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
+	vptr->rx.filled = unusable;
 }
 
 /**
@@ -1109,7 +1415,6 @@ static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pc
  *	Allocate PCI mapped DMA rings for the receive and transmit layer
  *	to use.
  */
-
 static int velocity_init_dma_rings(struct velocity_info *vptr)
 {
 	struct velocity_opt *opt = &vptr->options;
@@ -1150,46 +1455,50 @@ static int velocity_init_dma_rings(struct velocity_info *vptr)
 	return 0;
 }
 
+static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
+{
+	vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
+}
+
 /**
- *	velocity_free_dma_rings	-	free PCI ring pointers
- *	@vptr: Velocity to free from
+ *	velocity_alloc_rx_buf	-	allocate aligned receive buffer
+ *	@vptr: velocity
+ *	@idx: ring index
  *
- *	Clean up the PCI ring buffers allocated to this velocity.
+ *	Allocate a new full sized buffer for the reception of a frame and
+ *	map it into PCI space for the hardware to use. The hardware
+ *	requires *64* byte alignment of the buffer which makes life
+ *	less fun than would be ideal.
  */
-
-static void velocity_free_dma_rings(struct velocity_info *vptr)
+static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 {
-	const int size = vptr->options.numrx * sizeof(struct rx_desc) +
-		vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
-
-	pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
-}
+	struct rx_desc *rd = &(vptr->rx.ring[idx]);
+	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
 
-static void velocity_give_many_rx_descs(struct velocity_info *vptr)
-{
-	struct mac_regs __iomem *regs = vptr->mac_regs;
-	int avail, dirty, unusable;
+	rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64);
+	if (rd_info->skb == NULL)
+		return -ENOMEM;
 
 	/*
-	 * RD number must be equal to 4X per hardware spec
-	 * (programming guide rev 1.20, p.13)
+	 * Do the gymnastics to get the buffer head for data at
+	 * 64byte alignment.
 	 */
-	if (vptr->rx.filled < 4)
-		return;
-
-	wmb();
+	skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
+	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
+					vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
 
-	unusable = vptr->rx.filled & 0x0003;
-	dirty = vptr->rx.dirty - unusable;
-	for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
-		dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
-		vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
-	}
+	/*
+	 *	Fill in the descriptor to match
+	 */
 
-	writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
-	vptr->rx.filled = unusable;
+	*((u32 *) & (rd->rdesc0)) = 0;
+	rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
+	rd->pa_low = cpu_to_le32(rd_info->skb_dma);
+	rd->pa_high = 0;
+	return 0;
 }
 
+
 static int velocity_rx_refill(struct velocity_info *vptr)
 {
 	int dirty = vptr->rx.dirty, done = 0;
@@ -1217,42 +1526,6 @@ static int velocity_rx_refill(struct velocity_info *vptr)
 	return done;
 }
 
-static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
-{
-	vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
-}
-
-/**
- *	velocity_init_rd_ring	-	set up receive ring
- *	@vptr: velocity to configure
- *
- *	Allocate and set up the receive buffers for each ring slot and
- *	assign them to the network adapter.
- */
-
-static int velocity_init_rd_ring(struct velocity_info *vptr)
-{
-	int ret = -ENOMEM;
-
-	vptr->rx.info = kcalloc(vptr->options.numrx,
-				sizeof(struct velocity_rd_info), GFP_KERNEL);
-	if (!vptr->rx.info)
-		goto out;
-
-	velocity_init_rx_ring_indexes(vptr);
-
-	if (velocity_rx_refill(vptr) != vptr->options.numrx) {
-		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
-			"%s: failed to allocate RX buffer.\n", vptr->dev->name);
-		velocity_free_rd_ring(vptr);
-		goto out;
-	}
-
-	ret = 0;
-out:
-	return ret;
-}
-
 /**
  *	velocity_free_rd_ring	-	free receive ring
  *	@vptr: velocity to clean up
@@ -1260,7 +1533,6 @@ out:
  *	Free the receive buffers for each ring slot and any
  *	attached socket buffers that need to go away.
  */
-
 static void velocity_free_rd_ring(struct velocity_info *vptr)
 {
 	int i;
@@ -1288,6 +1560,38 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
 	vptr->rx.info = NULL;
 }
 
+
+
+/**
+ *	velocity_init_rd_ring	-	set up receive ring
+ *	@vptr: velocity to configure
+ *
+ *	Allocate and set up the receive buffers for each ring slot and
+ *	assign them to the network adapter.
+ */
+static int velocity_init_rd_ring(struct velocity_info *vptr)
+{
+	int ret = -ENOMEM;
+
+	vptr->rx.info = kcalloc(vptr->options.numrx,
+				sizeof(struct velocity_rd_info), GFP_KERNEL);
+	if (!vptr->rx.info)
+		goto out;
+
+	velocity_init_rx_ring_indexes(vptr);
+
+	if (velocity_rx_refill(vptr) != vptr->options.numrx) {
+		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
+			"%s: failed to allocate RX buffer.\n", vptr->dev->name);
+		velocity_free_rd_ring(vptr);
+		goto out;
+	}
+
+	ret = 0;
+out:
+	return ret;
+}
+
 /**
  *	velocity_init_td_ring	-	set up transmit ring
  *	@vptr: velocity
@@ -1296,7 +1600,6 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
  *	Returns zero on success or a negative posix errno code for
  *	failure.
  */
-
 static int velocity_init_td_ring(struct velocity_info *vptr)
 {
 	dma_addr_t curr;
@@ -1320,10 +1623,81 @@ static int velocity_init_td_ring(struct velocity_info *vptr)
 	return 0;
 }
 
+/**
+ *	velocity_free_dma_rings	-	free PCI ring pointers
+ *	@vptr: Velocity to free from
+ *
+ *	Clean up the PCI ring buffers allocated to this velocity.
+ */
+static void velocity_free_dma_rings(struct velocity_info *vptr)
+{
+	const int size = vptr->options.numrx * sizeof(struct rx_desc) +
+		vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
+
+	pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
+}
+
+
+static int velocity_init_rings(struct velocity_info *vptr, int mtu)
+{
+	int ret;
+
+	velocity_set_rxbufsize(vptr, mtu);
+
+	ret = velocity_init_dma_rings(vptr);
+	if (ret < 0)
+		goto out;
+
+	ret = velocity_init_rd_ring(vptr);
+	if (ret < 0)
+		goto err_free_dma_rings_0;
+
+	ret = velocity_init_td_ring(vptr);
+	if (ret < 0)
+		goto err_free_rd_ring_1;
+out:
+	return ret;
+
+err_free_rd_ring_1:
+	velocity_free_rd_ring(vptr);
+err_free_dma_rings_0:
+	velocity_free_dma_rings(vptr);
+	goto out;
+}
+
+/**
+ *	velocity_free_tx_buf	-	free transmit buffer
+ *	@vptr: velocity
+ *	@tdinfo: buffer
+ *
+ *	Release an transmit buffer. If the buffer was preallocated then
+ *	recycle it, if not then unmap the buffer.
+ */
+static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *tdinfo)
+{
+	struct sk_buff *skb = tdinfo->skb;
+	int i;
+	int pktlen;
+
+	/*
+	 *	Don't unmap the pre-allocated tx_bufs
+	 */
+	if (tdinfo->skb_dma) {
+
+		pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
+		for (i = 0; i < tdinfo->nskb_dma; i++) {
+			pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], pktlen, PCI_DMA_TODEVICE);
+			tdinfo->skb_dma[i] = 0;
+		}
+	}
+	dev_kfree_skb_irq(skb);
+	tdinfo->skb = NULL;
+}
+
+
 /*
  *	FIXME: could we merge this with velocity_free_tx_buf ?
  */
-
 static void velocity_free_td_ring_entry(struct velocity_info *vptr,
 					int q, int n)
 {
@@ -1353,7 +1727,6 @@ static void velocity_free_td_ring_entry(struct velocity_info *vptr,
  *	Free up the transmit ring for this particular velocity adapter.
  *	We free the ring contents but not the ring itself.
  */
-
 static void velocity_free_td_ring(struct velocity_info *vptr)
 {
 	int i, j;
@@ -1369,61 +1742,167 @@ static void velocity_free_td_ring(struct velocity_info *vptr)
 	}
 }
 
+
+static void velocity_free_rings(struct velocity_info *vptr)
+{
+	velocity_free_td_ring(vptr);
+	velocity_free_rd_ring(vptr);
+	velocity_free_dma_rings(vptr);
+}
+
 /**
- *	velocity_rx_srv		-	service RX interrupt
+ *	velocity_error		-	handle error from controller
  *	@vptr: velocity
- *	@status: adapter status (unused)
+ *	@status: card status
+ *
+ *	Process an error report from the hardware and attempt to recover
+ *	the card itself. At the moment we cannot recover from some
+ *	theoretically impossible errors but this could be fixed using
+ *	the pci_device_failed logic to bounce the hardware
  *
- *	Walk the receive ring of the velocity adapter and remove
- *	any received packets from the receive queue. Hand the ring
- *	slots back to the adapter for reuse.
  */
-
-static int velocity_rx_srv(struct velocity_info *vptr, int status)
+static void velocity_error(struct velocity_info *vptr, int status)
 {
-	struct net_device_stats *stats = &vptr->dev->stats;
-	int rd_curr = vptr->rx.curr;
-	int works = 0;
 
-	do {
-		struct rx_desc *rd = vptr->rx.ring + rd_curr;
+	if (status & ISR_TXSTLI) {
+		struct mac_regs __iomem *regs = vptr->mac_regs;
 
-		if (!vptr->rx.info[rd_curr].skb)
-			break;
+		printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
+		BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
+		writew(TRDCSR_RUN, &regs->TDCSRClr);
+		netif_stop_queue(vptr->dev);
 
-		if (rd->rdesc0.len & OWNED_BY_NIC)
-			break;
+		/* FIXME: port over the pci_device_failed code and use it
+		   here */
+	}
 
-		rmb();
+	if (status & ISR_SRCI) {
+		struct mac_regs __iomem *regs = vptr->mac_regs;
+		int linked;
+
+		if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
+			vptr->mii_status = check_connection_type(regs);
 
+			/*
+			 * If it is a 3119, disable frame bursting in
+			 * halfduplex mode and enable it in fullduplex
+			 * mode
+			 */
+			if (vptr->rev_id < REV_ID_VT3216_A0) {
+				if (vptr->mii_status | VELOCITY_DUPLEX_FULL)
+					BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
+				else
+					BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
+			}
+			/*
+			 * Only enable CD heart beat counter in 10HD mode
+			 */
+			if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
+				BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
+			else
+				BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
+		}
 		/*
-		 *	Don't drop CE or RL error frame although RXOK is off
+		 *	Get link status from PHYSR0
 		 */
-		if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
-			if (velocity_receive_frame(vptr, rd_curr) < 0)
-				stats->rx_dropped++;
-		} else {
-			if (rd->rdesc0.RSR & RSR_CRC)
-				stats->rx_crc_errors++;
-			if (rd->rdesc0.RSR & RSR_FAE)
-				stats->rx_frame_errors++;
+		linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;
 
-			stats->rx_dropped++;
+		if (linked) {
+			vptr->mii_status &= ~VELOCITY_LINK_FAIL;
+			netif_carrier_on(vptr->dev);
+		} else {
+			vptr->mii_status |= VELOCITY_LINK_FAIL;
+			netif_carrier_off(vptr->dev);
 		}
 
-		rd->size |= RX_INTEN;
+		velocity_print_link_status(vptr);
+		enable_flow_control_ability(vptr);
 
-		rd_curr++;
-		if (rd_curr >= vptr->options.numrx)
-			rd_curr = 0;
-	} while (++works <= 15);
+		/*
+		 *	Re-enable auto-polling because SRCI will disable
+		 *	auto-polling
+		 */
 
-	vptr->rx.curr = rd_curr;
+		enable_mii_autopoll(regs);
 
-	if ((works > 0) && (velocity_rx_refill(vptr) > 0))
-		velocity_give_many_rx_descs(vptr);
+		if (vptr->mii_status & VELOCITY_LINK_FAIL)
+			netif_stop_queue(vptr->dev);
+		else
+			netif_wake_queue(vptr->dev);
 
-	VAR_USED(stats);
+	};
+	if (status & ISR_MIBFI)
+		velocity_update_hw_mibs(vptr);
+	if (status & ISR_LSTEI)
+		mac_rx_queue_wake(vptr->mac_regs);
+}
+
+/**
+ *	tx_srv		-	transmit interrupt service
+ *	@vptr; Velocity
+ *	@status:
+ *
+ *	Scan the queues looking for transmitted packets that
+ *	we can complete and clean up. Update any statistics as
+ *	necessary/
+ */
+static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
+{
+	struct tx_desc *td;
+	int qnum;
+	int full = 0;
+	int idx;
+	int works = 0;
+	struct velocity_td_info *tdinfo;
+	struct net_device_stats *stats = &vptr->dev->stats;
+
+	for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
+		for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
+			idx = (idx + 1) % vptr->options.numtx) {
+
+			/*
+			 *	Get Tx Descriptor
+			 */
+			td = &(vptr->tx.rings[qnum][idx]);
+			tdinfo = &(vptr->tx.infos[qnum][idx]);
+
+			if (td->tdesc0.len & OWNED_BY_NIC)
+				break;
+
+			if ((works++ > 15))
+				break;
+
+			if (td->tdesc0.TSR & TSR0_TERR) {
+				stats->tx_errors++;
+				stats->tx_dropped++;
+				if (td->tdesc0.TSR & TSR0_CDH)
+					stats->tx_heartbeat_errors++;
+				if (td->tdesc0.TSR & TSR0_CRS)
+					stats->tx_carrier_errors++;
+				if (td->tdesc0.TSR & TSR0_ABT)
+					stats->tx_aborted_errors++;
+				if (td->tdesc0.TSR & TSR0_OWC)
+					stats->tx_window_errors++;
+			} else {
+				stats->tx_packets++;
+				stats->tx_bytes += tdinfo->skb->len;
+			}
+			velocity_free_tx_buf(vptr, tdinfo);
+			vptr->tx.used[qnum]--;
+		}
+		vptr->tx.tail[qnum] = idx;
+
+		if (AVAIL_TD(vptr, qnum) < 1)
+			full = 1;
+	}
+	/*
+	 *	Look to see if we should kick the transmit network
+	 *	layer for more work.
+	 */
+	if (netif_queue_stopped(vptr->dev) && (full == 0)
+	    && (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
+		netif_wake_queue(vptr->dev);
+	}
 	return works;
 }
 
@@ -1435,7 +1914,6 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
  *	Process the status bits for the received packet and determine
  *	if the checksum was computed and verified by the hardware
  */
-
 static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
 {
 	skb->ip_summed = CHECKSUM_NONE;
@@ -1502,6 +1980,7 @@ static inline void velocity_iph_realign(struct velocity_info *vptr,
 	}
 }
 
+
 /**
  *	velocity_receive_frame	-	received packet processor
  *	@vptr: velocity we are handling
@@ -1510,7 +1989,6 @@ static inline void velocity_iph_realign(struct velocity_info *vptr,
  *	A packet has arrived. We process the packet and if appropriate
  *	pass the frame up the network stack
  */
-
 static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 {
 	void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
@@ -1572,314 +2050,118 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
1572 return 0; 2050 return 0;
1573} 2051}
1574 2052
2053
1575/** 2054/**
1576 * velocity_alloc_rx_buf - allocate aligned receive buffer 2055 * velocity_rx_srv - service RX interrupt
1577 * @vptr: velocity 2056 * @vptr: velocity
1578 * @idx: ring index 2057 * @status: adapter status (unused)
1579 * 2058 *
1580 * Allocate a new full sized buffer for the reception of a frame and 2059 * Walk the receive ring of the velocity adapter and remove
1581 * map it into PCI space for the hardware to use. The hardware 2060 * any received packets from the receive queue. Hand the ring
1582 * requires *64* byte alignment of the buffer which makes life 2061 * slots back to the adapter for reuse.
1583 * less fun than would be ideal.
1584 */ 2062 */
1585 2063static int velocity_rx_srv(struct velocity_info *vptr, int status)
1586static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1587{ 2064{
1588 struct rx_desc *rd = &(vptr->rx.ring[idx]); 2065 struct net_device_stats *stats = &vptr->dev->stats;
1589 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); 2066 int rd_curr = vptr->rx.curr;
1590 2067 int works = 0;
1591 rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64);
1592 if (rd_info->skb == NULL)
1593 return -ENOMEM;
1594 2068
1595 /* 2069 do {
1596 * Do the gymnastics to get the buffer head for data at 2070 struct rx_desc *rd = vptr->rx.ring + rd_curr;
1597 * 64byte alignment.
1598 */
1599 skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
1600 rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
1601 vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
1602 2071
1603 /* 2072 if (!vptr->rx.info[rd_curr].skb)
1604 * Fill in the descriptor to match 2073 break;
1605 */
1606 2074
1607 *((u32 *) & (rd->rdesc0)) = 0; 2075 if (rd->rdesc0.len & OWNED_BY_NIC)
1608 rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN; 2076 break;
1609 rd->pa_low = cpu_to_le32(rd_info->skb_dma);
1610 rd->pa_high = 0;
1611 return 0;
1612}
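The allocation above asks for 64 bytes more than the receive buffer needs so that the start of packet data can be moved up to the alignment the hardware insists on; the skb_reserve() call expresses that fixup. The generic align-up idiom it corresponds to looks like this (illustration only, not the driver's exact expression):

/* Round an address up to the next 64-byte boundary; the distance
 * from addr to the result is what the buffer head must absorb. */
static inline unsigned long velocity_align64(unsigned long addr)
{
	return (addr + 63UL) & ~63UL;
}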
1613 2077
1614/** 2078 rmb();
1615 * velocity_tx_srv - transmit interrupt service
1616 * @vptr: velocity
1617 * @status: adapter status (unused)
1618 *
1619 * Scan the queues looking for transmitted packets that
1620 * we can complete and clean up. Update any statistics as
1621 * necessary.
1622 */
1623 2079
1624static int velocity_tx_srv(struct velocity_info *vptr, u32 status) 2080 /*
1625{ 2081 * Don't drop CE or RL error frame although RXOK is off
1626 struct tx_desc *td; 2082 */
1627 int qnum; 2083 if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
1628 int full = 0; 2084 if (velocity_receive_frame(vptr, rd_curr) < 0)
1629 int idx; 2085 stats->rx_dropped++;
1630 int works = 0; 2086 } else {
1631 struct velocity_td_info *tdinfo; 2087 if (rd->rdesc0.RSR & RSR_CRC)
1632 struct net_device_stats *stats = &vptr->dev->stats; 2088 stats->rx_crc_errors++;
2089 if (rd->rdesc0.RSR & RSR_FAE)
2090 stats->rx_frame_errors++;
1633 2091
1634 for (qnum = 0; qnum < vptr->tx.numq; qnum++) { 2092 stats->rx_dropped++;
1635 for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0; 2093 }
1636 idx = (idx + 1) % vptr->options.numtx) {
1637 2094
1638 /* 2095 rd->size |= RX_INTEN;
1639 * Get Tx Descriptor
1640 */
1641 td = &(vptr->tx.rings[qnum][idx]);
1642 tdinfo = &(vptr->tx.infos[qnum][idx]);
1643 2096
1644 if (td->tdesc0.len & OWNED_BY_NIC) 2097 rd_curr++;
1645 break; 2098 if (rd_curr >= vptr->options.numrx)
2099 rd_curr = 0;
2100 } while (++works <= 15);
1646 2101
1647 if (works++ > 15) 2102 vptr->rx.curr = rd_curr;
1648 break;
1649 2103
1650 if (td->tdesc0.TSR & TSR0_TERR) { 2104 if ((works > 0) && (velocity_rx_refill(vptr) > 0))
1651 stats->tx_errors++; 2105 velocity_give_many_rx_descs(vptr);
1652 stats->tx_dropped++;
1653 if (td->tdesc0.TSR & TSR0_CDH)
1654 stats->tx_heartbeat_errors++;
1655 if (td->tdesc0.TSR & TSR0_CRS)
1656 stats->tx_carrier_errors++;
1657 if (td->tdesc0.TSR & TSR0_ABT)
1658 stats->tx_aborted_errors++;
1659 if (td->tdesc0.TSR & TSR0_OWC)
1660 stats->tx_window_errors++;
1661 } else {
1662 stats->tx_packets++;
1663 stats->tx_bytes += tdinfo->skb->len;
1664 }
1665 velocity_free_tx_buf(vptr, tdinfo);
1666 vptr->tx.used[qnum]--;
1667 }
1668 vptr->tx.tail[qnum] = idx;
1669 2106
1670 if (AVAIL_TD(vptr, qnum) < 1) 2107 VAR_USED(stats);
1671 full = 1;
1672 }
1673 /*
1674 * Look to see if we should kick the transmit network
1675 * layer for more work.
1676 */
1677 if (netif_queue_stopped(vptr->dev) && (full == 0)
1678 && (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
1679 netif_wake_queue(vptr->dev);
1680 }
1681 return works; 2108 return works;
1682} 2109}
1683 2110
1684/**
1685 * velocity_print_link_status - link status reporting
1686 * @vptr: velocity to report on
1687 *
1688 * Turn the link status of the velocity card into a kernel log
1689 * description of the new link state, detailing speed and duplex
1690 * status
1691 */
1692
1693static void velocity_print_link_status(struct velocity_info *vptr)
1694{
1695
1696 if (vptr->mii_status & VELOCITY_LINK_FAIL) {
1697 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name);
1698 } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1699 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name);
1700
1701 if (vptr->mii_status & VELOCITY_SPEED_1000)
1702 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
1703 else if (vptr->mii_status & VELOCITY_SPEED_100)
1704 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
1705 else
1706 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");
1707
1708 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1709 VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
1710 else
1711 VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
1712 } else {
1713 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
1714 switch (vptr->options.spd_dpx) {
1715 case SPD_DPX_100_HALF:
1716 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
1717 break;
1718 case SPD_DPX_100_FULL:
1719 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n");
1720 break;
1721 case SPD_DPX_10_HALF:
1722 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n");
1723 break;
1724 case SPD_DPX_10_FULL:
1725 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n");
1726 break;
1727 default:
1728 break;
1729 }
1730 }
1731}
1732 2111
1733/** 2112/**
1734 * velocity_error - handle error from controller 2113 * velocity_intr - interrupt callback
1735 * @vptr: velocity 2114 * @irq: interrupt number
1736 * @status: card status 2115 * @dev_instance: interrupting device
1737 *
1738 * Process an error report from the hardware and attempt to recover
1739 * the card itself. At the moment we cannot recover from some
1740 * theoretically impossible errors but this could be fixed using
1741 * the pci_device_failed logic to bounce the hardware
1742 * 2116 *
2117 * Called whenever an interrupt is generated by the velocity
2118 * adapter IRQ line. We may not be the source of the interrupt
2119 * and need to identify initially if we are, and if not exit as
2120 * efficiently as possible.
1743 */ 2121 */
1744 2122static irqreturn_t velocity_intr(int irq, void *dev_instance)
1745static void velocity_error(struct velocity_info *vptr, int status)
1746{ 2123{
2124 struct net_device *dev = dev_instance;
2125 struct velocity_info *vptr = netdev_priv(dev);
2126 u32 isr_status;
2127 int max_count = 0;
1747 2128
1748 if (status & ISR_TXSTLI) {
1749 struct mac_regs __iomem *regs = vptr->mac_regs;
1750 2129
1751 printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0])); 2130 spin_lock(&vptr->lock);
1752 BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR); 2131 isr_status = mac_read_isr(vptr->mac_regs);
1753 writew(TRDCSR_RUN, &regs->TDCSRClr);
1754 netif_stop_queue(vptr->dev);
1755 2132
1756 /* FIXME: port over the pci_device_failed code and use it 2133 /* Not us ? */
1757 here */ 2134 if (isr_status == 0) {
2135 spin_unlock(&vptr->lock);
2136 return IRQ_NONE;
1758 } 2137 }
1759 2138
1760 if (status & ISR_SRCI) { 2139 mac_disable_int(vptr->mac_regs);
1761 struct mac_regs __iomem *regs = vptr->mac_regs;
1762 int linked;
1763
1764 if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1765 vptr->mii_status = check_connection_type(regs);
1766
1767 /*
1768 * If it is a 3119, disable frame bursting in
1769 * halfduplex mode and enable it in fullduplex
1770 * mode
1771 */
1772 if (vptr->rev_id < REV_ID_VT3216_A0) {
1773 if (vptr->mii_status | VELOCITY_DUPLEX_FULL)
1774 BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
1775 else
1776 BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
1777 }
1778 /*
1779 * Only enable CD heart beat counter in 10HD mode
1780 */
1781 if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
1782 BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
1783 else
1784 BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
1785 }
1786 /*
1787 * Get link status from PHYSR0
1788 */
1789 linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;
1790
1791 if (linked) {
1792 vptr->mii_status &= ~VELOCITY_LINK_FAIL;
1793 netif_carrier_on(vptr->dev);
1794 } else {
1795 vptr->mii_status |= VELOCITY_LINK_FAIL;
1796 netif_carrier_off(vptr->dev);
1797 }
1798
1799 velocity_print_link_status(vptr);
1800 enable_flow_control_ability(vptr);
1801
1802 /*
1803 * Re-enable auto-polling because SRCI will disable
1804 * auto-polling
1805 */
1806
1807 enable_mii_autopoll(regs);
1808
1809 if (vptr->mii_status & VELOCITY_LINK_FAIL)
1810 netif_stop_queue(vptr->dev);
1811 else
1812 netif_wake_queue(vptr->dev);
1813
1814 }
1815 if (status & ISR_MIBFI)
1816 velocity_update_hw_mibs(vptr);
1817 if (status & ISR_LSTEI)
1818 mac_rx_queue_wake(vptr->mac_regs);
1819}
1820
1821/**
1822 * velocity_free_tx_buf - free transmit buffer
1823 * @vptr: velocity
1824 * @tdinfo: buffer
1825 *
1826 * Release a transmit buffer. If the buffer was preallocated then
1827 * recycle it, if not then unmap the buffer.
1828 */
1829
1830static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *tdinfo)
1831{
1832 struct sk_buff *skb = tdinfo->skb;
1833 int i;
1834 int pktlen;
1835 2140
1836 /* 2141 /*
1837 * Don't unmap the pre-allocated tx_bufs 2142 * Keep processing the ISR until we have completed
2143 * processing and the isr_status becomes zero
1838 */ 2144 */
1839 if (tdinfo->skb_dma) {
1840 2145
1841 pktlen = max_t(unsigned int, skb->len, ETH_ZLEN); 2146 while (isr_status != 0) {
1842 for (i = 0; i < tdinfo->nskb_dma; i++) { 2147 mac_write_isr(vptr->mac_regs, isr_status);
1843 pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], pktlen, PCI_DMA_TODEVICE); 2148 if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
1844 tdinfo->skb_dma[i] = 0; 2149 velocity_error(vptr, isr_status);
2150 if (isr_status & (ISR_PRXI | ISR_PPRXI))
2151 max_count += velocity_rx_srv(vptr, isr_status);
2152 if (isr_status & (ISR_PTXI | ISR_PPTXI))
2153 max_count += velocity_tx_srv(vptr, isr_status);
2154 isr_status = mac_read_isr(vptr->mac_regs);
2155 if (max_count > vptr->options.int_works) {
2156 printk(KERN_WARNING "%s: excessive work at interrupt.\n",
2157 dev->name);
2158 max_count = 0;
1845 } 2159 }
1846 } 2160 }
1847 dev_kfree_skb_irq(skb); 2161 spin_unlock(&vptr->lock);
1848 tdinfo->skb = NULL; 2162 mac_enable_int(vptr->mac_regs);
1849} 2163 return IRQ_HANDLED;
1850
1851static int velocity_init_rings(struct velocity_info *vptr, int mtu)
1852{
1853 int ret;
1854
1855 velocity_set_rxbufsize(vptr, mtu);
1856
1857 ret = velocity_init_dma_rings(vptr);
1858 if (ret < 0)
1859 goto out;
1860
1861 ret = velocity_init_rd_ring(vptr);
1862 if (ret < 0)
1863 goto err_free_dma_rings_0;
1864
1865 ret = velocity_init_td_ring(vptr);
1866 if (ret < 0)
1867 goto err_free_rd_ring_1;
1868out:
1869 return ret;
1870 2164
1871err_free_rd_ring_1:
1872 velocity_free_rd_ring(vptr);
1873err_free_dma_rings_0:
1874 velocity_free_dma_rings(vptr);
1875 goto out;
1876}
1877
1878static void velocity_free_rings(struct velocity_info *vptr)
1879{
1880 velocity_free_td_ring(vptr);
1881 velocity_free_rd_ring(vptr);
1882 velocity_free_dma_rings(vptr);
1883} 2165}
1884 2166
1885/** 2167/**
@@ -1892,7 +2174,6 @@ static void velocity_free_rings(struct velocity_info *vptr)
1892 * All the ring allocation and set up is done on open for this 2174 * All the ring allocation and set up is done on open for this
1893 * adapter to minimise memory usage when inactive 2175 * adapter to minimise memory usage when inactive
1894 */ 2176 */
1895
1896static int velocity_open(struct net_device *dev) 2177static int velocity_open(struct net_device *dev)
1897{ 2178{
1898 struct velocity_info *vptr = netdev_priv(dev); 2179 struct velocity_info *vptr = netdev_priv(dev);
@@ -1926,6 +2207,24 @@ out:
1926} 2207}
1927 2208
1928/** 2209/**
2210 * velocity_shutdown - shut down the chip
2211 * @vptr: velocity to deactivate
2212 *
2213 * Shuts down the internal operations of the velocity and
2214 * disables interrupts, autopolling, transmit and receive
2215 */
2216static void velocity_shutdown(struct velocity_info *vptr)
2217{
2218 struct mac_regs __iomem *regs = vptr->mac_regs;
2219 mac_disable_int(regs);
2220 writel(CR0_STOP, &regs->CR0Set);
2221 writew(0xFFFF, &regs->TDCSRClr);
2222 writeb(0xFF, &regs->RDCSRClr);
2223 safe_disable_mii_autopoll(regs);
2224 mac_clear_isr(regs);
2225}
2226
2227/**
1929 * velocity_change_mtu - MTU change callback 2228 * velocity_change_mtu - MTU change callback
1930 * @dev: network device 2229 * @dev: network device
1931 * @new_mtu: desired MTU 2230 * @new_mtu: desired MTU
@@ -1934,7 +2233,6 @@ out:
1934 * this interface. It gets called on a change by the network layer. 2233 * this interface. It gets called on a change by the network layer.
1935 * Return zero for success or negative posix error code. 2234 * Return zero for success or negative posix error code.
1936 */ 2235 */
1937
1938static int velocity_change_mtu(struct net_device *dev, int new_mtu) 2236static int velocity_change_mtu(struct net_device *dev, int new_mtu)
1939{ 2237{
1940 struct velocity_info *vptr = netdev_priv(dev); 2238 struct velocity_info *vptr = netdev_priv(dev);
@@ -2008,22 +2306,127 @@ out_0:
2008} 2306}
2009 2307
2010/** 2308/**
2011 * velocity_shutdown - shut down the chip 2309 * velocity_mii_ioctl - MII ioctl handler
2012 * @vptr: velocity to deactivate 2310 * @dev: network device
2311 * @ifr: the ifreq block for the ioctl
2312 * @cmd: the command
2013 * 2313 *
2014 * Shuts down the internal operations of the velocity and 2314 * Process MII requests made via ioctl from the network layer. These
2015 * disables interrupts, autopolling, transmit and receive 2315 * are used by tools like kudzu to interrogate the link state of the
2316 * hardware
2016 */ 2317 */
2017 2318static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2018static void velocity_shutdown(struct velocity_info *vptr)
2019{ 2319{
2320 struct velocity_info *vptr = netdev_priv(dev);
2020 struct mac_regs __iomem *regs = vptr->mac_regs; 2321 struct mac_regs __iomem *regs = vptr->mac_regs;
2021 mac_disable_int(regs); 2322 unsigned long flags;
2022 writel(CR0_STOP, &regs->CR0Set); 2323 struct mii_ioctl_data *miidata = if_mii(ifr);
2023 writew(0xFFFF, &regs->TDCSRClr); 2324 int err;
2024 writeb(0xFF, &regs->RDCSRClr); 2325
2025 safe_disable_mii_autopoll(regs); 2326 switch (cmd) {
2026 mac_clear_isr(regs); 2327 case SIOCGMIIPHY:
2328 miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
2329 break;
2330 case SIOCGMIIREG:
2331 if (!capable(CAP_NET_ADMIN))
2332 return -EPERM;
2333 if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
2334 return -ETIMEDOUT;
2335 break;
2336 case SIOCSMIIREG:
2337 if (!capable(CAP_NET_ADMIN))
2338 return -EPERM;
2339 spin_lock_irqsave(&vptr->lock, flags);
2340 err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
2341 spin_unlock_irqrestore(&vptr->lock, flags);
2342 check_connection_type(vptr->mac_regs);
2343 if (err)
2344 return err;
2345 break;
2346 default:
2347 return -EOPNOTSUPP;
2348 }
2349 return 0;
2350}
2351
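For context, this is the same path a user-space diagnostic tool exercises through the standard MII ioctls. A minimal sketch of such a caller (hypothetical interface name "eth0", minimal error handling; BMSR bit 2 is the link-status bit):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	if (fd < 0 || ioctl(fd, SIOCGMIIPHY, &ifr) < 0)	/* PHY address */
		return 1;
	mii->reg_num = MII_BMSR;			/* status register */
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)		/* read it back */
		return 1;
	printf("BMSR=0x%04x link=%u\n", mii->val_out,
	       !!(mii->val_out & BMSR_LSTATUS));
	close(fd);
	return 0;
}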
2352
2353/**
2354 * velocity_ioctl - ioctl entry point
2355 * @dev: network device
2356 * @rq: interface request ioctl
2357 * @cmd: command code
2358 *
2359 * Called when the user issues an ioctl request to the network
2360 * device in question. The velocity interface supports MII.
2361 */
2362static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2363{
2364 struct velocity_info *vptr = netdev_priv(dev);
2365 int ret;
2366
2367 /* If we are asked for information and the device is power
2368 saving then we need to bring the device back up to talk to it */
2369
2370 if (!netif_running(dev))
2371 pci_set_power_state(vptr->pdev, PCI_D0);
2372
2373 switch (cmd) {
2374 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
2375 case SIOCGMIIREG: /* Read MII PHY register. */
2376 case SIOCSMIIREG: /* Write to MII PHY register. */
2377 ret = velocity_mii_ioctl(dev, rq, cmd);
2378 break;
2379
2380 default:
2381 ret = -EOPNOTSUPP;
2382 }
2383 if (!netif_running(dev))
2384 pci_set_power_state(vptr->pdev, PCI_D3hot);
2385
2386
2387 return ret;
2388}
2389
2390/**
2391 * velocity_get_stats - statistics callback
2392 * @dev: network device
2393 *
2394 * Callback from the network layer to allow driver statistics
2395 * to be resynchronized with hardware collected state. In the
2396 * case of the velocity we need to pull the MIB counters from
2397 * the hardware into the counters before letting the network
2398 * layer display them.
2399 */
2400static struct net_device_stats *velocity_get_stats(struct net_device *dev)
2401{
2402 struct velocity_info *vptr = netdev_priv(dev);
2403
2404 /* If the hardware is down, don't touch MII */
2405 if (!netif_running(dev))
2406 return &dev->stats;
2407
2408 spin_lock_irq(&vptr->lock);
2409 velocity_update_hw_mibs(vptr);
2410 spin_unlock_irq(&vptr->lock);
2411
2412 dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
2413 dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
2414 dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
2415
2416// unsigned long rx_dropped; /* no space in linux buffers */
2417 dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
2418 /* detailed rx_errors: */
2419// unsigned long rx_length_errors;
2420// unsigned long rx_over_errors; /* receiver ring buff overflow */
2421 dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
2422// unsigned long rx_frame_errors; /* recv'd frame alignment error */
2423// unsigned long rx_fifo_errors; /* recv'r fifo overrun */
2424// unsigned long rx_missed_errors; /* receiver missed packet */
2425
2426 /* detailed tx_errors */
2427// unsigned long tx_fifo_errors;
2428
2429 return &dev->stats;
2027} 2430}
2028 2431
2029/** 2432/**
@@ -2033,7 +2436,6 @@ static void velocity_shutdown(struct velocity_info *vptr)
2033 * Callback from the network layer when the velocity is being 2436 * Callback from the network layer when the velocity is being
2034 * deactivated by the network layer 2437 * deactivated by the network layer
2035 */ 2438 */
2036
2037static int velocity_close(struct net_device *dev) 2439static int velocity_close(struct net_device *dev)
2038{ 2440{
2039 struct velocity_info *vptr = netdev_priv(dev); 2441 struct velocity_info *vptr = netdev_priv(dev);
@@ -2063,7 +2465,6 @@ static int velocity_close(struct net_device *dev)
2063 * Called by the network layer to request a packet is queued to 2465 * Called by the network layer to request a packet is queued to
2064 * the velocity. Returns zero on success. 2466 * the velocity. Returns zero on success.
2065 */ 2467 */
2066
2067static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) 2468static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2068{ 2469{
2069 struct velocity_info *vptr = netdev_priv(dev); 2470 struct velocity_info *vptr = netdev_priv(dev);
@@ -2075,7 +2476,6 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2075 __le16 len; 2476 __le16 len;
2076 int index; 2477 int index;
2077 2478
2078
2079 if (skb_padto(skb, ETH_ZLEN)) 2479 if (skb_padto(skb, ETH_ZLEN))
2080 goto out; 2480 goto out;
2081 pktlen = max_t(unsigned int, skb->len, ETH_ZLEN); 2481 pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
@@ -2145,780 +2545,533 @@ out:
2145 return NETDEV_TX_OK; 2545 return NETDEV_TX_OK;
2146} 2546}
2147 2547
2548
2549static const struct net_device_ops velocity_netdev_ops = {
2550 .ndo_open = velocity_open,
2551 .ndo_stop = velocity_close,
2552 .ndo_start_xmit = velocity_xmit,
2553 .ndo_get_stats = velocity_get_stats,
2554 .ndo_validate_addr = eth_validate_addr,
2555 .ndo_set_mac_address = eth_mac_addr,
2556 .ndo_set_multicast_list = velocity_set_multi,
2557 .ndo_change_mtu = velocity_change_mtu,
2558 .ndo_do_ioctl = velocity_ioctl,
2559 .ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid,
2560 .ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid,
2561 .ndo_vlan_rx_register = velocity_vlan_rx_register,
2562};
2563
2148/** 2564/**
2149 * velocity_intr - interrupt callback 2565 * velocity_init_info - init private data
2150 * @irq: interrupt number 2566 * @pdev: PCI device
2151 * @dev_instance: interrupting device 2567 * @vptr: Velocity info
2568 * @info: Board type
2152 * 2569 *
2153 * Called whenever an interrupt is generated by the velocity 2570 * Set up the initial velocity_info struct for the device that has been
2154 * adapter IRQ line. We may not be the source of the interrupt 2571 * discovered.
2155 * and need to identify initially if we are, and if not exit as
2156 * efficiently as possible.
2157 */ 2572 */
2158 2573static void __devinit velocity_init_info(struct pci_dev *pdev,
2159static irqreturn_t velocity_intr(int irq, void *dev_instance) 2574 struct velocity_info *vptr,
2575 const struct velocity_info_tbl *info)
2160{ 2576{
2161 struct net_device *dev = dev_instance; 2577 memset(vptr, 0, sizeof(struct velocity_info));
2162 struct velocity_info *vptr = netdev_priv(dev);
2163 u32 isr_status;
2164 int max_count = 0;
2165
2166
2167 spin_lock(&vptr->lock);
2168 isr_status = mac_read_isr(vptr->mac_regs);
2169
2170 /* Not us ? */
2171 if (isr_status == 0) {
2172 spin_unlock(&vptr->lock);
2173 return IRQ_NONE;
2174 }
2175
2176 mac_disable_int(vptr->mac_regs);
2177
2178 /*
2179 * Keep processing the ISR until we have completed
2180 * processing and the isr_status becomes zero
2181 */
2182
2183 while (isr_status != 0) {
2184 mac_write_isr(vptr->mac_regs, isr_status);
2185 if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2186 velocity_error(vptr, isr_status);
2187 if (isr_status & (ISR_PRXI | ISR_PPRXI))
2188 max_count += velocity_rx_srv(vptr, isr_status);
2189 if (isr_status & (ISR_PTXI | ISR_PPTXI))
2190 max_count += velocity_tx_srv(vptr, isr_status);
2191 isr_status = mac_read_isr(vptr->mac_regs);
2192 if (max_count > vptr->options.int_works) {
2193 printk(KERN_WARNING "%s: excessive work at interrupt.\n",
2194 dev->name);
2195 max_count = 0;
2196 }
2197 }
2198 spin_unlock(&vptr->lock);
2199 mac_enable_int(vptr->mac_regs);
2200 return IRQ_HANDLED;
2201 2578
2579 vptr->pdev = pdev;
2580 vptr->chip_id = info->chip_id;
2581 vptr->tx.numq = info->txqueue;
2582 vptr->multicast_limit = MCAM_SIZE;
2583 spin_lock_init(&vptr->lock);
2584 INIT_LIST_HEAD(&vptr->list);
2202} 2585}
2203 2586
2204
2205/** 2587/**
2206 * velocity_set_multi - filter list change callback 2588 * velocity_get_pci_info - retrieve PCI info for device
2207 * @dev: network device 2589 * @vptr: velocity device
2590 * @pdev: PCI device it matches
2208 * 2591 *
2209 * Called by the network layer when the filter lists need to change 2592 * Retrieve the PCI configuration space data that interests us from
2210 * for a velocity adapter. Reload the CAMs with the new address 2593 * the kernel PCI layer
2211 * filter ruleset.
2212 */ 2594 */
2213 2595static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev)
2214static void velocity_set_multi(struct net_device *dev)
2215{ 2596{
2216 struct velocity_info *vptr = netdev_priv(dev); 2597 vptr->rev_id = pdev->revision;
2217 struct mac_regs __iomem *regs = vptr->mac_regs;
2218 u8 rx_mode;
2219 int i;
2220 struct dev_mc_list *mclist;
2221 2598
2222 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 2599 pci_set_master(pdev);
2223 writel(0xffffffff, &regs->MARCAM[0]);
2224 writel(0xffffffff, &regs->MARCAM[4]);
2225 rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
2226 } else if ((dev->mc_count > vptr->multicast_limit)
2227 || (dev->flags & IFF_ALLMULTI)) {
2228 writel(0xffffffff, &regs->MARCAM[0]);
2229 writel(0xffffffff, &regs->MARCAM[4]);
2230 rx_mode = (RCR_AM | RCR_AB);
2231 } else {
2232 int offset = MCAM_SIZE - vptr->multicast_limit;
2233 mac_get_cam_mask(regs, vptr->mCAMmask);
2234 2600
2235 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) { 2601 vptr->ioaddr = pci_resource_start(pdev, 0);
2236 mac_set_cam(regs, i + offset, mclist->dmi_addr); 2602 vptr->memaddr = pci_resource_start(pdev, 1);
2237 vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
2238 }
2239 2603
2240 mac_set_cam_mask(regs, vptr->mCAMmask); 2604 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2241 rx_mode = RCR_AM | RCR_AB | RCR_AP; 2605 dev_err(&pdev->dev,
2606 "region #0 is not an I/O resource, aborting.\n");
2607 return -EINVAL;
2242 } 2608 }
2243 if (dev->mtu > 1500)
2244 rx_mode |= RCR_AL;
2245 2609
2246 BYTE_REG_BITS_ON(rx_mode, &regs->RCR); 2610 if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
2611 dev_err(&pdev->dev,
2612 "region #1 is an I/O resource, aborting.\n");
2613 return -EINVAL;
2614 }
2615
2616 if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
2617 dev_err(&pdev->dev, "region #1 is too small.\n");
2618 return -EINVAL;
2619 }
2620 vptr->pdev = pdev;
2247 2621
2622 return 0;
2248} 2623}
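The multicast path above records which CAM slots are loaded using a plain bitmap, one bit per slot and eight slots per byte of mCAMmask. The indexing reduces to the standard bitmap idiom (illustrative helper, not a function in this driver):

/* Set bit n of a CAM mask: byte n/8, bit n%8 -- the same arithmetic
 * as mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7) above. */
static inline void cam_mask_set(unsigned char *mask, unsigned int n)
{
	mask[n / 8] |= 1U << (n & 7);
}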
2249 2624
2250/** 2625/**
2251 * velocity_get_stats - statistics callback 2626 * velocity_print_info - per driver data
2252 * @dev: network device 2627 * @vptr: velocity
2253 * 2628 *
2254 * Callback from the network layer to allow driver statistics 2629 * Print per driver data as the kernel driver finds Velocity
2255 * to be resynchronized with hardware collected state. In the 2630 * hardware
2256 * case of the velocity we need to pull the MIB counters from
2257 * the hardware into the counters before letting the network
2258 * layer display them.
2259 */ 2631 */
2260 2632static void __devinit velocity_print_info(struct velocity_info *vptr)
2261static struct net_device_stats *velocity_get_stats(struct net_device *dev)
2262{ 2633{
2263 struct velocity_info *vptr = netdev_priv(dev); 2634 struct net_device *dev = vptr->dev;
2264
2265 /* If the hardware is down, don't touch MII */
2266 if (!netif_running(dev))
2267 return &dev->stats;
2268
2269 spin_lock_irq(&vptr->lock);
2270 velocity_update_hw_mibs(vptr);
2271 spin_unlock_irq(&vptr->lock);
2272
2273 dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
2274 dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
2275 dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
2276
2277// unsigned long rx_dropped; /* no space in linux buffers */
2278 dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
2279 /* detailed rx_errors: */
2280// unsigned long rx_length_errors;
2281// unsigned long rx_over_errors; /* receiver ring buff overflow */
2282 dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
2283// unsigned long rx_frame_errors; /* recv'd frame alignment error */
2284// unsigned long rx_fifo_errors; /* recv'r fifo overrun */
2285// unsigned long rx_missed_errors; /* receiver missed packet */
2286
2287 /* detailed tx_errors */
2288// unsigned long tx_fifo_errors;
2289 2635
2290 return &dev->stats; 2636 printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
2637 printk(KERN_INFO "%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
2638 dev->name,
2639 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
2640 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
2291} 2641}
2292 2642
2293 2643static u32 velocity_get_link(struct net_device *dev)
2294/**
2295 * velocity_ioctl - ioctl entry point
2296 * @dev: network device
2297 * @rq: interface request ioctl
2298 * @cmd: command code
2299 *
2300 * Called when the user issues an ioctl request to the network
2301 * device in question. The velocity interface supports MII.
2302 */
2303
2304static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2305{ 2644{
2306 struct velocity_info *vptr = netdev_priv(dev); 2645 struct velocity_info *vptr = netdev_priv(dev);
2307 int ret; 2646 struct mac_regs __iomem *regs = vptr->mac_regs;
2308 2647 return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
2309 /* If we are asked for information and the device is power
2310 saving then we need to bring the device back up to talk to it */
2311
2312 if (!netif_running(dev))
2313 pci_set_power_state(vptr->pdev, PCI_D0);
2314
2315 switch (cmd) {
2316 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
2317 case SIOCGMIIREG: /* Read MII PHY register. */
2318 case SIOCSMIIREG: /* Write to MII PHY register. */
2319 ret = velocity_mii_ioctl(dev, rq, cmd);
2320 break;
2321
2322 default:
2323 ret = -EOPNOTSUPP;
2324 }
2325 if (!netif_running(dev))
2326 pci_set_power_state(vptr->pdev, PCI_D3hot);
2327
2328
2329 return ret;
2330} 2648}
2331 2649
2332/*
2333 * Definition for our device driver. The PCI layer interface
2334 * uses this to handle all our card discover and plugging
2335 */
2336
2337static struct pci_driver velocity_driver = {
2338 .name = VELOCITY_NAME,
2339 .id_table = velocity_id_table,
2340 .probe = velocity_found1,
2341 .remove = __devexit_p(velocity_remove1),
2342#ifdef CONFIG_PM
2343 .suspend = velocity_suspend,
2344 .resume = velocity_resume,
2345#endif
2346};
2347 2650
2348/** 2651/**
2349 * velocity_init_module - load time function 2652 * velocity_found1 - set up discovered velocity card
2653 * @pdev: PCI device
2654 * @ent: PCI device table entry that matched
2350 * 2655 *
2351 * Called when the velocity module is loaded. The PCI driver 2656 * Configure a discovered adapter from scratch. Return a negative
2352 * is registered with the PCI layer, and in turn will call 2657 * errno error code on failure paths.
2353 * the probe functions for each velocity adapter installed
2354 * in the system.
2355 */ 2658 */
2356 2659static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent)
2357static int __init velocity_init_module(void)
2358{ 2660{
2359 int ret; 2661 static int first = 1;
2662 struct net_device *dev;
2663 int i;
2664 const char *drv_string;
2665 const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data];
2666 struct velocity_info *vptr;
2667 struct mac_regs __iomem *regs;
2668 int ret = -ENOMEM;
2360 2669
2361 velocity_register_notifier(); 2670 /* FIXME: this driver, like almost all other ethernet drivers,
2362 ret = pci_register_driver(&velocity_driver); 2671 * can support more than MAX_UNITS.
2363 if (ret < 0) 2672 */
2364 velocity_unregister_notifier(); 2673 if (velocity_nics >= MAX_UNITS) {
2365 return ret; 2674 dev_notice(&pdev->dev, "already found %d NICs.\n",
2366} 2675 velocity_nics);
2676 return -ENODEV;
2677 }
2367 2678
2368/** 2679 dev = alloc_etherdev(sizeof(struct velocity_info));
2369 * velocity_cleanup - module unload 2680 if (!dev) {
2370 * 2681 dev_err(&pdev->dev, "allocate net device failed.\n");
2371 * When the velocity hardware is unloaded this function is called. 2682 goto out;
2372 * It will clean up the notifiers and the unregister the PCI 2683 }
2373 * driver interface for this hardware. This in turn cleans up
2374 * all discovered interfaces before returning from the function
2375 */
2376 2684
2377static void __exit velocity_cleanup_module(void) 2685 /* Chain it all together */
2378{
2379 velocity_unregister_notifier();
2380 pci_unregister_driver(&velocity_driver);
2381}
2382 2686
2383module_init(velocity_init_module); 2687 SET_NETDEV_DEV(dev, &pdev->dev);
2384module_exit(velocity_cleanup_module); 2688 vptr = netdev_priv(dev);
2385 2689
2386 2690
2387/* 2691 if (first) {
2388 * MII access , media link mode setting functions 2692 printk(KERN_INFO "%s Ver. %s\n",
2389 */ 2693 VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
2694 printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
2695 printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
2696 first = 0;
2697 }
2390 2698
2699 velocity_init_info(pdev, vptr, info);
2391 2700
2392/** 2701 vptr->dev = dev;
2393 * mii_init - set up MII
2394 * @vptr: velocity adapter
2395 * @mii_status: link status
2396 *
2397 * Set up the PHY for the current link state.
2398 */
2399 2702
2400static void mii_init(struct velocity_info *vptr, u32 mii_status) 2703 dev->irq = pdev->irq;
2401{
2402 u16 BMCR;
2403 2704
2404 switch (PHYID_GET_PHY_ID(vptr->phy_id)) { 2705 ret = pci_enable_device(pdev);
2405 case PHYID_CICADA_CS8201: 2706 if (ret < 0)
2406 /* 2707 goto err_free_dev;
2407 * Reset to hardware default
2408 */
2409 MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
2410 /*
2411 * Turn on ECHODIS bit in NWay-forced full mode and turn it
2412 * off in NWay-forced half mode for the NWay-forced vs.
2413 * legacy-forced issue.
2414 */
2415 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
2416 MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
2417 else
2418 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
2419 /*
2420 * Turn on Link/Activity LED enable bit for CIS8201
2421 */
2422 MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs);
2423 break;
2424 case PHYID_VT3216_32BIT:
2425 case PHYID_VT3216_64BIT:
2426 /*
2427 * Reset to hardware default
2428 */
2429 MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
2430 /*
2431 * Turn on ECHODIS bit in NWay-forced full mode and turn it
2432 * off in NWay-forced half mode for the NWay-forced vs.
2433 * legacy-forced issue.
2434 */
2435 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
2436 MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
2437 else
2438 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
2439 break;
2440 2708
2441 case PHYID_MARVELL_1000: 2709 ret = velocity_get_pci_info(vptr, pdev);
2442 case PHYID_MARVELL_1000S: 2710 if (ret < 0) {
2443 /* 2711 /* error message already printed */
2444 * Assert CRS on Transmit 2712 goto err_disable;
2445 */
2446 MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
2447 /*
2448 * Reset to hardware default
2449 */
2450 MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
2451 break;
2452 default:
2453 ;
2454 }
2455 velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR);
2456 if (BMCR & BMCR_ISO) {
2457 BMCR &= ~BMCR_ISO;
2458 velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR);
2459 } 2713 }
2460}
2461 2714
2462/** 2715 ret = pci_request_regions(pdev, VELOCITY_NAME);
2463 * safe_disable_mii_autopoll - autopoll off 2716 if (ret < 0) {
2464 * @regs: velocity registers 2717 dev_err(&pdev->dev, "No PCI resources.\n");
2465 * 2718 goto err_disable;
2466 * Turn off the autopoll and wait for it to disable on the chip
2467 */
2468
2469static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
2470{
2471 u16 ww;
2472
2473 /* turn off MAUTO */
2474 writeb(0, &regs->MIICR);
2475 for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
2476 udelay(1);
2477 if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
2478 break;
2479 } 2719 }
2480}
2481 2720
2482/** 2721 regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
2483 * enable_mii_autopoll - turn on autopolling 2722 if (regs == NULL) {
2484 * @regs: velocity registers 2723 ret = -EIO;
2485 * 2724 goto err_release_res;
2486 * Enable the MII link status autopoll feature on the Velocity 2725 }
2487 * hardware. Wait for it to enable.
2488 */
2489
2490static void enable_mii_autopoll(struct mac_regs __iomem *regs)
2491{
2492 int ii;
2493 2726
2494 writeb(0, &(regs->MIICR)); 2727 vptr->mac_regs = regs;
2495 writeb(MIIADR_SWMPL, &regs->MIIADR);
2496 2728
2497 for (ii = 0; ii < W_MAX_TIMEOUT; ii++) { 2729 mac_wol_reset(regs);
2498 udelay(1);
2499 if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
2500 break;
2501 }
2502 2730
2503 writeb(MIICR_MAUTO, &regs->MIICR); 2731 dev->base_addr = vptr->ioaddr;
2504 2732
2505 for (ii = 0; ii < W_MAX_TIMEOUT; ii++) { 2733 for (i = 0; i < 6; i++)
2506 udelay(1); 2734 dev->dev_addr[i] = readb(&regs->PAR[i]);
2507 if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
2508 break;
2509 }
2510 2735
2511}
2512 2736
2513/** 2737 drv_string = dev_driver_string(&pdev->dev);
2514 * velocity_mii_read - read MII data
2515 * @regs: velocity registers
2516 * @index: MII register index
2517 * @data: buffer for received data
2518 *
2519 * Perform a single read of an MII 16bit register. Returns zero
2520 * on success or -ETIMEDOUT if the PHY did not respond.
2521 */
2522 2738
2523static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data) 2739 velocity_get_options(&vptr->options, velocity_nics, drv_string);
2524{
2525 u16 ww;
2526 2740
2527 /* 2741 /*
2528 * Disable MIICR_MAUTO, so that mii addr can be set normally 2742 * Mask out the options that cannot be set on the chip
2529 */ 2743 */
2530 safe_disable_mii_autopoll(regs);
2531
2532 writeb(index, &regs->MIIADR);
2533 2744
2534 BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR); 2745 vptr->options.flags &= info->flags;
2535 2746
2536 for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { 2747 /*
2537 if (!(readb(&regs->MIICR) & MIICR_RCMD)) 2748 * Enable the chip-specified capabilities
2538 break; 2749 */
2539 }
2540 2750
2541 *data = readw(&regs->MIIDATA); 2751 vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
2542 2752
2543 enable_mii_autopoll(regs); 2753 vptr->wol_opts = vptr->options.wol_opts;
2544 if (ww == W_MAX_TIMEOUT) 2754 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
2545 return -ETIMEDOUT;
2546 return 0;
2547}
2548 2755
2549/** 2756 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2550 * velocity_mii_write - write MII data
2551 * @regs: velocity registers
2552 * @index: MII register index
2553 * @data: 16bit data for the MII register
2554 *
2555 * Perform a single write to an MII 16bit register. Returns zero
2556 * on success or -ETIMEDOUT if the PHY did not respond.
2557 */
2558 2757
2559static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data) 2758 dev->irq = pdev->irq;
2560{ 2759 dev->netdev_ops = &velocity_netdev_ops;
2561 u16 ww; 2760 dev->ethtool_ops = &velocity_ethtool_ops;
2562 2761
2563 /* 2762 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2564 * Disable MIICR_MAUTO, so that mii addr can be set normally 2763 NETIF_F_HW_VLAN_RX;
2565 */
2566 safe_disable_mii_autopoll(regs);
2567 2764
2568 /* MII reg offset */ 2765 if (vptr->flags & VELOCITY_FLAGS_TX_CSUM)
2569 writeb(mii_addr, &regs->MIIADR); 2766 dev->features |= NETIF_F_IP_CSUM;
2570 /* set MII data */
2571 writew(data, &regs->MIIDATA);
2572 2767
2573 /* turn on MIICR_WCMD */ 2768 ret = register_netdev(dev);
2574 BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR); 2769 if (ret < 0)
2770 goto err_iounmap;
2575 2771
2576 /* W_MAX_TIMEOUT is the timeout period */ 2772 if (!velocity_get_link(dev)) {
2577 for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { 2773 netif_carrier_off(dev);
2578 udelay(5); 2774 vptr->mii_status |= VELOCITY_LINK_FAIL;
2579 if (!(readb(&regs->MIICR) & MIICR_WCMD))
2580 break;
2581 } 2775 }
2582 enable_mii_autopoll(regs);
2583 2776
2584 if (ww == W_MAX_TIMEOUT) 2777 velocity_print_info(vptr);
2585 return -ETIMEDOUT; 2778 pci_set_drvdata(pdev, dev);
2586 return 0;
2587}
2588 2779
2589/** 2780 /* and leave the chip powered down */
2590 * velocity_get_opt_media_mode - get media selection
2591 * @vptr: velocity adapter
2592 *
2593 * Get the media mode stored in EEPROM or module options and load
2594 * mii_status accordingly. The requested link state information
2595 * is also returned.
2596 */
2597 2781
2598static u32 velocity_get_opt_media_mode(struct velocity_info *vptr) 2782 pci_set_power_state(pdev, PCI_D3hot);
2599{ 2783#ifdef CONFIG_PM
2600 u32 status = 0; 2784 {
2785 unsigned long flags;
2601 2786
2602 switch (vptr->options.spd_dpx) { 2787 spin_lock_irqsave(&velocity_dev_list_lock, flags);
2603 case SPD_DPX_AUTO: 2788 list_add(&vptr->list, &velocity_dev_list);
2604 status = VELOCITY_AUTONEG_ENABLE; 2789 spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
2605 break;
2606 case SPD_DPX_100_FULL:
2607 status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
2608 break;
2609 case SPD_DPX_10_FULL:
2610 status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
2611 break;
2612 case SPD_DPX_100_HALF:
2613 status = VELOCITY_SPEED_100;
2614 break;
2615 case SPD_DPX_10_HALF:
2616 status = VELOCITY_SPEED_10;
2617 break;
2618 } 2790 }
2619 vptr->mii_status = status; 2791#endif
2620 return status; 2792 velocity_nics++;
2621} 2793out:
2622 2794 return ret;
2623/**
2624 * mii_set_auto_on - autonegotiate on
2625 * @vptr: velocity
2626 *
2627 * Enable autonegotiation on this interface
2628 */
2629 2795
2630static void mii_set_auto_on(struct velocity_info *vptr) 2796err_iounmap:
2631{ 2797 iounmap(regs);
2632 if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs)) 2798err_release_res:
2633 MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs); 2799 pci_release_regions(pdev);
2634 else 2800err_disable:
2635 MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); 2801 pci_disable_device(pdev);
2802err_free_dev:
2803 free_netdev(dev);
2804 goto out;
2636} 2805}
2637 2806
2638 2807
2639/* 2808#ifdef CONFIG_PM
2640static void mii_set_auto_off(struct velocity_info *vptr)
2641{
2642 MII_REG_BITS_OFF(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
2643}
2644*/
2645
2646/** 2809/**
2647 * set_mii_flow_control - flow control setup 2810 * wol_calc_crc - WOL CRC
2648 * @vptr: velocity interface 2811 * @pattern: data pattern
2812 * @mask_pattern: mask
2649 * 2813 *
2650 * Set up the flow control on this interface according to 2814 * Compute the wake on lan crc hashes for the packet header
2651 * the supplied user/eeprom options. 2815 * we are interested in.
2652 */ 2816 */
2653 2817static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
2654static void set_mii_flow_control(struct velocity_info *vptr)
2655{ 2818{
2656 /*Enable or Disable PAUSE in ANAR */ 2819 u16 crc = 0xFFFF;
2657 switch (vptr->options.flow_cntl) { 2820 u8 mask;
2658 case FLOW_CNTL_TX: 2821 int i, j;
2659 MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
2660 MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
2661 break;
2662 2822
2663 case FLOW_CNTL_RX: 2823 for (i = 0; i < size; i++) {
2664 MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); 2824 mask = mask_pattern[i];
2665 MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
2666 break;
2667 2825
2668 case FLOW_CNTL_TX_RX: 2826 /* Skip this loop if the mask equals to zero */
2669 MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); 2827 if (mask == 0x00)
2670 MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); 2828 continue;
2671 break;
2672 2829
2673 case FLOW_CNTL_DISABLE: 2830 for (j = 0; j < 8; j++) {
2674 MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); 2831 if ((mask & 0x01) == 0) {
2675 MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); 2832 mask >>= 1;
2676 break; 2833 continue;
2677 default: 2834 }
2678 break; 2835 mask >>= 1;
2836 crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
2837 }
2679 } 2838 }
2839 /* Finally, invert the result once to get the correct data */
2840 crc = ~crc;
2841 return bitrev32(crc) >> 16;
2680} 2842}
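The final two steps above fold the CCITT remainder into the bit order the pattern-match hardware expects: invert, then reverse the 16 significant bits (bitrev32() on a 16-bit value followed by a 16-bit shift is exactly a 16-bit reversal). A plain C restatement of that last transform, for illustration only:

/* Reverse the 16 bits of x; equivalent to bitrev32(x) >> 16 when
 * only the low 16 bits of x are set. */
static unsigned short bitrev16(unsigned short x)
{
	unsigned short r = 0;
	int i;

	for (i = 0; i < 16; i++)
		if (x & (1U << i))
			r |= 1U << (15 - i);
	return r;
}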
2681 2843
2682/** 2844/**
2683 * velocity_set_media_mode - set media mode 2845 * velocity_set_wol - set up for wake on lan
2684 * @mii_status: old MII link state 2846 * @vptr: velocity to set WOL status on
2685 * 2847 *
2686 * Check the media link state and configure the flow control 2848 * Set a card up for wake on lan either by unicast or by
2687 * PHY and also velocity hardware setup accordingly. In particular 2849 * ARP packet.
2688 * we need to set up CD polling and frame bursting. 2850 *
2851 * FIXME: check static buffer is safe here
2689 */ 2852 */
2690 2853static int velocity_set_wol(struct velocity_info *vptr)
2691static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
2692{ 2854{
2693 u32 curr_status;
2694 struct mac_regs __iomem *regs = vptr->mac_regs; 2855 struct mac_regs __iomem *regs = vptr->mac_regs;
2856 static u8 buf[256];
2857 int i;
2695 2858
2696 vptr->mii_status = mii_check_media_mode(vptr->mac_regs); 2859 static u32 mask_pattern[2][4] = {
2697 curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL); 2860 {0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */
2861 {0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff} /* Magic Packet */
2862 };
2698 2863
2699 /* Set mii link status */ 2864 writew(0xFFFF, &regs->WOLCRClr);
2700 set_mii_flow_control(vptr); 2865 writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
2866 writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
2701 2867
2702 /* 2868 /*
2703 Check if new status is consistent with current status 2869 if (vptr->wol_opts & VELOCITY_WOL_PHY)
2704 if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) 2870 writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
2705 || (mii_status==curr_status)) {
2706 vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
2707 vptr->mii_status=check_connection_type(vptr->mac_regs);
2708 VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
2709 return 0;
2710 }
2711 */ 2871 */
2712 2872
2713 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) 2873 if (vptr->wol_opts & VELOCITY_WOL_UCAST)
2714 MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs); 2874 writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
2715 2875
2716 /* 2876 if (vptr->wol_opts & VELOCITY_WOL_ARP) {
2717 * If connection type is AUTO 2877 struct arp_packet *arp = (struct arp_packet *) buf;
2718 */ 2878 u16 crc;
2719 if (mii_status & VELOCITY_AUTONEG_ENABLE) { 2879 memset(buf, 0, sizeof(struct arp_packet) + 7);
2720 VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n");
2721 /* clear force MAC mode bit */
2722 BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
2723 /* set duplex mode of MAC according to duplex mode of MII */
2724 MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs);
2725 MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
2726 MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs);
2727 2880
2728 /* enable AUTO-NEGO mode */ 2881 for (i = 0; i < 4; i++)
2729 mii_set_auto_on(vptr); 2882 writel(mask_pattern[0][i], &regs->ByteMask[0][i]);
2730 } else {
2731 u16 ANAR;
2732 u8 CHIPGCR;
2733 2883
2734 /* 2884 arp->type = htons(ETH_P_ARP);
2735 * 1. if it's 3119, disable frame bursting in halfduplex mode 2885 arp->ar_op = htons(1);
2736 * and enable it in fullduplex mode
2737 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
2738 * 3. only enable CD heart beat counter in 10HD mode
2739 */
2740 2886
2741 /* set force MAC mode bit */ 2887 memcpy(arp->ar_tip, vptr->ip_addr, 4);
2742 BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
2743 2888
2744 CHIPGCR = readb(&regs->CHIPGCR); 2889 crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
2745 CHIPGCR &= ~CHIPGCR_FCGMII; 2890 (u8 *) & mask_pattern[0][0]);
2746 2891
2747 if (mii_status & VELOCITY_DUPLEX_FULL) { 2892 writew(crc, &regs->PatternCRC[0]);
2748 CHIPGCR |= CHIPGCR_FCFDX; 2893 writew(WOLCR_ARP_EN, &regs->WOLCRSet);
2749 writeb(CHIPGCR, &regs->CHIPGCR); 2894 }
2750 VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n"); 2895
2751 if (vptr->rev_id < REV_ID_VT3216_A0) 2896 BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
2752 BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR); 2897 BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);
2753 } else { 2898
2754 CHIPGCR &= ~CHIPGCR_FCFDX; 2899 writew(0x0FFF, &regs->WOLSRClr);
2755 VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n"); 2900
2756 writeb(CHIPGCR, &regs->CHIPGCR); 2901 if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
2757 if (vptr->rev_id < REV_ID_VT3216_A0) 2902 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
2758 BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR); 2903 MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
2759 }
2760 2904
2761 MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs); 2905 MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
2906 }
2762 2907
2763 if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10)) 2908 if (vptr->mii_status & VELOCITY_SPEED_1000)
2764 BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG); 2909 MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
2765 else
2766 BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
2767 2910
2768 /* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */ 2911 BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
2769 velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR); 2912
2770 ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)); 2913 {
2771 if (mii_status & VELOCITY_SPEED_100) { 2914 u8 GCR;
2772 if (mii_status & VELOCITY_DUPLEX_FULL) 2915 GCR = readb(&regs->CHIPGCR);
2773 ANAR |= ANAR_TXFD; 2916 GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
2774 else 2917 writeb(GCR, &regs->CHIPGCR);
2775 ANAR |= ANAR_TX;
2776 } else {
2777 if (mii_status & VELOCITY_DUPLEX_FULL)
2778 ANAR |= ANAR_10FD;
2779 else
2780 ANAR |= ANAR_10;
2781 }
2782 velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR);
2783 /* enable AUTO-NEGO mode */
2784 mii_set_auto_on(vptr);
2785 /* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */
2786 } 2918 }
2787 /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */ 2919
2788 /* vptr->mii_status=check_connection_type(vptr->mac_regs); */ 2920 BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
2789 return VELOCITY_LINK_CHANGE; 2921 /* Turn on SWPTAG just before entering power mode */
2922 BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
2923 /* Go to bed ..... */
2924 BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
2925
2926 return 0;
2790} 2927}
2791 2928
2792/** 2929/**
2793 * mii_check_media_mode - check media state 2930 * velocity_save_context - save registers
2794 * @regs: velocity registers 2931 * @vptr: velocity
2932 * @context: buffer for stored context
2795 * 2933 *
2796 * Check the current MII status and determine the link status 2934 * Retrieve the current configuration from the velocity hardware
2797 * accordingly 2935 * and stash it in the context structure, for use by the context
2936 * restore functions. This allows us to save things we need across
2937 * power down states
2798 */ 2938 */
2799 2939static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
2800static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
2801{ 2940{
2802 u32 status = 0; 2941 struct mac_regs __iomem *regs = vptr->mac_regs;
2803 u16 ANAR; 2942 u16 i;
2943 u8 __iomem *ptr = (u8 __iomem *)regs;
2804 2944
2805 if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs)) 2945 for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
2806 status |= VELOCITY_LINK_FAIL; 2946 *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
2807 2947
2808 if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs)) 2948 for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
2809 status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL; 2949 *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
2810 else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs))
2811 status |= (VELOCITY_SPEED_1000);
2812 else {
2813 velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
2814 if (ANAR & ANAR_TXFD)
2815 status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
2816 else if (ANAR & ANAR_TX)
2817 status |= VELOCITY_SPEED_100;
2818 else if (ANAR & ANAR_10FD)
2819 status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
2820 else
2821 status |= (VELOCITY_SPEED_10);
2822 }
2823 2950
2824 if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) { 2951 for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
2825 velocity_mii_read(regs, MII_REG_ANAR, &ANAR); 2952 *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
2826 if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
2827 == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
2828 if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
2829 status |= VELOCITY_AUTONEG_ENABLE;
2830 }
2831 }
2832 2953
2833 return status;
2834} 2954}
2835 2955
2836static u32 check_connection_type(struct mac_regs __iomem *regs) 2956static int velocity_suspend(struct pci_dev *pdev, pm_message_t state)
2837{ 2957{
2838 u32 status = 0; 2958 struct net_device *dev = pci_get_drvdata(pdev);
2839 u8 PHYSR0; 2959 struct velocity_info *vptr = netdev_priv(dev);
2840 u16 ANAR; 2960 unsigned long flags;
2841 PHYSR0 = readb(&regs->PHYSR0);
2842
2843 /*
2844 if (!(PHYSR0 & PHYSR0_LINKGD))
2845 status|=VELOCITY_LINK_FAIL;
2846 */
2847 2961
2848 if (PHYSR0 & PHYSR0_FDPX) 2962 if (!netif_running(vptr->dev))
2849 status |= VELOCITY_DUPLEX_FULL; 2963 return 0;
2850 2964
2851 if (PHYSR0 & PHYSR0_SPDG) 2965 netif_device_detach(vptr->dev);
2852 status |= VELOCITY_SPEED_1000;
2853 else if (PHYSR0 & PHYSR0_SPD10)
2854 status |= VELOCITY_SPEED_10;
2855 else
2856 status |= VELOCITY_SPEED_100;
2857 2966
2858 if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) { 2967 spin_lock_irqsave(&vptr->lock, flags);
2859 velocity_mii_read(regs, MII_REG_ANAR, &ANAR); 2968 pci_save_state(pdev);
2860 if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) 2969#ifdef ETHTOOL_GWOL
2861 == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) { 2970 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
2862 if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs)) 2971 velocity_get_ip(vptr);
2863 status |= VELOCITY_AUTONEG_ENABLE; 2972 velocity_save_context(vptr, &vptr->context);
2864 } 2973 velocity_shutdown(vptr);
2974 velocity_set_wol(vptr);
2975 pci_enable_wake(pdev, PCI_D3hot, 1);
2976 pci_set_power_state(pdev, PCI_D3hot);
2977 } else {
2978 velocity_save_context(vptr, &vptr->context);
2979 velocity_shutdown(vptr);
2980 pci_disable_device(pdev);
2981 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2865 } 2982 }
2866 2983#else
2867 return status; 2984 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2985#endif
2986 spin_unlock_irqrestore(&vptr->lock, flags);
2987 return 0;
2868} 2988}
2869 2989
2870/** 2990/**
2871 * enable_flow_control_ability - flow control 2991 * velocity_restore_context - restore registers
2872 * @vptr: velocity to configure 2992 * @vptr: velocity
2993 * @context: buffer for stored context
2873 * 2994 *
2874 * Set up flow control according to the flow control options 2995 * Reload the register configuration from the velocity context
2875 * determined by the eeprom/configuration. 2996 * created by velocity_save_context.
2876 */ 2997 */
2877 2998static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
2878static void enable_flow_control_ability(struct velocity_info *vptr)
2879{ 2999{
2880
2881 struct mac_regs __iomem *regs = vptr->mac_regs; 3000 struct mac_regs __iomem *regs = vptr->mac_regs;
3001 int i;
3002 u8 __iomem *ptr = (u8 __iomem *)regs;
2882 3003
2883 switch (vptr->options.flow_cntl) { 3004 for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
3005 writel(*((u32 *) (context->mac_reg + i)), ptr + i);
2884 3006
2885 case FLOW_CNTL_DEFAULT: 3007 /* Just skip cr0 */
2886 if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0)) 3008 for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
2887 writel(CR0_FDXRFCEN, &regs->CR0Set); 3009 /* Clear */
2888 else 3010 writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
2889 writel(CR0_FDXRFCEN, &regs->CR0Clr); 3011 /* Set */
3012 writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3013 }
2890 3014
2891 if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0)) 3015 for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
2892 writel(CR0_FDXTFCEN, &regs->CR0Set); 3016 writel(*((u32 *) (context->mac_reg + i)), ptr + i);
2893 else
2894 writel(CR0_FDXTFCEN, &regs->CR0Clr);
2895 break;
2896 3017
2897 case FLOW_CNTL_TX: 3018 for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
2898 writel(CR0_FDXTFCEN, &regs->CR0Set); 3019 writel(*((u32 *) (context->mac_reg + i)), ptr + i);
2899 writel(CR0_FDXRFCEN, &regs->CR0Clr);
2900 break;
2901 3020
2902 case FLOW_CNTL_RX: 3021 for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
2903 writel(CR0_FDXRFCEN, &regs->CR0Set); 3022 writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
2904 writel(CR0_FDXTFCEN, &regs->CR0Clr); 3023}
2905 break;
2906 3024
2907 case FLOW_CNTL_TX_RX: 3025static int velocity_resume(struct pci_dev *pdev)
2908 writel(CR0_FDXTFCEN, &regs->CR0Set); 3026{
2909 writel(CR0_FDXRFCEN, &regs->CR0Set); 3027 struct net_device *dev = pci_get_drvdata(pdev);
2910 break; 3028 struct velocity_info *vptr = netdev_priv(dev);
3029 unsigned long flags;
3030 int i;
2911 3031
2912 case FLOW_CNTL_DISABLE: 3032 if (!netif_running(vptr->dev))
2913 writel(CR0_FDXRFCEN, &regs->CR0Clr); 3033 return 0;
2914 writel(CR0_FDXTFCEN, &regs->CR0Clr);
2915 break;
2916 3034
2917 default: 3035 pci_set_power_state(pdev, PCI_D0);
2918 break; 3036 pci_enable_wake(pdev, 0, 0);
3037 pci_restore_state(pdev);
3038
3039 mac_wol_reset(vptr->mac_regs);
3040
3041 spin_lock_irqsave(&vptr->lock, flags);
3042 velocity_restore_context(vptr, &vptr->context);
3043 velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3044 mac_disable_int(vptr->mac_regs);
3045
3046 velocity_tx_srv(vptr, 0);
3047
3048 for (i = 0; i < vptr->tx.numq; i++) {
3049 if (vptr->tx.used[i])
3050 mac_tx_queue_wake(vptr->mac_regs, i);
2919 } 3051 }
2920 3052
3053 mac_enable_int(vptr->mac_regs);
3054 spin_unlock_irqrestore(&vptr->lock, flags);
3055 netif_device_attach(vptr->dev);
3056
3057 return 0;
2921} 3058}
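velocity_restore_context() leans on the chip's paired set/clear command registers: in this bank, a register's CLR twin sits 4 bytes above its SET address, writing a byte to SET raises bits and writing to CLR drops them, which is why the loop writes the complement to ptr + i + 4 before writing the saved byte to ptr + i. A freestanding sketch of the idiom (the helper name is illustrative):

/* Illustrative helper: reproduce one saved byte through a SET/CLR
 * register pair, mirroring the ptr + i / ptr + i + 4 arithmetic above. */
static void restore_byte_via_set_clr(u8 __iomem *set_reg, u8 saved)
{
	writeb(~saved, set_reg + 4);	/* CLR twin: drop the bits that must be 0 */
	writeb(saved, set_reg);		/* SET: raise the bits that must be 1 */
}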
+#endif
+
+/*
+ * Definition for our device driver. The PCI layer interface
+ * uses this to handle all our card discovery and plugging
+ */
+static struct pci_driver velocity_driver = {
+	.name		= VELOCITY_NAME,
+	.id_table	= velocity_id_table,
+	.probe		= velocity_found1,
+	.remove		= __devexit_p(velocity_remove1),
+#ifdef CONFIG_PM
+	.suspend	= velocity_suspend,
+	.resume		= velocity_resume,
+#endif
+};
 
 
 /**
@@ -2928,7 +3081,6 @@ static void enable_flow_control_ability(struct velocity_info *vptr)
  * Called before an ethtool operation. We need to make sure the
  * chip is out of D3 state before we poke at it.
  */
-
 static int velocity_ethtool_up(struct net_device *dev)
 {
 	struct velocity_info *vptr = netdev_priv(dev);
@@ -2944,7 +3096,6 @@ static int velocity_ethtool_up(struct net_device *dev)
  * Called after an ethtool operation. Restore the chip back to D3
  * state if it isn't running.
  */
-
 static void velocity_ethtool_down(struct net_device *dev)
 {
 	struct velocity_info *vptr = netdev_priv(dev);
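These two helpers are plugged into the driver's ethtool_ops, velocity_ethtool_up presumably as .begin and velocity_ethtool_down as .complete (only .complete is visible at the top of the next hunk), so every ethtool operation is bracketed by a wake-up and a conditional power-down. A trimmed sketch of that wiring:

/* Trimmed sketch; the driver's real table also carries its
 * get/set operations. */
static const struct ethtool_ops velocity_ethtool_ops_sketch = {
	.begin		= velocity_ethtool_up,		/* leave D3 first */
	.complete	= velocity_ethtool_down,	/* back to D3 if idle */
};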
@@ -3009,13 +3160,6 @@ static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd
 	return ret;
 }
 
-static u32 velocity_get_link(struct net_device *dev)
-{
-	struct velocity_info *vptr = netdev_priv(dev);
-	struct mac_regs __iomem *regs = vptr->mac_regs;
-	return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
-}
-
 static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
 	struct velocity_info *vptr = netdev_priv(dev);
@@ -3094,331 +3238,86 @@ static const struct ethtool_ops velocity_ethtool_ops = {
 	.complete = velocity_ethtool_down
 };
 
-/**
- * velocity_mii_ioctl - MII ioctl handler
- * @dev: network device
- * @ifr: the ifreq block for the ioctl
- * @cmd: the command
- *
- * Process MII requests made via ioctl from the network layer. These
- * are used by tools like kudzu to interrogate the link state of the
- * hardware
- */
-
-static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
-	struct velocity_info *vptr = netdev_priv(dev);
-	struct mac_regs __iomem *regs = vptr->mac_regs;
-	unsigned long flags;
-	struct mii_ioctl_data *miidata = if_mii(ifr);
-	int err;
-
-	switch (cmd) {
-	case SIOCGMIIPHY:
-		miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
-		break;
-	case SIOCGMIIREG:
-		if (!capable(CAP_NET_ADMIN))
-			return -EPERM;
-		if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
-			return -ETIMEDOUT;
-		break;
-	case SIOCSMIIREG:
-		if (!capable(CAP_NET_ADMIN))
-			return -EPERM;
-		spin_lock_irqsave(&vptr->lock, flags);
-		err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
-		spin_unlock_irqrestore(&vptr->lock, flags);
-		check_connection_type(vptr->mac_regs);
-		if (err)
-			return err;
-		break;
-	default:
-		return -EOPNOTSUPP;
-	}
-	return 0;
-}
-
-#ifdef CONFIG_PM
-
-/**
- * velocity_save_context - save registers
- * @vptr: velocity
- * @context: buffer for stored context
- *
- * Retrieve the current configuration from the velocity hardware
- * and stash it in the context structure, for use by the context
- * restore functions. This allows us to save things we need across
- * power down states
- */
-
-static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
-{
-	struct mac_regs __iomem *regs = vptr->mac_regs;
-	u16 i;
-	u8 __iomem *ptr = (u8 __iomem *)regs;
-
-	for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
-		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
-
-	for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
-		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
-
-	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
-		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
-
-}
-
-/**
- * velocity_restore_context - restore registers
- * @vptr: velocity
- * @context: buffer for stored context
- *
- * Reload the register configuration from the velocity context
- * created by velocity_save_context.
- */
-
-static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
-{
-	struct mac_regs __iomem *regs = vptr->mac_regs;
-	int i;
-	u8 __iomem *ptr = (u8 __iomem *)regs;
-
-	for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
-		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
-
-	/* Just skip cr0 */
-	for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
-		/* Clear */
-		writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
-		/* Set */
-		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
-	}
-
-	for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
-		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
-
-	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
-		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
-
-	for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
-		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
-}
-
+#ifdef CONFIG_PM
+#ifdef CONFIG_INET
+static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
+{
+	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
+	struct net_device *dev = ifa->ifa_dev->dev;
+	struct velocity_info *vptr;
+	unsigned long flags;
+
+	if (dev_net(dev) != &init_net)
+		return NOTIFY_DONE;
+
+	spin_lock_irqsave(&velocity_dev_list_lock, flags);
+	list_for_each_entry(vptr, &velocity_dev_list, list) {
+		if (vptr->dev == dev) {
+			velocity_get_ip(vptr);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
+
+	return NOTIFY_DONE;
+}
+#endif	/* CONFIG_INET */
+#endif	/* CONFIG_PM */
+
+#if defined(CONFIG_PM) && defined(CONFIG_INET)
+static struct notifier_block velocity_inetaddr_notifier = {
+	.notifier_call	= velocity_netdev_event,
+};
+
+static void velocity_register_notifier(void)
+{
+	register_inetaddr_notifier(&velocity_inetaddr_notifier);
+}
+
+static void velocity_unregister_notifier(void)
+{
+	unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
+}
+
+#else
+
+#define velocity_register_notifier()	do {} while (0)
+#define velocity_unregister_notifier()	do {} while (0)
+
+#endif	/* defined(CONFIG_PM) && defined(CONFIG_INET) */
+
3208/** 3289/**
3209 * wol_calc_crc - WOL CRC 3290 * velocity_init_module - load time function
3210 * @pattern: data pattern
3211 * @mask_pattern: mask
3212 * 3291 *
3213 * Compute the wake on lan crc hashes for the packet header 3292 * Called when the velocity module is loaded. The PCI driver
3214 * we are interested in. 3293 * is registered with the PCI layer, and in turn will call
3294 * the probe functions for each velocity adapter installed
3295 * in the system.
3215 */ 3296 */
3216 3297static int __init velocity_init_module(void)
3217static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
3218{ 3298{
3219 u16 crc = 0xFFFF; 3299 int ret;
3220 u8 mask;
3221 int i, j;
3222
3223 for (i = 0; i < size; i++) {
3224 mask = mask_pattern[i];
3225
3226 /* Skip this loop if the mask equals to zero */
3227 if (mask == 0x00)
3228 continue;
3229 3300
3230 for (j = 0; j < 8; j++) { 3301 velocity_register_notifier();
3231 if ((mask & 0x01) == 0) { 3302 ret = pci_register_driver(&velocity_driver);
3232 mask >>= 1; 3303 if (ret < 0)
3233 continue; 3304 velocity_unregister_notifier();
3234 } 3305 return ret;
3235 mask >>= 1;
3236 crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
3237 }
3238 }
3239 /* Finally, invert the result once to get the correct data */
3240 crc = ~crc;
3241 return bitrev32(crc) >> 16;
3242} 3306}
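wol_calc_crc() (removed above, moved earlier in the file) runs a CRC-CCITT over just the header bytes selected by the bit mask, inverts the result, and bit-reverses it for the chip's pattern-CRC register. A self-contained sketch of the same arithmetic, with a bitwise CRC standing in for the kernel's table-driven crc_ccitt() and an explicit 16-bit reversal standing in for bitrev32(crc) >> 16:

#include <stdint.h>

/* Bitwise CRC-CCITT step, equivalent to the kernel's table-driven
 * crc_ccitt() for one byte (reflected polynomial 0x8408). */
static uint16_t crc_ccitt_byte(uint16_t crc, uint8_t c)
{
	int k;

	crc ^= c;
	for (k = 0; k < 8; k++)
		crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
	return crc;
}

/* Reverse the bits of a 16-bit value; stands in for bitrev32(x) >> 16. */
static uint16_t bitrev16(uint16_t x)
{
	uint16_t r = 0;
	int k;

	for (k = 0; k < 16; k++)
		r = (r << 1) | ((x >> k) & 1);
	return r;
}

/* Same walk as wol_calc_crc(): only bytes whose mask bit is set
 * contribute to the CRC. */
static uint16_t wol_crc_sketch(int size, const uint8_t *pattern,
			       const uint8_t *mask_pattern)
{
	uint16_t crc = 0xFFFF;
	int i, j;

	for (i = 0; i < size; i++) {
		uint8_t mask = mask_pattern[i];

		for (j = 0; j < 8; j++, mask >>= 1)
			if (mask & 1)
				crc = crc_ccitt_byte(crc, pattern[i * 8 + j]);
	}
	return bitrev16(~crc);
}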
-
-/**
- * velocity_set_wol - set up for wake on lan
- * @vptr: velocity to set WOL status on
- *
- * Set a card up for wake on lan either by unicast or by
- * ARP packet.
- *
- * FIXME: check static buffer is safe here
- */
-
-static int velocity_set_wol(struct velocity_info *vptr)
-{
-	struct mac_regs __iomem *regs = vptr->mac_regs;
-	static u8 buf[256];
-	int i;
-
-	static u32 mask_pattern[2][4] = {
-		{0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */
-		{0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff} /* Magic Packet */
-	};
-
-	writew(0xFFFF, &regs->WOLCRClr);
-	writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
-	writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
-
-	/*
-	   if (vptr->wol_opts & VELOCITY_WOL_PHY)
-	   writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
-	 */
-
-	if (vptr->wol_opts & VELOCITY_WOL_UCAST)
-		writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
-
-	if (vptr->wol_opts & VELOCITY_WOL_ARP) {
-		struct arp_packet *arp = (struct arp_packet *) buf;
-		u16 crc;
-		memset(buf, 0, sizeof(struct arp_packet) + 7);
-
-		for (i = 0; i < 4; i++)
-			writel(mask_pattern[0][i], &regs->ByteMask[0][i]);
-
-		arp->type = htons(ETH_P_ARP);
-		arp->ar_op = htons(1);
-
-		memcpy(arp->ar_tip, vptr->ip_addr, 4);
-
-		crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
-				(u8 *) & mask_pattern[0][0]);
-
-		writew(crc, &regs->PatternCRC[0]);
-		writew(WOLCR_ARP_EN, &regs->WOLCRSet);
-	}
-
-	BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
-	BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);
-
-	writew(0x0FFF, &regs->WOLSRClr);
-
-	if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
-		if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
-			MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
-
-		MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
-	}
-
-	if (vptr->mii_status & VELOCITY_SPEED_1000)
-		MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
-
-	BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
-
-	{
-		u8 GCR;
-		GCR = readb(&regs->CHIPGCR);
-		GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
-		writeb(GCR, &regs->CHIPGCR);
-	}
-
-	BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
-	/* Turn on SWPTAG just before entering power mode */
-	BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
-	/* Go to bed ..... */
-	BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
-
-	return 0;
-}
-
-static int velocity_suspend(struct pci_dev *pdev, pm_message_t state)
-{
-	struct net_device *dev = pci_get_drvdata(pdev);
-	struct velocity_info *vptr = netdev_priv(dev);
-	unsigned long flags;
-
-	if (!netif_running(vptr->dev))
-		return 0;
-
-	netif_device_detach(vptr->dev);
-
-	spin_lock_irqsave(&vptr->lock, flags);
-	pci_save_state(pdev);
-#ifdef ETHTOOL_GWOL
-	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
-		velocity_get_ip(vptr);
-		velocity_save_context(vptr, &vptr->context);
-		velocity_shutdown(vptr);
-		velocity_set_wol(vptr);
-		pci_enable_wake(pdev, PCI_D3hot, 1);
-		pci_set_power_state(pdev, PCI_D3hot);
-	} else {
-		velocity_save_context(vptr, &vptr->context);
-		velocity_shutdown(vptr);
-		pci_disable_device(pdev);
-		pci_set_power_state(pdev, pci_choose_state(pdev, state));
-	}
-#else
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
-#endif
-	spin_unlock_irqrestore(&vptr->lock, flags);
-	return 0;
-}
-
-static int velocity_resume(struct pci_dev *pdev)
-{
-	struct net_device *dev = pci_get_drvdata(pdev);
-	struct velocity_info *vptr = netdev_priv(dev);
-	unsigned long flags;
-	int i;
-
-	if (!netif_running(vptr->dev))
-		return 0;
-
-	pci_set_power_state(pdev, PCI_D0);
-	pci_enable_wake(pdev, 0, 0);
-	pci_restore_state(pdev);
-
-	mac_wol_reset(vptr->mac_regs);
-
-	spin_lock_irqsave(&vptr->lock, flags);
-	velocity_restore_context(vptr, &vptr->context);
-	velocity_init_registers(vptr, VELOCITY_INIT_WOL);
-	mac_disable_int(vptr->mac_regs);
-
-	velocity_tx_srv(vptr, 0);
-
-	for (i = 0; i < vptr->tx.numq; i++) {
-		if (vptr->tx.used[i])
-			mac_tx_queue_wake(vptr->mac_regs, i);
-	}
-
-	mac_enable_int(vptr->mac_regs);
-	spin_unlock_irqrestore(&vptr->lock, flags);
-	netif_device_attach(vptr->dev);
-
-	return 0;
-}
-
-#ifdef CONFIG_INET
-
-static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
-{
-	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
-	struct net_device *dev = ifa->ifa_dev->dev;
-	struct velocity_info *vptr;
-	unsigned long flags;
-
-	if (dev_net(dev) != &init_net)
-		return NOTIFY_DONE;
-
-	spin_lock_irqsave(&velocity_dev_list_lock, flags);
-	list_for_each_entry(vptr, &velocity_dev_list, list) {
-		if (vptr->dev == dev) {
-			velocity_get_ip(vptr);
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
-
-	return NOTIFY_DONE;
-}
-
-#endif
-#endif
+
+/**
+ * velocity_cleanup - module unload
+ *
+ * When the velocity hardware is unloaded this function is called.
+ * It will clean up the notifiers and unregister the PCI
+ * driver interface for this hardware. This in turn cleans up
+ * all discovered interfaces before returning from the function.
+ */
+static void __exit velocity_cleanup_module(void)
+{
+	velocity_unregister_notifier();
+	pci_unregister_driver(&velocity_driver);
+}
+
+module_init(velocity_init_module);
+module_exit(velocity_cleanup_module);
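One footnote on the removed velocity_set_wol(): its "FIXME: check static buffer is safe here" is a fair worry, since two adapters suspending at the same time would share (and scribble over) the single static scratch frame. A hedged sketch of the obvious per-call alternative (the helper name is hypothetical, and this is not what the driver does):

/* Illustrative only: a per-call buffer removes the shared-state hazard
 * flagged by the FIXME above. */
static u16 wol_arp_crc_sketch(struct velocity_info *vptr, u32 *byte_mask)
{
	u8 buf[sizeof(struct arp_packet) + 7];
	struct arp_packet *arp = (struct arp_packet *)buf;

	memset(buf, 0, sizeof(buf));
	arp->type = htons(ETH_P_ARP);
	arp->ar_op = htons(1);			/* ARP request */
	memcpy(arp->ar_tip, vptr->ip_addr, 4);

	return wol_calc_crc(sizeof(buf) / 8, buf, (u8 *)byte_mask);
}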