Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig | 9
-rw-r--r--  drivers/net/arm/ks8695net.c | 2
-rw-r--r--  drivers/net/atl1c/atl1c_hw.c | 15
-rw-r--r--  drivers/net/atl1c/atl1c_hw.h | 43
-rw-r--r--  drivers/net/atl1c/atl1c_main.c | 2
-rw-r--r--  drivers/net/atl1e/atl1e_ethtool.c | 12
-rw-r--r--  drivers/net/atl1e/atl1e_hw.c | 34
-rw-r--r--  drivers/net/atl1e/atl1e_hw.h | 111
-rw-r--r--  drivers/net/atl1e/atl1e_main.c | 4
-rw-r--r--  drivers/net/atlx/atl1.c | 77
-rw-r--r--  drivers/net/ax88796.c | 810
-rw-r--r--  drivers/net/benet/be.h | 16
-rw-r--r--  drivers/net/benet/be_cmds.c | 150
-rw-r--r--  drivers/net/benet/be_cmds.h | 61
-rw-r--r--  drivers/net/benet/be_ethtool.c | 77
-rw-r--r--  drivers/net/benet/be_hw.h | 47
-rw-r--r--  drivers/net/benet/be_main.c | 225
-rw-r--r--  drivers/net/bna/bnad.c | 108
-rw-r--r--  drivers/net/bna/bnad.h | 2
-rw-r--r--  drivers/net/bnx2.c | 29
-rw-r--r--  drivers/net/bnx2.h | 5
-rw-r--r--  drivers/net/bnx2x/bnx2x.h | 33
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c | 70
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.h | 6
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethtool.c | 58
-rw-r--r--  drivers/net/bnx2x/bnx2x_hsi.h | 118
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.c | 2727
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.h | 34
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c | 650
-rw-r--r--  drivers/net/bnx2x/bnx2x_reg.h | 5
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 4
-rw-r--r--  drivers/net/bonding/bond_alb.c | 4
-rw-r--r--  drivers/net/bonding/bond_main.c | 24
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 4
-rw-r--r--  drivers/net/can/Kconfig | 6
-rw-r--r--  drivers/net/can/Makefile | 2
-rw-r--r--  drivers/net/can/at91_can.c | 138
-rw-r--r--  drivers/net/can/c_can/Kconfig | 15
-rw-r--r--  drivers/net/can/c_can/Makefile | 8
-rw-r--r--  drivers/net/can/c_can/c_can.c | 1158
-rw-r--r--  drivers/net/can/c_can/c_can.h | 86
-rw-r--r--  drivers/net/can/c_can/c_can_platform.c | 215
-rw-r--r--  drivers/net/can/janz-ican3.c | 2
-rw-r--r--  drivers/net/can/mcp251x.c | 2
-rw-r--r--  drivers/net/can/mscan/Kconfig | 2
-rw-r--r--  drivers/net/can/pch_can.c | 5
-rw-r--r--  drivers/net/can/softing/Kconfig | 30
-rw-r--r--  drivers/net/can/softing/Makefile | 6
-rw-r--r--  drivers/net/can/softing/softing.h | 167
-rw-r--r--  drivers/net/can/softing/softing_cs.c | 360
-rw-r--r--  drivers/net/can/softing/softing_fw.c | 691
-rw-r--r--  drivers/net/can/softing/softing_main.c | 893
-rw-r--r--  drivers/net/can/softing/softing_platform.h | 40
-rw-r--r--  drivers/net/cnic.c | 182
-rw-r--r--  drivers/net/cnic.h | 2
-rw-r--r--  drivers/net/cnic_if.h | 8
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.c | 5
-rw-r--r--  drivers/net/cxgb4/cxgb4_main.c | 4
-rw-r--r--  drivers/net/cxgb4vf/cxgb4vf_main.c | 80
-rw-r--r--  drivers/net/cxgb4vf/t4vf_hw.c | 2
-rw-r--r--  drivers/net/depca.c | 6
-rw-r--r--  drivers/net/dl2k.c | 4
-rw-r--r--  drivers/net/e1000/e1000_hw.c | 4
-rw-r--r--  drivers/net/e1000/e1000_hw.h | 1
-rw-r--r--  drivers/net/e1000e/e1000.h | 5
-rw-r--r--  drivers/net/e1000e/ethtool.c | 65
-rw-r--r--  drivers/net/e1000e/ich8lan.c | 3
-rw-r--r--  drivers/net/e1000e/lib.c | 4
-rw-r--r--  drivers/net/e1000e/netdev.c | 176
-rw-r--r--  drivers/net/e1000e/phy.c | 8
-rw-r--r--  drivers/net/enc28j60.c | 2
-rw-r--r--  drivers/net/enic/Makefile | 2
-rw-r--r--  drivers/net/enic/enic.h | 11
-rw-r--r--  drivers/net/enic/enic_dev.c | 221
-rw-r--r--  drivers/net/enic/enic_dev.h | 41
-rw-r--r--  drivers/net/enic/enic_main.c | 324
-rw-r--r--  drivers/net/enic/vnic_dev.c | 19
-rw-r--r--  drivers/net/enic/vnic_dev.h | 8
-rw-r--r--  drivers/net/enic/vnic_rq.h | 5
-rw-r--r--  drivers/net/fec.c | 650
-rw-r--r--  drivers/net/forcedeth.c | 2
-rw-r--r--  drivers/net/gianfar.c | 2
-rw-r--r--  drivers/net/hamradio/bpqether.c | 5
-rw-r--r--  drivers/net/igb/e1000_82575.c | 11
-rw-r--r--  drivers/net/igb/e1000_hw.h | 1
-rw-r--r--  drivers/net/igb/e1000_mbx.c | 38
-rw-r--r--  drivers/net/igb/igb_main.c | 10
-rw-r--r--  drivers/net/irda/sh_irda.c | 14
-rw-r--r--  drivers/net/ixgbe/ixgbe.h | 5
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c | 3
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.h | 4
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb.c | 177
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb.h | 10
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82598.c | 94
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82598.h | 23
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82599.c | 115
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82599.h | 24
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_nl.c | 211
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c | 34
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.c | 53
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.h | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 44
-rw-r--r--  drivers/net/ixgbe/ixgbe_mbx.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_mbx.h | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_sriov.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_x540.c | 6
-rw-r--r--  drivers/net/jme.c | 306
-rw-r--r--  drivers/net/jme.h | 87
-rw-r--r--  drivers/net/loopback.c | 9
-rw-r--r--  drivers/net/macvtap.c | 18
-rw-r--r--  drivers/net/mlx4/main.c | 15
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 4
-rw-r--r--  drivers/net/niu.c | 61
-rw-r--r--  drivers/net/ns83820.c | 5
-rw-r--r--  drivers/net/pch_gbe/pch_gbe.h | 2
-rw-r--r--  drivers/net/pch_gbe/pch_gbe_main.c | 118
-rw-r--r--  drivers/net/pcmcia/axnet_cs.c | 6
-rw-r--r--  drivers/net/phy/Kconfig | 1
-rw-r--r--  drivers/net/phy/micrel.c | 24
-rw-r--r--  drivers/net/ppp_generic.c | 148
-rw-r--r--  drivers/net/r8169.c | 43
-rw-r--r--  drivers/net/sfc/efx.c | 80
-rw-r--r--  drivers/net/sfc/efx.h | 17
-rw-r--r--  drivers/net/sfc/ethtool.c | 10
-rw-r--r--  drivers/net/sfc/filter.c | 117
-rw-r--r--  drivers/net/sfc/net_driver.h | 69
-rw-r--r--  drivers/net/sfc/nic.c | 51
-rw-r--r--  drivers/net/sfc/regs.h | 6
-rw-r--r--  drivers/net/sfc/selftest.c | 2
-rw-r--r--  drivers/net/sfc/tx.c | 90
-rw-r--r--  drivers/net/sh_eth.c | 208
-rw-r--r--  drivers/net/sis900.c | 1
-rw-r--r--  drivers/net/smc91x.c | 13
-rw-r--r--  drivers/net/stmmac/stmmac_main.c | 4
-rw-r--r--  drivers/net/sungem.c | 58
-rw-r--r--  drivers/net/sungem.h | 1
-rw-r--r--  drivers/net/tg3.c | 264
-rw-r--r--  drivers/net/tg3.h | 16
-rw-r--r--  drivers/net/tlan.c | 3775
-rw-r--r--  drivers/net/tlan.h | 192
-rw-r--r--  drivers/net/tun.c | 2
-rw-r--r--  drivers/net/typhoon.c | 3
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 244
-rw-r--r--  drivers/net/usb/hso.c | 12
-rw-r--r--  drivers/net/usb/kaweth.c | 1
-rw-r--r--  drivers/net/usb/usbnet.c | 4
-rw-r--r--  drivers/net/veth.c | 12
-rw-r--r--  drivers/net/via-velocity.c | 9
-rw-r--r--  drivers/net/via-velocity.h | 8
-rw-r--r--  drivers/net/virtio_net.c | 27
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 93
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c | 274
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 7
-rw-r--r--  drivers/net/vxge/vxge-config.c | 34
-rw-r--r--  drivers/net/vxge/vxge-config.h | 10
-rw-r--r--  drivers/net/vxge/vxge-main.c | 216
-rw-r--r--  drivers/net/vxge/vxge-main.h | 23
-rw-r--r--  drivers/net/vxge/vxge-traffic.c | 116
-rw-r--r--  drivers/net/vxge/vxge-traffic.h | 14
-rw-r--r--  drivers/net/vxge/vxge-version.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.c | 66
-rw-r--r--  drivers/net/xen-netfront.c | 96
162 files changed, 12878 insertions, 6356 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 16fe4f9b719b..f4b39274308a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -238,8 +238,8 @@ source "drivers/net/arm/Kconfig"
 config AX88796
 	tristate "ASIX AX88796 NE2000 clone support"
 	depends on ARM || MIPS || SUPERH
-	select CRC32
-	select MII
+	select PHYLIB
+	select MDIO_BITBANG
 	help
 	  AX88796 driver, using platform bus to provide
 	  chip detection and resources
@@ -1944,7 +1944,8 @@ config 68360_ENET
 config FEC
 	bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
 	depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
-		MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 || SOC_IMX28
+		IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC
+	default IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC if ARM
 	select PHYLIB
 	help
 	  Say Y here if you want to use the built-in 10/100 Fast ethernet
@@ -2864,7 +2865,7 @@ config MLX4_CORE
 	default n
 
 config MLX4_DEBUG
-	bool "Verbose debugging output" if (MLX4_CORE && EMBEDDED)
+	bool "Verbose debugging output" if (MLX4_CORE && EXPERT)
	depends on MLX4_CORE
 	default y
 	---help---
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 62d6f88cbab5..aa07657744c3 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -1644,7 +1644,7 @@ ks8695_cleanup(void)
 module_init(ks8695_init);
 module_exit(ks8695_cleanup);
 
-MODULE_AUTHOR("Simtec Electronics")
+MODULE_AUTHOR("Simtec Electronics");
 MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:" MODULENAME);
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index 1bf672009948..23f2ab0f2fa8 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -345,7 +345,7 @@ int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data)
  */
 static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
 {
-	u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_SPEED_MASK;
+	u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_ALL;
 	u16 mii_giga_ctrl_data = GIGA_CR_1000T_DEFAULT_CAP &
 		~GIGA_CR_1000T_SPEED_MASK;
 
@@ -373,7 +373,7 @@ static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
 	}
 
 	if (atl1c_write_phy_reg(hw, MII_ADVERTISE, mii_adv_data) != 0 ||
-	    atl1c_write_phy_reg(hw, MII_GIGA_CR, mii_giga_ctrl_data) != 0)
+	    atl1c_write_phy_reg(hw, MII_CTRL1000, mii_giga_ctrl_data) != 0)
 		return -1;
 	return 0;
 }
@@ -517,19 +517,18 @@ int atl1c_phy_init(struct atl1c_hw *hw)
 				"Error Setting up Auto-Negotiation\n");
 			return ret_val;
 		}
-		mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
+		mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
 		break;
 	case MEDIA_TYPE_100M_FULL:
-		mii_bmcr_data |= BMCR_SPEED_100 | BMCR_FULL_DUPLEX;
+		mii_bmcr_data |= BMCR_SPEED100 | BMCR_FULLDPLX;
 		break;
 	case MEDIA_TYPE_100M_HALF:
-		mii_bmcr_data |= BMCR_SPEED_100;
+		mii_bmcr_data |= BMCR_SPEED100;
 		break;
 	case MEDIA_TYPE_10M_FULL:
-		mii_bmcr_data |= BMCR_SPEED_10 | BMCR_FULL_DUPLEX;
+		mii_bmcr_data |= BMCR_FULLDPLX;
 		break;
 	case MEDIA_TYPE_10M_HALF:
-		mii_bmcr_data |= BMCR_SPEED_10;
 		break;
 	default:
 		if (netif_msg_link(adapter))
@@ -657,7 +656,7 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw)
 	err = atl1c_phy_setup_adv(hw);
 	if (err)
 		return err;
-	mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
+	mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
 
 	return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data);
 }
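
Across the atl1c/atl1e files in this commit, the driver-private PHY register macros are deleted in favor of the generic definitions in <linux/mii.h> (MII_BMCR, MII_CTRL1000, BMCR_ANENABLE, ADVERTISE_ALL, and friends). As a rough sketch of the idiom — not code from this commit, and restart_autoneg_sketch() is a made-up name — an autoneg restart written purely against the generic header looks like this:

	#include <linux/mii.h>

	/* Illustrative only: the generic BMCR_ANENABLE and BMCR_ANRESTART
	 * bits replace the old driver-private BMCR_AUTO_NEG_EN and
	 * BMCR_RESTART_AUTO_NEG definitions one-for-one. */
	static int restart_autoneg_sketch(struct atl1c_hw *hw)
	{
		u16 bmcr = BMCR_ANENABLE | BMCR_ANRESTART;

		return atl1c_write_phy_reg(hw, MII_BMCR, bmcr);
	}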
diff --git a/drivers/net/atl1c/atl1c_hw.h b/drivers/net/atl1c/atl1c_hw.h
index 3dd675979aa1..655fc6c4a8a4 100644
--- a/drivers/net/atl1c/atl1c_hw.h
+++ b/drivers/net/atl1c/atl1c_hw.h
@@ -736,55 +736,16 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
 #define REG_DEBUG_DATA0			0x1900
 #define REG_DEBUG_DATA1			0x1904
 
-/* PHY Control Register */
-#define MII_BMCR			0x00
-#define BMCR_SPEED_SELECT_MSB		0x0040  /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define BMCR_COLL_TEST_ENABLE		0x0080  /* Collision test enable */
-#define BMCR_FULL_DUPLEX		0x0100  /* FDX =1, half duplex =0 */
-#define BMCR_RESTART_AUTO_NEG		0x0200  /* Restart auto negotiation */
-#define BMCR_ISOLATE			0x0400  /* Isolate PHY from MII */
-#define BMCR_POWER_DOWN			0x0800  /* Power down */
-#define BMCR_AUTO_NEG_EN		0x1000  /* Auto Neg Enable */
-#define BMCR_SPEED_SELECT_LSB		0x2000  /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define BMCR_LOOPBACK			0x4000  /* 0 = normal, 1 = loopback */
-#define BMCR_RESET			0x8000  /* 0 = normal, 1 = PHY reset */
-#define BMCR_SPEED_MASK			0x2040
-#define BMCR_SPEED_1000			0x0040
-#define BMCR_SPEED_100			0x2000
-#define BMCR_SPEED_10			0x0000
-
-/* PHY Status Register */
-#define MII_BMSR			0x01
-#define BMMSR_EXTENDED_CAPS		0x0001  /* Extended register capabilities */
-#define BMSR_JABBER_DETECT		0x0002  /* Jabber Detected */
-#define BMSR_LINK_STATUS		0x0004  /* Link Status 1 = link */
-#define BMSR_AUTONEG_CAPS		0x0008  /* Auto Neg Capable */
-#define BMSR_REMOTE_FAULT		0x0010  /* Remote Fault Detect */
-#define BMSR_AUTONEG_COMPLETE		0x0020  /* Auto Neg Complete */
-#define BMSR_PREAMBLE_SUPPRESS		0x0040  /* Preamble may be suppressed */
-#define BMSR_EXTENDED_STATUS		0x0100  /* Ext. status info in Reg 0x0F */
-#define BMSR_100T2_HD_CAPS		0x0200  /* 100T2 Half Duplex Capable */
-#define BMSR_100T2_FD_CAPS		0x0400  /* 100T2 Full Duplex Capable */
-#define BMSR_10T_HD_CAPS		0x0800  /* 10T   Half Duplex Capable */
-#define BMSR_10T_FD_CAPS		0x1000  /* 10T   Full Duplex Capable */
-#define BMSR_100X_HD_CAPS		0x2000  /* 100X  Half Duplex Capable */
-#define BMMII_SR_100X_FD_CAPS		0x4000  /* 100X  Full Duplex Capable */
-#define BMMII_SR_100T4_CAPS		0x8000  /* 100T4 Capable */
-
-#define MII_PHYSID1			0x02
-#define MII_PHYSID2			0x03
 #define L1D_MPW_PHYID1			0xD01C  /* V7 */
 #define L1D_MPW_PHYID2			0xD01D  /* V1-V6 */
 #define L1D_MPW_PHYID3			0xD01E  /* V8 */
 
 
 /* Autoneg Advertisement Register */
-#define MII_ADVERTISE			0x04
-#define ADVERTISE_SPEED_MASK		0x01E0
-#define ADVERTISE_DEFAULT_CAP		0x0DE0
+#define ADVERTISE_DEFAULT_CAP \
+	(ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)
 
 /* 1000BASE-T Control Register */
-#define MII_GIGA_CR			0x09
 #define GIGA_CR_1000T_REPEATER_DTE	0x0400 /* 1=Repeater/switch device port 0=DTE device */
 
 #define GIGA_CR_1000T_MS_VALUE		0x0800 /* 1=Configure PHY as Master 0=Configure PHY as Slave */
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index a699bbf20eb5..e60595f0247c 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -48,6 +48,7 @@ static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = {
 	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)},
 	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)},
 	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)},
+	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D_2_0)},
 	/* required last entry */
 	{ 0 }
 };
@@ -2717,7 +2718,6 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
 		goto err_reset;
 	}
 
-	device_init_wakeup(&pdev->dev, 1);
 	/* reset the controller to
 	 * put the device in a known good starting state */
 	err = atl1c_phy_init(&adapter->hw);
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index 6943a6c3b948..1209297433b8 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -95,18 +95,18 @@ static int atl1e_set_settings(struct net_device *netdev,
 		ecmd->advertising = hw->autoneg_advertised |
 				    ADVERTISED_TP | ADVERTISED_Autoneg;
 
-		adv4 = hw->mii_autoneg_adv_reg & ~MII_AR_SPEED_MASK;
+		adv4 = hw->mii_autoneg_adv_reg & ~ADVERTISE_ALL;
 		adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK;
 		if (hw->autoneg_advertised & ADVERTISE_10_HALF)
-			adv4 |= MII_AR_10T_HD_CAPS;
+			adv4 |= ADVERTISE_10HALF;
 		if (hw->autoneg_advertised & ADVERTISE_10_FULL)
-			adv4 |= MII_AR_10T_FD_CAPS;
+			adv4 |= ADVERTISE_10FULL;
 		if (hw->autoneg_advertised & ADVERTISE_100_HALF)
-			adv4 |= MII_AR_100TX_HD_CAPS;
+			adv4 |= ADVERTISE_100HALF;
 		if (hw->autoneg_advertised & ADVERTISE_100_FULL)
-			adv4 |= MII_AR_100TX_FD_CAPS;
+			adv4 |= ADVERTISE_100FULL;
 		if (hw->autoneg_advertised & ADVERTISE_1000_FULL)
-			adv9 |= MII_AT001_CR_1000T_FD_CAPS;
+			adv9 |= ADVERTISE_1000FULL;
 
 		if (adv4 != hw->mii_autoneg_adv_reg ||
 		    adv9 != hw->mii_1000t_ctrl_reg) {
diff --git a/drivers/net/atl1e/atl1e_hw.c b/drivers/net/atl1e/atl1e_hw.c
index 76cc043def8c..923063d2e5bb 100644
--- a/drivers/net/atl1e/atl1e_hw.c
+++ b/drivers/net/atl1e/atl1e_hw.c
@@ -318,7 +318,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
 	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
 	 * the 1000Base-T control Register (Address 9).
 	 */
-	mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
+	mii_autoneg_adv_reg &= ~ADVERTISE_ALL;
 	mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK;
 
 	/*
@@ -327,44 +327,37 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
 	 */
 	switch (hw->media_type) {
 	case MEDIA_TYPE_AUTO_SENSOR:
-		mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
-					MII_AR_10T_FD_CAPS |
-					MII_AR_100TX_HD_CAPS |
-					MII_AR_100TX_FD_CAPS);
-		hw->autoneg_advertised = ADVERTISE_10_HALF |
-					 ADVERTISE_10_FULL |
-					 ADVERTISE_100_HALF |
-					 ADVERTISE_100_FULL;
+		mii_autoneg_adv_reg |= ADVERTISE_ALL;
+		hw->autoneg_advertised = ADVERTISE_ALL;
 		if (hw->nic_type == athr_l1e) {
-			mii_1000t_ctrl_reg |=
-				MII_AT001_CR_1000T_FD_CAPS;
+			mii_1000t_ctrl_reg |= ADVERTISE_1000FULL;
 			hw->autoneg_advertised |= ADVERTISE_1000_FULL;
 		}
 		break;
 
 	case MEDIA_TYPE_100M_FULL:
-		mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
+		mii_autoneg_adv_reg |= ADVERTISE_100FULL;
 		hw->autoneg_advertised = ADVERTISE_100_FULL;
 		break;
 
 	case MEDIA_TYPE_100M_HALF:
-		mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
+		mii_autoneg_adv_reg |= ADVERTISE_100_HALF;
 		hw->autoneg_advertised = ADVERTISE_100_HALF;
 		break;
 
 	case MEDIA_TYPE_10M_FULL:
-		mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
+		mii_autoneg_adv_reg |= ADVERTISE_10_FULL;
 		hw->autoneg_advertised = ADVERTISE_10_FULL;
 		break;
 
 	default:
-		mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
+		mii_autoneg_adv_reg |= ADVERTISE_10_HALF;
 		hw->autoneg_advertised = ADVERTISE_10_HALF;
 		break;
 	}
 
 	/* flow control fixed to enable all */
-	mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
+	mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
 
 	hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
 	hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
@@ -374,7 +367,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
 		return ret_val;
 
 	if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
-		ret_val = atl1e_write_phy_reg(hw, MII_AT001_CR,
+		ret_val = atl1e_write_phy_reg(hw, MII_CTRL1000,
 					      mii_1000t_ctrl_reg);
 		if (ret_val)
 			return ret_val;
@@ -397,7 +390,7 @@ int atl1e_phy_commit(struct atl1e_hw *hw)
 	int ret_val;
 	u16 phy_data;
 
-	phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
+	phy_data = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
 
 	ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data);
 	if (ret_val) {
@@ -645,15 +638,14 @@ int atl1e_restart_autoneg(struct atl1e_hw *hw)
 		return err;
 
 	if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
-		err = atl1e_write_phy_reg(hw, MII_AT001_CR,
+		err = atl1e_write_phy_reg(hw, MII_CTRL1000,
 					  hw->mii_1000t_ctrl_reg);
 		if (err)
 			return err;
 	}
 
 	err = atl1e_write_phy_reg(hw, MII_BMCR,
-				  MII_CR_RESET | MII_CR_AUTO_NEG_EN |
-				  MII_CR_RESTART_AUTO_NEG);
+				  BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART);
 	return err;
 }
 
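
The same pattern shows up in the advertisement setup above: the driver's MII_AR_* capability bits map one-for-one onto the generic ADVERTISE_* flags. A minimal sketch — not code from this commit, with build_adv_sketch() a hypothetical helper — of composing an MII_ADVERTISE value from the generic flags:

	#include <linux/mii.h>

	/* Sketch only: advertise 10/100 in both duplex modes plus
	 * symmetric/asymmetric pause, roughly equivalent to the converted
	 * atl1e MEDIA_TYPE_AUTO_SENSOR case. */
	static u16 build_adv_sketch(void)
	{
		u16 adv = ADVERTISE_CSMA;	/* IEEE 802.3 selector field */

		adv |= ADVERTISE_ALL;		/* 10/100, half and full duplex */
		adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		return adv;
	}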
diff --git a/drivers/net/atl1e/atl1e_hw.h b/drivers/net/atl1e/atl1e_hw.h
index 5ea2f4d86cfa..74df16aef793 100644
--- a/drivers/net/atl1e/atl1e_hw.h
+++ b/drivers/net/atl1e/atl1e_hw.h
@@ -629,127 +629,24 @@ s32 atl1e_restart_autoneg(struct atl1e_hw *hw);
 
 /***************************** MII definition ***************************************/
 /* PHY Common Register */
-#define MII_BMCR			0x00
-#define MII_BMSR			0x01
-#define MII_PHYSID1			0x02
-#define MII_PHYSID2			0x03
-#define MII_ADVERTISE			0x04
-#define MII_LPA				0x05
-#define MII_EXPANSION			0x06
-#define MII_AT001_CR			0x09
-#define MII_AT001_SR			0x0A
-#define MII_AT001_ESR			0x0F
 #define MII_AT001_PSCR			0x10
 #define MII_AT001_PSSR			0x11
 #define MII_INT_CTRL			0x12
 #define MII_INT_STATUS			0x13
 #define MII_SMARTSPEED			0x14
-#define MII_RERRCOUNTER			0x15
-#define MII_SREVISION			0x16
-#define MII_RESV1			0x17
 #define MII_LBRERROR			0x18
-#define MII_PHYADDR			0x19
 #define MII_RESV2			0x1a
-#define MII_TPISTATUS			0x1b
-#define MII_NCONFIG			0x1c
 
 #define MII_DBG_ADDR			0x1D
 #define MII_DBG_DATA			0x1E
 
-
-/* PHY Control Register */
-#define MII_CR_SPEED_SELECT_MSB		0x0040  /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_COLL_TEST_ENABLE		0x0080  /* Collision test enable */
-#define MII_CR_FULL_DUPLEX		0x0100  /* FDX =1, half duplex =0 */
-#define MII_CR_RESTART_AUTO_NEG		0x0200  /* Restart auto negotiation */
-#define MII_CR_ISOLATE			0x0400  /* Isolate PHY from MII */
-#define MII_CR_POWER_DOWN		0x0800  /* Power down */
-#define MII_CR_AUTO_NEG_EN		0x1000  /* Auto Neg Enable */
-#define MII_CR_SPEED_SELECT_LSB		0x2000  /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_LOOPBACK			0x4000  /* 0 = normal, 1 = loopback */
-#define MII_CR_RESET			0x8000  /* 0 = normal, 1 = PHY reset */
-#define MII_CR_SPEED_MASK		0x2040
-#define MII_CR_SPEED_1000		0x0040
-#define MII_CR_SPEED_100		0x2000
-#define MII_CR_SPEED_10			0x0000
-
-
-/* PHY Status Register */
-#define MII_SR_EXTENDED_CAPS		0x0001  /* Extended register capabilities */
-#define MII_SR_JABBER_DETECT		0x0002  /* Jabber Detected */
-#define MII_SR_LINK_STATUS		0x0004  /* Link Status 1 = link */
-#define MII_SR_AUTONEG_CAPS		0x0008  /* Auto Neg Capable */
-#define MII_SR_REMOTE_FAULT		0x0010  /* Remote Fault Detect */
-#define MII_SR_AUTONEG_COMPLETE		0x0020  /* Auto Neg Complete */
-#define MII_SR_PREAMBLE_SUPPRESS	0x0040  /* Preamble may be suppressed */
-#define MII_SR_EXTENDED_STATUS		0x0100  /* Ext. status info in Reg 0x0F */
-#define MII_SR_100T2_HD_CAPS		0x0200  /* 100T2 Half Duplex Capable */
-#define MII_SR_100T2_FD_CAPS		0x0400  /* 100T2 Full Duplex Capable */
-#define MII_SR_10T_HD_CAPS		0x0800  /* 10T   Half Duplex Capable */
-#define MII_SR_10T_FD_CAPS		0x1000  /* 10T   Full Duplex Capable */
-#define MII_SR_100X_HD_CAPS		0x2000  /* 100X  Half Duplex Capable */
-#define MII_SR_100X_FD_CAPS		0x4000  /* 100X  Full Duplex Capable */
-#define MII_SR_100T4_CAPS		0x8000  /* 100T4 Capable */
-
-/* Link partner ability register. */
-#define MII_LPA_SLCT			0x001f  /* Same as advertise selector */
-#define MII_LPA_10HALF			0x0020  /* Can do 10mbps half-duplex */
-#define MII_LPA_10FULL			0x0040  /* Can do 10mbps full-duplex */
-#define MII_LPA_100HALF			0x0080  /* Can do 100mbps half-duplex */
-#define MII_LPA_100FULL			0x0100  /* Can do 100mbps full-duplex */
-#define MII_LPA_100BASE4		0x0200  /* 100BASE-T4 */
-#define MII_LPA_PAUSE			0x0400  /* PAUSE */
-#define MII_LPA_ASYPAUSE		0x0800  /* Asymmetrical PAUSE */
-#define MII_LPA_RFAULT			0x2000  /* Link partner faulted */
-#define MII_LPA_LPACK			0x4000  /* Link partner acked us */
-#define MII_LPA_NPAGE			0x8000  /* Next page bit */
-
 /* Autoneg Advertisement Register */
-#define MII_AR_SELECTOR_FIELD		0x0001  /* indicates IEEE 802.3 CSMA/CD */
-#define MII_AR_10T_HD_CAPS		0x0020  /* 10T   Half Duplex Capable */
-#define MII_AR_10T_FD_CAPS		0x0040  /* 10T   Full Duplex Capable */
-#define MII_AR_100TX_HD_CAPS		0x0080  /* 100TX Half Duplex Capable */
-#define MII_AR_100TX_FD_CAPS		0x0100  /* 100TX Full Duplex Capable */
-#define MII_AR_100T4_CAPS		0x0200  /* 100T4 Capable */
-#define MII_AR_PAUSE			0x0400  /* Pause operation desired */
-#define MII_AR_ASM_DIR			0x0800  /* Asymmetric Pause Direction bit */
-#define MII_AR_REMOTE_FAULT		0x2000  /* Remote Fault detected */
-#define MII_AR_NEXT_PAGE		0x8000  /* Next Page ability supported */
-#define MII_AR_SPEED_MASK		0x01E0
-#define MII_AR_DEFAULT_CAP_MASK		0x0DE0
+#define MII_AR_DEFAULT_CAP_MASK		0
 
 /* 1000BASE-T Control Register */
-#define MII_AT001_CR_1000T_HD_CAPS	0x0100 /* Advertise 1000T HD capability */
-#define MII_AT001_CR_1000T_FD_CAPS	0x0200 /* Advertise 1000T FD capability */
-#define MII_AT001_CR_1000T_REPEATER_DTE	0x0400 /* 1=Repeater/switch device port */
-						/* 0=DTE device */
-#define MII_AT001_CR_1000T_MS_VALUE	0x0800 /* 1=Configure PHY as Master */
-						/* 0=Configure PHY as Slave */
-#define MII_AT001_CR_1000T_MS_ENABLE	0x1000 /* 1=Master/Slave manual config value */
-						/* 0=Automatic Master/Slave config */
-#define MII_AT001_CR_1000T_TEST_MODE_NORMAL	0x0000 /* Normal Operation */
-#define MII_AT001_CR_1000T_TEST_MODE_1	0x2000 /* Transmit Waveform test */
-#define MII_AT001_CR_1000T_TEST_MODE_2	0x4000 /* Master Transmit Jitter test */
-#define MII_AT001_CR_1000T_TEST_MODE_3	0x6000 /* Slave Transmit Jitter test */
-#define MII_AT001_CR_1000T_TEST_MODE_4	0x8000 /* Transmitter Distortion test */
-#define MII_AT001_CR_1000T_SPEED_MASK	0x0300
-#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK	0x0300
-
-/* 1000BASE-T Status Register */
-#define MII_AT001_SR_1000T_LP_HD_CAPS	0x0400 /* LP is 1000T HD capable */
-#define MII_AT001_SR_1000T_LP_FD_CAPS	0x0800 /* LP is 1000T FD capable */
-#define MII_AT001_SR_1000T_REMOTE_RX_STATUS	0x1000 /* Remote receiver OK */
-#define MII_AT001_SR_1000T_LOCAL_RX_STATUS	0x2000 /* Local receiver OK */
-#define MII_AT001_SR_1000T_MS_CONFIG_RES	0x4000 /* 1=Local TX is Master, 0=Slave */
-#define MII_AT001_SR_1000T_MS_CONFIG_FAULT	0x8000 /* Master/Slave config fault */
-#define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT	12
-#define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT	13
-
-/* Extended Status Register */
-#define MII_AT001_ESR_1000T_HD_CAPS	0x1000 /* 1000T HD capable */
-#define MII_AT001_ESR_1000T_FD_CAPS	0x2000 /* 1000T FD capable */
-#define MII_AT001_ESR_1000X_HD_CAPS	0x4000 /* 1000X HD capable */
-#define MII_AT001_ESR_1000X_FD_CAPS	0x8000 /* 1000X FD capable */
+#define MII_AT001_CR_1000T_SPEED_MASK \
+	(ADVERTISE_1000FULL | ADVERTISE_1000HALF)
+#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK	MII_AT001_CR_1000T_SPEED_MASK
 
 /* AT001 PHY Specific Control Register */
 #define MII_AT001_PSCR_JABBER_DISABLE	0x0001 /* 1=Jabber Function disabled */
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index e28f8baf394e..bf7500ccd73f 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -2051,9 +2051,9 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
 	atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
 	atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
 
-	mii_advertise_data = MII_AR_10T_HD_CAPS;
+	mii_advertise_data = ADVERTISE_10HALF;
 
-	if ((atl1e_write_phy_reg(hw, MII_AT001_CR, 0) != 0) ||
+	if ((atl1e_write_phy_reg(hw, MII_CTRL1000, 0) != 0) ||
 	    (atl1e_write_phy_reg(hw,
 		   MII_ADVERTISE, mii_advertise_data) != 0) ||
 	    (atl1e_phy_commit(hw)) != 0) {
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 3b527687c28f..67f40b9c16ed 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -950,6 +950,7 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
 	hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
 	adapter->wol = 0;
+	device_set_wakeup_enable(&adapter->pdev->dev, false);
 	adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
 	adapter->ict = 50000;		/* 100ms */
 	adapter->link_speed = SPEED_0;	/* hardware init */
@@ -2735,15 +2736,15 @@ static int atl1_close(struct net_device *netdev)
 }
 
 #ifdef CONFIG_PM
-static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
+static int atl1_suspend(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct atl1_adapter *adapter = netdev_priv(netdev);
 	struct atl1_hw *hw = &adapter->hw;
 	u32 ctrl = 0;
 	u32 wufc = adapter->wol;
 	u32 val;
-	int retval;
 	u16 speed;
 	u16 duplex;
 
@@ -2751,17 +2752,15 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
 	if (netif_running(netdev))
 		atl1_down(adapter);
 
-	retval = pci_save_state(pdev);
-	if (retval)
-		return retval;
-
 	atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
 	atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
 	val = ctrl & BMSR_LSTATUS;
 	if (val)
 		wufc &= ~ATLX_WUFC_LNKC;
+	if (!wufc)
+		goto disable_wol;
 
-	if (val && wufc) {
+	if (val) {
 		val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
 		if (val) {
 			if (netif_msg_ifdown(adapter))
@@ -2798,23 +2797,18 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
 		ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
 		iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
 		ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
-
-		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
-		goto exit;
-	}
-
-	if (!val && wufc) {
+	} else {
 		ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
 		iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
 		ioread32(hw->hw_addr + REG_WOL_CTRL);
 		iowrite32(0, hw->hw_addr + REG_MAC_CTRL);
 		ioread32(hw->hw_addr + REG_MAC_CTRL);
 		hw->phy_configured = false;
-		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
-		goto exit;
 	}
 
-disable_wol:
+	return 0;
+
+ disable_wol:
 	iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
 	ioread32(hw->hw_addr + REG_WOL_CTRL);
 	ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
@@ -2822,37 +2816,17 @@ disable_wol:
 	iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
 	ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
 	hw->phy_configured = false;
-	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
-exit:
-	if (netif_running(netdev))
-		pci_disable_msi(adapter->pdev);
-	pci_disable_device(pdev);
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 
 	return 0;
 }
 
-static int atl1_resume(struct pci_dev *pdev)
+static int atl1_resume(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct atl1_adapter *adapter = netdev_priv(netdev);
-	u32 err;
 
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-
-	err = pci_enable_device(pdev);
-	if (err) {
-		if (netif_msg_ifup(adapter))
-			dev_printk(KERN_DEBUG, &pdev->dev,
-				"error enabling pci device\n");
-		return err;
-	}
-
-	pci_set_master(pdev);
 	iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
-	pci_enable_wake(pdev, PCI_D3hot, 0);
-	pci_enable_wake(pdev, PCI_D3cold, 0);
 
 	atl1_reset_hw(&adapter->hw);
 
@@ -2864,16 +2838,25 @@ static int atl1_resume(struct pci_dev *pdev)
 
 	return 0;
 }
+
+static SIMPLE_DEV_PM_OPS(atl1_pm_ops, atl1_suspend, atl1_resume);
+#define ATL1_PM_OPS	(&atl1_pm_ops)
+
 #else
-#define atl1_suspend NULL
-#define atl1_resume NULL
+
+static int atl1_suspend(struct device *dev) { return 0; }
+
+#define ATL1_PM_OPS	NULL
 #endif
 
 static void atl1_shutdown(struct pci_dev *pdev)
 {
-#ifdef CONFIG_PM
-	atl1_suspend(pdev, PMSG_SUSPEND);
-#endif
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+
+	atl1_suspend(&pdev->dev);
+	pci_wake_from_d3(pdev, adapter->wol);
+	pci_set_power_state(pdev, PCI_D3hot);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3117,9 +3100,8 @@ static struct pci_driver atl1_driver = {
 	.id_table = atl1_pci_tbl,
 	.probe = atl1_probe,
 	.remove = __devexit_p(atl1_remove),
-	.suspend = atl1_suspend,
-	.resume = atl1_resume,
-	.shutdown = atl1_shutdown
+	.shutdown = atl1_shutdown,
+	.driver.pm = ATL1_PM_OPS,
 };
 
 /*
@@ -3409,6 +3391,9 @@ static int atl1_set_wol(struct net_device *netdev,
 	adapter->wol = 0;
 	if (wol->wolopts & WAKE_MAGIC)
 		adapter->wol |= ATLX_WUFC_MAG;
+
+	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
 	return 0;
 }
 
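
The atl1 change above is the standard migration from the legacy PCI .suspend/.resume hooks to dev_pm_ops: the PCI core now handles pci_save_state(), pci_set_power_state() and wake enabling, so the driver callbacks keep only device-specific work. A minimal sketch of the pattern — the foo_* names are placeholders, not from this commit:

	#include <linux/pci.h>
	#include <linux/pm.h>

	static int foo_suspend(struct device *dev)
	{
		/* quiesce the device only; the PCI core now performs
		 * pci_save_state()/pci_set_power_state() on our behalf */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* device re-initialization only */
		return 0;
	}

	/* expands to a struct dev_pm_ops wired for suspend/resume and
	 * hibernation callbacks */
	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct pci_driver foo_driver = {
		/* .id_table, .probe, .remove as usual ... */
		.driver.pm = &foo_pm_ops,	/* replaces legacy .suspend/.resume */
	};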
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 4bebff3faeab..e7cb8c8b9776 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -9,7 +9,7 @@
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12*/ 12 */
13 13
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
@@ -17,46 +17,45 @@
17#include <linux/isapnp.h> 17#include <linux/isapnp.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/io.h>
20#include <linux/platform_device.h> 21#include <linux/platform_device.h>
21#include <linux/delay.h> 22#include <linux/delay.h>
22#include <linux/timer.h> 23#include <linux/timer.h>
23#include <linux/netdevice.h> 24#include <linux/netdevice.h>
24#include <linux/etherdevice.h> 25#include <linux/etherdevice.h>
25#include <linux/ethtool.h> 26#include <linux/ethtool.h>
26#include <linux/mii.h> 27#include <linux/mdio-bitbang.h>
28#include <linux/phy.h>
27#include <linux/eeprom_93cx6.h> 29#include <linux/eeprom_93cx6.h>
28#include <linux/slab.h> 30#include <linux/slab.h>
29 31
30#include <net/ax88796.h> 32#include <net/ax88796.h>
31 33
32#include <asm/system.h> 34#include <asm/system.h>
33#include <asm/io.h>
34
35static int phy_debug = 0;
36 35
37/* Rename the lib8390.c functions to show that they are in this driver */ 36/* Rename the lib8390.c functions to show that they are in this driver */
38#define __ei_open ax_ei_open 37#define __ei_open ax_ei_open
39#define __ei_close ax_ei_close 38#define __ei_close ax_ei_close
40#define __ei_poll ax_ei_poll 39#define __ei_poll ax_ei_poll
41#define __ei_start_xmit ax_ei_start_xmit 40#define __ei_start_xmit ax_ei_start_xmit
42#define __ei_tx_timeout ax_ei_tx_timeout 41#define __ei_tx_timeout ax_ei_tx_timeout
43#define __ei_get_stats ax_ei_get_stats 42#define __ei_get_stats ax_ei_get_stats
44#define __ei_set_multicast_list ax_ei_set_multicast_list 43#define __ei_set_multicast_list ax_ei_set_multicast_list
45#define __ei_interrupt ax_ei_interrupt 44#define __ei_interrupt ax_ei_interrupt
46#define ____alloc_ei_netdev ax__alloc_ei_netdev 45#define ____alloc_ei_netdev ax__alloc_ei_netdev
47#define __NS8390_init ax_NS8390_init 46#define __NS8390_init ax_NS8390_init
48 47
49/* force unsigned long back to 'void __iomem *' */ 48/* force unsigned long back to 'void __iomem *' */
50#define ax_convert_addr(_a) ((void __force __iomem *)(_a)) 49#define ax_convert_addr(_a) ((void __force __iomem *)(_a))
51 50
52#define ei_inb(_a) readb(ax_convert_addr(_a)) 51#define ei_inb(_a) readb(ax_convert_addr(_a))
53#define ei_outb(_v, _a) writeb(_v, ax_convert_addr(_a)) 52#define ei_outb(_v, _a) writeb(_v, ax_convert_addr(_a))
54 53
55#define ei_inb_p(_a) ei_inb(_a) 54#define ei_inb_p(_a) ei_inb(_a)
56#define ei_outb_p(_v, _a) ei_outb(_v, _a) 55#define ei_outb_p(_v, _a) ei_outb(_v, _a)
57 56
58/* define EI_SHIFT() to take into account our register offsets */ 57/* define EI_SHIFT() to take into account our register offsets */
59#define EI_SHIFT(x) (ei_local->reg_offset[(x)]) 58#define EI_SHIFT(x) (ei_local->reg_offset[(x)])
60 59
61/* Ensure we have our RCR base value */ 60/* Ensure we have our RCR base value */
62#define AX88796_PLATFORM 61#define AX88796_PLATFORM
@@ -74,43 +73,46 @@ static unsigned char version[] = "ax88796.c: Copyright 2005,2007 Simtec Electron
74#define NE_DATAPORT EI_SHIFT(0x10) 73#define NE_DATAPORT EI_SHIFT(0x10)
75 74
76#define NE1SM_START_PG 0x20 /* First page of TX buffer */ 75#define NE1SM_START_PG 0x20 /* First page of TX buffer */
77#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */ 76#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */
78#define NESM_START_PG 0x40 /* First page of TX buffer */ 77#define NESM_START_PG 0x40 /* First page of TX buffer */
79#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ 78#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
80 79
80#define AX_GPOC_PPDSET BIT(6)
81
81/* device private data */ 82/* device private data */
82 83
83struct ax_device { 84struct ax_device {
84 struct timer_list mii_timer; 85 struct mii_bus *mii_bus;
85 spinlock_t mii_lock; 86 struct mdiobb_ctrl bb_ctrl;
86 struct mii_if_info mii; 87 struct phy_device *phy_dev;
87 88 void __iomem *addr_memr;
88 u32 msg_enable; 89 u8 reg_memr;
89 void __iomem *map2; 90 int link;
90 struct platform_device *dev; 91 int speed;
91 struct resource *mem; 92 int duplex;
92 struct resource *mem2; 93
93 struct ax_plat_data *plat; 94 void __iomem *map2;
94 95 const struct ax_plat_data *plat;
95 unsigned char running; 96
96 unsigned char resume_open; 97 unsigned char running;
97 unsigned int irqflags; 98 unsigned char resume_open;
98 99 unsigned int irqflags;
99 u32 reg_offsets[0x20]; 100
101 u32 reg_offsets[0x20];
100}; 102};
101 103
102static inline struct ax_device *to_ax_dev(struct net_device *dev) 104static inline struct ax_device *to_ax_dev(struct net_device *dev)
103{ 105{
104 struct ei_device *ei_local = netdev_priv(dev); 106 struct ei_device *ei_local = netdev_priv(dev);
105 return (struct ax_device *)(ei_local+1); 107 return (struct ax_device *)(ei_local + 1);
106} 108}
107 109
108/* ax_initial_check 110/*
111 * ax_initial_check
109 * 112 *
110 * do an initial probe for the card to check wether it exists 113 * do an initial probe for the card to check wether it exists
111 * and is functional 114 * and is functional
112 */ 115 */
113
114static int ax_initial_check(struct net_device *dev) 116static int ax_initial_check(struct net_device *dev)
115{ 117{
116 struct ei_device *ei_local = netdev_priv(dev); 118 struct ei_device *ei_local = netdev_priv(dev);
@@ -122,10 +124,10 @@ static int ax_initial_check(struct net_device *dev)
122 if (reg0 == 0xFF) 124 if (reg0 == 0xFF)
123 return -ENODEV; 125 return -ENODEV;
124 126
125 ei_outb(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD); 127 ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD);
126 regd = ei_inb(ioaddr + 0x0d); 128 regd = ei_inb(ioaddr + 0x0d);
127 ei_outb(0xff, ioaddr + 0x0d); 129 ei_outb(0xff, ioaddr + 0x0d);
128 ei_outb(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD); 130 ei_outb(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD);
129 ei_inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */ 131 ei_inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
130 if (ei_inb(ioaddr + EN0_COUNTER0) != 0) { 132 if (ei_inb(ioaddr + EN0_COUNTER0) != 0) {
131 ei_outb(reg0, ioaddr); 133 ei_outb(reg0, ioaddr);
@@ -136,29 +138,28 @@ static int ax_initial_check(struct net_device *dev)
136 return 0; 138 return 0;
137} 139}
138 140
139/* Hard reset the card. This used to pause for the same period that a 141/*
140 8390 reset command required, but that shouldn't be necessary. */ 142 * Hard reset the card. This used to pause for the same period that a
141 143 * 8390 reset command required, but that shouldn't be necessary.
144 */
142static void ax_reset_8390(struct net_device *dev) 145static void ax_reset_8390(struct net_device *dev)
143{ 146{
144 struct ei_device *ei_local = netdev_priv(dev); 147 struct ei_device *ei_local = netdev_priv(dev);
145 struct ax_device *ax = to_ax_dev(dev);
146 unsigned long reset_start_time = jiffies; 148 unsigned long reset_start_time = jiffies;
147 void __iomem *addr = (void __iomem *)dev->base_addr; 149 void __iomem *addr = (void __iomem *)dev->base_addr;
148 150
149 if (ei_debug > 1) 151 if (ei_debug > 1)
150 dev_dbg(&ax->dev->dev, "resetting the 8390 t=%ld\n", jiffies); 152 netdev_dbg(dev, "resetting the 8390 t=%ld\n", jiffies);
151 153
152 ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET); 154 ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
153 155
154 ei_status.txing = 0; 156 ei_local->txing = 0;
155 ei_status.dmaing = 0; 157 ei_local->dmaing = 0;
156 158
157 /* This check _should_not_ be necessary, omit eventually. */ 159 /* This check _should_not_ be necessary, omit eventually. */
158 while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) { 160 while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
159 if (jiffies - reset_start_time > 2*HZ/100) { 161 if (jiffies - reset_start_time > 2 * HZ / 100) {
160 dev_warn(&ax->dev->dev, "%s: %s did not complete.\n", 162 netdev_warn(dev, "%s: did not complete.\n", __func__);
161 __func__, dev->name);
162 break; 163 break;
163 } 164 }
164 } 165 }
@@ -171,70 +172,72 @@ static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
171 int ring_page) 172 int ring_page)
172{ 173{
173 struct ei_device *ei_local = netdev_priv(dev); 174 struct ei_device *ei_local = netdev_priv(dev);
174 struct ax_device *ax = to_ax_dev(dev);
175 void __iomem *nic_base = ei_local->mem; 175 void __iomem *nic_base = ei_local->mem;
176 176
177 /* This *shouldn't* happen. If it does, it's the last thing you'll see */ 177 /* This *shouldn't* happen. If it does, it's the last thing you'll see */
178 if (ei_status.dmaing) { 178 if (ei_local->dmaing) {
179 dev_err(&ax->dev->dev, "%s: DMAing conflict in %s " 179 netdev_err(dev, "DMAing conflict in %s "
180 "[DMAstat:%d][irqlock:%d].\n", 180 "[DMAstat:%d][irqlock:%d].\n",
181 dev->name, __func__, 181 __func__,
182 ei_status.dmaing, ei_status.irqlock); 182 ei_local->dmaing, ei_local->irqlock);
183 return; 183 return;
184 } 184 }
185 185
186 ei_status.dmaing |= 0x01; 186 ei_local->dmaing |= 0x01;
187 ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); 187 ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
188 ei_outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); 188 ei_outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
189 ei_outb(0, nic_base + EN0_RCNTHI); 189 ei_outb(0, nic_base + EN0_RCNTHI);
190 ei_outb(0, nic_base + EN0_RSARLO); /* On page boundary */ 190 ei_outb(0, nic_base + EN0_RSARLO); /* On page boundary */
191 ei_outb(ring_page, nic_base + EN0_RSARHI); 191 ei_outb(ring_page, nic_base + EN0_RSARHI);
192 ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); 192 ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
193 193
194 if (ei_status.word16) 194 if (ei_local->word16)
195 readsw(nic_base + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1); 195 readsw(nic_base + NE_DATAPORT, hdr,
196 sizeof(struct e8390_pkt_hdr) >> 1);
196 else 197 else
197 readsb(nic_base + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)); 198 readsb(nic_base + NE_DATAPORT, hdr,
199 sizeof(struct e8390_pkt_hdr));
198 200
199 ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ 201 ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
200 ei_status.dmaing &= ~0x01; 202 ei_local->dmaing &= ~0x01;
201 203
202 le16_to_cpus(&hdr->count); 204 le16_to_cpus(&hdr->count);
203} 205}
204 206
205 207
206/* Block input and output, similar to the Crynwr packet driver. If you 208/*
207 are porting to a new ethercard, look at the packet driver source for hints. 209 * Block input and output, similar to the Crynwr packet driver. If
208 The NEx000 doesn't share the on-board packet memory -- you have to put 210 * you are porting to a new ethercard, look at the packet driver
209 the packet out through the "remote DMA" dataport using ei_outb. */ 211 * source for hints. The NEx000 doesn't share the on-board packet
210 212 * memory -- you have to put the packet out through the "remote DMA"
213 * dataport using ei_outb.
214 */
211static void ax_block_input(struct net_device *dev, int count, 215static void ax_block_input(struct net_device *dev, int count,
212 struct sk_buff *skb, int ring_offset) 216 struct sk_buff *skb, int ring_offset)
213{ 217{
214 struct ei_device *ei_local = netdev_priv(dev); 218 struct ei_device *ei_local = netdev_priv(dev);
215 struct ax_device *ax = to_ax_dev(dev);
216 void __iomem *nic_base = ei_local->mem; 219 void __iomem *nic_base = ei_local->mem;
217 char *buf = skb->data; 220 char *buf = skb->data;
218 221
219 if (ei_status.dmaing) { 222 if (ei_local->dmaing) {
220 dev_err(&ax->dev->dev, 223 netdev_err(dev,
221 "%s: DMAing conflict in %s " 224 "DMAing conflict in %s "
222 "[DMAstat:%d][irqlock:%d].\n", 225 "[DMAstat:%d][irqlock:%d].\n",
223 dev->name, __func__, 226 __func__,
224 ei_status.dmaing, ei_status.irqlock); 227 ei_local->dmaing, ei_local->irqlock);
225 return; 228 return;
226 } 229 }
227 230
228 ei_status.dmaing |= 0x01; 231 ei_local->dmaing |= 0x01;
229 232
230 ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); 233 ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + NE_CMD);
231 ei_outb(count & 0xff, nic_base + EN0_RCNTLO); 234 ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
232 ei_outb(count >> 8, nic_base + EN0_RCNTHI); 235 ei_outb(count >> 8, nic_base + EN0_RCNTHI);
233 ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO); 236 ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO);
234 ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI); 237 ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI);
235 ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); 238 ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
236 239
237 if (ei_status.word16) { 240 if (ei_local->word16) {
238 readsw(nic_base + NE_DATAPORT, buf, count >> 1); 241 readsw(nic_base + NE_DATAPORT, buf, count >> 1);
239 if (count & 0x01) 242 if (count & 0x01)
240 buf[count-1] = ei_inb(nic_base + NE_DATAPORT); 243 buf[count-1] = ei_inb(nic_base + NE_DATAPORT);
@@ -243,34 +246,34 @@ static void ax_block_input(struct net_device *dev, int count,
243 readsb(nic_base + NE_DATAPORT, buf, count); 246 readsb(nic_base + NE_DATAPORT, buf, count);
244 } 247 }
245 248
246 ei_status.dmaing &= ~1; 249 ei_local->dmaing &= ~1;
247} 250}
248 251
249static void ax_block_output(struct net_device *dev, int count, 252static void ax_block_output(struct net_device *dev, int count,
250 const unsigned char *buf, const int start_page) 253 const unsigned char *buf, const int start_page)
251{ 254{
252 struct ei_device *ei_local = netdev_priv(dev); 255 struct ei_device *ei_local = netdev_priv(dev);
253 struct ax_device *ax = to_ax_dev(dev);
254 void __iomem *nic_base = ei_local->mem; 256 void __iomem *nic_base = ei_local->mem;
255 unsigned long dma_start; 257 unsigned long dma_start;
256 258
257 /* Round the count up for word writes. Do we need to do this? 259 /*
258 What effect will an odd byte count have on the 8390? 260 * Round the count up for word writes. Do we need to do this?
259 I should check someday. */ 261 * What effect will an odd byte count have on the 8390? I
260 262 * should check someday.
261 if (ei_status.word16 && (count & 0x01)) 263 */
264 if (ei_local->word16 && (count & 0x01))
262 count++; 265 count++;
263 266
264 /* This *shouldn't* happen. If it does, it's the last thing you'll see */ 267 /* This *shouldn't* happen. If it does, it's the last thing you'll see */
265 if (ei_status.dmaing) { 268 if (ei_local->dmaing) {
266 dev_err(&ax->dev->dev, "%s: DMAing conflict in %s." 269 netdev_err(dev, "DMAing conflict in %s."
267 "[DMAstat:%d][irqlock:%d]\n", 270 "[DMAstat:%d][irqlock:%d]\n",
268 dev->name, __func__, 271 __func__,
269 ei_status.dmaing, ei_status.irqlock); 272 ei_local->dmaing, ei_local->irqlock);
270 return; 273 return;
271 } 274 }
272 275
273 ei_status.dmaing |= 0x01; 276 ei_local->dmaing |= 0x01;
274 /* We should already be in page 0, but to be safe... */ 277 /* We should already be in page 0, but to be safe... */
275 ei_outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); 278 ei_outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
276 279
@@ -278,250 +281,170 @@ static void ax_block_output(struct net_device *dev, int count,
278 281
279 /* Now the normal output. */ 282 /* Now the normal output. */
280 ei_outb(count & 0xff, nic_base + EN0_RCNTLO); 283 ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
281 ei_outb(count >> 8, nic_base + EN0_RCNTHI); 284 ei_outb(count >> 8, nic_base + EN0_RCNTHI);
282 ei_outb(0x00, nic_base + EN0_RSARLO); 285 ei_outb(0x00, nic_base + EN0_RSARLO);
283 ei_outb(start_page, nic_base + EN0_RSARHI); 286 ei_outb(start_page, nic_base + EN0_RSARHI);
284 287
285 ei_outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD); 288 ei_outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
286 if (ei_status.word16) { 289 if (ei_local->word16)
287 writesw(nic_base + NE_DATAPORT, buf, count>>1); 290 writesw(nic_base + NE_DATAPORT, buf, count >> 1);
288 } else { 291 else
289 writesb(nic_base + NE_DATAPORT, buf, count); 292 writesb(nic_base + NE_DATAPORT, buf, count);
290 }
291 293
292 dma_start = jiffies; 294 dma_start = jiffies;
293 295
294 while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) { 296 while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
295 if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ 297 if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */
296 dev_warn(&ax->dev->dev, 298 netdev_warn(dev, "timeout waiting for Tx RDC.\n");
297 "%s: timeout waiting for Tx RDC.\n", dev->name);
298 ax_reset_8390(dev); 299 ax_reset_8390(dev);
299 ax_NS8390_init(dev,1); 300 ax_NS8390_init(dev, 1);
300 break; 301 break;
301 } 302 }
302 } 303 }
303 304
304 ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ 305 ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
305 ei_status.dmaing &= ~0x01; 306 ei_local->dmaing &= ~0x01;
306} 307}
307 308
 /* definitions for accessing MII/EEPROM interface */
 
 #define AX_MEMR			EI_SHIFT(0x14)
-#define AX_MEMR_MDC		(1<<0)
-#define AX_MEMR_MDIR		(1<<1)
-#define AX_MEMR_MDI		(1<<2)
-#define AX_MEMR_MDO		(1<<3)
-#define AX_MEMR_EECS		(1<<4)
-#define AX_MEMR_EEI		(1<<5)
-#define AX_MEMR_EEO		(1<<6)
-#define AX_MEMR_EECLK		(1<<7)
+#define AX_MEMR_MDC		BIT(0)
+#define AX_MEMR_MDIR		BIT(1)
+#define AX_MEMR_MDI		BIT(2)
+#define AX_MEMR_MDO		BIT(3)
+#define AX_MEMR_EECS		BIT(4)
+#define AX_MEMR_EEI		BIT(5)
+#define AX_MEMR_EEO		BIT(6)
+#define AX_MEMR_EECLK		BIT(7)
 
-/* ax_mii_ei_outbits
- *
- * write the specified set of bits to the phy
-*/
-
-static void
-ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len)
-{
-	struct ei_device *ei_local = netdev_priv(dev);
-	void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
-	unsigned int memr;
-
-	/* clock low, data to output mode */
-	memr = ei_inb(memr_addr);
-	memr &= ~(AX_MEMR_MDC | AX_MEMR_MDIR);
-	ei_outb(memr, memr_addr);
-
-	for (len--; len >= 0; len--) {
-		if (bits & (1 << len))
-			memr |= AX_MEMR_MDO;
-		else
-			memr &= ~AX_MEMR_MDO;
-
-		ei_outb(memr, memr_addr);
-
-		/* clock high */
-
-		ei_outb(memr | AX_MEMR_MDC, memr_addr);
-		udelay(1);
-
-		/* clock low */
-		ei_outb(memr, memr_addr);
-	}
-
-	/* leaves the clock line low, mdir input */
-	memr |= AX_MEMR_MDIR;
-	ei_outb(memr, (void __iomem *)dev->base_addr + AX_MEMR);
-}
-
-/* ax_phy_ei_inbits
- *
- * read a specified number of bits from the phy
-*/
-
-static unsigned int
-ax_phy_ei_inbits(struct net_device *dev, int no)
-{
-	struct ei_device *ei_local = netdev_priv(dev);
-	void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
-	unsigned int memr;
-	unsigned int result = 0;
-
-	/* clock low, data to input mode */
-	memr = ei_inb(memr_addr);
-	memr &= ~AX_MEMR_MDC;
-	memr |= AX_MEMR_MDIR;
-	ei_outb(memr, memr_addr);
-
-	for (no--; no >= 0; no--) {
-		ei_outb(memr | AX_MEMR_MDC, memr_addr);
-
-		udelay(1);
-
-		if (ei_inb(memr_addr) & AX_MEMR_MDI)
-			result |= (1<<no);
-
-		ei_outb(memr, memr_addr);
-	}
-
-	return result;
-}
-
-/* ax_phy_issueaddr
- *
- * use the low level bit shifting routines to send the address
- * and command to the specified phy
-*/
-
-static void
-ax_phy_issueaddr(struct net_device *dev, int phy_addr, int reg, int opc)
-{
-	if (phy_debug)
-		pr_debug("%s: dev %p, %04x, %04x, %d\n",
-			__func__, dev, phy_addr, reg, opc);
-
-	ax_mii_ei_outbits(dev, 0x3f, 6);	/* pre-amble */
-	ax_mii_ei_outbits(dev, 1, 2);		/* frame-start */
-	ax_mii_ei_outbits(dev, opc, 2);		/* op code */
-	ax_mii_ei_outbits(dev, phy_addr, 5);	/* phy address */
-	ax_mii_ei_outbits(dev, reg, 5);		/* reg address */
-}
-
-static int
-ax_phy_read(struct net_device *dev, int phy_addr, int reg)
-{
-	struct ei_device *ei_local = netdev_priv(dev);
-	unsigned long flags;
-	unsigned int result;
-
-	spin_lock_irqsave(&ei_local->page_lock, flags);
-
-	ax_phy_issueaddr(dev, phy_addr, reg, 2);
-
-	result = ax_phy_ei_inbits(dev, 17);
-	result &= ~(3<<16);
-
-	spin_unlock_irqrestore(&ei_local->page_lock, flags);
-
-	if (phy_debug)
-		pr_debug("%s: %04x.%04x => read %04x\n", __func__,
-			 phy_addr, reg, result);
-
-	return result;
-}
-
-static void
-ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value)
-{
-	struct ei_device *ei = netdev_priv(dev);
-	struct ax_device *ax = to_ax_dev(dev);
-	unsigned long flags;
-
-	dev_dbg(&ax->dev->dev, "%s: %p, %04x, %04x %04x\n",
-		__func__, dev, phy_addr, reg, value);
-
-	spin_lock_irqsave(&ei->page_lock, flags);
-
-	ax_phy_issueaddr(dev, phy_addr, reg, 1);
-	ax_mii_ei_outbits(dev, 2, 2);		/* send TA */
-	ax_mii_ei_outbits(dev, value, 16);
-
-	spin_unlock_irqrestore(&ei->page_lock, flags);
-}
-
-static void ax_mii_expiry(unsigned long data)
-{
-	struct net_device *dev = (struct net_device *)data;
-	struct ax_device *ax = to_ax_dev(dev);
-	unsigned long flags;
-
-	spin_lock_irqsave(&ax->mii_lock, flags);
-	mii_check_media(&ax->mii, netif_msg_link(ax), 0);
-	spin_unlock_irqrestore(&ax->mii_lock, flags);
-
-	if (ax->running) {
-		ax->mii_timer.expires = jiffies + HZ*2;
-		add_timer(&ax->mii_timer);
-	}
-}
+static void ax_handle_link_change(struct net_device *dev)
+{
+	struct ax_device *ax = to_ax_dev(dev);
+	struct phy_device *phy_dev = ax->phy_dev;
+	int status_change = 0;
+
+	if (phy_dev->link && ((ax->speed != phy_dev->speed) ||
+			      (ax->duplex != phy_dev->duplex))) {
+
+		ax->speed = phy_dev->speed;
+		ax->duplex = phy_dev->duplex;
+		status_change = 1;
+	}
+
+	if (phy_dev->link != ax->link) {
+		if (!phy_dev->link) {
+			ax->speed = 0;
+			ax->duplex = -1;
+		}
+		ax->link = phy_dev->link;
+
+		status_change = 1;
+	}
+
+	if (status_change)
+		phy_print_status(phy_dev);
+}
+
+static int ax_mii_probe(struct net_device *dev)
+{
+	struct ax_device *ax = to_ax_dev(dev);
+	struct phy_device *phy_dev = NULL;
+	int ret;
+
+	/* find the first phy */
+	phy_dev = phy_find_first(ax->mii_bus);
+	if (!phy_dev) {
+		netdev_err(dev, "no PHY found\n");
+		return -ENODEV;
+	}
+
+	ret = phy_connect_direct(dev, phy_dev, ax_handle_link_change, 0,
+				 PHY_INTERFACE_MODE_MII);
+	if (ret) {
+		netdev_err(dev, "Could not attach to PHY\n");
+		return ret;
+	}
+
+	/* mask with MAC supported features */
+	phy_dev->supported &= PHY_BASIC_FEATURES;
+	phy_dev->advertising = phy_dev->supported;
+
+	ax->phy_dev = phy_dev;
+
+	netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+		    phy_dev->drv->name, dev_name(&phy_dev->dev), phy_dev->irq);
+
+	return 0;
+}
+
+static void ax_phy_switch(struct net_device *dev, int on)
+{
+	struct ei_device *ei_local = netdev_priv(dev);
+	struct ax_device *ax = to_ax_dev(dev);
+
+	u8 reg_gpoc = ax->plat->gpoc_val;
+
+	if (!!on)
+		reg_gpoc &= ~AX_GPOC_PPDSET;
+	else
+		reg_gpoc |= AX_GPOC_PPDSET;
+
+	ei_outb(reg_gpoc, ei_local->mem + EI_SHIFT(0x17));
+}
 
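The block above is the stock phylib conversion: a bitbanged MDIO bus will
supply the PHY, ax_mii_probe() attaches to the first device found on it, and
the adjust-link callback only records and logs state transitions. Stripped to
its skeleton (names prefixed my_ are placeholders, and the 2.6.38-era
phy_connect_direct() signature with a flags argument is assumed), the attach
sequence looks like this:

	/* Hedged sketch of the generic phylib attach sequence. */
	static void my_adjust_link(struct net_device *dev)
	{
		/* phylib calls this on every link/speed/duplex change */
	}

	static int my_attach_phy(struct net_device *dev, struct mii_bus *bus)
	{
		struct phy_device *phy = phy_find_first(bus);
		int ret;

		if (!phy)
			return -ENODEV;

		ret = phy_connect_direct(dev, phy, my_adjust_link, 0,
					 PHY_INTERFACE_MODE_MII);
		if (ret)
			return ret;

		phy->supported &= PHY_BASIC_FEATURES;	/* MAC is 10/100 only */
		phy->advertising = phy->supported;
		phy_start(phy);
		return 0;
	}
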
 static int ax_open(struct net_device *dev)
 {
 	struct ax_device *ax = to_ax_dev(dev);
-	struct ei_device *ei_local = netdev_priv(dev);
 	int ret;
 
-	dev_dbg(&ax->dev->dev, "%s: open\n", dev->name);
+	netdev_dbg(dev, "open\n");
 
 	ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags,
 			  dev->name, dev);
 	if (ret)
-		return ret;
-
-	ret = ax_ei_open(dev);
-	if (ret) {
-		free_irq(dev->irq, dev);
-		return ret;
-	}
+		goto failed_request_irq;
 
 	/* turn the phy on (if turned off) */
+	ax_phy_switch(dev, 1);
 
-	ei_outb(ax->plat->gpoc_val, ei_local->mem + EI_SHIFT(0x17));
-	ax->running = 1;
-
-	/* start the MII timer */
-
-	init_timer(&ax->mii_timer);
+	ret = ax_mii_probe(dev);
+	if (ret)
+		goto failed_mii_probe;
+	phy_start(ax->phy_dev);
 
-	ax->mii_timer.expires = jiffies+1;
-	ax->mii_timer.data = (unsigned long) dev;
-	ax->mii_timer.function = ax_mii_expiry;
+	ret = ax_ei_open(dev);
+	if (ret)
+		goto failed_ax_ei_open;
 
-	add_timer(&ax->mii_timer);
+	ax->running = 1;
 
 	return 0;
+
+ failed_ax_ei_open:
+	phy_disconnect(ax->phy_dev);
+ failed_mii_probe:
+	ax_phy_switch(dev, 0);
+	free_irq(dev->irq, dev);
+ failed_request_irq:
+	return ret;
 }
 
 static int ax_close(struct net_device *dev)
 {
 	struct ax_device *ax = to_ax_dev(dev);
-	struct ei_device *ei_local = netdev_priv(dev);
 
-	dev_dbg(&ax->dev->dev, "%s: close\n", dev->name);
-
-	/* turn the phy off */
-
-	ei_outb(ax->plat->gpoc_val | (1<<6),
-		ei_local->mem + EI_SHIFT(0x17));
+	netdev_dbg(dev, "close\n");
 
 	ax->running = 0;
 	wmb();
 
-	del_timer_sync(&ax->mii_timer);
 	ax_ei_close(dev);
 
+	/* turn the phy off */
+	ax_phy_switch(dev, 0);
+	phy_disconnect(ax->phy_dev);
+
 	free_irq(dev->irq, dev);
 	return 0;
 }
@@ -529,17 +452,15 @@ static int ax_close(struct net_device *dev)
 static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 {
 	struct ax_device *ax = to_ax_dev(dev);
-	unsigned long flags;
-	int rc;
+	struct phy_device *phy_dev = ax->phy_dev;
 
 	if (!netif_running(dev))
 		return -EINVAL;
 
-	spin_lock_irqsave(&ax->mii_lock, flags);
-	rc = generic_mii_ioctl(&ax->mii, if_mii(req), cmd, NULL);
-	spin_unlock_irqrestore(&ax->mii_lock, flags);
+	if (!phy_dev)
+		return -ENODEV;
 
-	return rc;
+	return phy_mii_ioctl(phy_dev, req, cmd);
 }
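With ax_ioctl() now routed through phy_mii_ioctl(), the standard MII ioctls
keep working from userspace. A hedged userspace sketch ("eth0" is a
placeholder interface name) that reads the PHY's BMSR, register 1, through
this path:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/mii.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ifreq ifr;
		struct mii_ioctl_data *mii =
			(struct mii_ioctl_data *)&ifr.ifr_data;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

		if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {  /* fills mii->phy_id */
			mii->reg_num = MII_BMSR;
			if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
				printf("BMSR: 0x%04x\n", mii->val_out);
		}
		close(fd);
		return 0;
	}
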
 
 /* ethtool ops */
@@ -547,56 +468,40 @@ static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 static void ax_get_drvinfo(struct net_device *dev,
 			   struct ethtool_drvinfo *info)
 {
-	struct ax_device *ax = to_ax_dev(dev);
+	struct platform_device *pdev = to_platform_device(dev->dev.parent);
 
 	strcpy(info->driver, DRV_NAME);
 	strcpy(info->version, DRV_VERSION);
-	strcpy(info->bus_info, ax->dev->name);
+	strcpy(info->bus_info, pdev->name);
 }
 
 static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct ax_device *ax = to_ax_dev(dev);
-	unsigned long flags;
+	struct phy_device *phy_dev = ax->phy_dev;
 
-	spin_lock_irqsave(&ax->mii_lock, flags);
-	mii_ethtool_gset(&ax->mii, cmd);
-	spin_unlock_irqrestore(&ax->mii_lock, flags);
+	if (!phy_dev)
+		return -ENODEV;
 
-	return 0;
+	return phy_ethtool_gset(phy_dev, cmd);
 }
 
 static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct ax_device *ax = to_ax_dev(dev);
-	unsigned long flags;
-	int rc;
+	struct phy_device *phy_dev = ax->phy_dev;
 
-	spin_lock_irqsave(&ax->mii_lock, flags);
-	rc = mii_ethtool_sset(&ax->mii, cmd);
-	spin_unlock_irqrestore(&ax->mii_lock, flags);
-
-	return rc;
-}
-
-static int ax_nway_reset(struct net_device *dev)
-{
-	struct ax_device *ax = to_ax_dev(dev);
-	return mii_nway_restart(&ax->mii);
-}
+	if (!phy_dev)
+		return -ENODEV;
 
-static u32 ax_get_link(struct net_device *dev)
-{
-	struct ax_device *ax = to_ax_dev(dev);
-	return mii_link_ok(&ax->mii);
+	return phy_ethtool_sset(phy_dev, cmd);
 }
 
 static const struct ethtool_ops ax_ethtool_ops = {
 	.get_drvinfo		= ax_get_drvinfo,
 	.get_settings		= ax_get_settings,
 	.set_settings		= ax_set_settings,
-	.nway_reset		= ax_nway_reset,
-	.get_link		= ax_get_link,
+	.get_link		= ethtool_op_get_link,
 };
 
 #ifdef CONFIG_AX88796_93CX6
@@ -640,37 +545,131 @@ static const struct net_device_ops ax_netdev_ops = {
 	.ndo_get_stats		= ax_ei_get_stats,
 	.ndo_set_multicast_list = ax_ei_set_multicast_list,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_change_mtu		= eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= ax_ei_poll,
 #endif
 };
 
+static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level)
+{
+	struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+	if (level)
+		ax->reg_memr |= AX_MEMR_MDC;
+	else
+		ax->reg_memr &= ~AX_MEMR_MDC;
+
+	ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output)
+{
+	struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+	if (output)
+		ax->reg_memr &= ~AX_MEMR_MDIR;
+	else
+		ax->reg_memr |= AX_MEMR_MDIR;
+
+	ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value)
+{
+	struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+	if (value)
+		ax->reg_memr |= AX_MEMR_MDO;
+	else
+		ax->reg_memr &= ~AX_MEMR_MDO;
+
+	ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static int ax_bb_get_data(struct mdiobb_ctrl *ctrl)
+{
+	struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+	int reg_memr = ei_inb(ax->addr_memr);
+
+	return reg_memr & AX_MEMR_MDI ? 1 : 0;
+}
+
+static struct mdiobb_ops bb_ops = {
+	.owner = THIS_MODULE,
+	.set_mdc = ax_bb_mdc,
+	.set_mdio_dir = ax_bb_dir,
+	.set_mdio_data = ax_bb_set_data,
+	.get_mdio_data = ax_bb_get_data,
+};
+
 /* setup code */
 
+static int ax_mii_init(struct net_device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev->dev.parent);
+	struct ei_device *ei_local = netdev_priv(dev);
+	struct ax_device *ax = to_ax_dev(dev);
+	int err, i;
+
+	ax->bb_ctrl.ops = &bb_ops;
+	ax->addr_memr = ei_local->mem + AX_MEMR;
+	ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl);
+	if (!ax->mii_bus) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	ax->mii_bus->name = "ax88796_mii_bus";
+	ax->mii_bus->parent = dev->dev.parent;
+	snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+
+	ax->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+	if (!ax->mii_bus->irq) {
+		err = -ENOMEM;
+		goto out_free_mdio_bitbang;
+	}
+
+	for (i = 0; i < PHY_MAX_ADDR; i++)
+		ax->mii_bus->irq[i] = PHY_POLL;
+
+	err = mdiobus_register(ax->mii_bus);
+	if (err)
+		goto out_free_irq;
+
+	return 0;
+
+ out_free_irq:
+	kfree(ax->mii_bus->irq);
+ out_free_mdio_bitbang:
+	free_mdio_bitbang(ax->mii_bus);
+ out:
+	return err;
+}
+
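ax_mii_init() only supplies the four pin operations; the mdio-bitbang core
serializes IEEE 802.3 Clause 22 management frames over them. For reference, a
sketch of the 14-bit header the core shifts out after the 32-bit preamble
(the two turnaround bits and 16 data bits follow it):

	/* Sketch: Clause 22 frame header. ST=01, OP=10 (read) or 01 (write),
	 * then 5 PHY-address bits and 5 register-address bits, MSB first.
	 */
	static u16 c22_header(int is_read, int phy_addr, int reg_num)
	{
		return (0x1 << 12) |			/* ST    */
		       ((is_read ? 0x2 : 0x1) << 10) |	/* OP    */
		       ((phy_addr & 0x1f) << 5) |	/* PHYAD */
		       (reg_num & 0x1f);		/* REGAD */
	}
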
 static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local)
 {
 	void __iomem *ioaddr = ei_local->mem;
 	struct ax_device *ax = to_ax_dev(dev);
 
-	/* Select page 0*/
-	ei_outb(E8390_NODMA+E8390_PAGE0+E8390_STOP, ioaddr + E8390_CMD);
+	/* Select page 0 */
+	ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_STOP, ioaddr + E8390_CMD);
 
 	/* set to byte access */
 	ei_outb(ax->plat->dcr_val & ~1, ioaddr + EN0_DCFG);
 	ei_outb(ax->plat->gpoc_val, ioaddr + EI_SHIFT(0x17));
 }
 
-/* ax_init_dev
+/*
+ * ax_init_dev
  *
  * initialise the specified device, taking care to note the MAC
  * address it may already have (if configured), ensure
- * the device is ready to be used by lib8390.c and registerd with
+ * the device is ready to be used by lib8390.c and registered with
  * the network layer.
  */
-
-static int ax_init_dev(struct net_device *dev, int first_init)
+static int ax_init_dev(struct net_device *dev)
 {
 	struct ei_device *ei_local = netdev_priv(dev);
 	struct ax_device *ax = to_ax_dev(dev);
@@ -690,23 +689,23 @@ static int ax_init_dev(struct net_device *dev, int first_init)
 
 	/* read the mac from the card prom if we need it */
 
-	if (first_init && ax->plat->flags & AXFLG_HAS_EEPROM) {
+	if (ax->plat->flags & AXFLG_HAS_EEPROM) {
 		unsigned char SA_prom[32];
 
-		for(i = 0; i < sizeof(SA_prom); i+=2) {
+		for (i = 0; i < sizeof(SA_prom); i += 2) {
 			SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT);
-			SA_prom[i+1] = ei_inb(ioaddr + NE_DATAPORT);
+			SA_prom[i + 1] = ei_inb(ioaddr + NE_DATAPORT);
 		}
 
 		if (ax->plat->wordlength == 2)
 			for (i = 0; i < 16; i++)
 				SA_prom[i] = SA_prom[i+i];
 
 		memcpy(dev->dev_addr, SA_prom, 6);
 	}
 
 #ifdef CONFIG_AX88796_93CX6
-	if (first_init && ax->plat->flags & AXFLG_HAS_93CX6) {
+	if (ax->plat->flags & AXFLG_HAS_93CX6) {
 		unsigned char mac_addr[6];
 		struct eeprom_93cx6 eeprom;
 
@@ -719,7 +718,7 @@ static int ax_init_dev(struct net_device *dev, int first_init)
 				       (__le16 __force *)mac_addr,
 				       sizeof(mac_addr) >> 1);
 
 		memcpy(dev->dev_addr, mac_addr, 6);
 	}
 #endif
 	if (ax->plat->wordlength == 2) {
@@ -732,67 +731,56 @@ static int ax_init_dev(struct net_device *dev, int first_init)
 		stop_page = NE1SM_STOP_PG;
 	}
 
-	/* load the mac-address from the device if this is the
-	 * first time we've initialised */
-
-	if (first_init) {
-		if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
-			ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
-				ei_local->mem + E8390_CMD); /* 0x61 */
-			for (i = 0; i < ETHER_ADDR_LEN; i++)
-				dev->dev_addr[i] =
-					ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
-		}
-
-		if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
-		     ax->plat->mac_addr)
-			memcpy(dev->dev_addr, ax->plat->mac_addr,
-				ETHER_ADDR_LEN);
+	/* load the mac-address from the device */
+	if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
+		ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
+			ei_local->mem + E8390_CMD); /* 0x61 */
+		for (i = 0; i < ETHER_ADDR_LEN; i++)
+			dev->dev_addr[i] =
+				ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
 	}
 
+	if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
+	    ax->plat->mac_addr)
+		memcpy(dev->dev_addr, ax->plat->mac_addr,
+		       ETHER_ADDR_LEN);
+
 	ax_reset_8390(dev);
 
-	ei_status.name = "AX88796";
-	ei_status.tx_start_page = start_page;
-	ei_status.stop_page = stop_page;
-	ei_status.word16 = (ax->plat->wordlength == 2);
-	ei_status.rx_start_page = start_page + TX_PAGES;
+	ei_local->name = "AX88796";
+	ei_local->tx_start_page = start_page;
+	ei_local->stop_page = stop_page;
+	ei_local->word16 = (ax->plat->wordlength == 2);
+	ei_local->rx_start_page = start_page + TX_PAGES;
 
 #ifdef PACKETBUF_MEMSIZE
 	/* Allow the packet buffer size to be overridden by know-it-alls. */
-	ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
+	ei_local->stop_page = ei_local->tx_start_page + PACKETBUF_MEMSIZE;
 #endif
 
-	ei_status.reset_8390 = &ax_reset_8390;
-	ei_status.block_input = &ax_block_input;
-	ei_status.block_output = &ax_block_output;
-	ei_status.get_8390_hdr = &ax_get_8390_hdr;
-	ei_status.priv = 0;
-
-	dev->netdev_ops = &ax_netdev_ops;
-	dev->ethtool_ops = &ax_ethtool_ops;
-
-	ax->msg_enable = NETIF_MSG_LINK;
-	ax->mii.phy_id_mask = 0x1f;
-	ax->mii.reg_num_mask = 0x1f;
-	ax->mii.phy_id = 0x10;		/* onboard phy */
-	ax->mii.force_media = 0;
-	ax->mii.full_duplex = 0;
-	ax->mii.mdio_read = ax_phy_read;
-	ax->mii.mdio_write = ax_phy_write;
-	ax->mii.dev = dev;
+	ei_local->reset_8390 = &ax_reset_8390;
+	ei_local->block_input = &ax_block_input;
+	ei_local->block_output = &ax_block_output;
+	ei_local->get_8390_hdr = &ax_get_8390_hdr;
+	ei_local->priv = 0;
 
-	ax_NS8390_init(dev, 0);
+	dev->netdev_ops = &ax_netdev_ops;
+	dev->ethtool_ops = &ax_ethtool_ops;
 
-	if (first_init)
-		dev_info(&ax->dev->dev, "%dbit, irq %d, %lx, MAC: %pM\n",
-			 ei_status.word16 ? 16:8, dev->irq, dev->base_addr,
-			 dev->dev_addr);
+	ret = ax_mii_init(dev);
+	if (ret)
+		goto out_irq;
+
+	ax_NS8390_init(dev, 0);
 
 	ret = register_netdev(dev);
 	if (ret)
 		goto out_irq;
 
+	netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n",
+		    ei_local->word16 ? 16 : 8, dev->irq, dev->base_addr,
+		    dev->dev_addr);
+
 	return 0;
 
  out_irq:
@@ -802,24 +790,24 @@ static int ax_init_dev(struct net_device *dev, int first_init)
 	return ret;
 }
 
-static int ax_remove(struct platform_device *_dev)
+static int ax_remove(struct platform_device *pdev)
 {
-	struct net_device *dev = platform_get_drvdata(_dev);
-	struct ax_device *ax;
-
-	ax = to_ax_dev(dev);
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct ei_device *ei_local = netdev_priv(dev);
+	struct ax_device *ax = to_ax_dev(dev);
+	struct resource *mem;
 
 	unregister_netdev(dev);
 	free_irq(dev->irq, dev);
 
-	iounmap(ei_status.mem);
-	release_resource(ax->mem);
-	kfree(ax->mem);
+	iounmap(ei_local->mem);
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(mem->start, resource_size(mem));
 
 	if (ax->map2) {
 		iounmap(ax->map2);
-		release_resource(ax->mem2);
-		kfree(ax->mem2);
+		mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		release_mem_region(mem->start, resource_size(mem));
 	}
 
 	free_netdev(dev);
@@ -827,19 +815,20 @@ static int ax_remove(struct platform_device *_dev)
 	return 0;
 }
 
-/* ax_probe
+/*
+ * ax_probe
  *
- * This is the entry point when the platform device system uses to
- * notify us of a new device to attach to. Allocate memory, find
- * the resources and information passed, and map the necessary registers.
-*/
-
+ * This is the entry point that the platform device system uses to
+ * notify us of a new device to attach to. Allocate memory, find the
+ * resources and information passed, and map the necessary registers.
+ */
 static int ax_probe(struct platform_device *pdev)
 {
 	struct net_device *dev;
-	struct ax_device *ax;
-	struct resource *res;
-	size_t size;
+	struct ei_device *ei_local;
+	struct ax_device *ax;
+	struct resource *irq, *mem, *mem2;
+	resource_size_t mem_size, mem2_size = 0;
 	int ret = 0;
 
 	dev = ax__alloc_ei_netdev(sizeof(struct ax_device));
@@ -847,120 +836,107 @@ static int ax_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	/* ok, let's setup our device */
+	SET_NETDEV_DEV(dev, &pdev->dev);
+	ei_local = netdev_priv(dev);
 	ax = to_ax_dev(dev);
 
-	memset(ax, 0, sizeof(struct ax_device));
-
-	spin_lock_init(&ax->mii_lock);
-
-	ax->dev = pdev;
 	ax->plat = pdev->dev.platform_data;
 	platform_set_drvdata(pdev, dev);
 
-	ei_status.rxcr_base = ax->plat->rcr_val;
+	ei_local->rxcr_base = ax->plat->rcr_val;
 
 	/* find the platform resources */
-
-	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (res == NULL) {
+	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!irq) {
 		dev_err(&pdev->dev, "no IRQ specified\n");
 		ret = -ENXIO;
 		goto exit_mem;
 	}
 
-	dev->irq = res->start;
-	ax->irqflags = res->flags & IRQF_TRIGGER_MASK;
+	dev->irq = irq->start;
+	ax->irqflags = irq->flags & IRQF_TRIGGER_MASK;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (res == NULL) {
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem) {
 		dev_err(&pdev->dev, "no MEM specified\n");
 		ret = -ENXIO;
 		goto exit_mem;
 	}
 
-	size = (res->end - res->start) + 1;
-
-	/* setup the register offsets from either the platform data
-	 * or by using the size of the resource provided */
+	mem_size = resource_size(mem);
 
+	/*
+	 * setup the register offsets from either the platform data or
+	 * by using the size of the resource provided
+	 */
 	if (ax->plat->reg_offsets)
-		ei_status.reg_offset = ax->plat->reg_offsets;
+		ei_local->reg_offset = ax->plat->reg_offsets;
 	else {
-		ei_status.reg_offset = ax->reg_offsets;
+		ei_local->reg_offset = ax->reg_offsets;
 		for (ret = 0; ret < 0x18; ret++)
-			ax->reg_offsets[ret] = (size / 0x18) * ret;
+			ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
 	}
 
-	ax->mem = request_mem_region(res->start, size, pdev->name);
-	if (ax->mem == NULL) {
+	if (!request_mem_region(mem->start, mem_size, pdev->name)) {
 		dev_err(&pdev->dev, "cannot reserve registers\n");
 		ret = -ENXIO;
 		goto exit_mem;
 	}
 
-	ei_status.mem = ioremap(res->start, size);
-	dev->base_addr = (unsigned long)ei_status.mem;
+	ei_local->mem = ioremap(mem->start, mem_size);
+	dev->base_addr = (unsigned long)ei_local->mem;
 
-	if (ei_status.mem == NULL) {
-		dev_err(&pdev->dev, "Cannot ioremap area (%08llx,%08llx)\n",
-			(unsigned long long)res->start,
-			(unsigned long long)res->end);
+	if (ei_local->mem == NULL) {
+		dev_err(&pdev->dev, "Cannot ioremap area %pR\n", mem);
 
 		ret = -ENXIO;
 		goto exit_req;
 	}
 
 	/* look for reset area */
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	if (res == NULL) {
+	mem2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!mem2) {
 		if (!ax->plat->reg_offsets) {
 			for (ret = 0; ret < 0x20; ret++)
-				ax->reg_offsets[ret] = (size / 0x20) * ret;
+				ax->reg_offsets[ret] = (mem_size / 0x20) * ret;
 		}
-
-		ax->map2 = NULL;
 	} else {
-		size = (res->end - res->start) + 1;
+		mem2_size = resource_size(mem2);
 
-		ax->mem2 = request_mem_region(res->start, size, pdev->name);
-		if (ax->mem2 == NULL) {
+		if (!request_mem_region(mem2->start, mem2_size, pdev->name)) {
 			dev_err(&pdev->dev, "cannot reserve registers\n");
 			ret = -ENXIO;
 			goto exit_mem1;
 		}
 
-		ax->map2 = ioremap(res->start, size);
-		if (ax->map2 == NULL) {
+		ax->map2 = ioremap(mem2->start, mem2_size);
+		if (!ax->map2) {
 			dev_err(&pdev->dev, "cannot map reset register\n");
 			ret = -ENXIO;
 			goto exit_mem2;
 		}
 
-		ei_status.reg_offset[0x1f] = ax->map2 - ei_status.mem;
+		ei_local->reg_offset[0x1f] = ax->map2 - ei_local->mem;
 	}
 
 	/* got resources, now initialise and register device */
-
-	ret = ax_init_dev(dev, 1);
+	ret = ax_init_dev(dev);
 	if (!ret)
 		return 0;
 
-	if (ax->map2 == NULL)
+	if (!ax->map2)
 		goto exit_mem1;
 
 	iounmap(ax->map2);
 
  exit_mem2:
-	release_resource(ax->mem2);
-	kfree(ax->mem2);
+	release_mem_region(mem2->start, mem2_size);
 
  exit_mem1:
-	iounmap(ei_status.mem);
+	iounmap(ei_local->mem);
 
  exit_req:
-	release_resource(ax->mem);
-	kfree(ax->mem);
+	release_mem_region(mem->start, mem_size);
 
  exit_mem:
 	free_netdev(dev);
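The probe path above still pairs every request_mem_region()/ioremap() with a
hand-rolled unwind ladder. The managed devm_* helpers, already available in
kernels of this era, release the regions and mappings automatically on probe
failure or removal; a hedged sketch of the first mapping in that style:

	/* Sketch only: managed-resource form of the register mapping. */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -ENXIO;

	if (!devm_request_mem_region(&pdev->dev, mem->start,
				     resource_size(mem), pdev->name))
		return -EBUSY;

	ei_local->mem = devm_ioremap(&pdev->dev, mem->start,
				     resource_size(mem));
	if (!ei_local->mem)
		return -ENXIO;
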
@@ -974,7 +950,7 @@ static int ax_probe(struct platform_device *pdev)
 static int ax_suspend(struct platform_device *dev, pm_message_t state)
 {
 	struct net_device *ndev = platform_get_drvdata(dev);
 	struct ax_device *ax = to_ax_dev(ndev);
 
 	ax->resume_open = ax->running;
 
@@ -987,7 +963,7 @@ static int ax_suspend(struct platform_device *dev, pm_message_t state)
 static int ax_resume(struct platform_device *pdev)
 {
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct ax_device *ax = to_ax_dev(ndev);
 
 	ax_initial_setup(ndev, netdev_priv(ndev));
 	ax_NS8390_init(ndev, ax->resume_open);
@@ -1001,7 +977,7 @@ static int ax_resume(struct platform_device *pdev)
 
 #else
 #define ax_suspend NULL
 #define ax_resume NULL
 #endif
 
 static struct platform_driver axdrv = {
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index add0b93350dd..ed709a5d07d7 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -225,6 +225,10 @@ struct be_rx_obj {
 	u32 cache_line_barrier[15];
 };
 
+struct be_drv_stats {
+	u8 be_on_die_temperature;
+};
+
 struct be_vf_cfg {
 	unsigned char vf_mac_addr[ETH_ALEN];
 	u32 vf_if_handle;
@@ -234,6 +238,7 @@ struct be_vf_cfg {
 };
 
 #define BE_INVALID_PMAC_ID		0xffffffff
+
 struct be_adapter {
 	struct pci_dev *pdev;
 	struct net_device *netdev;
@@ -269,6 +274,7 @@ struct be_adapter {
 	u32 big_page_size;	/* Compounded page size shared by rx wrbs */
 
 	u8 msix_vec_next_idx;
+	struct be_drv_stats drv_stats;
 
 	struct vlan_group *vlan_grp;
 	u16 vlans_added;
@@ -281,6 +287,7 @@ struct be_adapter {
 	struct be_dma_mem stats_cmd;
 	/* Work queue used to perform periodic tasks like getting statistics */
 	struct delayed_work work;
+	u16 work_counter;
 
 	/* Ethtool knobs and info */
 	bool rx_csum;		/* BE card must perform rx-checksumming */
@@ -298,7 +305,7 @@ struct be_adapter {
 	u32 rx_fc;		/* Rx flow control */
 	u32 tx_fc;		/* Tx flow control */
 	bool ue_detected;
-	bool stats_ioctl_sent;
+	bool stats_cmd_sent;
 	int link_speed;
 	u8 port_type;
 	u8 transceiver;
@@ -311,6 +318,8 @@ struct be_adapter {
 	struct be_vf_cfg vf_cfg[BE_MAX_VF];
 	u8 is_virtfn;
 	u32 sli_family;
+	u8 hba_port_num;
+	u16 pvid;
 };
 
 #define be_physfn(adapter) (!adapter->is_virtfn)
@@ -450,9 +459,8 @@ static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
 	mac[5] = (u8)(addr & 0xFF);
 	mac[4] = (u8)((addr >> 8) & 0xFF);
 	mac[3] = (u8)((addr >> 16) & 0xFF);
-	mac[2] = 0xC9;
-	mac[1] = 0x00;
-	mac[0] = 0x00;
+	/* Use the OUI from the current MAC address */
+	memcpy(mac, adapter->netdev->dev_addr, 3);
 }
 
 extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
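The effect of the change above: a generated VF MAC now shares its first three
bytes (the OUI) with the adapter's current address instead of the hard-coded
00:00:c9 prefix. A small sketch of the resulting layout:

	/* Sketch: with a PF MAC of 00:11:22:33:44:55, the generator now
	 * yields 00:11:22:xx:yy:zz, the low three bytes still derived
	 * from the computed "addr" value above.
	 */
	memcpy(mac, adapter->netdev->dev_addr, 3);	/* OUI: 00:11:22 */
	mac[3] = (u8)((addr >> 16) & 0xFF);
	mac[4] = (u8)((addr >> 8) & 0xFF);
	mac[5] = (u8)(addr & 0xFF);
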
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 0c7811faf72c..1822ecdadc7e 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -18,11 +18,20 @@
18#include "be.h" 18#include "be.h"
19#include "be_cmds.h" 19#include "be_cmds.h"
20 20
21/* Must be a power of 2 or else MODULO will BUG_ON */
22static int be_get_temp_freq = 32;
23
21static void be_mcc_notify(struct be_adapter *adapter) 24static void be_mcc_notify(struct be_adapter *adapter)
22{ 25{
23 struct be_queue_info *mccq = &adapter->mcc_obj.q; 26 struct be_queue_info *mccq = &adapter->mcc_obj.q;
24 u32 val = 0; 27 u32 val = 0;
25 28
29 if (adapter->eeh_err) {
30 dev_info(&adapter->pdev->dev,
31 "Error in Card Detected! Cannot issue commands\n");
32 return;
33 }
34
26 val |= mccq->id & DB_MCCQ_RING_ID_MASK; 35 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
27 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; 36 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
28 37
@@ -75,7 +84,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
 			be_dws_le_to_cpu(&resp->hw_stats,
 					 sizeof(resp->hw_stats));
 			netdev_stats_update(adapter);
-			adapter->stats_ioctl_sent = false;
+			adapter->stats_cmd_sent = false;
 		}
 	} else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
 		   (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
@@ -102,6 +111,7 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
 {
 	if (evt->valid) {
 		adapter->vlan_prio_bmap = evt->available_priority_bmap;
+		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
 		adapter->recommended_prio =
 			evt->reco_default_priority << VLAN_PRIO_SHIFT;
 	}
@@ -117,6 +127,16 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
 	}
 }
 
+/* Grp5 PVID evt */
+static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
+		struct be_async_event_grp5_pvid_state *evt)
+{
+	if (evt->enabled)
+		adapter->pvid = evt->tag;
+	else
+		adapter->pvid = 0;
+}
+
 static void be_async_grp5_evt_process(struct be_adapter *adapter,
 		u32 trailer, struct be_mcc_compl *evt)
 {
@@ -134,6 +154,10 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
 		be_async_grp5_qos_speed_process(adapter,
 		(struct be_async_event_grp5_qos_link_speed *)evt);
 		break;
+	case ASYNC_EVENT_PVID_STATE:
+		be_async_grp5_pvid_state_process(adapter,
+		(struct be_async_event_grp5_pvid_state *)evt);
+		break;
 	default:
 		dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
 		break;
@@ -216,6 +240,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
 	int i, num, status = 0;
 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 
+	if (adapter->eeh_err)
+		return -EIO;
+
 	for (i = 0; i < mcc_timeout; i++) {
 		num = be_process_mcc(adapter, &status);
 		if (num)
@@ -245,6 +272,12 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
 	int msecs = 0;
 	u32 ready;
 
+	if (adapter->eeh_err) {
+		dev_err(&adapter->pdev->dev,
+			"Error detected in card. Cannot issue commands\n");
+		return -EIO;
+	}
+
 	do {
 		ready = ioread32(db);
 		if (ready == 0xffffffff) {
@@ -598,7 +631,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
 
 /* Uses synchronous MCCQ */
 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
-		u32 if_id, u32 *pmac_id)
+		u32 if_id, u32 *pmac_id, u32 domain)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_pmac_add *req;
@@ -619,6 +652,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
 
+	req->hdr.domain = domain;
 	req->if_id = cpu_to_le32(if_id);
 	memcpy(req->mac_address, mac_addr, ETH_ALEN);
 
@@ -634,7 +668,7 @@ err:
 }
 
 /* Uses synchronous MCCQ */
-int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_pmac_del *req;
@@ -655,6 +689,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
 
+	req->hdr.domain = dom;
 	req->if_id = cpu_to_le32(if_id);
 	req->pmac_id = cpu_to_le32(pmac_id);
 
@@ -995,7 +1030,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
 }
 
 /* Uses mbox */
-int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
+int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_if_destroy *req;
@@ -1016,6 +1051,7 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
 
+	req->hdr.domain = domain;
 	req->interface_id = cpu_to_le32(interface_id);
 
 	status = be_mbox_notify_wait(adapter);
@@ -1036,6 +1072,9 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
 	struct be_sge *sge;
 	int status = 0;
 
+	if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
+		be_cmd_get_die_temperature(adapter);
+
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
@@ -1056,7 +1095,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
 	sge->len = cpu_to_le32(nonemb_cmd->size);
 
 	be_mcc_notify(adapter);
-	adapter->stats_ioctl_sent = true;
+	adapter->stats_cmd_sent = true;
 
 err:
 	spin_unlock_bh(&adapter->mcc_lock);
@@ -1103,6 +1142,44 @@ err:
 	return status;
 }
 
+/* Uses synchronous mcc */
+int be_cmd_get_die_temperature(struct be_adapter *adapter)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_get_cntl_addnl_attribs *req;
+	int status;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+	req = embedded_payload(wrb);
+
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+			OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);
+
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+		OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
+
+	status = be_mcc_notify_wait(adapter);
+	if (!status) {
+		struct be_cmd_resp_get_cntl_addnl_attribs *resp =
+						embedded_payload(wrb);
+		adapter->drv_stats.be_on_die_temperature =
+						resp->on_die_temperature;
+	} else {
+		/* If the command fails once, do not bother issuing it again */
+		be_get_temp_freq = 0;
+	}
+
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+
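be_cmd_get_stats() piggybacks this query on the periodic worker, issuing it
once every be_get_temp_freq ticks and disabling it entirely after the first
failure. A hedged sketch of the rate-limiting idiom, assuming MODULO(m, n)
reduces m modulo a power-of-two n (hence the BUG_ON warning in the comment
near the top of this file); my_periodic_task is a placeholder:

	/* Sketch: run an expensive query once per N worker invocations. */
	static void my_periodic_task(struct be_adapter *adapter)
	{
		if (be_get_temp_freq &&
		    MODULO(adapter->work_counter, be_get_temp_freq) == 0)
			be_cmd_get_die_temperature(adapter);

		adapter->work_counter++;
	}
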
 /* Uses Mbox */
 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
 {
@@ -1786,6 +1863,10 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
 	req = nonemb_cmd->va;
 	sge = nonembedded_sgl(wrb);
 
@@ -1801,6 +1882,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
 
 	status = be_mcc_notify_wait(adapter);
 
+err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
@@ -1863,8 +1945,8 @@ int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
 		OPCODE_COMMON_SET_QOS, sizeof(*req));
 
 	req->hdr.domain = domain;
-	req->valid_bits = BE_QOS_BITS_NIC;
-	req->max_bps_nic = bps;
+	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
+	req->max_bps_nic = cpu_to_le32(bps);
 
 	status = be_mcc_notify_wait(adapter);
 
@@ -1872,3 +1954,57 @@ err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
+
+int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_cntl_attribs *req;
+	struct be_cmd_resp_cntl_attribs *resp;
+	struct be_sge *sge;
+	int status;
+	int payload_len = max(sizeof(*req), sizeof(*resp));
+	struct mgmt_controller_attrib *attribs;
+	struct be_dma_mem attribs_cmd;
+
+	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
+	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
+	attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
+						&attribs_cmd.dma);
+	if (!attribs_cmd.va) {
+		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
+		return -ENOMEM;
+	}
+
+	/* free the DMA buffer on the early-exit path too */
+	if (mutex_lock_interruptible(&adapter->mbox_lock)) {
+		status = -1;
+		goto free_mem;
+	}
+
+	wrb = wrb_from_mbox(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+	req = attribs_cmd.va;
+	sge = nonembedded_sgl(wrb);
+
+	be_wrb_hdr_prepare(wrb, payload_len, false, 1,
+			OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
+	sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
+	sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
+	sge->len = cpu_to_le32(attribs_cmd.size);
+
+	status = be_mbox_notify_wait(adapter);
+	if (!status) {
+		attribs = (struct mgmt_controller_attrib *)(attribs_cmd.va +
+					sizeof(struct be_cmd_resp_hdr));
+		adapter->hba_port_num = attribs->hba_attribs.phy_port;
+	}
+
+err:
+	mutex_unlock(&adapter->mbox_lock);
+free_mem:
+	pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
+					attribs_cmd.dma);
+	return status;
+}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 83d15c8a9fa3..93e5768fc705 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -88,6 +88,7 @@ struct be_mcc_compl {
 #define ASYNC_EVENT_CODE_GRP_5		0x5
 #define ASYNC_EVENT_QOS_SPEED		0x1
 #define ASYNC_EVENT_COS_PRIORITY	0x2
+#define ASYNC_EVENT_PVID_STATE		0x3
 struct be_async_event_trailer {
 	u32 code;
 };
@@ -134,6 +135,18 @@ struct be_async_event_grp5_cos_priority {
 	struct be_async_event_trailer trailer;
 } __packed;
 
+/* When the event code of an async trailer is GRP5 and event type is
+ * PVID state, the mcc_compl must be interpreted as follows
+ */
+struct be_async_event_grp5_pvid_state {
+	u8 enabled;
+	u8 rsvd0;
+	u16 tag;
+	u32 event_tag;
+	u32 rsvd1;
+	struct be_async_event_trailer trailer;
+} __packed;
+
 struct be_mcc_mailbox {
 	struct be_mcc_wrb wrb;
 	struct be_mcc_compl compl;
@@ -156,6 +169,7 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_SET_QOS				28
 #define OPCODE_COMMON_MCC_CREATE_EXT			90
 #define OPCODE_COMMON_SEEPROM_READ			30
+#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES		32
 #define OPCODE_COMMON_NTWK_RX_FILTER			34
 #define OPCODE_COMMON_GET_FW_VERSION			35
 #define OPCODE_COMMON_SET_FLOW_CONTROL			36
@@ -176,6 +190,7 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_GET_BEACON_STATE			70
 #define OPCODE_COMMON_READ_TRANSRECV_DATA		73
 #define OPCODE_COMMON_GET_PHY_DETAILS			102
+#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES	121
 
 #define OPCODE_ETH_RSS_CONFIG				1
 #define OPCODE_ETH_ACPI_CONFIG				2
@@ -619,7 +634,10 @@ struct be_rxf_stats {
 	u32 rx_drops_invalid_ring;	/* dword 145*/
 	u32 forwarded_packets;		/* dword 146*/
 	u32 rx_drops_mtu;		/* dword 147*/
-	u32 rsvd0[15];
+	u32 rsvd0[7];
+	u32 port0_jabber_events;
+	u32 port1_jabber_events;
+	u32 rsvd1[6];
 };
 
 struct be_erx_stats {
@@ -630,11 +648,16 @@ struct be_erx_stats {
 	u32 debug_pmem_pbuf_dealloc;	/* dword 47*/
 };
 
+struct be_pmem_stats {
+	u32 eth_red_drops;
+	u32 rsvd[4];
+};
+
 struct be_hw_stats {
 	struct be_rxf_stats rxf;
 	u32 rsvd[48];
 	struct be_erx_stats erx;
-	u32 rsvd1[6];
+	struct be_pmem_stats pmem;
 };
 
 struct be_cmd_req_get_stats {
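Note that the rxf reshuffle above is layout-preserving: rsvd0[15] became
rsvd0[7] plus two jabber counters plus rsvd1[6], still fifteen dwords, so the
offsets of the fields that follow are unchanged. A compile-time guard for
this kind of firmware-visible layout is cheap; a hedged sketch (both checks
follow directly from the declarations above):

	/* Sketch: pin the wire layout so a reshuffle cannot silently
	 * shift later fields.
	 */
	static inline void be_stats_layout_check(void)
	{
		BUILD_BUG_ON(sizeof(((struct be_rxf_stats *)0)->rsvd0) !=
			     7 * sizeof(u32));
		BUILD_BUG_ON(sizeof(struct be_pmem_stats) != 5 * sizeof(u32));
	}
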
@@ -647,6 +670,20 @@ struct be_cmd_resp_get_stats {
 	struct be_hw_stats hw_stats;
 };
 
+struct be_cmd_req_get_cntl_addnl_attribs {
+	struct be_cmd_req_hdr hdr;
+	u8 rsvd[8];
+};
+
+struct be_cmd_resp_get_cntl_addnl_attribs {
+	struct be_cmd_resp_hdr hdr;
+	u16 ipl_file_number;
+	u8 ipl_file_version;
+	u8 rsvd0;
+	u8 on_die_temperature;	/* in degrees centigrade */
+	u8 rsvd1[3];
+};
+
 struct be_cmd_req_vlan_config {
 	struct be_cmd_req_hdr hdr;
 	u8 interface_id;
@@ -994,17 +1031,29 @@ struct be_cmd_resp_set_qos {
 	u32 rsvd;
 };
 
+/*********************** Controller Attributes ***********************/
+struct be_cmd_req_cntl_attribs {
+	struct be_cmd_req_hdr hdr;
+};
+
+struct be_cmd_resp_cntl_attribs {
+	struct be_cmd_resp_hdr hdr;
+	struct mgmt_controller_attrib attribs;
+};
+
 extern int be_pci_fnum_get(struct be_adapter *adapter);
 extern int be_cmd_POST(struct be_adapter *adapter);
 extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
 			u8 type, bool permanent, u32 if_handle);
 extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
-			u32 if_id, u32 *pmac_id);
-extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
+			u32 if_id, u32 *pmac_id, u32 domain);
+extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
+			u32 pmac_id, u32 domain);
 extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
 			u32 en_flags, u8 *mac, bool pmac_invalid,
 			u32 *if_handle, u32 *pmac_id, u32 domain);
-extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
+extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
+			u32 domain);
 extern int be_cmd_eq_create(struct be_adapter *adapter,
 			struct be_queue_info *eq, int eq_delay);
 extern int be_cmd_cq_create(struct be_adapter *adapter,
@@ -1076,4 +1125,6 @@ extern int be_cmd_get_phy_info(struct be_adapter *adapter,
 			struct be_dma_mem *cmd);
 extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
 extern void be_detect_dump_ue(struct be_adapter *adapter);
+extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
+extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
 
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index b4be0271efe0..6e5e43380c2a 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -26,7 +26,8 @@ struct be_ethtool_stat {
 	int offset;
 };
 
-enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT};
+enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT,
+	PMEMSTAT, DRVSTAT};
 #define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
 					offsetof(_struct, field)
 #define NETSTAT_INFO(field)	#field, NETSTAT,\
@@ -43,6 +44,11 @@ enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT};
 					field)
 #define ERXSTAT_INFO(field)	#field, ERXSTAT,\
 					FIELDINFO(struct be_erx_stats, field)
+#define PMEMSTAT_INFO(field)	#field, PMEMSTAT,\
+					FIELDINFO(struct be_pmem_stats, field)
+#define DRVSTAT_INFO(field)	#field, DRVSTAT,\
+					FIELDINFO(struct be_drv_stats, field)
 
 static const struct be_ethtool_stat et_stats[] = {
 	{NETSTAT_INFO(rx_packets)},
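The *_INFO macros keep the stats table self-describing: each entry expands to
a name string, a source-selector enum value, and a size/offset pair that
be_get_ethtool_stats() uses to locate the field. Conceptually, the new
DRVSTAT entry expands to:

	/* Conceptual expansion of {DRVSTAT_INFO(be_on_die_temperature)} */
	{
		"be_on_die_temperature",	/* desc */
		DRVSTAT,			/* type */
		FIELD_SIZEOF(struct be_drv_stats, be_on_die_temperature),
		offsetof(struct be_drv_stats, be_on_die_temperature)
	}
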
@@ -99,7 +105,11 @@ static const struct be_ethtool_stat et_stats[] = {
 	{MISCSTAT_INFO(rx_drops_too_many_frags)},
 	{MISCSTAT_INFO(rx_drops_invalid_ring)},
 	{MISCSTAT_INFO(forwarded_packets)},
-	{MISCSTAT_INFO(rx_drops_mtu)}
+	{MISCSTAT_INFO(rx_drops_mtu)},
+	{MISCSTAT_INFO(port0_jabber_events)},
+	{MISCSTAT_INFO(port1_jabber_events)},
+	{PMEMSTAT_INFO(eth_red_drops)},
+	{DRVSTAT_INFO(be_on_die_temperature)}
 };
 #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
 
@@ -121,7 +131,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
121 "MAC Loopback test", 131 "MAC Loopback test",
122 "PHY Loopback test", 132 "PHY Loopback test",
123 "External Loopback test", 133 "External Loopback test",
124 "DDR DMA test" 134 "DDR DMA test",
125 "Link test" 135 "Link test"
126}; 136};
127 137
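The one-character fix above deserves a note: without the comma, C's
adjacent-string-literal concatenation silently fused "DDR DMA test" and
"Link test" into a single array element, so the self-test name table had one
fewer entry than there were tests. A standalone demonstration:

	#include <stdio.h>

	int main(void)
	{
		const char *broken[] = { "DDR DMA test" "Link test" };
		const char *fixed[]  = { "DDR DMA test", "Link test" };

		/* prints "1 vs 2" */
		printf("%zu vs %zu\n", sizeof(broken) / sizeof(broken[0]),
		       sizeof(fixed) / sizeof(fixed[0]));
		return 0;
	}
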
@@ -276,6 +286,12 @@ be_get_ethtool_stats(struct net_device *netdev,
 		case MISCSTAT:
 			p = &hw_stats->rxf;
 			break;
+		case PMEMSTAT:
+			p = &hw_stats->pmem;
+			break;
+		case DRVSTAT:
+			p = &adapter->drv_stats;
+			break;
 		}
 
 		p = (u8 *)p + et_stats[i].offset;
@@ -376,8 +392,9 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 	}
 
 	phy_cmd.size = sizeof(struct be_cmd_req_get_phy_info);
-	phy_cmd.va = pci_alloc_consistent(adapter->pdev, phy_cmd.size,
-				&phy_cmd.dma);
+	phy_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+					phy_cmd.size, &phy_cmd.dma,
+					GFP_KERNEL);
 	if (!phy_cmd.va) {
 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
 		return -ENOMEM;
@@ -416,8 +433,8 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 		adapter->port_type = ecmd->port;
 		adapter->transceiver = ecmd->transceiver;
 		adapter->autoneg = ecmd->autoneg;
-		pci_free_consistent(adapter->pdev, phy_cmd.size,
-					phy_cmd.va, phy_cmd.dma);
+		dma_free_coherent(&adapter->pdev->dev, phy_cmd.size, phy_cmd.va,
+				  phy_cmd.dma);
 	} else {
 		ecmd->speed = adapter->link_speed;
 		ecmd->port = adapter->port_type;
@@ -496,7 +513,7 @@ be_phys_id(struct net_device *netdev, u32 data)
 	int status;
 	u32 cur;
 
-	be_cmd_get_beacon_state(adapter, adapter->port_num, &cur);
+	be_cmd_get_beacon_state(adapter, adapter->hba_port_num, &cur);
 
 	if (cur == BEACON_STATE_ENABLED)
 		return 0;
@@ -504,23 +521,34 @@ be_phys_id(struct net_device *netdev, u32 data)
 	if (data < 2)
 		data = 2;
 
-	status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
+	status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
 			BEACON_STATE_ENABLED);
 	set_current_state(TASK_INTERRUPTIBLE);
 	schedule_timeout(data*HZ);
 
-	status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
+	status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
 			BEACON_STATE_DISABLED);
 
 	return status;
 }
 
+static bool
+be_is_wol_supported(struct be_adapter *adapter)
+{
+	if (!be_physfn(adapter))
+		return false;
+	else
+		return true;
+}
+
 static void
 be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
-	wol->supported = WAKE_MAGIC;
+	if (be_is_wol_supported(adapter))
+		wol->supported = WAKE_MAGIC;
+
 	if (adapter->wol)
 		wol->wolopts = WAKE_MAGIC;
 	else
@@ -536,7 +564,7 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	if (wol->wolopts & ~WAKE_MAGIC)
 		return -EINVAL;
 
-	if (wol->wolopts & WAKE_MAGIC)
+	if ((wol->wolopts & WAKE_MAGIC) && be_is_wol_supported(adapter))
 		adapter->wol = true;
 	else
 		adapter->wol = false;
@@ -554,8 +582,8 @@ be_test_ddr_dma(struct be_adapter *adapter)
 	};
 
 	ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
-	ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
-					&ddrdma_cmd.dma);
+	ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
+					   &ddrdma_cmd.dma, GFP_KERNEL);
 	if (!ddrdma_cmd.va) {
 		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
 		return -ENOMEM;
@@ -569,20 +597,20 @@ be_test_ddr_dma(struct be_adapter *adapter)
 	}
 
 err:
-	pci_free_consistent(adapter->pdev, ddrdma_cmd.size,
-			ddrdma_cmd.va, ddrdma_cmd.dma);
+	dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
+			  ddrdma_cmd.dma);
 	return ret;
 }
 
 static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
 			u64 *status)
 {
-	be_cmd_set_loopback(adapter, adapter->port_num,
+	be_cmd_set_loopback(adapter, adapter->hba_port_num,
 				loopback_type, 1);
-	*status = be_cmd_loopback_test(adapter, adapter->port_num,
+	*status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
 				loopback_type, 1500,
 				2, 0xabc);
-	be_cmd_set_loopback(adapter, adapter->port_num,
+	be_cmd_set_loopback(adapter, adapter->hba_port_num,
 				BE_NO_LOOPBACK, 1);
 	return *status;
 }
@@ -621,7 +649,8 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
 				&qos_link_speed) != 0) {
 		test->flags |= ETH_TEST_FL_FAILED;
 		data[4] = -1;
-	} else if (mac_speed) {
+	} else if (!mac_speed) {
+		test->flags |= ETH_TEST_FL_FAILED;
 		data[4] = 1;
 	}
 }
@@ -662,8 +691,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
 
 	memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
 	eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
-	eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size,
-				&eeprom_cmd.dma);
+	eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
+					   &eeprom_cmd.dma, GFP_KERNEL);
 
 	if (!eeprom_cmd.va) {
 		dev_err(&adapter->pdev->dev,
@@ -677,8 +706,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
 		resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
 		memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
 	}
-	pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va,
-			eeprom_cmd.dma);
+	dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
+			  eeprom_cmd.dma);
 
 	return status;
 }
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 4096d9778234..3f459f76cd1d 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -327,6 +327,53 @@ struct be_eth_rx_compl {
 	u32 dw[4];
 };
 
+struct mgmt_hba_attribs {
+	u8 flashrom_version_string[32];
+	u8 manufacturer_name[32];
+	u32 supported_modes;
+	u32 rsvd0[3];
+	u8 ncsi_ver_string[12];
+	u32 default_extended_timeout;
+	u8 controller_model_number[32];
+	u8 controller_description[64];
+	u8 controller_serial_number[32];
+	u8 ip_version_string[32];
+	u8 firmware_version_string[32];
+	u8 bios_version_string[32];
+	u8 redboot_version_string[32];
+	u8 driver_version_string[32];
+	u8 fw_on_flash_version_string[32];
+	u32 functionalities_supported;
+	u16 max_cdblength;
+	u8 asic_revision;
+	u8 generational_guid[16];
+	u8 hba_port_count;
+	u16 default_link_down_timeout;
+	u8 iscsi_ver_min_max;
+	u8 multifunction_device;
+	u8 cache_valid;
+	u8 hba_status;
+	u8 max_domains_supported;
+	u8 phy_port;
+	u32 firmware_post_status;
+	u32 hba_mtu[8];
+	u32 rsvd1[4];
+};
+
+struct mgmt_controller_attrib {
+	struct mgmt_hba_attribs hba_attribs;
+	u16 pci_vendor_id;
+	u16 pci_device_id;
+	u16 pci_sub_vendor_id;
+	u16 pci_sub_system_id;
+	u8 pci_bus_number;
+	u8 pci_device_number;
+	u8 pci_function_number;
+	u8 interface_type;
+	u64 unique_identifier;
+	u32 rsvd0[5];
+};
+
 struct controller_id {
 	u32 vendor;
 	u32 device;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index de40d3b7152f..0bdccb10aac5 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -125,8 +125,8 @@ static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
 {
 	struct be_dma_mem *mem = &q->dma_mem;
 	if (mem->va)
-		pci_free_consistent(adapter->pdev, mem->size,
-			mem->va, mem->dma);
+		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+				  mem->dma);
 }
 
 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
@@ -138,7 +138,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
 	q->len = len;
 	q->entry_size = entry_size;
 	mem->size = len * entry_size;
-	mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
+	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
+				     GFP_KERNEL);
 	if (!mem->va)
 		return -1;
 	memset(mem->va, 0, mem->size);
@@ -235,12 +236,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
 	if (!be_physfn(adapter))
 		goto netdev_addr;
 
-	status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
+	status = be_cmd_pmac_del(adapter, adapter->if_handle,
+				adapter->pmac_id, 0);
 	if (status)
 		return status;
 
 	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
-			adapter->if_handle, &adapter->pmac_id);
+			adapter->if_handle, &adapter->pmac_id, 0);
 netdev_addr:
 	if (!status)
 		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -312,11 +314,9 @@ void be_link_status_update(struct be_adapter *adapter, bool link_up)
 	if (adapter->link_up != link_up) {
 		adapter->link_speed = -1;
 		if (link_up) {
-			netif_start_queue(netdev);
 			netif_carrier_on(netdev);
 			printk(KERN_INFO "%s: Link up\n", netdev->name);
 		} else {
-			netif_stop_queue(netdev);
 			netif_carrier_off(netdev);
 			printk(KERN_INFO "%s: Link down\n", netdev->name);
 		}
@@ -486,7 +486,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
 	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
 }
 
-static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
+static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
 		bool unmap_single)
 {
 	dma_addr_t dma;
@@ -496,11 +496,10 @@ static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
 	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
 	if (wrb->frag_len) {
 		if (unmap_single)
-			pci_unmap_single(pdev, dma, wrb->frag_len,
-				PCI_DMA_TODEVICE);
+			dma_unmap_single(dev, dma, wrb->frag_len,
+					 DMA_TO_DEVICE);
 		else
-			pci_unmap_page(pdev, dma, wrb->frag_len,
-				PCI_DMA_TODEVICE);
+			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
 	}
 }
 
@@ -509,7 +508,7 @@ static int make_tx_wrbs(struct be_adapter *adapter,
 {
 	dma_addr_t busaddr;
 	int i, copied = 0;
-	struct pci_dev *pdev = adapter->pdev;
+	struct device *dev = &adapter->pdev->dev;
 	struct sk_buff *first_skb = skb;
 	struct be_queue_info *txq = &adapter->tx_obj.q;
 	struct be_eth_wrb *wrb;
@@ -523,9 +522,8 @@ static int make_tx_wrbs(struct be_adapter *adapter,
 
 	if (skb->len > skb->data_len) {
 		int len = skb_headlen(skb);
-		busaddr = pci_map_single(pdev, skb->data, len,
-				PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(pdev, busaddr))
+		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, busaddr))
 			goto dma_err;
 		map_single = true;
 		wrb = queue_head_node(txq);
@@ -538,10 +536,9 @@ static int make_tx_wrbs(struct be_adapter *adapter,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		struct skb_frag_struct *frag =
 			&skb_shinfo(skb)->frags[i];
-		busaddr = pci_map_page(pdev, frag->page,
-				frag->page_offset,
-				frag->size, PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(pdev, busaddr))
+		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
+				       frag->size, DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, busaddr))
 			goto dma_err;
 		wrb = queue_head_node(txq);
 		wrb_fill(wrb, busaddr, frag->size);
@@ -565,7 +562,7 @@ dma_err:
 	txq->head = map_head;
 	while (copied) {
 		wrb = queue_head_node(txq);
-		unmap_tx_frag(pdev, wrb, map_single);
+		unmap_tx_frag(dev, wrb, map_single);
 		map_single = false;
 		copied -= wrb->frag_len;
 		queue_head_inc(txq);
@@ -745,11 +742,11 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
 		status = be_cmd_pmac_del(adapter,
 					adapter->vf_cfg[vf].vf_if_handle,
-					adapter->vf_cfg[vf].vf_pmac_id);
+					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
 
 	status = be_cmd_pmac_add(adapter, mac,
 				adapter->vf_cfg[vf].vf_if_handle,
-				&adapter->vf_cfg[vf].vf_pmac_id);
+				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
 
 	if (status)
 		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
@@ -824,7 +821,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
 		rate = 10000;
 
 	adapter->vf_cfg[vf].vf_tx_rate = rate;
-	status = be_cmd_set_qos(adapter, rate / 10, vf);
+	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
 
 	if (status)
 		dev_info(&adapter->pdev->dev,
@@ -890,8 +887,9 @@ get_rx_page_info(struct be_adapter *adapter,
 	BUG_ON(!rx_page_info->page);
 
 	if (rx_page_info->last_page_user) {
-		pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
-			adapter->big_page_size, PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&adapter->pdev->dev,
+			       dma_unmap_addr(rx_page_info, bus),
+			       adapter->big_page_size, DMA_FROM_DEVICE);
 		rx_page_info->last_page_user = false;
 	}
 
@@ -1049,6 +1047,9 @@ static void be_rx_compl_process(struct be_adapter *adapter,
 	if ((adapter->function_mode & 0x400) && !vtm)
 		vlanf = 0;
 
+	if ((adapter->pvid == vlanf) && !adapter->vlan_tag[vlanf])
+		vlanf = 0;
+
 	if (unlikely(vlanf)) {
 		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
 			kfree_skb(skb);
@@ -1089,6 +1090,9 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
 	if ((adapter->function_mode & 0x400) && !vtm)
 		vlanf = 0;
 
+	if ((adapter->pvid == vlanf) && !adapter->vlan_tag[vlanf])
+		vlanf = 0;
+
 	skb = napi_get_frags(&eq_obj->napi);
 	if (!skb) {
 		be_rx_compl_discard(adapter, rxo, rxcp);
@@ -1197,9 +1201,9 @@ static void be_post_rx_frags(struct be_rx_obj *rxo)
 			rxo->stats.rx_post_fail++;
 			break;
 		}
-		page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
-					adapter->big_page_size,
-					PCI_DMA_FROMDEVICE);
+		page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
+					    0, adapter->big_page_size,
+					    DMA_FROM_DEVICE);
 		page_info->page_offset = 0;
 	} else {
 		get_page(pagep);
@@ -1272,8 +1276,8 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
 	do {
 		cur_index = txq->tail;
 		wrb = queue_tail_node(txq);
-		unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
-					skb_headlen(sent_skb)));
+		unmap_tx_frag(&adapter->pdev->dev, wrb,
+			      (unmap_skb_hdr && skb_headlen(sent_skb)));
 		unmap_skb_hdr = false;
 
 		num_wrbs++;
@@ -1829,6 +1833,7 @@ void be_detect_dump_ue(struct be_adapter *adapter)
 
 	if (ue_status_lo || ue_status_hi) {
 		adapter->ue_detected = true;
+		adapter->eeh_err = true;
 		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
 	}
 
@@ -1867,10 +1872,14 @@ static void be_worker(struct work_struct *work)
 		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 		be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
 	}
+
+	if (!adapter->ue_detected && !lancer_chip(adapter))
+		be_detect_dump_ue(adapter);
+
 		goto reschedule;
 	}
 
-	if (!adapter->stats_ioctl_sent)
+	if (!adapter->stats_cmd_sent)
 		be_cmd_get_stats(adapter, &adapter->stats_cmd);
 
 	be_tx_rate_update(adapter);
@@ -2181,7 +2190,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
 	memset(mac, 0, ETH_ALEN);
 
 	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
-	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+				    GFP_KERNEL);
 	if (cmd.va == NULL)
 		return -1;
 	memset(cmd.va, 0, cmd.size);
@@ -2192,8 +2202,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
 	if (status) {
 		dev_err(&adapter->pdev->dev,
 				"Could not enable Wake-on-lan\n");
-		pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
-				cmd.dma);
+		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+				  cmd.dma);
 		return status;
 	}
 	status = be_cmd_enable_magic_wol(adapter,
@@ -2206,7 +2216,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
 		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
 	}
 
-	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
 	return status;
 }
 
@@ -2227,7 +2237,8 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
 	for (vf = 0; vf < num_vfs; vf++) {
 		status = be_cmd_pmac_add(adapter, mac,
 					adapter->vf_cfg[vf].vf_if_handle,
-					&adapter->vf_cfg[vf].vf_pmac_id);
+					&adapter->vf_cfg[vf].vf_pmac_id,
+					vf + 1);
 		if (status)
 			dev_err(&adapter->pdev->dev,
 				"Mac address add failed for VF %d\n", vf);
@@ -2247,7 +2258,7 @@ static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
 	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
 		be_cmd_pmac_del(adapter,
 				adapter->vf_cfg[vf].vf_if_handle,
-				adapter->vf_cfg[vf].vf_pmac_id);
+				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
 	}
 }
 
@@ -2279,22 +2290,26 @@ static int be_setup(struct be_adapter *adapter)
 		goto do_none;
 
 	if (be_physfn(adapter)) {
-		while (vf < num_vfs) {
-			cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
-					| BE_IF_FLAGS_BROADCAST;
-			status = be_cmd_if_create(adapter, cap_flags, en_flags,
-					mac, true,
+		if (adapter->sriov_enabled) {
+			while (vf < num_vfs) {
+				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
+						BE_IF_FLAGS_BROADCAST;
+				status = be_cmd_if_create(adapter, cap_flags,
+					en_flags, mac, true,
 					&adapter->vf_cfg[vf].vf_if_handle,
 					NULL, vf+1);
 			if (status) {
 				dev_err(&adapter->pdev->dev,
-				"Interface Create failed for VF %d\n", vf);
-				goto if_destroy;
+					"Interface Create failed for VF %d\n",
+					vf);
+				goto if_destroy;
+				}
+				adapter->vf_cfg[vf].vf_pmac_id =
+						BE_INVALID_PMAC_ID;
+				vf++;
 			}
-			adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
-			vf++;
 		}
-	} else if (!be_physfn(adapter)) {
+	} else {
 		status = be_cmd_mac_addr_query(adapter, mac,
 			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
 		if (!status) {
@@ -2315,44 +2330,46 @@ static int be_setup(struct be_adapter *adapter)
 	if (status != 0)
 		goto rx_qs_destroy;
 
-	if (be_physfn(adapter)) {
-		status = be_vf_eth_addr_config(adapter);
-		if (status)
-			goto mcc_q_destroy;
-	}
-
 	adapter->link_speed = -1;
 
 	return 0;
 
-mcc_q_destroy:
-	if (be_physfn(adapter))
-		be_vf_eth_addr_rem(adapter);
 	be_mcc_queues_destroy(adapter);
 rx_qs_destroy:
 	be_rx_queues_destroy(adapter);
 tx_qs_destroy:
 	be_tx_queues_destroy(adapter);
 if_destroy:
-	for (vf = 0; vf < num_vfs; vf++)
-		if (adapter->vf_cfg[vf].vf_if_handle)
-			be_cmd_if_destroy(adapter,
-				adapter->vf_cfg[vf].vf_if_handle);
-	be_cmd_if_destroy(adapter, adapter->if_handle);
+	if (be_physfn(adapter) && adapter->sriov_enabled)
+		for (vf = 0; vf < num_vfs; vf++)
+			if (adapter->vf_cfg[vf].vf_if_handle)
+				be_cmd_if_destroy(adapter,
+					adapter->vf_cfg[vf].vf_if_handle,
+					vf + 1);
+	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
 do_none:
 	return status;
 }
 
 static int be_clear(struct be_adapter *adapter)
 {
-	if (be_physfn(adapter))
+	int vf;
+
+	if (be_physfn(adapter) && adapter->sriov_enabled)
 		be_vf_eth_addr_rem(adapter);
 
 	be_mcc_queues_destroy(adapter);
 	be_rx_queues_destroy(adapter);
 	be_tx_queues_destroy(adapter);
 
-	be_cmd_if_destroy(adapter, adapter->if_handle);
+	if (be_physfn(adapter) && adapter->sriov_enabled)
+		for (vf = 0; vf < num_vfs; vf++)
+			if (adapter->vf_cfg[vf].vf_if_handle)
+				be_cmd_if_destroy(adapter,
+					adapter->vf_cfg[vf].vf_if_handle,
+					vf + 1);
+
+	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
 
 	/* tell fw we're done with firing cmds */
 	be_cmd_fw_clean(adapter);
@@ -2455,8 +2472,8 @@ static int be_flash_data(struct be_adapter *adapter,
 			continue;
 		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
 			(!be_flash_redboot(adapter, fw->data,
-			pflashcomp[i].offset, pflashcomp[i].size,
-			filehdr_size)))
+			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
+			(num_of_images * sizeof(struct image_hdr)))))
 			continue;
 		p = fw->data;
 		p += filehdr_size + pflashcomp[i].offset
@@ -2530,8 +2547,8 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
 	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
 
 	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
-	flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
-				&flash_cmd.dma);
+	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
+					  &flash_cmd.dma, GFP_KERNEL);
 	if (!flash_cmd.va) {
 		status = -ENOMEM;
 		dev_err(&adapter->pdev->dev,
@@ -2560,8 +2577,8 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
 		status = -1;
 	}
 
-	pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
-				flash_cmd.dma);
+	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
+			  flash_cmd.dma);
 	if (status) {
 		dev_err(&adapter->pdev->dev, "Firmware load error\n");
 		goto fw_exit;
@@ -2628,8 +2645,6 @@ static void be_netdev_init(struct net_device *netdev)
 
 	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
 		BE_NAPI_WEIGHT);
-
-	netif_stop_queue(netdev);
 }
 
 static void be_unmap_pci_bars(struct be_adapter *adapter)
@@ -2704,13 +2719,13 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
 	be_unmap_pci_bars(adapter);
 
 	if (mem->va)
-		pci_free_consistent(adapter->pdev, mem->size,
-			mem->va, mem->dma);
+		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+				  mem->dma);
 
 	mem = &adapter->mc_cmd_mem;
 	if (mem->va)
-		pci_free_consistent(adapter->pdev, mem->size,
-			mem->va, mem->dma);
+		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+				  mem->dma);
 }
 
 static int be_ctrl_init(struct be_adapter *adapter)
@@ -2725,8 +2740,10 @@ static int be_ctrl_init(struct be_adapter *adapter)
 		goto done;
 
 	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
-	mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
-			mbox_mem_alloc->size, &mbox_mem_alloc->dma);
+	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
+						mbox_mem_alloc->size,
+						&mbox_mem_alloc->dma,
+						GFP_KERNEL);
 	if (!mbox_mem_alloc->va) {
 		status = -ENOMEM;
 		goto unmap_pci_bars;
@@ -2738,8 +2755,9 @@ static int be_ctrl_init(struct be_adapter *adapter)
 	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
 
 	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
-	mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
-			&mc_cmd_mem->dma);
+	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
+					    mc_cmd_mem->size, &mc_cmd_mem->dma,
+					    GFP_KERNEL);
 	if (mc_cmd_mem->va == NULL) {
 		status = -ENOMEM;
 		goto free_mbox;
@@ -2755,8 +2773,8 @@ static int be_ctrl_init(struct be_adapter *adapter)
 	return 0;
 
 free_mbox:
-	pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
-		mbox_mem_alloc->va, mbox_mem_alloc->dma);
+	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
+			  mbox_mem_alloc->va, mbox_mem_alloc->dma);
 
 unmap_pci_bars:
 	be_unmap_pci_bars(adapter);
@@ -2770,8 +2788,8 @@ static void be_stats_cleanup(struct be_adapter *adapter)
 	struct be_dma_mem *cmd = &adapter->stats_cmd;
 
 	if (cmd->va)
-		pci_free_consistent(adapter->pdev, cmd->size,
-			cmd->va, cmd->dma);
+		dma_free_coherent(&adapter->pdev->dev, cmd->size,
+				  cmd->va, cmd->dma);
 }
 
 static int be_stats_init(struct be_adapter *adapter)
@@ -2779,7 +2797,8 @@ static int be_stats_init(struct be_adapter *adapter)
 	struct be_dma_mem *cmd = &adapter->stats_cmd;
 
 	cmd->size = sizeof(struct be_cmd_req_get_stats);
-	cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
+	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
+				     GFP_KERNEL);
 	if (cmd->va == NULL)
 		return -1;
 	memset(cmd->va, 0, cmd->size);
@@ -2849,6 +2868,10 @@ static int be_get_config(struct be_adapter *adapter)
 	else
 		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
 
+	status = be_cmd_get_cntl_attributes(adapter);
+	if (status)
+		return status;
+
 	return 0;
 }
 
@@ -2922,11 +2945,11 @@ static int __devinit be_probe(struct pci_dev *pdev,
 	adapter->netdev = netdev;
 	SET_NETDEV_DEV(netdev, &pdev->dev);
 
-	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
 	if (!status) {
 		netdev->features |= NETIF_F_HIGHDMA;
 	} else {
-		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (status) {
 			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
 			goto free_netdev;
@@ -2951,11 +2974,9 @@ static int __devinit be_probe(struct pci_dev *pdev,
 	if (status)
 		goto ctrl_clean;
 
-	if (be_physfn(adapter)) {
-		status = be_cmd_reset_function(adapter);
-		if (status)
-			goto ctrl_clean;
-	}
+	status = be_cmd_reset_function(adapter);
+	if (status)
+		goto ctrl_clean;
 
 	status = be_stats_init(adapter);
 	if (status)
@@ -2979,10 +3000,18 @@ static int __devinit be_probe(struct pci_dev *pdev,
 		goto unsetup;
 	netif_carrier_off(netdev);
 
+	if (be_physfn(adapter) && adapter->sriov_enabled) {
+		status = be_vf_eth_addr_config(adapter);
+		if (status)
+			goto unreg_netdev;
+	}
+
 	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
 	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
 	return 0;
 
+unreg_netdev:
+	unregister_netdev(netdev);
 unsetup:
 	be_clear(adapter);
 msix_disable:
@@ -3009,6 +3038,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
 	struct be_adapter *adapter = pci_get_drvdata(pdev);
 	struct net_device *netdev =  adapter->netdev;
 
+	cancel_delayed_work_sync(&adapter->work);
 	if (adapter->wol)
 		be_setup_wol(adapter, true);
 
@@ -3021,6 +3051,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
 	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
 	be_clear(adapter);
 
+	be_msix_disable(adapter);
 	pci_save_state(pdev);
 	pci_disable_device(pdev);
 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -3042,6 +3073,7 @@ static int be_resume(struct pci_dev *pdev)
 	pci_set_power_state(pdev, 0);
 	pci_restore_state(pdev);
 
+	be_msix_enable(adapter);
 	/* tell fw we're ready to fire cmds */
 	status = be_cmd_fw_init(adapter);
 	if (status)
@@ -3057,6 +3089,8 @@ static int be_resume(struct pci_dev *pdev)
 
 	if (adapter->wol)
 		be_setup_wol(adapter, false);
+
+	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
 	return 0;
 }
 
@@ -3068,6 +3102,9 @@ static void be_shutdown(struct pci_dev *pdev)
 	struct be_adapter *adapter = pci_get_drvdata(pdev);
 	struct net_device *netdev =  adapter->netdev;
 
+	if (netif_running(netdev))
+		cancel_delayed_work_sync(&adapter->work);
+
 	netif_device_detach(netdev);
 
 	be_cmd_reset_function(adapter);
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index fad912656fe4..9f356d5d0f33 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -126,22 +126,22 @@ bnad_free_all_txbufs(struct bnad *bnad,
 		}
 		unmap_array[unmap_cons].skb = NULL;
 
-		pci_unmap_single(bnad->pcidev,
-				pci_unmap_addr(&unmap_array[unmap_cons],
+		dma_unmap_single(&bnad->pcidev->dev,
+				dma_unmap_addr(&unmap_array[unmap_cons],
 					dma_addr), skb_headlen(skb),
-				PCI_DMA_TODEVICE);
+				DMA_TO_DEVICE);
 
-		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
 		if (++unmap_cons >= unmap_q->q_depth)
 			break;
 
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			pci_unmap_page(bnad->pcidev,
-				pci_unmap_addr(&unmap_array[unmap_cons],
+			dma_unmap_page(&bnad->pcidev->dev,
+				dma_unmap_addr(&unmap_array[unmap_cons],
 					dma_addr),
 				skb_shinfo(skb)->frags[i].size,
-				PCI_DMA_TODEVICE);
-			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+				DMA_TO_DEVICE);
+			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
 				0);
 			if (++unmap_cons >= unmap_q->q_depth)
 				break;
@@ -199,23 +199,23 @@ bnad_free_txbufs(struct bnad *bnad,
 		sent_bytes += skb->len;
 		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
 
-		pci_unmap_single(bnad->pcidev,
-				pci_unmap_addr(&unmap_array[unmap_cons],
+		dma_unmap_single(&bnad->pcidev->dev,
+				dma_unmap_addr(&unmap_array[unmap_cons],
 					dma_addr), skb_headlen(skb),
-				PCI_DMA_TODEVICE);
-		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+				DMA_TO_DEVICE);
+		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
 		BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
 
 		prefetch(&unmap_array[unmap_cons + 1]);
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			prefetch(&unmap_array[unmap_cons + 1]);
 
-			pci_unmap_page(bnad->pcidev,
-				pci_unmap_addr(&unmap_array[unmap_cons],
+			dma_unmap_page(&bnad->pcidev->dev,
+				dma_unmap_addr(&unmap_array[unmap_cons],
 					dma_addr),
 				skb_shinfo(skb)->frags[i].size,
-				PCI_DMA_TODEVICE);
-			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+				DMA_TO_DEVICE);
+			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
 				0);
 			BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
 		}
@@ -340,19 +340,22 @@ static void
 bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
 {
 	struct bnad_unmap_q *unmap_q;
+	struct bnad_skb_unmap *unmap_array;
 	struct sk_buff *skb;
 	int unmap_cons;
 
 	unmap_q = rcb->unmap_q;
+	unmap_array = unmap_q->unmap_array;
 	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
-		skb = unmap_q->unmap_array[unmap_cons].skb;
+		skb = unmap_array[unmap_cons].skb;
 		if (!skb)
 			continue;
-		unmap_q->unmap_array[unmap_cons].skb = NULL;
-		pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
-					unmap_array[unmap_cons],
-					dma_addr), rcb->rxq->buffer_size,
-					PCI_DMA_FROMDEVICE);
+		unmap_array[unmap_cons].skb = NULL;
+		dma_unmap_single(&bnad->pcidev->dev,
+				 dma_unmap_addr(&unmap_array[unmap_cons],
+						dma_addr),
+				 rcb->rxq->buffer_size,
+				 DMA_FROM_DEVICE);
 		dev_kfree_skb(skb);
 	}
 	bnad_reset_rcb(bnad, rcb);
@@ -391,9 +394,10 @@ bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
 		skb->dev = bnad->netdev;
 		skb_reserve(skb, NET_IP_ALIGN);
 		unmap_array[unmap_prod].skb = skb;
-		dma_addr = pci_map_single(bnad->pcidev, skb->data,
-			rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
-		pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+					  rcb->rxq->buffer_size,
+					  DMA_FROM_DEVICE);
+		dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
 				   dma_addr);
 		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
 		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -434,8 +438,9 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 	struct bna_rcb *rcb = NULL;
 	unsigned int wi_range, packets = 0, wis = 0;
 	struct bnad_unmap_q *unmap_q;
+	struct bnad_skb_unmap *unmap_array;
 	struct sk_buff *skb;
-	u32 flags;
+	u32 flags, unmap_cons;
 	u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
 	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
 
@@ -456,17 +461,17 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 			rcb = ccb->rcb[1];
 
 		unmap_q = rcb->unmap_q;
+		unmap_array = unmap_q->unmap_array;
+		unmap_cons = unmap_q->consumer_index;
 
-		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		skb = unmap_array[unmap_cons].skb;
 		BUG_ON(!(skb));
-		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
-		pci_unmap_single(bnad->pcidev,
-				 pci_unmap_addr(&unmap_q->
-						unmap_array[unmap_q->
-						consumer_index],
+		unmap_array[unmap_cons].skb = NULL;
+		dma_unmap_single(&bnad->pcidev->dev,
+				 dma_unmap_addr(&unmap_array[unmap_cons],
 						dma_addr),
 				 rcb->rxq->buffer_size,
-				 PCI_DMA_FROMDEVICE);
+				 DMA_FROM_DEVICE);
 		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
 
 		/* Should be more efficient ? Performance ? */
@@ -1015,9 +1020,9 @@ bnad_mem_free(struct bnad *bnad,
 		if (mem_info->mem_type == BNA_MEM_T_DMA) {
 			BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
 					dma_pa);
-			pci_free_consistent(bnad->pcidev,
+			dma_free_coherent(&bnad->pcidev->dev,
 					mem_info->mdl[i].len,
 					mem_info->mdl[i].kva, dma_pa);
 		} else
 			kfree(mem_info->mdl[i].kva);
 	}
@@ -1047,8 +1052,9 @@ bnad_mem_alloc(struct bnad *bnad,
 		for (i = 0; i < mem_info->num; i++) {
 			mem_info->mdl[i].len = mem_info->len;
 			mem_info->mdl[i].kva =
-				pci_alloc_consistent(bnad->pcidev,
-						mem_info->len, &dma_pa);
+				dma_alloc_coherent(&bnad->pcidev->dev,
+						   mem_info->len, &dma_pa,
+						   GFP_KERNEL);
 
 			if (mem_info->mdl[i].kva == NULL)
 				goto err_return;
@@ -2600,9 +2606,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	unmap_q->unmap_array[unmap_prod].skb = skb;
 	BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
 	txqent->vector[vect_id].length = htons(skb_headlen(skb));
-	dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
-		PCI_DMA_TODEVICE);
-	pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+				  skb_headlen(skb), DMA_TO_DEVICE);
+	dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
 			   dma_addr);
 
 	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
@@ -2630,11 +2636,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 		BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
 		txqent->vector[vect_id].length = htons(size);
-		dma_addr =
-			pci_map_page(bnad->pcidev, frag->page,
-				     frag->page_offset, size,
-				     PCI_DMA_TODEVICE);
-		pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+		dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
+					frag->page_offset, size, DMA_TO_DEVICE);
+		dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
 				   dma_addr);
 		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
 		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -3022,14 +3026,14 @@ bnad_pci_init(struct bnad *bnad,
 	err = pci_request_regions(pdev, BNAD_NAME);
 	if (err)
 		goto disable_device;
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
-	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 		*using_dac = 1;
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = pci_set_consistent_dma_mask(pdev,
+			err = dma_set_coherent_mask(&pdev->dev,
 							  DMA_BIT_MASK(32));
 			if (err)
 				goto release_regions;
 		}
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
index 8b1d51557def..a89117fa4970 100644
--- a/drivers/net/bna/bnad.h
+++ b/drivers/net/bna/bnad.h
@@ -181,7 +181,7 @@ struct bnad_rx_info {
 /* Unmap queues for Tx / Rx cleanup */
 struct bnad_skb_unmap {
 	struct sk_buff		*skb;
-	DECLARE_PCI_UNMAP_ADDR(dma_addr)
+	DEFINE_DMA_UNMAP_ADDR(dma_addr);
 };
 
 struct bnad_unmap_q {
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index df99edf3464a..2a961b7f7e17 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -435,7 +435,8 @@ bnx2_cnic_stop(struct bnx2 *bp)
 	struct cnic_ctl_info info;
 
 	mutex_lock(&bp->cnic_lock);
-	c_ops = bp->cnic_ops;
+	c_ops = rcu_dereference_protected(bp->cnic_ops,
+					  lockdep_is_held(&bp->cnic_lock));
 	if (c_ops) {
 		info.cmd = CNIC_CTL_STOP_CMD;
 		c_ops->cnic_ctl(bp->cnic_data, &info);
@@ -450,7 +451,8 @@ bnx2_cnic_start(struct bnx2 *bp)
 	struct cnic_ctl_info info;
 
 	mutex_lock(&bp->cnic_lock);
-	c_ops = bp->cnic_ops;
+	c_ops = rcu_dereference_protected(bp->cnic_ops,
+					  lockdep_is_held(&bp->cnic_lock));
 	if (c_ops) {
 		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
 			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
@@ -7553,6 +7555,10 @@ bnx2_set_flags(struct net_device *dev, u32 data)
 	    !(data & ETH_FLAG_RXVLAN))
 		return -EINVAL;
 
+	/* TSO with VLAN tag won't work with current firmware */
+	if (!(data & ETH_FLAG_TXVLAN))
+		return -EINVAL;
+
 	rc = ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH | ETH_FLAG_RXVLAN |
 				  ETH_FLAG_TXVLAN);
 	if (rc)
@@ -7962,11 +7968,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
 		/* AER (Advanced Error Reporting) hooks */
 		err = pci_enable_pcie_error_reporting(pdev);
-		if (err) {
-			dev_err(&pdev->dev, "pci_enable_pcie_error_reporting "
-					    "failed 0x%x\n", err);
-			/* non-fatal, continue */
-		}
+		if (!err)
+			bp->flags |= BNX2_FLAG_AER_ENABLED;
 
 	} else {
 		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
@@ -8229,8 +8232,10 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	return 0;
 
 err_out_unmap:
-	if (bp->flags & BNX2_FLAG_PCIE)
+	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
 		pci_disable_pcie_error_reporting(pdev);
+		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+	}
 
 	if (bp->regview) {
 		iounmap(bp->regview);
@@ -8312,7 +8317,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
 #endif
 };
 
-static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
+static void inline vlan_features_add(struct net_device *dev, u32 flags)
 {
 	dev->vlan_features |= flags;
 }
@@ -8418,8 +8423,10 @@ bnx2_remove_one(struct pci_dev *pdev)
 
 	kfree(bp->temp_stats_blk);
 
-	if (bp->flags & BNX2_FLAG_PCIE)
+	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
 		pci_disable_pcie_error_reporting(pdev);
+		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+	}
 
 	free_netdev(dev);
 
@@ -8535,7 +8542,7 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
 	}
 	rtnl_unlock();
 
-	if (!(bp->flags & BNX2_FLAG_PCIE))
+	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
 		return result;
 
 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 5488a2e82fe9..7a5e88f831f6 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6207,6 +6207,8 @@ struct l2_fhdr {
 
 #define BNX2_CP_SCRATCH				0x001a0000
 
+#define BNX2_FW_MAX_ISCSI_CONN			0x001a0080
+
 
 /*
  * mcp_reg definition
@@ -6741,6 +6743,7 @@ struct bnx2 {
 #define BNX2_FLAG_JUMBO_BROKEN		0x00000800
 #define BNX2_FLAG_CAN_KEEP_VLAN		0x00001000
 #define BNX2_FLAG_BROKEN_STATS		0x00002000
+#define BNX2_FLAG_AER_ENABLED		0x00004000
 
 	struct bnx2_napi	bnx2_napi[BNX2_MAX_MSIX_VEC];
 
@@ -6758,7 +6761,7 @@ struct bnx2 {
 	u32			tx_wake_thresh;
 
 #ifdef BCM_CNIC
-	struct cnic_ops		*cnic_ops;
+	struct cnic_ops	__rcu	*cnic_ops;
 	void			*cnic_data;
 #endif
 
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index a6cd335c9436..c0dd30d870ae 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -22,8 +22,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.62.00-3"
-#define DRV_MODULE_RELDATE      "2010/12/21"
+#define DRV_MODULE_VERSION      "1.62.11-0"
+#define DRV_MODULE_RELDATE      "2011/01/31"
 #define BNX2X_BC_VER            0x040200
 
 #define BNX2X_MULTI_QUEUE
@@ -129,6 +129,7 @@ void bnx2x_panic_dump(struct bnx2x *bp);
 #endif
 
 #define bnx2x_mc_addr(ha)      ((ha)->addr)
+#define bnx2x_uc_addr(ha)      ((ha)->addr)
 
 #define U64_LO(x)			(u32)(((u64)(x)) & 0xffffffff)
 #define U64_HI(x)			(u32)(((u64)(x)) >> 32)
@@ -341,6 +342,8 @@ struct bnx2x_fastpath {
 	/* chip independed shortcut into rx_prods_offset memory */
 	u32			ustorm_rx_prods_offset;
 
+	u32			rx_buf_size;
+
 	dma_addr_t		status_blk_mapping;
 
 	struct sw_tx_bd		*tx_buf_ring;
@@ -428,6 +431,10 @@ struct bnx2x_fastpath {
 };
 
 #define bnx2x_fp(bp, nr, var)		(bp->fp[nr].var)
+
+/* Use 2500 as a mini-jumbo MTU for FCoE */
+#define BNX2X_FCOE_MINI_JUMBO_MTU	2500
+
 #ifdef BCM_CNIC
 /* FCoE L2 `fastpath' is right after the eth entries */
 #define FCOE_IDX			BNX2X_NUM_ETH_QUEUES(bp)
@@ -810,6 +817,7 @@ struct bnx2x_slowpath {
 	struct eth_stats_query		fw_stats;
 	struct mac_configuration_cmd	mac_config;
 	struct mac_configuration_cmd	mcast_config;
+	struct mac_configuration_cmd	uc_mac_config;
 	struct client_init_ramrod_data	client_init_data;
 
 	/* used by dmae command executer */
@@ -911,7 +919,6 @@ struct bnx2x {
 	int			tx_ring_size;
 
 	u32			rx_csum;
-	u32			rx_buf_size;
 /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
 #define ETH_OVREHEAD		(ETH_HLEN + 8 + 8)
 #define ETH_MIN_PACKET_SIZE		60
@@ -939,7 +946,7 @@ struct bnx2x {
 	struct eth_spe		*spq_prod_bd;
 	struct eth_spe		*spq_last_bd;
 	__le16			*dsb_sp_prod;
-	atomic_t		spq_left; /* serialize spq */
+	atomic_t		cq_spq_left; /* ETH_XXX ramrods credit */
 	/* used to synchronize spq accesses */
 	spinlock_t		spq_lock;
 
@@ -949,6 +956,7 @@ struct bnx2x {
 	u16			eq_prod;
 	u16			eq_cons;
 	__le16			*eq_cons_sb;
+	atomic_t		eq_spq_left; /* COMMON_XXX ramrods credit */

 	/* Flags for marking that there is a STAT_QUERY or
 	   SET_MAC ramrod pending */
@@ -976,8 +984,12 @@ struct bnx2x {
 #define MF_FUNC_DIS			0x1000
 #define FCOE_MACS_SET			0x2000
 #define NO_FCOE_FLAG			0x4000
+#define NO_ISCSI_OOO_FLAG		0x8000
+#define NO_ISCSI_FLAG			0x10000

 #define NO_FCOE(bp)		((bp)->flags & NO_FCOE_FLAG)
+#define NO_ISCSI(bp)		((bp)->flags & NO_ISCSI_FLAG)
+#define NO_ISCSI_OOO(bp)	((bp)->flags & NO_ISCSI_OOO_FLAG)

 	int			pf_num;	/* absolute PF number */
 	int			pfid;	/* per-path PF number */
@@ -1064,6 +1076,7 @@ struct bnx2x {
 	int			num_queues;
 	int			disable_tpa;
 	int			int_mode;
+	u32			*rx_indir_table;

 	struct tstorm_eth_mac_filter_config	mac_filters;
 #define BNX2X_ACCEPT_NONE		0x0000
@@ -1110,7 +1123,7 @@ struct bnx2x {
 #define BNX2X_CNIC_FLAG_MAC_SET		1
 	void			*t2;
 	dma_addr_t		t2_mapping;
-	struct cnic_ops		*cnic_ops;
+	struct cnic_ops	__rcu	*cnic_ops;
 	void			*cnic_data;
 	u32			cnic_tag;
 	struct cnic_eth_dev	cnic_eth_dev;
@@ -1125,13 +1138,12 @@ struct bnx2x {
 	u16			cnic_kwq_pending;
 	u16			cnic_spq_pending;
 	struct mutex		cnic_mutex;
-	u8			iscsi_mac[ETH_ALEN];
 	u8			fip_mac[ETH_ALEN];
 #endif

 	int			dmae_ready;
 	/* used to synchronize dmae accesses */
-	struct mutex		dmae_mutex;
+	spinlock_t		dmae_lock;

 	/* used to protect the FW mail box */
 	struct mutex		fw_mb_mutex;
@@ -1447,6 +1459,12 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
 void bnx2x_calc_fc_adv(struct bnx2x *bp);
 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 		  u32 data_hi, u32 data_lo, int common);
+
+/* Clears multicast and unicast list configuration in the chip. */
+void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp);
+void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp);
+void bnx2x_invalidate_uc_list(struct bnx2x *bp);
+
 void bnx2x_update_coalesce(struct bnx2x *bp);
 int bnx2x_get_link_cfg_idx(struct bnx2x *bp);

@@ -1782,5 +1800,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */

 extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
+void bnx2x_push_indir_table(struct bnx2x *bp);

 #endif /* bnx2x.h */
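Side note on the header changes: the old single spq_left credit is split into cq_spq_left (ETH_* ramrods, completed on the CQ) and eq_spq_left (COMMON_* ramrods, completed on the event queue). A hedged sketch of the credit discipline such atomics support; the helper names are made up for illustration and are not the driver's API:

#include <linux/atomic.h>

static int spq_take_credit(atomic_t *left)
{
	/* claim one slow-path slot; fail when the ring is exhausted */
	if (atomic_dec_if_positive(left) < 0)
		return -EBUSY;
	return 0;
}

static void spq_return_credit(atomic_t *left)
{
	atomic_inc(left);	/* the completion path gives the slot back */
}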
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 710ce5d04c53..6fac8e183c59 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -232,7 +232,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 	/* move empty skb from pool to prod and map it */
 	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
 	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
-				 bp->rx_buf_size, DMA_FROM_DEVICE);
+				 fp->rx_buf_size, DMA_FROM_DEVICE);
 	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

 	/* move partial skb from cons to pool (don't unmap yet) */
@@ -333,13 +333,13 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
 	struct sk_buff *skb = rx_buf->skb;
 	/* alloc new skb */
-	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);

 	/* Unmap skb in the pool anyway, as we are going to change
 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
 	   fails. */
 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
-			 bp->rx_buf_size, DMA_FROM_DEVICE);
+			 fp->rx_buf_size, DMA_FROM_DEVICE);

 	if (likely(new_skb)) {
 		/* fix ip xsum and give it to the stack */
@@ -349,10 +349,10 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		prefetch(((char *)(skb)) + L1_CACHE_BYTES);

 #ifdef BNX2X_STOP_ON_ERROR
-		if (pad + len > bp->rx_buf_size) {
+		if (pad + len > fp->rx_buf_size) {
 			BNX2X_ERR("skb_put is about to fail... "
 				  "pad %d  len %d  rx_buf_size %d\n",
-				  pad, len, bp->rx_buf_size);
+				  pad, len, fp->rx_buf_size);
 			bnx2x_panic();
 			return;
 		}
@@ -582,7 +582,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
 				dma_unmap_single(&bp->pdev->dev,
 					dma_unmap_addr(rx_buf, mapping),
-						 bp->rx_buf_size,
+						 fp->rx_buf_size,
 						 DMA_FROM_DEVICE);
 				skb_reserve(skb, pad);
 				skb_put(skb, len);
@@ -821,19 +821,16 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 	u16 ring_prod;
 	int i, j;

-	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
-		IP_HEADER_ALIGNMENT_PADDING;
-
-	DP(NETIF_MSG_IFUP,
-	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
-
 	for_each_rx_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];

+		DP(NETIF_MSG_IFUP,
+		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
+
 		if (!fp->disable_tpa) {
 			for (i = 0; i < max_agg_queues; i++) {
 				fp->tpa_pool[i].skb =
-				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+				   netdev_alloc_skb(bp->dev, fp->rx_buf_size);
 				if (!fp->tpa_pool[i].skb) {
 					BNX2X_ERR("Failed to allocate TPA "
 						  "skb pool for queue[%d] - "
@@ -941,7 +938,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)

 			dma_unmap_single(&bp->pdev->dev,
 					 dma_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_size, DMA_FROM_DEVICE);
+					 fp->rx_buf_size, DMA_FROM_DEVICE);

 			rx_buf->skb = NULL;
 			dev_kfree_skb(skb);
@@ -1249,6 +1246,31 @@ static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
 	return rc;
 }

+static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
+		if (IS_FCOE_IDX(i))
+			/*
+			 * Although there are no IP frames expected to arrive to
+			 * this ring we still want to add an
+			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
+			 * overrun attack.
+			 */
+			fp->rx_buf_size =
+				BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
+				BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+		else
+			fp->rx_buf_size =
+				bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
+				IP_HEADER_ALIGNMENT_PADDING;
+	}
+}
+
 /* must be called with rtnl_lock */
 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 {
@@ -1272,6 +1294,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	/* must be called before memory allocation and HW init */
 	bnx2x_ilt_set_info(bp);

+	/* Set the receive queues buffer size */
+	bnx2x_set_rx_buf_size(bp);
+
 	if (bnx2x_alloc_mem(bp))
 		return -ENOMEM;

@@ -1427,28 +1452,35 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)

 	bnx2x_set_eth_mac(bp, 1);

+	/* Clear MC configuration */
+	if (CHIP_IS_E1(bp))
+		bnx2x_invalidate_e1_mc_list(bp);
+	else
+		bnx2x_invalidate_e1h_mc_list(bp);
+
+	/* Clear UC lists configuration */
+	bnx2x_invalidate_uc_list(bp);
+
 	if (bp->port.pmf)
 		bnx2x_initial_phy_init(bp, load_mode);

+	/* Initialize Rx filtering */
+	bnx2x_set_rx_mode(bp->dev);
+
 	/* Start fast path */
 	switch (load_mode) {
 	case LOAD_NORMAL:
 		/* Tx queue should be only reenabled */
 		netif_tx_wake_all_queues(bp->dev);
 		/* Initialize the receive filter. */
-		bnx2x_set_rx_mode(bp->dev);
 		break;

 	case LOAD_OPEN:
 		netif_tx_start_all_queues(bp->dev);
 		smp_mb__after_clear_bit();
-		/* Initialize the receive filter. */
-		bnx2x_set_rx_mode(bp->dev);
 		break;

 	case LOAD_DIAG:
-		/* Initialize the receive filter. */
-		bnx2x_set_rx_mode(bp->dev);
 		bp->state = BNX2X_STATE_DIAG;
 		break;

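For reference, the per-queue buffer sizing introduced by bnx2x_set_rx_buf_size() works out as below. BNX2X_RX_ALIGN and IP_HEADER_ALIGNMENT_PADDING are not defined in this patch, so the values used here (64-byte alignment, 2-byte pad) are assumptions and the totals are illustrative only:

#include <stdio.h>

#define ETH_HLEN			14
#define ETH_OVREHEAD			(ETH_HLEN + 8 + 8)	/* 30 */
#define BNX2X_RX_ALIGN			64	/* assumed cache-line align */
#define IP_HEADER_ALIGNMENT_PADDING	2	/* assumed NET_IP_ALIGN-style pad */
#define BNX2X_FCOE_MINI_JUMBO_MTU	2500

int main(void)
{
	int mtu = 1500;	/* regular ethernet ring */

	printf("eth ring rx_buf_size:  %d\n", mtu + ETH_OVREHEAD +
	       BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING);	/* 1596 */
	printf("fcoe ring rx_buf_size: %d\n", BNX2X_FCOE_MINI_JUMBO_MTU +
	       ETH_OVREHEAD + BNX2X_RX_ALIGN +
	       IP_HEADER_ALIGNMENT_PADDING);			/* 2596 */
	return 0;
}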
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 03eb4d68e6bb..f062d5d20fa9 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -822,11 +822,11 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
 	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
 	dma_addr_t mapping;

-	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+	skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
 	if (unlikely(skb == NULL))
 		return -ENOMEM;

-	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
+	mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
 				 DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		dev_kfree_skb(skb);
@@ -892,7 +892,7 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
 		if (fp->tpa_state[i] == BNX2X_TPA_START)
 			dma_unmap_single(&bp->pdev->dev,
 					 dma_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_size, DMA_FROM_DEVICE);
+					 fp->rx_buf_size, DMA_FROM_DEVICE);

 		dev_kfree_skb(skb);
 		rx_buf->skb = NULL;
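bnx2x_alloc_rx_skb() above is the canonical allocate-map-check sequence; the only functional change in these hunks is reading the buffer length from the fastpath instead of the device. A self-contained sketch of the same pattern, with a hypothetical rx_fill() wrapper rather than the driver's own helper:

static int rx_fill(struct net_device *dev, struct device *dmadev,
		   unsigned int buf_size, dma_addr_t *mapping,
		   struct sk_buff **out)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, buf_size);

	if (unlikely(!skb))
		return -ENOMEM;

	*mapping = dma_map_single(dmadev, skb->data, buf_size,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dmadev, *mapping))) {
		dev_kfree_skb(skb);	/* don't leak on a failed mapping */
		return -ENOMEM;
	}
	*out = skb;
	return 0;
}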
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 5b44a8b48509..8d19d127f796 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -1618,7 +1618,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
 	/* prepare the loopback packet */
 	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
 		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
-	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+	skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
 	if (!skb) {
 		rc = -ENOMEM;
 		goto test_loopback_exit;
@@ -2134,6 +2134,59 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
 	return 0;
 }

+static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+			   void *rules __always_unused)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	switch (info->cmd) {
+	case ETHTOOL_GRXRINGS:
+		info->data = BNX2X_NUM_ETH_QUEUES(bp);
+		return 0;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int bnx2x_get_rxfh_indir(struct net_device *dev,
+				struct ethtool_rxfh_indir *indir)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	size_t copy_size =
+		min_t(size_t, indir->size, TSTORM_INDIRECTION_TABLE_SIZE);
+
+	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+		return -EOPNOTSUPP;
+
+	indir->size = TSTORM_INDIRECTION_TABLE_SIZE;
+	memcpy(indir->ring_index, bp->rx_indir_table,
+	       copy_size * sizeof(bp->rx_indir_table[0]));
+	return 0;
+}
+
+static int bnx2x_set_rxfh_indir(struct net_device *dev,
+				const struct ethtool_rxfh_indir *indir)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	size_t i;
+
+	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+		return -EOPNOTSUPP;
+
+	/* Validate size and indices */
+	if (indir->size != TSTORM_INDIRECTION_TABLE_SIZE)
+		return -EINVAL;
+	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
+		if (indir->ring_index[i] >= BNX2X_NUM_ETH_QUEUES(bp))
+			return -EINVAL;
+
+	memcpy(bp->rx_indir_table, indir->ring_index,
+	       indir->size * sizeof(bp->rx_indir_table[0]));
+	bnx2x_push_indir_table(bp);
+	return 0;
+}
+
 static const struct ethtool_ops bnx2x_ethtool_ops = {
 	.get_settings		= bnx2x_get_settings,
 	.set_settings		= bnx2x_set_settings,
@@ -2170,6 +2223,9 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
 	.get_strings		= bnx2x_get_strings,
 	.phys_id		= bnx2x_phys_id,
 	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
+	.get_rxnfc		= bnx2x_get_rxnfc,
+	.get_rxfh_indir		= bnx2x_get_rxfh_indir,
+	.set_rxfh_indir		= bnx2x_set_rxfh_indir,
 };

 void bnx2x_set_ethtool_ops(struct net_device *netdev)
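The three new ops are what ethtool -x and ethtool -X drive from userspace. A hedged sketch of querying the RSS indirection table through the raw SIOCETHTOOL interface; the table size of 128 is an assumption about TSTORM_INDIRECTION_TABLE_SIZE, and error handling is trimmed:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	__u32 i, size = 128;			/* assumed table size */
	struct ethtool_rxfh_indir *indir =
		calloc(1, sizeof(*indir) + size * sizeof(indir->ring_index[0]));
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	indir->cmd = ETHTOOL_GRXFHINDIR;	/* "get indirection table" */
	indir->size = size;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)indir;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		for (i = 0; i < indir->size; i++)
			printf("bucket %u -> ring %u\n", i, indir->ring_index[i]);
	return 0;
}

In practice, plain ethtool -X is the simpler way to exercise bnx2x_set_rxfh_indir(); the ioctl form above only illustrates what the op receives.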
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 6238d4f63989..be503cc0a50b 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -11,20 +11,27 @@

 #include "bnx2x_fw_defs.h"

+#define FW_ENCODE_32BIT_PATTERN		0x1e1e1e1e
+
 struct license_key {
 	u32 reserved[6];

-#if defined(__BIG_ENDIAN)
-	u16 max_iscsi_init_conn;
-	u16 max_iscsi_trgt_conn;
-#elif defined(__LITTLE_ENDIAN)
-	u16 max_iscsi_trgt_conn;
-	u16 max_iscsi_init_conn;
-#endif
+	u32 max_iscsi_conn;
+#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK	0xFFFF
+#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT	0
+#define BNX2X_MAX_ISCSI_INIT_CONN_MASK	0xFFFF0000
+#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT	16

-	u32 reserved_a[6];
-};
+	u32 reserved_a;
+
+	u32 max_fcoe_conn;
+#define BNX2X_MAX_FCOE_TRGT_CONN_MASK	0xFFFF
+#define BNX2X_MAX_FCOE_TRGT_CONN_SHIFT	0
+#define BNX2X_MAX_FCOE_INIT_CONN_MASK	0xFFFF0000
+#define BNX2X_MAX_FCOE_INIT_CONN_SHIFT	16

+	u32 reserved_b[4];
+};

 #define PORT_0				0
 #define PORT_1				1
@@ -237,8 +244,26 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
 #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT	16


-	u32 Reserved0[16];				/* 0x158 */
-
+	u32 Reserved0[3];				/* 0x158 */
+	/* Controls the TX laser of the SFP+ module */
+	u32 sfp_ctrl;					/* 0x164 */
+#define PORT_HW_CFG_TX_LASER_MASK			0x000000FF
+#define PORT_HW_CFG_TX_LASER_SHIFT			0
+#define PORT_HW_CFG_TX_LASER_MDIO			0x00000000
+#define PORT_HW_CFG_TX_LASER_GPIO0			0x00000001
+#define PORT_HW_CFG_TX_LASER_GPIO1			0x00000002
+#define PORT_HW_CFG_TX_LASER_GPIO2			0x00000003
+#define PORT_HW_CFG_TX_LASER_GPIO3			0x00000004
+
+	/* Controls the fault module LED of the SFP+ */
+#define PORT_HW_CFG_FAULT_MODULE_LED_MASK		0x0000FF00
+#define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT		8
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0		0x00000000
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1		0x00000100
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2		0x00000200
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3		0x00000300
+#define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED		0x00000400
+	u32 Reserved01[12];				/* 0x158 */
 	/* for external PHY, or forced mode or during AN */
 	u16 xgxs_config_rx[4];				/* 0x198 */

@@ -246,12 +271,78 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */

 	u32 Reserved1[56];				/* 0x1A8 */
 	u32 default_cfg;				/* 0x288 */
+#define PORT_HW_CFG_GPIO0_CONFIG_MASK			0x00000003
+#define PORT_HW_CFG_GPIO0_CONFIG_SHIFT			0
+#define PORT_HW_CFG_GPIO0_CONFIG_NA			0x00000000
+#define PORT_HW_CFG_GPIO0_CONFIG_LOW			0x00000001
+#define PORT_HW_CFG_GPIO0_CONFIG_HIGH			0x00000002
+#define PORT_HW_CFG_GPIO0_CONFIG_INPUT			0x00000003
+
+#define PORT_HW_CFG_GPIO1_CONFIG_MASK			0x0000000C
+#define PORT_HW_CFG_GPIO1_CONFIG_SHIFT			2
+#define PORT_HW_CFG_GPIO1_CONFIG_NA			0x00000000
+#define PORT_HW_CFG_GPIO1_CONFIG_LOW			0x00000004
+#define PORT_HW_CFG_GPIO1_CONFIG_HIGH			0x00000008
+#define PORT_HW_CFG_GPIO1_CONFIG_INPUT			0x0000000c
+
+#define PORT_HW_CFG_GPIO2_CONFIG_MASK			0x00000030
+#define PORT_HW_CFG_GPIO2_CONFIG_SHIFT			4
+#define PORT_HW_CFG_GPIO2_CONFIG_NA			0x00000000
+#define PORT_HW_CFG_GPIO2_CONFIG_LOW			0x00000010
+#define PORT_HW_CFG_GPIO2_CONFIG_HIGH			0x00000020
+#define PORT_HW_CFG_GPIO2_CONFIG_INPUT			0x00000030
+
+#define PORT_HW_CFG_GPIO3_CONFIG_MASK			0x000000C0
+#define PORT_HW_CFG_GPIO3_CONFIG_SHIFT			6
+#define PORT_HW_CFG_GPIO3_CONFIG_NA			0x00000000
+#define PORT_HW_CFG_GPIO3_CONFIG_LOW			0x00000040
+#define PORT_HW_CFG_GPIO3_CONFIG_HIGH			0x00000080
+#define PORT_HW_CFG_GPIO3_CONFIG_INPUT			0x000000c0
+
+	/*
+	 * When KR link is required to be set to force which is not
+	 * KR-compliant, this parameter determine what is the trigger for it.
+	 * When GPIO is selected, low input will force the speed. Currently
+	 * default speed is 1G. In the future, it may be widen to select the
+	 * forced speed in with another parameter. Note when force-1G is
+	 * enabled, it override option 56: Link Speed option.
+	 */
+#define PORT_HW_CFG_FORCE_KR_ENABLER_MASK		0x00000F00
+#define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT		8
+#define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED		0x00000000
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0		0x00000100
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0		0x00000200
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0		0x00000300
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0		0x00000400
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1		0x00000500
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1		0x00000600
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1		0x00000700
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1		0x00000800
+#define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED		0x00000900
+	/* Enable to determine with which GPIO to reset the external phy */
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK		0x000F0000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT		16
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE		0x00000000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0		0x00010000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0		0x00020000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0		0x00030000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0		0x00040000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1		0x00050000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1		0x00060000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1		0x00070000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1		0x00080000
 	/* Enable BAM on KR */
 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK		0x00100000
 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT		20
 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED		0x00000000
 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED		0x00100000

+	/* Enable Common Mode Sense */
+#define PORT_HW_CFG_ENABLE_CMS_MASK			0x00200000
+#define PORT_HW_CFG_ENABLE_CMS_SHIFT			21
+#define PORT_HW_CFG_ENABLE_CMS_DISABLED			0x00000000
+#define PORT_HW_CFG_ENABLE_CMS_ENABLED			0x00200000
+
 	u32 speed_capability_mask2;			/* 0x28C */
 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK		0x0000FFFF
 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT		0
@@ -352,6 +443,10 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
 #define PORT_HW_CFG_LANE_SWAP_CFG_31203120		0x0000d8d8
 	/* forced only */
 #define PORT_HW_CFG_LANE_SWAP_CFG_32103210		0x0000e4e4
+	/* Indicate whether to swap the external phy polarity */
+#define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK		0x00010000
+#define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED		0x00000000
+#define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED		0x00010000

 	u32 external_phy_config;
 #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK		0xff000000
@@ -377,6 +472,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727		0x00000900
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC	0x00000a00
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823		0x00000b00
+#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833		0x00000d00
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE		0x0000fd00
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN		0x0000ff00

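All of the new shared-memory fields follow the same convention of a value plus a MASK/SHIFT pair. A small standalone example of extracting one of them; the register image value is invented for illustration:

#include <stdio.h>

#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK	0xFFFF
#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT	0
#define BNX2X_MAX_ISCSI_INIT_CONN_MASK	0xFFFF0000
#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT	16

static unsigned int get_field(unsigned int val, unsigned int mask,
			      unsigned int shift)
{
	return (val & mask) >> shift;	/* isolate the field, right-justify */
}

int main(void)
{
	unsigned int max_iscsi_conn = 0x00400080;	/* invented example */

	printf("initiator conns: %u\n",			/* 64 */
	       get_field(max_iscsi_conn, BNX2X_MAX_ISCSI_INIT_CONN_MASK,
			 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT));
	printf("target conns:    %u\n",			/* 128 */
	       get_field(max_iscsi_conn, BNX2X_MAX_ISCSI_TRGT_CONN_MASK,
			 BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT));
	return 0;
}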
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 43b0de24f391..f2f367d4e74d 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -1,4 +1,4 @@
-/* Copyright 2008-2009 Broadcom Corporation
+/* Copyright 2008-2011 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
@@ -28,12 +28,13 @@

 /********************************************************/
 #define ETH_HLEN			14
-#define ETH_OVREHEAD		(ETH_HLEN + 8 + 8)/* 16 for CRC + VLAN + LLC */
+/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVREHEAD			(ETH_HLEN + 8 + 8)
 #define ETH_MIN_PACKET_SIZE		60
 #define ETH_MAX_PACKET_SIZE		1500
 #define ETH_MAX_JUMBO_PACKET_SIZE	9600
 #define MDIO_ACCESS_TIMEOUT		1000
 #define BMAC_CONTROL_RX_ENABLE	2

 /***********************************************************/
 /* Shortcut definitions */
@@ -79,7 +80,7 @@

 #define AUTONEG_CL37		SHARED_HW_CFG_AN_ENABLE_CL37
 #define AUTONEG_CL73		SHARED_HW_CFG_AN_ENABLE_CL73
-#define AUTONEG_BAM 		SHARED_HW_CFG_AN_ENABLE_BAM
+#define AUTONEG_BAM		SHARED_HW_CFG_AN_ENABLE_BAM
 #define AUTONEG_PARALLEL \
 				SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
 #define AUTONEG_SGMII_FIBER_AUTODET \
@@ -112,10 +113,10 @@
 #define GP_STATUS_10G_KX4 \
 			MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4

-#define LINK_10THD			LINK_STATUS_SPEED_AND_DUPLEX_10THD
-#define LINK_10TFD			LINK_STATUS_SPEED_AND_DUPLEX_10TFD
-#define LINK_100TXHD			LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
-#define LINK_100T4			LINK_STATUS_SPEED_AND_DUPLEX_100T4
+#define LINK_10THD		LINK_STATUS_SPEED_AND_DUPLEX_10THD
+#define LINK_10TFD		LINK_STATUS_SPEED_AND_DUPLEX_10TFD
+#define LINK_100TXHD		LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
+#define LINK_100T4		LINK_STATUS_SPEED_AND_DUPLEX_100T4
 #define LINK_100TXFD		LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
 #define LINK_1000THD		LINK_STATUS_SPEED_AND_DUPLEX_1000THD
 #define LINK_1000TFD		LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
@@ -123,18 +124,18 @@
 #define LINK_2500THD		LINK_STATUS_SPEED_AND_DUPLEX_2500THD
 #define LINK_2500TFD		LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
 #define LINK_2500XFD		LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
-#define LINK_10GTFD			LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
-#define LINK_10GXFD			LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
-#define LINK_12GTFD			LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
-#define LINK_12GXFD			LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
-#define LINK_12_5GTFD			LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD
-#define LINK_12_5GXFD			LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD
-#define LINK_13GTFD			LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
-#define LINK_13GXFD			LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
-#define LINK_15GTFD			LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
-#define LINK_15GXFD			LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
-#define LINK_16GTFD			LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
-#define LINK_16GXFD			LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
+#define LINK_10GTFD		LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
+#define LINK_10GXFD		LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
+#define LINK_12GTFD		LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
+#define LINK_12GXFD		LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
+#define LINK_12_5GTFD		LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD
+#define LINK_12_5GXFD		LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD
+#define LINK_13GTFD		LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
+#define LINK_13GXFD		LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
+#define LINK_15GTFD		LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
+#define LINK_15GXFD		LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
+#define LINK_16GTFD		LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
+#define LINK_16GXFD		LINK_STATUS_SPEED_AND_DUPLEX_16GXFD

 #define PHY_XGXS_FLAG			0x1
 #define PHY_SGMII_FLAG			0x2
@@ -142,7 +143,7 @@

 /* */
 #define SFP_EEPROM_CON_TYPE_ADDR		0x2
-	#define SFP_EEPROM_CON_TYPE_VAL_LC	 0x7
+	#define SFP_EEPROM_CON_TYPE_VAL_LC	0x7
 	#define SFP_EEPROM_CON_TYPE_VAL_COPPER	0x21


@@ -153,15 +154,15 @@

 #define SFP_EEPROM_FC_TX_TECH_ADDR		0x8
 	#define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
-	#define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8
+	#define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE  0x8

-#define SFP_EEPROM_OPTIONS_ADDR 		0x40
+#define SFP_EEPROM_OPTIONS_ADDR			0x40
 	#define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
-#define SFP_EEPROM_OPTIONS_SIZE 		2
+#define SFP_EEPROM_OPTIONS_SIZE			2

-#define EDC_MODE_LINEAR 			0x0022
+#define EDC_MODE_LINEAR				0x0022
 #define EDC_MODE_LIMITING			0x0044
 #define EDC_MODE_PASSIVE_DAC			0x0055


 #define ETS_BW_LIMIT_CREDIT_UPPER_BOUND		(0x5000)
@@ -170,24 +171,18 @@
 /* INTERFACE */
 /**********************************************************/

-#define CL45_WR_OVER_CL22(_bp, _phy, _bank, _addr, _val) \
+#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
 	bnx2x_cl45_write(_bp, _phy, \
 		(_phy)->def_md_devad, \
 		(_bank + (_addr & 0xf)), \
 		_val)

-#define CL45_RD_OVER_CL22(_bp, _phy, _bank, _addr, _val) \
+#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
 	bnx2x_cl45_read(_bp, _phy, \
 		(_phy)->def_md_devad, \
 		(_bank + (_addr & 0xf)), \
 		_val)

-static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
-			  u8 devad, u16 reg, u16 *ret_val);
-
-static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
-			   u8 devad, u16 reg, u16 val);
-
 static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
 {
 	u32 val = REG_RD(bp, reg);
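The rename above seemingly fixes the macro names' direction: the macros issue clause-22 style banked register accesses over the clause-45 MDIO routines, not the other way around. What each macro computes is just the target register address; a sketch of that composition with an illustrative helper, not driver code:

static unsigned short bank_reg(unsigned short bank, unsigned short cl22_addr)
{
	/* keep the 4-bit CL22 register offset, place it inside the bank */
	return bank + (cl22_addr & 0xf);
}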
@@ -216,7 +211,7 @@ void bnx2x_ets_disabled(struct link_params *params)

 	DP(NETIF_MSG_LINK, "ETS disabled configuration\n");

-	/**
+	/*
 	 * mapping between entry priority to client number (0,1,2 -debug and
 	 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
 	 * 3bits client num.
@@ -225,7 +220,7 @@ void bnx2x_ets_disabled(struct link_params *params)
 	 */

 	REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
-	/**
+	/*
 	 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
 	 * as strict.  Bits 0,1,2 - debug and management entries, 3 -
 	 * COS0 entry, 4 - COS1 entry.
@@ -237,12 +232,12 @@ void bnx2x_ets_disabled(struct link_params *params)
 	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
 	/* defines which entries (clients) are subjected to WFQ arbitration */
 	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
-	/**
+	/*
 	 * For strict priority entries defines the number of consecutive
 	 * slots for the highest priority.
 	 */
 	REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
-	/**
+	/*
 	 * mapping between the CREDIT_WEIGHT registers and actual client
 	 * numbers
 	 */
@@ -255,7 +250,7 @@ void bnx2x_ets_disabled(struct link_params *params)
 	REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
 	/* ETS mode disable */
 	REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
-	/**
+	/*
 	 * If ETS mode is enabled (there is no strict priority) defines a WFQ
 	 * weight for COS0/COS1.
 	 */
@@ -268,24 +263,24 @@ void bnx2x_ets_disabled(struct link_params *params)
 	REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
 }

-void bnx2x_ets_bw_limit_common(const struct link_params *params)
+static void bnx2x_ets_bw_limit_common(const struct link_params *params)
 {
 	/* ETS disabled configuration */
 	struct bnx2x *bp = params->bp;
 	DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
-	/**
+	/*
 	 * defines which entries (clients) are subjected to WFQ arbitration
 	 * COS0 0x8
 	 * COS1 0x10
 	 */
 	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
-	/**
+	/*
 	 * mapping between the ARB_CREDIT_WEIGHT registers and actual
 	 * client numbers (WEIGHT_0 does not actually have to represent
 	 * client 0)
 	 *    PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
 	 *  cos1-001     cos0-000     dbg1-100     dbg0-011     MCP-010
 	 */
 	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);

 	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
@@ -298,14 +293,14 @@ void bnx2x_ets_bw_limit_common(const struct link_params *params)

 	/* Defines the number of consecutive slots for the strict priority */
 	REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
-	/**
+	/*
 	 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
 	 * as strict.  Bits 0,1,2 - debug and management entries, 3 - COS0
 	 * entry, 4 - COS1 entry.
 	 * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
 	 * bit4   bit3	  bit2     bit1	   bit0
 	 * MCP and debug are strict
 	 */
 	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);

 	/* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
@@ -329,8 +324,7 @@ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
 	if ((0 == total_bw) ||
 	    (0 == cos0_bw) ||
 	    (0 == cos1_bw)) {
-		DP(NETIF_MSG_LINK,
-		   "bnx2x_ets_bw_limit: Total BW can't be zero\n");
+		DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
 		return;
 	}

@@ -355,7 +349,7 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
 	u32 val	= 0;

 	DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
-	/**
+	/*
 	 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
 	 * as strict.  Bits 0,1,2 - debug and management entries,
 	 * 3 - COS0 entry, 4 - COS1 entry.
@@ -364,7 +358,7 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
 	 * MCP and debug are strict
 	 */
 	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
-	/**
+	/*
 	 * For strict priority entries defines the number of consecutive slots
 	 * for the highest priority.
 	 */
@@ -377,14 +371,14 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
 	/* Defines the number of consecutive slots for the strict priority */
 	REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);

-	/**
+	/*
 	 * mapping between entry  priority to client number (0,1,2 -debug and
 	 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
 	 * 3bits client num.
 	 *   PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
 	 * dbg0-010     dbg1-001     cos1-100     cos0-011     MCP-000
 	 * dbg0-010     dbg1-001     cos0-011     cos1-100     MCP-000
 	 */
 	val = (0 == strict_cos) ? 0x2318 : 0x22E0;
 	REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);

@@ -471,7 +465,7 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
 /* MAC/PBF section */
 /******************************************************************/
 static void bnx2x_emac_init(struct link_params *params,
-				struct link_vars *vars)
+			    struct link_vars *vars)
 {
 	/* reset and unreset the emac core */
 	struct bnx2x *bp = params->bp;
@@ -481,10 +475,10 @@ static void bnx2x_emac_init(struct link_params *params,
 	u16 timeout;

 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
-		   (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+	       (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
 	udelay(5);
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
-		   (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+	       (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));

 	/* init emac - use read-modify-write */
 	/* self clear reset */
@@ -515,7 +509,7 @@ static void bnx2x_emac_init(struct link_params *params,
 }

 static u8 bnx2x_emac_enable(struct link_params *params,
-			  struct link_vars *vars, u8 lb)
+			    struct link_vars *vars, u8 lb)
 {
 	struct bnx2x *bp = params->bp;
 	u8 port = params->port;
@@ -527,55 +521,33 @@ static u8 bnx2x_emac_enable(struct link_params *params,
 	/* enable emac and not bmac */
 	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);

-	/* for paladium */
-	if (CHIP_REV_IS_EMUL(bp)) {
-		/* Use lane 1 (of lanes 0-3) */
-		REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
-		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
-			   port*4, 1);
-	}
-	/* for fpga */
-	else
-
-	if (CHIP_REV_IS_FPGA(bp)) {
-		/* Use lane 1 (of lanes 0-3) */
-		DP(NETIF_MSG_LINK, "bnx2x_emac_enable: Setting FPGA\n");
-
-		REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
-		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4,
-			   0);
-	} else
 	/* ASIC */
 	if (vars->phy_flags & PHY_XGXS_FLAG) {
 		u32 ser_lane = ((params->lane_config &
 				 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
 				 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);

 		DP(NETIF_MSG_LINK, "XGXS\n");
 		/* select the master lanes (out of 0-3) */
-		REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 +
-			   port*4, ser_lane);
+		REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane);
 		/* select XGXS */
-		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
-			   port*4, 1);
+		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

 	} else { /* SerDes */
 		DP(NETIF_MSG_LINK, "SerDes\n");
 		/* select SerDes */
-		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
-			   port*4, 0);
+		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
 	}

 	bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
 		      EMAC_RX_MODE_RESET);
 	bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
 		      EMAC_TX_MODE_RESET);

 	if (CHIP_REV_IS_SLOW(bp)) {
 		/* config GMII mode */
 		val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
-		EMAC_WR(bp, EMAC_REG_EMAC_MODE,
-			    (val | EMAC_MODE_PORT_GMII));
+		EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
 	} else { /* ASIC */
 		/* pause enable/disable */
 		bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
@@ -605,14 +577,14 @@ static u8 bnx2x_emac_enable(struct link_params *params,
 	val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
 	val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;

-	/**
+	/*
 	 * Setting this bit causes MAC control frames (except for pause
 	 * frames) to be passed on for processing. This setting has no
 	 * affect on the operation of the pause frames. This bit effects
 	 * all packets regardless of RX Parser packet sorting logic.
 	 * Turn the PFC off to make sure we are in Xon state before
 	 * enabling it.
 	 */
 	EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0);
 	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
 		DP(NETIF_MSG_LINK, "PFC is enabled\n");
@@ -666,16 +638,7 @@ static u8 bnx2x_emac_enable(struct link_params *params,
 	REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
 	REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);

-	if (CHIP_REV_IS_EMUL(bp)) {
-		/* take the BigMac out of reset */
-		REG_WR(bp,
-		       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
-		       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
-
-		/* enable access for bmac registers */
-		REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
-	} else
-		REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
+	REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);

 	vars->mac_type = MAC_TYPE_EMAC;
 	return 0;
@@ -731,8 +694,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
 		val |= (1<<5);
 	wb_data[0] = val;
 	wb_data[1] = 0;
-	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL,
-			wb_data, 2);
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2);
 	udelay(30);

 	/* Tx control */
@@ -768,12 +730,12 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,

 	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);

-	/**
+	/*
 	 * Set Time (based unit is 512 bit time) between automatic
 	 * re-sending of PP packets amd enable automatic re-send of
 	 * Per-Priroity Packet as long as pp_gen is asserted and
 	 * pp_disable is low.
 	 */
 	val = 0x8000;
 	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
 		val |= (1<<16); /* enable automatic re-send */
@@ -781,7 +743,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
 	wb_data[0] = val;
 	wb_data[1] = 0;
 	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
-			wb_data, 2);
+		    wb_data, 2);

 	/* mac control */
 	val = 0x3; /* Enable RX and TX */
@@ -795,8 +757,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,

 	wb_data[0] = val;
 	wb_data[1] = 0;
-	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL,
-		    wb_data, 2);
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
 }

 static void bnx2x_update_pfc_brb(struct link_params *params,
@@ -825,17 +786,25 @@ static void bnx2x_update_pfc_brb(struct link_params *params,
 		full_xon_th =
 			PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
 	}
-	/* The number of free blocks below which the pause signal to class 0
-	   of MAC #n is asserted. n=0,1 */
+	/*
+	 * The number of free blocks below which the pause signal to class 0
+	 * of MAC #n is asserted. n=0,1
+	 */
 	REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , pause_xoff_th);
-	/* The number of free blocks above which the pause signal to class 0
-	   of MAC #n is de-asserted. n=0,1 */
+	/*
+	 * The number of free blocks above which the pause signal to class 0
+	 * of MAC #n is de-asserted. n=0,1
+	 */
 	REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , pause_xon_th);
-	/* The number of free blocks below which the full signal to class 0
-	   of MAC #n is asserted. n=0,1 */
+	/*
+	 * The number of free blocks below which the full signal to class 0
+	 * of MAC #n is asserted. n=0,1
+	 */
 	REG_WR(bp, BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , full_xoff_th);
-	/* The number of free blocks above which the full signal to class 0
-	   of MAC #n is de-asserted. n=0,1 */
+	/*
+	 * The number of free blocks above which the full signal to class 0
+	 * of MAC #n is de-asserted. n=0,1
+	 */
 	REG_WR(bp, BRB1_REG_FULL_0_XON_THRESHOLD_0 , full_xon_th);

 	if (set_pfc && pfc_params) {
@@ -859,25 +828,25 @@ static void bnx2x_update_pfc_brb(struct link_params *params,
 			full_xon_th =
 				PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
 		}
-		/**
+		/*
 		 * The number of free blocks below which the pause signal to
 		 * class 1 of MAC #n is asserted. n=0,1
-		 **/
+		 */
 		REG_WR(bp, BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, pause_xoff_th);
-		/**
+		/*
 		 * The number of free blocks above which the pause signal to
 		 * class 1 of MAC #n is de-asserted. n=0,1
-		 **/
+		 */
 		REG_WR(bp, BRB1_REG_PAUSE_1_XON_THRESHOLD_0, pause_xon_th);
-		/**
+		/*
 		 * The number of free blocks below which the full signal to
 		 * class 1 of MAC #n is asserted. n=0,1
-		 **/
+		 */
 		REG_WR(bp, BRB1_REG_FULL_1_XOFF_THRESHOLD_0, full_xoff_th);
-		/**
+		/*
 		 * The number of free blocks above which the full signal to
 		 * class 1 of MAC #n is de-asserted. n=0,1
-		 **/
+		 */
 		REG_WR(bp, BRB1_REG_FULL_1_XON_THRESHOLD_0, full_xon_th);
 	}
 }
@@ -896,7 +865,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
 		FEATURE_CONFIG_PFC_ENABLED;
 	DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");

-	/**
+	/*
 	 * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
 	 * MAC control frames (that are not pause packets)
 	 * will be forwarded to the XCM.
@@ -904,7 +873,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
 	xcm_mask = REG_RD(bp,
 			  port ? NIG_REG_LLH1_XCM_MASK :
 				 NIG_REG_LLH0_XCM_MASK);
-	/**
+	/*
 	 * nig params will override non PFC params, since it's possible to
 	 * do transition from PFC to SAFC
 	 */
@@ -994,7 +963,7 @@ void bnx2x_update_pfc(struct link_params *params,
 		      struct link_vars *vars,
 		      struct bnx2x_nig_brb_pfc_port_params *pfc_params)
 {
-	/**
+	/*
 	 * The PFC and pause are orthogonal to one another, meaning when
 	 * PFC is enabled, the pause are disabled, and when PFC is
 	 * disabled, pause are set according to the pause result.
@@ -1035,7 +1004,7 @@ void bnx2x_update_pfc(struct link_params *params,

 static u8 bnx2x_bmac1_enable(struct link_params *params,
 			     struct link_vars *vars,
-			  u8 is_lb)
+			     u8 is_lb)
 {
 	struct bnx2x *bp = params->bp;
 	u8 port = params->port;
@@ -1049,9 +1018,8 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
 	/* XGXS control */
 	wb_data[0] = 0x3c;
 	wb_data[1] = 0;
-	REG_WR_DMAE(bp, bmac_addr +
-		      BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
-		      wb_data, 2);
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
+		    wb_data, 2);

 	/* tx MAC SA */
 	wb_data[0] = ((params->mac_addr[2] << 24) |
@@ -1060,8 +1028,7 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
 		       params->mac_addr[5]);
 	wb_data[1] = ((params->mac_addr[0] << 8) |
 		       params->mac_addr[1]);
-	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
-		    wb_data, 2);
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);

 	/* mac control */
 	val = 0x3;
@@ -1071,43 +1038,30 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
1071 } 1038 }
1072 wb_data[0] = val; 1039 wb_data[0] = val;
1073 wb_data[1] = 0; 1040 wb_data[1] = 0;
1074 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, 1041 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
1075 wb_data, 2);
1076 1042
1077 /* set rx mtu */ 1043 /* set rx mtu */
1078 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 1044 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
1079 wb_data[1] = 0; 1045 wb_data[1] = 0;
1080 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, 1046 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
1081 wb_data, 2);
1082 1047
1083 bnx2x_update_pfc_bmac1(params, vars); 1048 bnx2x_update_pfc_bmac1(params, vars);
1084 1049
1085 /* set tx mtu */ 1050 /* set tx mtu */
1086 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 1051 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
1087 wb_data[1] = 0; 1052 wb_data[1] = 0;
1088 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, 1053 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
1089 wb_data, 2);
1090 1054
1091 /* set cnt max size */ 1055 /* set cnt max size */
1092 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 1056 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
1093 wb_data[1] = 0; 1057 wb_data[1] = 0;
1094 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, 1058 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
1095 wb_data, 2);
1096 1059
1097 /* configure safc */ 1060 /* configure safc */
1098 wb_data[0] = 0x1000200; 1061 wb_data[0] = 0x1000200;
1099 wb_data[1] = 0; 1062 wb_data[1] = 0;
1100 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, 1063 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
1101 wb_data, 2); 1064 wb_data, 2);
1102 /* fix for emulation */
1103 if (CHIP_REV_IS_EMUL(bp)) {
1104 wb_data[0] = 0xf000;
1105 wb_data[1] = 0;
1106 REG_WR_DMAE(bp,
1107 bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
1108 wb_data, 2);
1109 }
1110
1111 1065
1112 return 0; 1066 return 0;
1113} 1067}
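Every BigMAC register access in this function goes through a two-word wb_data buffer: the registers are 64 bits wide and are written as a pair of 32-bit words over the wide-bus DMAE helper (note how small constants such as 0x3c land in wb_data[0] with wb_data[1] zeroed). A small illustration of that packing, assuming word 0 is the low half as the code implies:

    #include <stdint.h>

    /* Split a 64-bit register value into the wb_data[2] layout used
     * by the REG_WR_DMAE() calls above: [0] = low word, [1] = high. */
    static void pack_wb_data(uint64_t val, uint32_t wb_data[2])
    {
            wb_data[0] = (uint32_t)(val & 0xffffffffu);
            wb_data[1] = (uint32_t)(val >> 32);
    }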
@@ -1126,16 +1080,14 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
1126 1080
1127 wb_data[0] = 0; 1081 wb_data[0] = 0;
1128 wb_data[1] = 0; 1082 wb_data[1] = 0;
1129 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, 1083 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
1130 wb_data, 2);
1131 udelay(30); 1084 udelay(30);
1132 1085
1133 /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */ 1086 /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
1134 wb_data[0] = 0x3c; 1087 wb_data[0] = 0x3c;
1135 wb_data[1] = 0; 1088 wb_data[1] = 0;
1136 REG_WR_DMAE(bp, bmac_addr + 1089 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
1137 BIGMAC2_REGISTER_BMAC_XGXS_CONTROL, 1090 wb_data, 2);
1138 wb_data, 2);
1139 1091
1140 udelay(30); 1092 udelay(30);
1141 1093
@@ -1147,7 +1099,7 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
1147 wb_data[1] = ((params->mac_addr[0] << 8) | 1099 wb_data[1] = ((params->mac_addr[0] << 8) |
1148 params->mac_addr[1]); 1100 params->mac_addr[1]);
1149 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR, 1101 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
1150 wb_data, 2); 1102 wb_data, 2);
1151 1103
1152 udelay(30); 1104 udelay(30);
1153 1105
@@ -1155,27 +1107,24 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
1155 wb_data[0] = 0x1000200; 1107 wb_data[0] = 0x1000200;
1156 wb_data[1] = 0; 1108 wb_data[1] = 0;
1157 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS, 1109 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
1158 wb_data, 2); 1110 wb_data, 2);
1159 udelay(30); 1111 udelay(30);
1160 1112
1161 /* set rx mtu */ 1113 /* set rx mtu */
1162 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 1114 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
1163 wb_data[1] = 0; 1115 wb_data[1] = 0;
1164 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, 1116 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
1165 wb_data, 2);
1166 udelay(30); 1117 udelay(30);
1167 1118
1168 /* set tx mtu */ 1119 /* set tx mtu */
1169 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 1120 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
1170 wb_data[1] = 0; 1121 wb_data[1] = 0;
1171 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, 1122 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
1172 wb_data, 2);
1173 udelay(30); 1123 udelay(30);
1174 /* set cnt max size */ 1124 /* set cnt max size */
1175 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2; 1125 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
1176 wb_data[1] = 0; 1126 wb_data[1] = 0;
1177 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, 1127 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
1178 wb_data, 2);
1179 udelay(30); 1128 udelay(30);
1180 bnx2x_update_pfc_bmac2(params, vars, is_lb); 1129 bnx2x_update_pfc_bmac2(params, vars, is_lb);
1181 1130
@@ -1191,11 +1140,11 @@ static u8 bnx2x_bmac_enable(struct link_params *params,
1191 u32 val; 1140 u32 val;
1192 /* reset and unreset the BigMac */ 1141 /* reset and unreset the BigMac */
1193 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 1142 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1194 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 1143 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
1195 msleep(1); 1144 msleep(1);
1196 1145
1197 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 1146 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
1198 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 1147 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
1199 1148
1200 /* enable access for bmac registers */ 1149 /* enable access for bmac registers */
1201 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1); 1150 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
@@ -1230,15 +1179,14 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
1230 struct bnx2x *bp = params->bp; 1179 struct bnx2x *bp = params->bp;
1231 1180
1232 REG_WR(bp, params->shmem_base + 1181 REG_WR(bp, params->shmem_base +
1233 offsetof(struct shmem_region, 1182 offsetof(struct shmem_region,
1234 port_mb[params->port].link_status), 1183 port_mb[params->port].link_status), link_status);
1235 link_status);
1236} 1184}
1237 1185
1238static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) 1186static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
1239{ 1187{
1240 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : 1188 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
1241 NIG_REG_INGRESS_BMAC0_MEM; 1189 NIG_REG_INGRESS_BMAC0_MEM;
1242 u32 wb_data[2]; 1190 u32 wb_data[2];
1243 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4); 1191 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
1244 1192
@@ -1250,12 +1198,12 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
1250 if (CHIP_IS_E2(bp)) { 1198 if (CHIP_IS_E2(bp)) {
1251 /* Clear Rx Enable bit in BMAC_CONTROL register */ 1199 /* Clear Rx Enable bit in BMAC_CONTROL register */
1252 REG_RD_DMAE(bp, bmac_addr + 1200 REG_RD_DMAE(bp, bmac_addr +
1253 BIGMAC2_REGISTER_BMAC_CONTROL, 1201 BIGMAC2_REGISTER_BMAC_CONTROL,
1254 wb_data, 2); 1202 wb_data, 2);
1255 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; 1203 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
1256 REG_WR_DMAE(bp, bmac_addr + 1204 REG_WR_DMAE(bp, bmac_addr +
1257 BIGMAC2_REGISTER_BMAC_CONTROL, 1205 BIGMAC2_REGISTER_BMAC_CONTROL,
1258 wb_data, 2); 1206 wb_data, 2);
1259 } else { 1207 } else {
1260 /* Clear Rx Enable bit in BMAC_CONTROL register */ 1208 /* Clear Rx Enable bit in BMAC_CONTROL register */
1261 REG_RD_DMAE(bp, bmac_addr + 1209 REG_RD_DMAE(bp, bmac_addr +
@@ -1271,7 +1219,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
1271} 1219}
1272 1220
1273static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, 1221static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
1274 u32 line_speed) 1222 u32 line_speed)
1275{ 1223{
1276 struct bnx2x *bp = params->bp; 1224 struct bnx2x *bp = params->bp;
1277 u8 port = params->port; 1225 u8 port = params->port;
@@ -1308,7 +1256,7 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
1308 /* update threshold */ 1256 /* update threshold */
1309 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0); 1257 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
1310 /* update init credit */ 1258 /* update init credit */
1311 init_crd = 778; /* (800-18-4) */ 1259 init_crd = 778; /* (800-18-4) */
1312 1260
1313 } else { 1261 } else {
1314 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + 1262 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
@@ -1353,6 +1301,23 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
1353 return 0; 1301 return 0;
1354} 1302}
1355 1303
1304/*
1305 * get_emac_base
1306 *
1307 * @param cb
1308 * @param mdc_mdio_access
1309 * @param port
1310 *
1311 * @return u32
1312 *
1313 * This function selects the MDC/MDIO access (through emac0 or
1314 * emac1) depend on the mdc_mdio_access, port, port swapped. Each
1315 * phy has a default access mode, which could also be overridden
1316 * by nvram configuration. This parameter, whether this is the
1317 * default phy configuration, or the nvram overrun
1318 * configuration, is passed here as mdc_mdio_access and selects
1319 * the emac_base for the CL45 read/writes operations
1320 */
1356static u32 bnx2x_get_emac_base(struct bnx2x *bp, 1321static u32 bnx2x_get_emac_base(struct bnx2x *bp,
1357 u32 mdc_mdio_access, u8 port) 1322 u32 mdc_mdio_access, u8 port)
1358{ 1323{
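The new comment block spells out what bnx2x_get_emac_base() decides: which EMAC's MDC/MDIO pins drive the CL45 transactions, based on the per-phy access mode (default or nvram override) and the port, with port swapping folded in. A hedged sketch of the shape of that selection; the mode names and the swap rule here are illustrative assumptions, not the driver's exact nvram encoding:

    /* Pick the MDC/MDIO master for CL45 accesses. "BOTH" gives each
     * port its own EMAC; "SWAPPED" crosses the two ports over. */
    enum mdio_access { MDIO_EMAC0, MDIO_EMAC1, MDIO_BOTH, MDIO_SWAPPED };

    static unsigned int pick_emac_base(enum mdio_access mode, int port,
                                       unsigned int emac0, unsigned int emac1)
    {
            switch (mode) {
            case MDIO_EMAC0:   return emac0;
            case MDIO_EMAC1:   return emac1;
            case MDIO_BOTH:    return port ? emac1 : emac0;
            case MDIO_SWAPPED: return port ? emac0 : emac1;
            }
            return emac0;
    }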
@@ -1385,13 +1350,16 @@ static u32 bnx2x_get_emac_base(struct bnx2x *bp,
1385 1350
1386} 1351}
1387 1352
1388u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, 1353/******************************************************************/
1389 u8 devad, u16 reg, u16 val) 1354/* CL45 access functions */
1355/******************************************************************/
1356static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1357 u8 devad, u16 reg, u16 val)
1390{ 1358{
1391 u32 tmp, saved_mode; 1359 u32 tmp, saved_mode;
1392 u8 i, rc = 0; 1360 u8 i, rc = 0;
1393 1361 /*
1394 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz 1362 * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
1395 * (a value of 49==0x31) and make sure that the AUTO poll is off 1363 * (a value of 49==0x31) and make sure that the AUTO poll is off
1396 */ 1364 */
1397 1365
@@ -1414,8 +1382,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1414 for (i = 0; i < 50; i++) { 1382 for (i = 0; i < 50; i++) {
1415 udelay(10); 1383 udelay(10);
1416 1384
1417 tmp = REG_RD(bp, phy->mdio_ctrl + 1385 tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
1418 EMAC_REG_EMAC_MDIO_COMM);
1419 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { 1386 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1420 udelay(5); 1387 udelay(5);
1421 break; 1388 break;
@@ -1423,6 +1390,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1423 } 1390 }
1424 if (tmp & EMAC_MDIO_COMM_START_BUSY) { 1391 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1425 DP(NETIF_MSG_LINK, "write phy register failed\n"); 1392 DP(NETIF_MSG_LINK, "write phy register failed\n");
1393 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
1426 rc = -EFAULT; 1394 rc = -EFAULT;
1427 } else { 1395 } else {
1428 /* data */ 1396 /* data */
@@ -1435,7 +1403,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1435 udelay(10); 1403 udelay(10);
1436 1404
1437 tmp = REG_RD(bp, phy->mdio_ctrl + 1405 tmp = REG_RD(bp, phy->mdio_ctrl +
1438 EMAC_REG_EMAC_MDIO_COMM); 1406 EMAC_REG_EMAC_MDIO_COMM);
1439 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { 1407 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1440 udelay(5); 1408 udelay(5);
1441 break; 1409 break;
@@ -1443,6 +1411,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1443 } 1411 }
1444 if (tmp & EMAC_MDIO_COMM_START_BUSY) { 1412 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1445 DP(NETIF_MSG_LINK, "write phy register failed\n"); 1413 DP(NETIF_MSG_LINK, "write phy register failed\n");
1414 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
1446 rc = -EFAULT; 1415 rc = -EFAULT;
1447 } 1416 }
1448 } 1417 }
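Both the address and data phases of the write poll the MDIO_COMM register the same way: up to 50 reads spaced 10 us apart, i.e. roughly a 500 us budget before the access is declared failed with -EFAULT (now also surfaced via netdev_err()). The pattern, reduced to a self-contained sketch:

    #include <stdint.h>

    /* Bounded busy-wait on a BUSY flag, mirroring the 50 x 10 us loop
     * around EMAC_REG_EMAC_MDIO_COMM; read_reg stands in for REG_RD(). */
    static int wait_not_busy(uint32_t (*read_reg)(void), uint32_t busy_bit)
    {
            int i;

            for (i = 0; i < 50; i++) {
                    /* udelay(10) in the driver */
                    if (!(read_reg() & busy_bit))
                            return 0;       /* controller went idle */
            }
            return -1;                      /* timed out */
    }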
@@ -1453,20 +1422,20 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1453 return rc; 1422 return rc;
1454} 1423}
1455 1424
1456u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, 1425static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
1457 u8 devad, u16 reg, u16 *ret_val) 1426 u8 devad, u16 reg, u16 *ret_val)
1458{ 1427{
1459 u32 val, saved_mode; 1428 u32 val, saved_mode;
1460 u16 i; 1429 u16 i;
1461 u8 rc = 0; 1430 u8 rc = 0;
1462 1431 /*
1463 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz 1432 * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
1464 * (a value of 49==0x31) and make sure that the AUTO poll is off 1433 * (a value of 49==0x31) and make sure that the AUTO poll is off
1465 */ 1434 */
1466 1435
1467 saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); 1436 saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1468 val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL | 1437 val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL |
1469 EMAC_MDIO_MODE_CLOCK_CNT)); 1438 EMAC_MDIO_MODE_CLOCK_CNT));
1470 val |= (EMAC_MDIO_MODE_CLAUSE_45 | 1439 val |= (EMAC_MDIO_MODE_CLAUSE_45 |
1471 (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT)); 1440 (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
1472 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val); 1441 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
@@ -1490,7 +1459,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
1490 } 1459 }
1491 if (val & EMAC_MDIO_COMM_START_BUSY) { 1460 if (val & EMAC_MDIO_COMM_START_BUSY) {
1492 DP(NETIF_MSG_LINK, "read phy register failed\n"); 1461 DP(NETIF_MSG_LINK, "read phy register failed\n");
1493 1462 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
1494 *ret_val = 0; 1463 *ret_val = 0;
1495 rc = -EFAULT; 1464 rc = -EFAULT;
1496 1465
@@ -1505,7 +1474,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
1505 udelay(10); 1474 udelay(10);
1506 1475
1507 val = REG_RD(bp, phy->mdio_ctrl + 1476 val = REG_RD(bp, phy->mdio_ctrl +
1508 EMAC_REG_EMAC_MDIO_COMM); 1477 EMAC_REG_EMAC_MDIO_COMM);
1509 if (!(val & EMAC_MDIO_COMM_START_BUSY)) { 1478 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1510 *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA); 1479 *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
1511 break; 1480 break;
@@ -1513,7 +1482,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
1513 } 1482 }
1514 if (val & EMAC_MDIO_COMM_START_BUSY) { 1483 if (val & EMAC_MDIO_COMM_START_BUSY) {
1515 DP(NETIF_MSG_LINK, "read phy register failed\n"); 1484 DP(NETIF_MSG_LINK, "read phy register failed\n");
1516 1485 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
1517 *ret_val = 0; 1486 *ret_val = 0;
1518 rc = -EFAULT; 1487 rc = -EFAULT;
1519 } 1488 }
@@ -1529,7 +1498,7 @@ u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
1529 u8 devad, u16 reg, u16 *ret_val) 1498 u8 devad, u16 reg, u16 *ret_val)
1530{ 1499{
1531 u8 phy_index; 1500 u8 phy_index;
1532 /** 1501 /*
1533 * Probe for the phy according to the given phy_addr, and execute 1502 * Probe for the phy according to the given phy_addr, and execute
1534 * the read request on it 1503 * the read request on it
1535 */ 1504 */
@@ -1547,7 +1516,7 @@ u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
1547 u8 devad, u16 reg, u16 val) 1516 u8 devad, u16 reg, u16 val)
1548{ 1517{
1549 u8 phy_index; 1518 u8 phy_index;
1550 /** 1519 /*
1551 * Probe for the phy according to the given phy_addr, and execute 1520 * Probe for the phy according to the given phy_addr, and execute
1552 * the write request on it 1521 * the write request on it
1553 */ 1522 */
@@ -1573,19 +1542,18 @@ static void bnx2x_set_aer_mmd_xgxs(struct link_params *params,
1573 1542
1574 offset = phy->addr + ser_lane; 1543 offset = phy->addr + ser_lane;
1575 if (CHIP_IS_E2(bp)) 1544 if (CHIP_IS_E2(bp))
1576 aer_val = 0x2800 + offset - 1; 1545 aer_val = 0x3800 + offset - 1;
1577 else 1546 else
1578 aer_val = 0x3800 + offset; 1547 aer_val = 0x3800 + offset;
1579 CL45_WR_OVER_CL22(bp, phy, 1548 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
1580 MDIO_REG_BANK_AER_BLOCK, 1549 MDIO_AER_BLOCK_AER_REG, aer_val);
1581 MDIO_AER_BLOCK_AER_REG, aer_val);
1582} 1550}
1583static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp, 1551static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp,
1584 struct bnx2x_phy *phy) 1552 struct bnx2x_phy *phy)
1585{ 1553{
1586 CL45_WR_OVER_CL22(bp, phy, 1554 CL22_WR_OVER_CL45(bp, phy,
1587 MDIO_REG_BANK_AER_BLOCK, 1555 MDIO_REG_BANK_AER_BLOCK,
1588 MDIO_AER_BLOCK_AER_REG, 0x3800); 1556 MDIO_AER_BLOCK_AER_REG, 0x3800);
1589} 1557}
1590 1558
1591/******************************************************************/ 1559/******************************************************************/
@@ -1621,9 +1589,8 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
1621 1589
1622 bnx2x_set_serdes_access(bp, port); 1590 bnx2x_set_serdes_access(bp, port);
1623 1591
1624 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + 1592 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10,
1625 port*0x10, 1593 DEFAULT_PHY_DEV_ADDR);
1626 DEFAULT_PHY_DEV_ADDR);
1627} 1594}
1628 1595
1629static void bnx2x_xgxs_deassert(struct link_params *params) 1596static void bnx2x_xgxs_deassert(struct link_params *params)
@@ -1641,23 +1608,22 @@ static void bnx2x_xgxs_deassert(struct link_params *params)
1641 udelay(500); 1608 udelay(500);
1642 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); 1609 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
1643 1610
1644 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + 1611 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0);
1645 port*0x18, 0);
1646 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 1612 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
1647 params->phy[INT_PHY].def_md_devad); 1613 params->phy[INT_PHY].def_md_devad);
1648} 1614}
1649 1615
1650 1616
1651void bnx2x_link_status_update(struct link_params *params, 1617void bnx2x_link_status_update(struct link_params *params,
1652 struct link_vars *vars) 1618 struct link_vars *vars)
1653{ 1619{
1654 struct bnx2x *bp = params->bp; 1620 struct bnx2x *bp = params->bp;
1655 u8 link_10g; 1621 u8 link_10g;
1656 u8 port = params->port; 1622 u8 port = params->port;
1657 1623
1658 vars->link_status = REG_RD(bp, params->shmem_base + 1624 vars->link_status = REG_RD(bp, params->shmem_base +
1659 offsetof(struct shmem_region, 1625 offsetof(struct shmem_region,
1660 port_mb[port].link_status)); 1626 port_mb[port].link_status));
1661 1627
1662 vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP); 1628 vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
1663 1629
@@ -1667,7 +1633,7 @@ void bnx2x_link_status_update(struct link_params *params,
1667 vars->phy_link_up = 1; 1633 vars->phy_link_up = 1;
1668 vars->duplex = DUPLEX_FULL; 1634 vars->duplex = DUPLEX_FULL;
1669 switch (vars->link_status & 1635 switch (vars->link_status &
1670 LINK_STATUS_SPEED_AND_DUPLEX_MASK) { 1636 LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
1671 case LINK_10THD: 1637 case LINK_10THD:
1672 vars->duplex = DUPLEX_HALF; 1638 vars->duplex = DUPLEX_HALF;
1673 /* fall thru */ 1639 /* fall thru */
@@ -1779,20 +1745,20 @@ static void bnx2x_set_master_ln(struct link_params *params,
1779{ 1745{
1780 struct bnx2x *bp = params->bp; 1746 struct bnx2x *bp = params->bp;
1781 u16 new_master_ln, ser_lane; 1747 u16 new_master_ln, ser_lane;
1782 ser_lane = ((params->lane_config & 1748 ser_lane = ((params->lane_config &
1783 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 1749 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
1784 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 1750 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
1785 1751
1786 /* set the master_ln for AN */ 1752 /* set the master_ln for AN */
1787 CL45_RD_OVER_CL22(bp, phy, 1753 CL22_RD_OVER_CL45(bp, phy,
1788 MDIO_REG_BANK_XGXS_BLOCK2, 1754 MDIO_REG_BANK_XGXS_BLOCK2,
1789 MDIO_XGXS_BLOCK2_TEST_MODE_LANE, 1755 MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
1790 &new_master_ln); 1756 &new_master_ln);
1791 1757
1792 CL45_WR_OVER_CL22(bp, phy, 1758 CL22_WR_OVER_CL45(bp, phy,
1793 MDIO_REG_BANK_XGXS_BLOCK2 , 1759 MDIO_REG_BANK_XGXS_BLOCK2 ,
1794 MDIO_XGXS_BLOCK2_TEST_MODE_LANE, 1760 MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
1795 (new_master_ln | ser_lane)); 1761 (new_master_ln | ser_lane));
1796} 1762}
1797 1763
1798static u8 bnx2x_reset_unicore(struct link_params *params, 1764static u8 bnx2x_reset_unicore(struct link_params *params,
@@ -1802,17 +1768,16 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
1802 struct bnx2x *bp = params->bp; 1768 struct bnx2x *bp = params->bp;
1803 u16 mii_control; 1769 u16 mii_control;
1804 u16 i; 1770 u16 i;
1805 1771 CL22_RD_OVER_CL45(bp, phy,
1806 CL45_RD_OVER_CL22(bp, phy, 1772 MDIO_REG_BANK_COMBO_IEEE0,
1807 MDIO_REG_BANK_COMBO_IEEE0, 1773 MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
1808 MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
1809 1774
1810 /* reset the unicore */ 1775 /* reset the unicore */
1811 CL45_WR_OVER_CL22(bp, phy, 1776 CL22_WR_OVER_CL45(bp, phy,
1812 MDIO_REG_BANK_COMBO_IEEE0, 1777 MDIO_REG_BANK_COMBO_IEEE0,
1813 MDIO_COMBO_IEEE0_MII_CONTROL, 1778 MDIO_COMBO_IEEE0_MII_CONTROL,
1814 (mii_control | 1779 (mii_control |
1815 MDIO_COMBO_IEEO_MII_CONTROL_RESET)); 1780 MDIO_COMBO_IEEO_MII_CONTROL_RESET));
1816 if (set_serdes) 1781 if (set_serdes)
1817 bnx2x_set_serdes_access(bp, params->port); 1782 bnx2x_set_serdes_access(bp, params->port);
1818 1783
@@ -1821,10 +1786,10 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
1821 udelay(5); 1786 udelay(5);
1822 1787
1823 /* the reset erased the previous bank value */ 1788 /* the reset erased the previous bank value */
1824 CL45_RD_OVER_CL22(bp, phy, 1789 CL22_RD_OVER_CL45(bp, phy,
1825 MDIO_REG_BANK_COMBO_IEEE0, 1790 MDIO_REG_BANK_COMBO_IEEE0,
1826 MDIO_COMBO_IEEE0_MII_CONTROL, 1791 MDIO_COMBO_IEEE0_MII_CONTROL,
1827 &mii_control); 1792 &mii_control);
1828 1793
1829 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) { 1794 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
1830 udelay(5); 1795 udelay(5);
@@ -1832,6 +1797,9 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
1832 } 1797 }
1833 } 1798 }
1834 1799
1800 netdev_err(bp->dev, "Warning: PHY was not initialized,"
1801 " Port %d\n",
1802 params->port);
1835 DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n"); 1803 DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n");
1836 return -EINVAL; 1804 return -EINVAL;
1837 1805
@@ -1841,43 +1809,45 @@ static void bnx2x_set_swap_lanes(struct link_params *params,
1841 struct bnx2x_phy *phy) 1809 struct bnx2x_phy *phy)
1842{ 1810{
1843 struct bnx2x *bp = params->bp; 1811 struct bnx2x *bp = params->bp;
1844 /* Each two bits represents a lane number: 1812 /*
1845 No swap is 0123 => 0x1b no need to enable the swap */ 1813 * Each two bits represents a lane number:
1814 * No swap is 0123 => 0x1b no need to enable the swap
1815 */
1846 u16 ser_lane, rx_lane_swap, tx_lane_swap; 1816 u16 ser_lane, rx_lane_swap, tx_lane_swap;
1847 1817
1848 ser_lane = ((params->lane_config & 1818 ser_lane = ((params->lane_config &
1849 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 1819 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
1850 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 1820 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
1851 rx_lane_swap = ((params->lane_config & 1821 rx_lane_swap = ((params->lane_config &
1852 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >> 1822 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
1853 PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT); 1823 PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
1854 tx_lane_swap = ((params->lane_config & 1824 tx_lane_swap = ((params->lane_config &
1855 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >> 1825 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
1856 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT); 1826 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
1857 1827
1858 if (rx_lane_swap != 0x1b) { 1828 if (rx_lane_swap != 0x1b) {
1859 CL45_WR_OVER_CL22(bp, phy, 1829 CL22_WR_OVER_CL45(bp, phy,
1860 MDIO_REG_BANK_XGXS_BLOCK2, 1830 MDIO_REG_BANK_XGXS_BLOCK2,
1861 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 1831 MDIO_XGXS_BLOCK2_RX_LN_SWAP,
1862 (rx_lane_swap | 1832 (rx_lane_swap |
1863 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE | 1833 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
1864 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE)); 1834 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
1865 } else { 1835 } else {
1866 CL45_WR_OVER_CL22(bp, phy, 1836 CL22_WR_OVER_CL45(bp, phy,
1867 MDIO_REG_BANK_XGXS_BLOCK2, 1837 MDIO_REG_BANK_XGXS_BLOCK2,
1868 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0); 1838 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
1869 } 1839 }
1870 1840
1871 if (tx_lane_swap != 0x1b) { 1841 if (tx_lane_swap != 0x1b) {
1872 CL45_WR_OVER_CL22(bp, phy, 1842 CL22_WR_OVER_CL45(bp, phy,
1873 MDIO_REG_BANK_XGXS_BLOCK2, 1843 MDIO_REG_BANK_XGXS_BLOCK2,
1874 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 1844 MDIO_XGXS_BLOCK2_TX_LN_SWAP,
1875 (tx_lane_swap | 1845 (tx_lane_swap |
1876 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE)); 1846 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
1877 } else { 1847 } else {
1878 CL45_WR_OVER_CL22(bp, phy, 1848 CL22_WR_OVER_CL45(bp, phy,
1879 MDIO_REG_BANK_XGXS_BLOCK2, 1849 MDIO_REG_BANK_XGXS_BLOCK2,
1880 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0); 1850 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
1881 } 1851 }
1882} 1852}
1883 1853
@@ -1886,66 +1856,66 @@ static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
1886{ 1856{
1887 struct bnx2x *bp = params->bp; 1857 struct bnx2x *bp = params->bp;
1888 u16 control2; 1858 u16 control2;
1889 CL45_RD_OVER_CL22(bp, phy, 1859 CL22_RD_OVER_CL45(bp, phy,
1890 MDIO_REG_BANK_SERDES_DIGITAL, 1860 MDIO_REG_BANK_SERDES_DIGITAL,
1891 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, 1861 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
1892 &control2); 1862 &control2);
1893 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) 1863 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
1894 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; 1864 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
1895 else 1865 else
1896 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; 1866 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
1897 DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n", 1867 DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
1898 phy->speed_cap_mask, control2); 1868 phy->speed_cap_mask, control2);
1899 CL45_WR_OVER_CL22(bp, phy, 1869 CL22_WR_OVER_CL45(bp, phy,
1900 MDIO_REG_BANK_SERDES_DIGITAL, 1870 MDIO_REG_BANK_SERDES_DIGITAL,
1901 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, 1871 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
1902 control2); 1872 control2);
1903 1873
1904 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) && 1874 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
1905 (phy->speed_cap_mask & 1875 (phy->speed_cap_mask &
1906 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { 1876 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
1907 DP(NETIF_MSG_LINK, "XGXS\n"); 1877 DP(NETIF_MSG_LINK, "XGXS\n");
1908 1878
1909 CL45_WR_OVER_CL22(bp, phy, 1879 CL22_WR_OVER_CL45(bp, phy,
1910 MDIO_REG_BANK_10G_PARALLEL_DETECT, 1880 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1911 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK, 1881 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
1912 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT); 1882 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
1913 1883
1914 CL45_RD_OVER_CL22(bp, phy, 1884 CL22_RD_OVER_CL45(bp, phy,
1915 MDIO_REG_BANK_10G_PARALLEL_DETECT, 1885 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1916 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, 1886 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
1917 &control2); 1887 &control2);
1918 1888
1919 1889
1920 control2 |= 1890 control2 |=
1921 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN; 1891 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
1922 1892
1923 CL45_WR_OVER_CL22(bp, phy, 1893 CL22_WR_OVER_CL45(bp, phy,
1924 MDIO_REG_BANK_10G_PARALLEL_DETECT, 1894 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1925 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, 1895 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
1926 control2); 1896 control2);
1927 1897
1928 /* Disable parallel detection of HiG */ 1898 /* Disable parallel detection of HiG */
1929 CL45_WR_OVER_CL22(bp, phy, 1899 CL22_WR_OVER_CL45(bp, phy,
1930 MDIO_REG_BANK_XGXS_BLOCK2, 1900 MDIO_REG_BANK_XGXS_BLOCK2,
1931 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G, 1901 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
1932 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS | 1902 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
1933 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS); 1903 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
1934 } 1904 }
1935} 1905}
1936 1906
1937static void bnx2x_set_autoneg(struct bnx2x_phy *phy, 1907static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
1938 struct link_params *params, 1908 struct link_params *params,
1939 struct link_vars *vars, 1909 struct link_vars *vars,
1940 u8 enable_cl73) 1910 u8 enable_cl73)
1941{ 1911{
1942 struct bnx2x *bp = params->bp; 1912 struct bnx2x *bp = params->bp;
1943 u16 reg_val; 1913 u16 reg_val;
1944 1914
1945 /* CL37 Autoneg */ 1915 /* CL37 Autoneg */
1946 CL45_RD_OVER_CL22(bp, phy, 1916 CL22_RD_OVER_CL45(bp, phy,
1947 MDIO_REG_BANK_COMBO_IEEE0, 1917 MDIO_REG_BANK_COMBO_IEEE0,
1948 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val); 1918 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
1949 1919
1950 /* CL37 Autoneg Enabled */ 1920 /* CL37 Autoneg Enabled */
1951 if (vars->line_speed == SPEED_AUTO_NEG) 1921 if (vars->line_speed == SPEED_AUTO_NEG)
@@ -1954,15 +1924,15 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
1954 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 1924 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
1955 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN); 1925 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
1956 1926
1957 CL45_WR_OVER_CL22(bp, phy, 1927 CL22_WR_OVER_CL45(bp, phy,
1958 MDIO_REG_BANK_COMBO_IEEE0, 1928 MDIO_REG_BANK_COMBO_IEEE0,
1959 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); 1929 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
1960 1930
1961 /* Enable/Disable Autodetection */ 1931 /* Enable/Disable Autodetection */
1962 1932
1963 CL45_RD_OVER_CL22(bp, phy, 1933 CL22_RD_OVER_CL45(bp, phy,
1964 MDIO_REG_BANK_SERDES_DIGITAL, 1934 MDIO_REG_BANK_SERDES_DIGITAL,
1965 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val); 1935 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
1966 reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN | 1936 reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
1967 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT); 1937 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
1968 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE; 1938 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
@@ -1971,14 +1941,14 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
1971 else 1941 else
1972 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; 1942 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
1973 1943
1974 CL45_WR_OVER_CL22(bp, phy, 1944 CL22_WR_OVER_CL45(bp, phy,
1975 MDIO_REG_BANK_SERDES_DIGITAL, 1945 MDIO_REG_BANK_SERDES_DIGITAL,
1976 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val); 1946 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
1977 1947
1978 /* Enable TetonII and BAM autoneg */ 1948 /* Enable TetonII and BAM autoneg */
1979 CL45_RD_OVER_CL22(bp, phy, 1949 CL22_RD_OVER_CL45(bp, phy,
1980 MDIO_REG_BANK_BAM_NEXT_PAGE, 1950 MDIO_REG_BANK_BAM_NEXT_PAGE,
1981 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, 1951 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
1982 &reg_val); 1952 &reg_val);
1983 if (vars->line_speed == SPEED_AUTO_NEG) { 1953 if (vars->line_speed == SPEED_AUTO_NEG) {
1984 /* Enable BAM aneg Mode and TetonII aneg Mode */ 1954 /* Enable BAM aneg Mode and TetonII aneg Mode */
@@ -1989,20 +1959,20 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
1989 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | 1959 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
1990 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); 1960 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
1991 } 1961 }
1992 CL45_WR_OVER_CL22(bp, phy, 1962 CL22_WR_OVER_CL45(bp, phy,
1993 MDIO_REG_BANK_BAM_NEXT_PAGE, 1963 MDIO_REG_BANK_BAM_NEXT_PAGE,
1994 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, 1964 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
1995 reg_val); 1965 reg_val);
1996 1966
1997 if (enable_cl73) { 1967 if (enable_cl73) {
1998 /* Enable Cl73 FSM status bits */ 1968 /* Enable Cl73 FSM status bits */
1999 CL45_WR_OVER_CL22(bp, phy, 1969 CL22_WR_OVER_CL45(bp, phy,
2000 MDIO_REG_BANK_CL73_USERB0, 1970 MDIO_REG_BANK_CL73_USERB0,
2001 MDIO_CL73_USERB0_CL73_UCTRL, 1971 MDIO_CL73_USERB0_CL73_UCTRL,
2002 0xe); 1972 0xe);
2003 1973
2004 /* Enable BAM Station Manager*/ 1974 /* Enable BAM Station Manager*/
2005 CL45_WR_OVER_CL22(bp, phy, 1975 CL22_WR_OVER_CL45(bp, phy,
2006 MDIO_REG_BANK_CL73_USERB0, 1976 MDIO_REG_BANK_CL73_USERB0,
2007 MDIO_CL73_USERB0_CL73_BAM_CTRL1, 1977 MDIO_CL73_USERB0_CL73_BAM_CTRL1,
2008 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN | 1978 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
@@ -2010,10 +1980,10 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
2010 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN); 1980 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
2011 1981
2012 /* Advertise CL73 link speeds */ 1982 /* Advertise CL73 link speeds */
2013 CL45_RD_OVER_CL22(bp, phy, 1983 CL22_RD_OVER_CL45(bp, phy,
2014 MDIO_REG_BANK_CL73_IEEEB1, 1984 MDIO_REG_BANK_CL73_IEEEB1,
2015 MDIO_CL73_IEEEB1_AN_ADV2, 1985 MDIO_CL73_IEEEB1_AN_ADV2,
2016 &reg_val); 1986 &reg_val);
2017 if (phy->speed_cap_mask & 1987 if (phy->speed_cap_mask &
2018 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 1988 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2019 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4; 1989 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
@@ -2021,10 +1991,10 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
2021 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) 1991 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
2022 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX; 1992 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
2023 1993
2024 CL45_WR_OVER_CL22(bp, phy, 1994 CL22_WR_OVER_CL45(bp, phy,
2025 MDIO_REG_BANK_CL73_IEEEB1, 1995 MDIO_REG_BANK_CL73_IEEEB1,
2026 MDIO_CL73_IEEEB1_AN_ADV2, 1996 MDIO_CL73_IEEEB1_AN_ADV2,
2027 reg_val); 1997 reg_val);
2028 1998
2029 /* CL73 Autoneg Enabled */ 1999 /* CL73 Autoneg Enabled */
2030 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN; 2000 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
@@ -2032,37 +2002,39 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
2032 } else /* CL73 Autoneg Disabled */ 2002 } else /* CL73 Autoneg Disabled */
2033 reg_val = 0; 2003 reg_val = 0;
2034 2004
2035 CL45_WR_OVER_CL22(bp, phy, 2005 CL22_WR_OVER_CL45(bp, phy,
2036 MDIO_REG_BANK_CL73_IEEEB0, 2006 MDIO_REG_BANK_CL73_IEEEB0,
2037 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val); 2007 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
2038} 2008}
2039 2009
2040/* program SerDes, forced speed */ 2010/* program SerDes, forced speed */
2041static void bnx2x_program_serdes(struct bnx2x_phy *phy, 2011static void bnx2x_program_serdes(struct bnx2x_phy *phy,
2042 struct link_params *params, 2012 struct link_params *params,
2043 struct link_vars *vars) 2013 struct link_vars *vars)
2044{ 2014{
2045 struct bnx2x *bp = params->bp; 2015 struct bnx2x *bp = params->bp;
2046 u16 reg_val; 2016 u16 reg_val;
2047 2017
2048 /* program duplex, disable autoneg and sgmii*/ 2018 /* program duplex, disable autoneg and sgmii*/
2049 CL45_RD_OVER_CL22(bp, phy, 2019 CL22_RD_OVER_CL45(bp, phy,
2050 MDIO_REG_BANK_COMBO_IEEE0, 2020 MDIO_REG_BANK_COMBO_IEEE0,
2051 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val); 2021 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2052 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX | 2022 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
2053 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 2023 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2054 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK); 2024 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
2055 if (phy->req_duplex == DUPLEX_FULL) 2025 if (phy->req_duplex == DUPLEX_FULL)
2056 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; 2026 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2057 CL45_WR_OVER_CL22(bp, phy, 2027 CL22_WR_OVER_CL45(bp, phy,
2058 MDIO_REG_BANK_COMBO_IEEE0, 2028 MDIO_REG_BANK_COMBO_IEEE0,
2059 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); 2029 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2060 2030
2061 /* program speed 2031 /*
2062 - needed only if the speed is greater than 1G (2.5G or 10G) */ 2032 * program speed
2063 CL45_RD_OVER_CL22(bp, phy, 2033 * - needed only if the speed is greater than 1G (2.5G or 10G)
2064 MDIO_REG_BANK_SERDES_DIGITAL, 2034 */
2065 MDIO_SERDES_DIGITAL_MISC1, &reg_val); 2035 CL22_RD_OVER_CL45(bp, phy,
2036 MDIO_REG_BANK_SERDES_DIGITAL,
2037 MDIO_SERDES_DIGITAL_MISC1, &reg_val);
2066 /* clearing the speed value before setting the right speed */ 2038 /* clearing the speed value before setting the right speed */
2067 DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val); 2039 DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
2068 2040
@@ -2083,9 +2055,9 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
2083 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G; 2055 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
2084 } 2056 }
2085 2057
2086 CL45_WR_OVER_CL22(bp, phy, 2058 CL22_WR_OVER_CL45(bp, phy,
2087 MDIO_REG_BANK_SERDES_DIGITAL, 2059 MDIO_REG_BANK_SERDES_DIGITAL,
2088 MDIO_SERDES_DIGITAL_MISC1, reg_val); 2060 MDIO_SERDES_DIGITAL_MISC1, reg_val);
2089 2061
2090} 2062}
2091 2063
@@ -2102,13 +2074,13 @@ static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x_phy *phy,
2102 val |= MDIO_OVER_1G_UP1_2_5G; 2074 val |= MDIO_OVER_1G_UP1_2_5G;
2103 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 2075 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2104 val |= MDIO_OVER_1G_UP1_10G; 2076 val |= MDIO_OVER_1G_UP1_10G;
2105 CL45_WR_OVER_CL22(bp, phy, 2077 CL22_WR_OVER_CL45(bp, phy,
2106 MDIO_REG_BANK_OVER_1G, 2078 MDIO_REG_BANK_OVER_1G,
2107 MDIO_OVER_1G_UP1, val); 2079 MDIO_OVER_1G_UP1, val);
2108 2080
2109 CL45_WR_OVER_CL22(bp, phy, 2081 CL22_WR_OVER_CL45(bp, phy,
2110 MDIO_REG_BANK_OVER_1G, 2082 MDIO_REG_BANK_OVER_1G,
2111 MDIO_OVER_1G_UP3, 0x400); 2083 MDIO_OVER_1G_UP3, 0x400);
2112} 2084}
2113 2085
2114static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, 2086static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
@@ -2116,22 +2088,21 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
2116{ 2088{
2117 struct bnx2x *bp = params->bp; 2089 struct bnx2x *bp = params->bp;
2118 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; 2090 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
2119 /* resolve pause mode and advertisement 2091 /*
2120 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */ 2092 * Resolve pause mode and advertisement.
2093 * Please refer to Table 28B-3 of the 802.3ab-1999 spec
2094 */
2121 2095
2122 switch (phy->req_flow_ctrl) { 2096 switch (phy->req_flow_ctrl) {
2123 case BNX2X_FLOW_CTRL_AUTO: 2097 case BNX2X_FLOW_CTRL_AUTO:
2124 if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) { 2098 if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
2125 *ieee_fc |= 2099 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
2126 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; 2100 else
2127 } else {
2128 *ieee_fc |= 2101 *ieee_fc |=
2129 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; 2102 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
2130 }
2131 break; 2103 break;
2132 case BNX2X_FLOW_CTRL_TX: 2104 case BNX2X_FLOW_CTRL_TX:
2133 *ieee_fc |= 2105 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
2134 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
2135 break; 2106 break;
2136 2107
2137 case BNX2X_FLOW_CTRL_RX: 2108 case BNX2X_FLOW_CTRL_RX:
@@ -2149,23 +2120,23 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
2149 2120
2150static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy, 2121static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy,
2151 struct link_params *params, 2122 struct link_params *params,
2152 u16 ieee_fc) 2123 u16 ieee_fc)
2153{ 2124{
2154 struct bnx2x *bp = params->bp; 2125 struct bnx2x *bp = params->bp;
2155 u16 val; 2126 u16 val;
2156 /* for AN, we are always publishing full duplex */ 2127 /* for AN, we are always publishing full duplex */
2157 2128
2158 CL45_WR_OVER_CL22(bp, phy, 2129 CL22_WR_OVER_CL45(bp, phy,
2159 MDIO_REG_BANK_COMBO_IEEE0, 2130 MDIO_REG_BANK_COMBO_IEEE0,
2160 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc); 2131 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
2161 CL45_RD_OVER_CL22(bp, phy, 2132 CL22_RD_OVER_CL45(bp, phy,
2162 MDIO_REG_BANK_CL73_IEEEB1, 2133 MDIO_REG_BANK_CL73_IEEEB1,
2163 MDIO_CL73_IEEEB1_AN_ADV1, &val); 2134 MDIO_CL73_IEEEB1_AN_ADV1, &val);
2164 val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH; 2135 val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
2165 val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK); 2136 val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
2166 CL45_WR_OVER_CL22(bp, phy, 2137 CL22_WR_OVER_CL45(bp, phy,
2167 MDIO_REG_BANK_CL73_IEEEB1, 2138 MDIO_REG_BANK_CL73_IEEEB1,
2168 MDIO_CL73_IEEEB1_AN_ADV1, val); 2139 MDIO_CL73_IEEEB1_AN_ADV1, val);
2169} 2140}
2170 2141
2171static void bnx2x_restart_autoneg(struct bnx2x_phy *phy, 2142static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
@@ -2179,67 +2150,67 @@ static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
2179 /* Enable and restart BAM/CL37 aneg */ 2150 /* Enable and restart BAM/CL37 aneg */
2180 2151
2181 if (enable_cl73) { 2152 if (enable_cl73) {
2182 CL45_RD_OVER_CL22(bp, phy, 2153 CL22_RD_OVER_CL45(bp, phy,
2183 MDIO_REG_BANK_CL73_IEEEB0, 2154 MDIO_REG_BANK_CL73_IEEEB0,
2184 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 2155 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
2185 &mii_control); 2156 &mii_control);
2186 2157
2187 CL45_WR_OVER_CL22(bp, phy, 2158 CL22_WR_OVER_CL45(bp, phy,
2188 MDIO_REG_BANK_CL73_IEEEB0, 2159 MDIO_REG_BANK_CL73_IEEEB0,
2189 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 2160 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
2190 (mii_control | 2161 (mii_control |
2191 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN | 2162 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
2192 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN)); 2163 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
2193 } else { 2164 } else {
2194 2165
2195 CL45_RD_OVER_CL22(bp, phy, 2166 CL22_RD_OVER_CL45(bp, phy,
2196 MDIO_REG_BANK_COMBO_IEEE0, 2167 MDIO_REG_BANK_COMBO_IEEE0,
2197 MDIO_COMBO_IEEE0_MII_CONTROL, 2168 MDIO_COMBO_IEEE0_MII_CONTROL,
2198 &mii_control); 2169 &mii_control);
2199 DP(NETIF_MSG_LINK, 2170 DP(NETIF_MSG_LINK,
2200 "bnx2x_restart_autoneg mii_control before = 0x%x\n", 2171 "bnx2x_restart_autoneg mii_control before = 0x%x\n",
2201 mii_control); 2172 mii_control);
2202 CL45_WR_OVER_CL22(bp, phy, 2173 CL22_WR_OVER_CL45(bp, phy,
2203 MDIO_REG_BANK_COMBO_IEEE0, 2174 MDIO_REG_BANK_COMBO_IEEE0,
2204 MDIO_COMBO_IEEE0_MII_CONTROL, 2175 MDIO_COMBO_IEEE0_MII_CONTROL,
2205 (mii_control | 2176 (mii_control |
2206 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 2177 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2207 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN)); 2178 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
2208 } 2179 }
2209} 2180}
2210 2181
2211static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy, 2182static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
2212 struct link_params *params, 2183 struct link_params *params,
2213 struct link_vars *vars) 2184 struct link_vars *vars)
2214{ 2185{
2215 struct bnx2x *bp = params->bp; 2186 struct bnx2x *bp = params->bp;
2216 u16 control1; 2187 u16 control1;
2217 2188
2218 /* in SGMII mode, the unicore is always slave */ 2189 /* in SGMII mode, the unicore is always slave */
2219 2190
2220 CL45_RD_OVER_CL22(bp, phy, 2191 CL22_RD_OVER_CL45(bp, phy,
2221 MDIO_REG_BANK_SERDES_DIGITAL, 2192 MDIO_REG_BANK_SERDES_DIGITAL,
2222 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, 2193 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
2223 &control1); 2194 &control1);
2224 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT; 2195 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
2225 /* set sgmii mode (and not fiber) */ 2196 /* set sgmii mode (and not fiber) */
2226 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE | 2197 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
2227 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET | 2198 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
2228 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE); 2199 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
2229 CL45_WR_OVER_CL22(bp, phy, 2200 CL22_WR_OVER_CL45(bp, phy,
2230 MDIO_REG_BANK_SERDES_DIGITAL, 2201 MDIO_REG_BANK_SERDES_DIGITAL,
2231 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, 2202 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
2232 control1); 2203 control1);
2233 2204
2234 /* if forced speed */ 2205 /* if forced speed */
2235 if (!(vars->line_speed == SPEED_AUTO_NEG)) { 2206 if (!(vars->line_speed == SPEED_AUTO_NEG)) {
2236 /* set speed, disable autoneg */ 2207 /* set speed, disable autoneg */
2237 u16 mii_control; 2208 u16 mii_control;
2238 2209
2239 CL45_RD_OVER_CL22(bp, phy, 2210 CL22_RD_OVER_CL45(bp, phy,
2240 MDIO_REG_BANK_COMBO_IEEE0, 2211 MDIO_REG_BANK_COMBO_IEEE0,
2241 MDIO_COMBO_IEEE0_MII_CONTROL, 2212 MDIO_COMBO_IEEE0_MII_CONTROL,
2242 &mii_control); 2213 &mii_control);
2243 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 2214 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2244 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK| 2215 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
2245 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX); 2216 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
@@ -2267,10 +2238,10 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
2267 if (phy->req_duplex == DUPLEX_FULL) 2238 if (phy->req_duplex == DUPLEX_FULL)
2268 mii_control |= 2239 mii_control |=
2269 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; 2240 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2270 CL45_WR_OVER_CL22(bp, phy, 2241 CL22_WR_OVER_CL45(bp, phy,
2271 MDIO_REG_BANK_COMBO_IEEE0, 2242 MDIO_REG_BANK_COMBO_IEEE0,
2272 MDIO_COMBO_IEEE0_MII_CONTROL, 2243 MDIO_COMBO_IEEE0_MII_CONTROL,
2273 mii_control); 2244 mii_control);
2274 2245
2275 } else { /* AN mode */ 2246 } else { /* AN mode */
2276 /* enable and restart AN */ 2247 /* enable and restart AN */
@@ -2285,19 +2256,19 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
2285 2256
2286static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) 2257static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
2287{ /* LD LP */ 2258{ /* LD LP */
2288 switch (pause_result) { /* ASYM P ASYM P */ 2259 switch (pause_result) { /* ASYM P ASYM P */
2289 case 0xb: /* 1 0 1 1 */ 2260 case 0xb: /* 1 0 1 1 */
2290 vars->flow_ctrl = BNX2X_FLOW_CTRL_TX; 2261 vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
2291 break; 2262 break;
2292 2263
2293 case 0xe: /* 1 1 1 0 */ 2264 case 0xe: /* 1 1 1 0 */
2294 vars->flow_ctrl = BNX2X_FLOW_CTRL_RX; 2265 vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
2295 break; 2266 break;
2296 2267
2297 case 0x5: /* 0 1 0 1 */ 2268 case 0x5: /* 0 1 0 1 */
2298 case 0x7: /* 0 1 1 1 */ 2269 case 0x7: /* 0 1 1 1 */
2299 case 0xd: /* 1 1 0 1 */ 2270 case 0xd: /* 1 1 0 1 */
2300 case 0xf: /* 1 1 1 1 */ 2271 case 0xf: /* 1 1 1 1 */
2301 vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH; 2272 vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
2302 break; 2273 break;
2303 2274
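The column comment above encodes the whole function: pause_result packs the local device's ASYM and PAUSE advertisement bits into bits 3:2 and the link partner's into bits 1:0. Checking case 0xb against that layout: LD = 10 (asymmetric only) and LP = 11 (pause plus asymmetric), which 802.3 Table 28B-3 resolves to transmit-only pause, matching BNX2X_FLOW_CTRL_TX. The same resolution as a self-contained sketch (the remaining combinations are handled past the end of this hunk; FC_NONE as their result here is an assumption):

    enum fc { FC_NONE, FC_RX, FC_TX, FC_BOTH };

    /* bits 3:2 = local ASYM,PAUSE; bits 1:0 = partner ASYM,PAUSE */
    static enum fc resolve_pause(unsigned int pause_result)
    {
            switch (pause_result) {
            case 0xb:                       return FC_TX;
            case 0xe:                       return FC_RX;
            case 0x5: case 0x7:
            case 0xd: case 0xf:             return FC_BOTH;
            default:                        return FC_NONE;
            }
    }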
@@ -2317,24 +2288,24 @@ static u8 bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
2317 u16 pd_10g, status2_1000x; 2288 u16 pd_10g, status2_1000x;
2318 if (phy->req_line_speed != SPEED_AUTO_NEG) 2289 if (phy->req_line_speed != SPEED_AUTO_NEG)
2319 return 0; 2290 return 0;
2320 CL45_RD_OVER_CL22(bp, phy, 2291 CL22_RD_OVER_CL45(bp, phy,
2321 MDIO_REG_BANK_SERDES_DIGITAL, 2292 MDIO_REG_BANK_SERDES_DIGITAL,
2322 MDIO_SERDES_DIGITAL_A_1000X_STATUS2, 2293 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
2323 &status2_1000x); 2294 &status2_1000x);
2324 CL45_RD_OVER_CL22(bp, phy, 2295 CL22_RD_OVER_CL45(bp, phy,
2325 MDIO_REG_BANK_SERDES_DIGITAL, 2296 MDIO_REG_BANK_SERDES_DIGITAL,
2326 MDIO_SERDES_DIGITAL_A_1000X_STATUS2, 2297 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
2327 &status2_1000x); 2298 &status2_1000x);
2328 if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) { 2299 if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
2329 DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n", 2300 DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
2330 params->port); 2301 params->port);
2331 return 1; 2302 return 1;
2332 } 2303 }
2333 2304
2334 CL45_RD_OVER_CL22(bp, phy, 2305 CL22_RD_OVER_CL45(bp, phy,
2335 MDIO_REG_BANK_10G_PARALLEL_DETECT, 2306 MDIO_REG_BANK_10G_PARALLEL_DETECT,
2336 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS, 2307 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
2337 &pd_10g); 2308 &pd_10g);
2338 2309
2339 if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) { 2310 if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
2340 DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n", 2311 DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
@@ -2373,14 +2344,14 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
2373 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | 2344 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
2374 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) { 2345 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
2375 2346
2376 CL45_RD_OVER_CL22(bp, phy, 2347 CL22_RD_OVER_CL45(bp, phy,
2377 MDIO_REG_BANK_CL73_IEEEB1, 2348 MDIO_REG_BANK_CL73_IEEEB1,
2378 MDIO_CL73_IEEEB1_AN_ADV1, 2349 MDIO_CL73_IEEEB1_AN_ADV1,
2379 &ld_pause); 2350 &ld_pause);
2380 CL45_RD_OVER_CL22(bp, phy, 2351 CL22_RD_OVER_CL45(bp, phy,
2381 MDIO_REG_BANK_CL73_IEEEB1, 2352 MDIO_REG_BANK_CL73_IEEEB1,
2382 MDIO_CL73_IEEEB1_AN_LP_ADV1, 2353 MDIO_CL73_IEEEB1_AN_LP_ADV1,
2383 &lp_pause); 2354 &lp_pause);
2384 pause_result = (ld_pause & 2355 pause_result = (ld_pause &
2385 MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) 2356 MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
2386 >> 8; 2357 >> 8;
@@ -2390,18 +2361,18 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
2390 DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n", 2361 DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
2391 pause_result); 2362 pause_result);
2392 } else { 2363 } else {
2393 CL45_RD_OVER_CL22(bp, phy, 2364 CL22_RD_OVER_CL45(bp, phy,
2394 MDIO_REG_BANK_COMBO_IEEE0, 2365 MDIO_REG_BANK_COMBO_IEEE0,
2395 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, 2366 MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
2396 &ld_pause); 2367 &ld_pause);
2397 CL45_RD_OVER_CL22(bp, phy, 2368 CL22_RD_OVER_CL45(bp, phy,
2398 MDIO_REG_BANK_COMBO_IEEE0, 2369 MDIO_REG_BANK_COMBO_IEEE0,
2399 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1, 2370 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
2400 &lp_pause); 2371 &lp_pause);
2401 pause_result = (ld_pause & 2372 pause_result = (ld_pause &
2402 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5; 2373 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
2403 pause_result |= (lp_pause & 2374 pause_result |= (lp_pause &
2404 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; 2375 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
2405 DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n", 2376 DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
2406 pause_result); 2377 pause_result);
2407 } 2378 }
@@ -2417,25 +2388,25 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
2417 u16 rx_status, ustat_val, cl37_fsm_recieved; 2388 u16 rx_status, ustat_val, cl37_fsm_recieved;
2418 DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n"); 2389 DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
2419 /* Step 1: Make sure signal is detected */ 2390 /* Step 1: Make sure signal is detected */
2420 CL45_RD_OVER_CL22(bp, phy, 2391 CL22_RD_OVER_CL45(bp, phy,
2421 MDIO_REG_BANK_RX0, 2392 MDIO_REG_BANK_RX0,
2422 MDIO_RX0_RX_STATUS, 2393 MDIO_RX0_RX_STATUS,
2423 &rx_status); 2394 &rx_status);
2424 if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) != 2395 if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
2425 (MDIO_RX0_RX_STATUS_SIGDET)) { 2396 (MDIO_RX0_RX_STATUS_SIGDET)) {
2426 DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73." 2397 DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73."
2427 "rx_status(0x80b0) = 0x%x\n", rx_status); 2398 "rx_status(0x80b0) = 0x%x\n", rx_status);
2428 CL45_WR_OVER_CL22(bp, phy, 2399 CL22_WR_OVER_CL45(bp, phy,
2429 MDIO_REG_BANK_CL73_IEEEB0, 2400 MDIO_REG_BANK_CL73_IEEEB0,
2430 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 2401 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
2431 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN); 2402 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
2432 return; 2403 return;
2433 } 2404 }
2434 /* Step 2: Check CL73 state machine */ 2405 /* Step 2: Check CL73 state machine */
2435 CL45_RD_OVER_CL22(bp, phy, 2406 CL22_RD_OVER_CL45(bp, phy,
2436 MDIO_REG_BANK_CL73_USERB0, 2407 MDIO_REG_BANK_CL73_USERB0,
2437 MDIO_CL73_USERB0_CL73_USTAT1, 2408 MDIO_CL73_USERB0_CL73_USTAT1,
2438 &ustat_val); 2409 &ustat_val);
2439 if ((ustat_val & 2410 if ((ustat_val &
2440 (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK | 2411 (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
2441 MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) != 2412 MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
@@ -2445,12 +2416,14 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
2445 "ustat_val(0x8371) = 0x%x\n", ustat_val); 2416 "ustat_val(0x8371) = 0x%x\n", ustat_val);
2446 return; 2417 return;
2447 } 2418 }
2448 /* Step 3: Check CL37 Message Pages received to indicate LP 2419 /*
2449 supports only CL37 */ 2420 * Step 3: Check CL37 Message Pages received to indicate LP
2450 CL45_RD_OVER_CL22(bp, phy, 2421 * supports only CL37
2451 MDIO_REG_BANK_REMOTE_PHY, 2422 */
2452 MDIO_REMOTE_PHY_MISC_RX_STATUS, 2423 CL22_RD_OVER_CL45(bp, phy,
2453 &cl37_fsm_recieved); 2424 MDIO_REG_BANK_REMOTE_PHY,
2425 MDIO_REMOTE_PHY_MISC_RX_STATUS,
2426 &cl37_fsm_recieved);
2454 if ((cl37_fsm_recieved & 2427 if ((cl37_fsm_recieved &
2455 (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | 2428 (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
2456 MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) != 2429 MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
@@ -2461,14 +2434,18 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
2461 cl37_fsm_recieved); 2434 cl37_fsm_recieved);
2462 return; 2435 return;
2463 } 2436 }
2464 /* The combined cl37/cl73 fsm state information indicating that we are 2437 /*
2465 connected to a device which does not support cl73, but does support 2438 * The combined cl37/cl73 fsm state information indicating that
2466 cl37 BAM. In this case we disable cl73 and restart cl37 auto-neg */ 2439 * we are connected to a device which does not support cl73, but
2440 * does support cl37 BAM. In this case we disable cl73 and
2441 * restart cl37 auto-neg
2442 */
2443
2467 /* Disable CL73 */ 2444 /* Disable CL73 */
2468 CL45_WR_OVER_CL22(bp, phy, 2445 CL22_WR_OVER_CL45(bp, phy,
2469 MDIO_REG_BANK_CL73_IEEEB0, 2446 MDIO_REG_BANK_CL73_IEEEB0,
2470 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 2447 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
2471 0); 2448 0);
2472 /* Restart CL37 autoneg */ 2449 /* Restart CL37 autoneg */
2473 bnx2x_restart_autoneg(phy, params, 0); 2450 bnx2x_restart_autoneg(phy, params, 0);
2474 DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n"); 2451 DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
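Stepping back, the three numbered checks form a guard chain: no signal means restore CL73 and bail; a CL73 FSM not parked in the link/BAM37 check states means autoneg may still complete, so bail; no CL37/BAM message pages from the partner means there is nothing to fall back to. Only when all three pass does the code disable CL73 and restart CL37, as the final lines above do. The chain, reduced to a sketch with flags standing in for the three register reads:

    /* Returns 1 when the partner is CL37/BAM-only and CL73 should be
     * dropped; mirrors steps 1-3 above. */
    static int should_fall_back_to_cl37(int sigdet, int cl73_fsm_waiting,
                                        int cl37_pages_seen)
    {
            if (!sigdet)
                    return 0;       /* step 1: driver re-enables CL73 here */
            if (!cl73_fsm_waiting)
                    return 0;       /* step 2: CL73 may still succeed */
            if (!cl37_pages_seen)
                    return 0;       /* step 3: no BAM/OUI pages received */
            return 1;               /* disable CL73, restart CL37 autoneg */
    }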
@@ -2493,14 +2470,14 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
 				      struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
-	u16 new_line_speed , gp_status;
+	u16 new_line_speed, gp_status;
 	u8 rc = 0;
 
 	/* Read gp_status */
-	CL45_RD_OVER_CL22(bp, phy,
+	CL22_RD_OVER_CL45(bp, phy,
 			  MDIO_REG_BANK_GP_STATUS,
 			  MDIO_GP_STATUS_TOP_AN_STATUS1,
 			  &gp_status);
 
 	if (phy->req_line_speed == SPEED_AUTO_NEG)
 		vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
@@ -2637,9 +2614,9 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
 	u16 bank;
 
 	/* read precomp */
-	CL45_RD_OVER_CL22(bp, phy,
+	CL22_RD_OVER_CL45(bp, phy,
 			  MDIO_REG_BANK_OVER_1G,
 			  MDIO_OVER_1G_LP_UP2, &lp_up2);
 
 	/* bits [10:7] at lp_up2, positioned at [15:12] */
 	lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
@@ -2651,18 +2628,18 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
 
 	for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
 	     bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
-		CL45_RD_OVER_CL22(bp, phy,
+		CL22_RD_OVER_CL45(bp, phy,
 				  bank,
 				  MDIO_TX0_TX_DRIVER, &tx_driver);
 
 		/* replace tx_driver bits [15:12] */
 		if (lp_up2 !=
 		    (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
 			tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
 			tx_driver |= lp_up2;
-			CL45_WR_OVER_CL22(bp, phy,
+			CL22_WR_OVER_CL45(bp, phy,
 					  bank,
 					  MDIO_TX0_TX_DRIVER, tx_driver);
 		}
 	}
 }
@@ -2676,10 +2653,10 @@ static u8 bnx2x_emac_program(struct link_params *params,
 
 	DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
 	bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 +
 		       EMAC_REG_EMAC_MODE,
 		       (EMAC_MODE_25G_MODE |
 			EMAC_MODE_PORT_MII_10M |
 			EMAC_MODE_HALF_DUPLEX));
 	switch (vars->line_speed) {
 	case SPEED_10:
 		mode |= EMAC_MODE_PORT_MII_10M;
@@ -2707,8 +2684,8 @@ static u8 bnx2x_emac_program(struct link_params *params,
 	if (vars->duplex == DUPLEX_HALF)
 		mode |= EMAC_MODE_HALF_DUPLEX;
 	bnx2x_bits_en(bp,
 		      GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
 		      mode);
 
 	bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
 	return 0;
@@ -2723,7 +2700,7 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
 
 	for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
 	     bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
-		CL45_WR_OVER_CL22(bp, phy,
+		CL22_WR_OVER_CL45(bp, phy,
 				  bank,
 				  MDIO_RX0_RX_EQ_BOOST,
 				  phy->rx_preemphasis[i]);
@@ -2731,7 +2708,7 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
 
 	for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
 	     bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
-		CL45_WR_OVER_CL22(bp, phy,
+		CL22_WR_OVER_CL45(bp, phy,
 				  bank,
 				  MDIO_TX0_TX_DRIVER,
 				  phy->tx_preemphasis[i]);
@@ -2754,7 +2731,7 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
 	/* forced speed requested? */
 	if (vars->line_speed != SPEED_AUTO_NEG ||
 	    (SINGLE_MEDIA_DIRECT(params) &&
 	     params->loopback_mode == LOOPBACK_EXT)) {
 		DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
 
 		/* disable autoneg */
@@ -2771,7 +2748,7 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
 
 		/* program duplex & pause advertisement (for aneg) */
 		bnx2x_set_ieee_aneg_advertisment(phy, params,
 						 vars->ieee_fc);
 
 		/* enable autoneg */
 		bnx2x_set_autoneg(phy, params, vars, enable_cl73);
@@ -2842,7 +2819,8 @@ static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy,
 }
 
 static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
-				     struct bnx2x_phy *phy)
+				     struct bnx2x_phy *phy,
+				     struct link_params *params)
 {
 	u16 cnt, ctrl;
 	/* Wait for soft reset to get cleared upto 1 sec */
@@ -2853,6 +2831,11 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
 			break;
 		msleep(1);
 	}
+
+	if (cnt == 1000)
+		netdev_err(bp->dev, "Warning: PHY was not initialized,"
+			   " Port %d\n",
+			   params->port);
 	DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt);
 	return cnt;
 }
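
Note on the hunk above: bnx2x_wait_reset_complete() now takes link_params and warns when its one-second poll budget expires without the soft-reset bit clearing. What follows is a minimal, compilable sketch of that poll-with-timeout pattern only; the fake register and read_ctrl() helper are stand-ins for the driver's CL45 accessors, not bnx2x code.

#include <stdio.h>

#define RESET_BIT 0x8000u /* self-clearing soft-reset bit (assumed) */

/* Fake control register: the reset bit clears after a few polls. */
static unsigned short read_ctrl(void)
{
	static int polls;
	static unsigned short ctrl = RESET_BIT;
	if (++polls >= 5)
		ctrl &= ~RESET_BIT;
	return ctrl;
}

static unsigned int wait_reset_complete(void)
{
	unsigned int cnt;
	unsigned short ctrl = 0;
	for (cnt = 0; cnt < 1000; cnt++) {
		ctrl = read_ctrl();
		if (!(ctrl & RESET_BIT)) /* reset done */
			break;
		/* the driver sleeps 1 ms per iteration here */
	}
	if (cnt == 1000) /* budget exhausted: this is what the new warning reports */
		fprintf(stderr, "Warning: PHY was not initialized\n");
	printf("control reg 0x%x (after %u polls)\n", ctrl, cnt);
	return cnt;
}

int main(void)
{
	wait_reset_complete();
	return 0;
}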
@@ -2863,9 +2846,7 @@ static void bnx2x_link_int_enable(struct link_params *params)
 	u32 mask;
 	struct bnx2x *bp = params->bp;
 
-	/* setting the status to report on link up
-	   for either XGXS or SerDes */
-
+	/* Setting the status to report on link up for either XGXS or SerDes */
 	if (params->switch_cfg == SWITCH_CFG_10G) {
 		mask = (NIG_MASK_XGXS0_LINK10G |
 			NIG_MASK_XGXS0_LINK_STATUS);
@@ -2908,7 +2889,7 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
 {
 	u32 latch_status = 0;
 
-	/**
+	/*
 	 * Disable the MI INT ( external phy int ) by writing 1 to the
 	 * status register. Link down indication is high-active-signal,
 	 * so in this case we need to write the status to clear the XOR
@@ -2933,27 +2914,30 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
 
 	/* For all latched-signal=up : Re-Arm Latch signals */
 	REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
 	       (latch_status & 0xfffe) | (latch_status & 1));
 	}
 	/* For all latched-signal=up,Write original_signal to status */
 }
 
 static void bnx2x_link_int_ack(struct link_params *params,
 			       struct link_vars *vars, u8 is_10g)
 {
 	struct bnx2x *bp = params->bp;
 	u8 port = params->port;
 
-	/* first reset all status
-	 * we assume only one line will be change at a time */
+	/*
+	 * First reset all status we assume only one line will be
+	 * change at a time
+	 */
 	bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
 		       (NIG_STATUS_XGXS0_LINK10G |
 			NIG_STATUS_XGXS0_LINK_STATUS |
 			NIG_STATUS_SERDES0_LINK_STATUS));
 	if (vars->phy_link_up) {
 		if (is_10g) {
-			/* Disable the 10G link interrupt
-			 * by writing 1 to the status register
+			/*
+			 * Disable the 10G link interrupt by writing 1 to the
+			 * status register
 			 */
 			DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
 			bnx2x_bits_en(bp,
@@ -2961,9 +2945,9 @@ static void bnx2x_link_int_ack(struct link_params *params,
 				      NIG_STATUS_XGXS0_LINK10G);
 
 		} else if (params->switch_cfg == SWITCH_CFG_10G) {
-			/* Disable the link interrupt
-			 * by writing 1 to the relevant lane
-			 * in the status register
+			/*
+			 * Disable the link interrupt by writing 1 to the
+			 * relevant lane in the status register
 			 */
 			u32 ser_lane = ((params->lane_config &
 				    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
@@ -2978,8 +2962,9 @@ static void bnx2x_link_int_ack(struct link_params *params,
 
 		} else { /* SerDes */
 			DP(NETIF_MSG_LINK, "SerDes phy link up\n");
-			/* Disable the link interrupt
-			 * by writing 1 to the status register
+			/*
+			 * Disable the link interrupt by writing 1 to the status
+			 * register
 			 */
 			bnx2x_bits_en(bp,
 				      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
@@ -3059,8 +3044,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
 	}
 	if ((params->num_phys == MAX_PHYS) &&
 	    (params->phy[EXT_PHY2].ver_addr != 0)) {
-		spirom_ver = REG_RD(bp,
-				    params->phy[EXT_PHY2].ver_addr);
+		spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr);
 		if (params->phy[EXT_PHY2].format_fw_ver) {
 			*ver_p = '/';
 			ver_p++;
@@ -3089,29 +3073,27 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
 
 		/* change the uni_phy_addr in the nig */
 		md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
 				       port*0x18));
 
 		REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
 
 		bnx2x_cl45_write(bp, phy,
 				 5,
 				 (MDIO_REG_BANK_AER_BLOCK +
 				  (MDIO_AER_BLOCK_AER_REG & 0xf)),
 				 0x2800);
 
 		bnx2x_cl45_write(bp, phy,
 				 5,
 				 (MDIO_REG_BANK_CL73_IEEEB0 +
 				  (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
 				 0x6041);
 		msleep(200);
 		/* set aer mmd back */
 		bnx2x_set_aer_mmd_xgxs(params, phy);
 
 		/* and md_devad */
-		REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
-		       md_devad);
-
+		REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
 	} else {
 		u16 mii_ctrl;
 		DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
@@ -3152,56 +3134,71 @@ u8 bnx2x_set_led(struct link_params *params,
 	case LED_MODE_OFF:
 		REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
 		REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
 		       SHARED_HW_CFG_LED_MAC1);
 
 		tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
 		EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
 		break;
 
 	case LED_MODE_OPER:
-		/**
+		/*
 		 * For all other phys, OPER mode is same as ON, so in case
 		 * link is down, do nothing
-		 **/
+		 */
 		if (!vars->link_up)
 			break;
 	case LED_MODE_ON:
-		if (SINGLE_MEDIA_DIRECT(params)) {
-			/**
-			 * This is a work-around for HW issue found when link
-			 * is up in CL73
-			 */
+		if (params->phy[EXT_PHY1].type ==
+		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 &&
+		    CHIP_IS_E2(bp) && params->num_phys == 2) {
+			/*
+			 * This is a work-around for E2+8727 Configurations
+			 */
+			if (mode == LED_MODE_ON ||
+			    speed == SPEED_10000){
+				REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+				REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+
+				tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+				EMAC_WR(bp, EMAC_REG_EMAC_LED,
+					(tmp | EMAC_LED_OVERRIDE));
+				return rc;
+			}
+		} else if (SINGLE_MEDIA_DIRECT(params)) {
+			/*
+			 * This is a work-around for HW issue found when link
+			 * is up in CL73
+			 */
 			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
 			REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
 		} else {
-			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
-			       hw_led_mode);
+			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
 		}
 
-		REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 +
-		       port*4, 0);
+		REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
 		/* Set blinking rate to ~15.9Hz */
 		REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
 		       LED_BLINK_RATE_VAL);
 		REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
 		       port*4, 1);
 		tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
-		EMAC_WR(bp, EMAC_REG_EMAC_LED,
-			(tmp & (~EMAC_LED_OVERRIDE)));
+		EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp & (~EMAC_LED_OVERRIDE)));
 
 		if (CHIP_IS_E1(bp) &&
 		    ((speed == SPEED_2500) ||
 		     (speed == SPEED_1000) ||
 		     (speed == SPEED_100) ||
 		     (speed == SPEED_10))) {
-			/* On Everest 1 Ax chip versions for speeds less than
-			   10G LED scheme is different */
+			/*
+			 * On Everest 1 Ax chip versions for speeds less than
+			 * 10G LED scheme is different
+			 */
 			REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
 			       + port*4, 1);
 			REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
 			       port*4, 0);
 			REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
 			       port*4, 1);
 		}
 		break;
 
@@ -3215,7 +3212,7 @@ u8 bnx2x_set_led(struct link_params *params,
 
 }
 
-/**
+/*
  * This function comes to reflect the actual link state read DIRECTLY from the
  * HW
  */
@@ -3227,10 +3224,10 @@ u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars,
 	u8 ext_phy_link_up = 0, serdes_phy_type;
 	struct link_vars temp_vars;
 
-	CL45_RD_OVER_CL22(bp, &params->phy[INT_PHY],
+	CL22_RD_OVER_CL45(bp, &params->phy[INT_PHY],
 			  MDIO_REG_BANK_GP_STATUS,
 			  MDIO_GP_STATUS_TOP_AN_STATUS1,
 			  &gp_status);
 	/* link is up only if both local phy and external phy are up */
 	if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
 		return -ESRCH;
@@ -3274,15 +3271,15 @@ static u8 bnx2x_link_initialize(struct link_params *params,
 	u8 rc = 0;
 	u8 phy_index, non_ext_phy;
 	struct bnx2x *bp = params->bp;
-	/**
+	/*
 	 * In case of external phy existence, the line speed would be the
 	 * line speed linked up by the external phy. In case it is direct
 	 * only, then the line_speed during initialization will be
 	 * equal to the req_line_speed
 	 */
 	vars->line_speed = params->phy[INT_PHY].req_line_speed;
 
-	/**
+	/*
 	 * Initialize the internal phy in case this is a direct board
 	 * (no external phys), or this board has external phy which requires
 	 * to first.
@@ -3310,17 +3307,16 @@ static u8 bnx2x_link_initialize(struct link_params *params,
 	if (!non_ext_phy)
 		for (phy_index = EXT_PHY1; phy_index < params->num_phys;
 		      phy_index++) {
-			/**
+			/*
 			 * No need to initialize second phy in case of first
 			 * phy only selection. In case of second phy, we do
 			 * need to initialize the first phy, since they are
 			 * connected.
-			 **/
+			 */
 			if (phy_index == EXT_PHY2 &&
 			    (bnx2x_phy_selection(params) ==
 			     PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
-				DP(NETIF_MSG_LINK, "Not initializing"
-				   "second phy\n");
+				DP(NETIF_MSG_LINK, "Ignoring second phy\n");
 				continue;
 			}
 			params->phy[phy_index].config_init(
@@ -3342,9 +3338,8 @@ static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
 				 struct link_params *params)
 {
 	/* reset the SerDes/XGXS */
-	REG_WR(params->bp, GRCBASE_MISC +
-	       MISC_REGISTERS_RESET_REG_3_CLEAR,
-	       (0x1ff << (params->port*16)));
+	REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
+	       (0x1ff << (params->port*16)));
 }
 
 static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
@@ -3358,11 +3353,11 @@ static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
 	else
 		gpio_port = params->port;
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 		       MISC_REGISTERS_GPIO_OUTPUT_LOW,
 		       gpio_port);
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 		       MISC_REGISTERS_GPIO_OUTPUT_LOW,
 		       gpio_port);
 	DP(NETIF_MSG_LINK, "reset external PHY\n");
 }
 
@@ -3393,9 +3388,8 @@ static u8 bnx2x_update_link_down(struct link_params *params,
 
 	/* reset BigMac */
 	bnx2x_bmac_rx_disable(bp, params->port);
-	REG_WR(bp, GRCBASE_MISC +
-	       MISC_REGISTERS_RESET_REG_2_CLEAR,
-	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
 	return 0;
 }
 
@@ -3446,7 +3440,7 @@ static u8 bnx2x_update_link_up(struct link_params *params,
 	msleep(20);
 	return rc;
 }
-/**
+/*
  * The bnx2x_link_update function should be called upon link
  * interrupt.
  * Link is considered up as follows:
@@ -3485,12 +3479,11 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
 	       REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
 
 	is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
 				port*0x18) > 0);
 	DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
 	   REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
 	   is_mi_int,
-	   REG_RD(bp,
-		  NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
+	   REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
 
 	DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
 	   REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
@@ -3499,14 +3492,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
 	/* disable emac */
 	REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
 
-	/**
+	/*
 	 * Step 1:
 	 * Check external link change only for external phys, and apply
 	 * priority selection between them in case the link on both phys
 	 * is up. Note that the instead of the common vars, a temporary
 	 * vars argument is used since each phy may have different link/
 	 * speed/duplex result
 	 */
 	for (phy_index = EXT_PHY1; phy_index < params->num_phys;
 	      phy_index++) {
 		struct bnx2x_phy *phy = &params->phy[phy_index];
@@ -3531,22 +3524,22 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
 		switch (bnx2x_phy_selection(params)) {
 		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
 		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
-			/**
+			/*
 			 * In this option, the first PHY makes sure to pass the
 			 * traffic through itself only.
 			 * Its not clear how to reset the link on the second phy
-			 **/
+			 */
 			active_external_phy = EXT_PHY1;
 			break;
 		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
-			/**
+			/*
 			 * In this option, the first PHY makes sure to pass the
 			 * traffic through the second PHY.
-			 **/
+			 */
 			active_external_phy = EXT_PHY2;
 			break;
 		default:
-			/**
+			/*
 			 * Link indication on both PHYs with the following cases
 			 * is invalid:
 			 * - FIRST_PHY means that second phy wasn't initialized,
@@ -3554,7 +3547,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
 			 * - SECOND_PHY means that first phy should not be able
 			 *   to link up by itself (using configuration)
 			 * - DEFAULT should be overriden during initialiazation
-			 **/
+			 */
 			DP(NETIF_MSG_LINK, "Invalid link indication"
 			   "mpc=0x%x. DISABLING LINK !!!\n",
 			   params->multi_phy_config);
@@ -3564,18 +3557,18 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
 		}
 	}
 	prev_line_speed = vars->line_speed;
-	/**
+	/*
 	 * Step 2:
 	 * Read the status of the internal phy. In case of
 	 * DIRECT_SINGLE_MEDIA board, this link is the external link,
 	 * otherwise this is the link between the 577xx and the first
 	 * external phy
 	 */
 	if (params->phy[INT_PHY].read_status)
 		params->phy[INT_PHY].read_status(
 			&params->phy[INT_PHY],
 			params, vars);
-	/**
+	/*
 	 * The INT_PHY flow control reside in the vars. This include the
 	 * case where the speed or flow control are not set to AUTO.
 	 * Otherwise, the active external phy flow control result is set
@@ -3585,13 +3578,13 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
 	 */
 	if (active_external_phy > INT_PHY) {
 		vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
-		/**
+		/*
 		 * Link speed is taken from the XGXS. AN and FC result from
 		 * the external phy.
 		 */
 		vars->link_status |= phy_vars[active_external_phy].link_status;
 
-		/**
+		/*
 		 * if active_external_phy is first PHY and link is up - disable
 		 * disable TX on second external PHY
 		 */
@@ -3627,7 +3620,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
 	DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
 	   " ext_phy_line_speed = %d\n", vars->flow_ctrl,
 	   vars->link_status, ext_phy_line_speed);
-	/**
+	/*
 	 * Upon link speed change set the NIG into drain mode. Comes to
 	 * deals with possible FIFO glitch due to clk change when speed
 	 * is decreased without link down indicator
@@ -3642,8 +3635,8 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
 				   ext_phy_line_speed);
 			vars->phy_link_up = 0;
 		} else if (prev_line_speed != vars->line_speed) {
-			REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
-			       + params->port*4, 0);
+			REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
+			       0);
 			msleep(1);
 		}
 	}
@@ -3658,14 +3651,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
 
 	bnx2x_link_int_ack(params, vars, link_10g);
 
-	/**
+	/*
 	 * In case external phy link is up, and internal link is down
 	 * (not initialized yet probably after link initialization, it
 	 * needs to be initialized.
 	 * Note that after link down-up as result of cable plug, the xgxs
 	 * link would probably become up again without the need
 	 * initialize it
 	 */
 	if (!(SINGLE_MEDIA_DIRECT(params))) {
 		DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
 		   " init_preceding = %d\n", ext_phy_link_up,
@@ -3685,9 +3678,9 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
 					vars);
 		}
 	}
-	/**
+	/*
 	 * Link is up only if both local phy and external phy (in case of
 	 * non-direct board) are up
 	 */
 	vars->link_up = (vars->phy_link_up &&
 			 (ext_phy_link_up ||
@@ -3708,10 +3701,10 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
 void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
 {
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 	msleep(1);
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
 }
 
 static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
@@ -3731,9 +3724,9 @@ static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp,
 	u16 fw_ver1, fw_ver2;
 
 	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
 			MDIO_PMA_REG_ROM_VER1, &fw_ver1);
 	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
 			MDIO_PMA_REG_ROM_VER2, &fw_ver2);
 	bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2),
 				  phy->ver_addr);
 }
@@ -3754,7 +3747,7 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
 	if ((vars->ieee_fc &
 	     MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
 	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
 		val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
 	}
 	if ((vars->ieee_fc &
 	     MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
@@ -3785,11 +3778,11 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
 	else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
 		ret = 1;
 		bnx2x_cl45_read(bp, phy,
 				MDIO_AN_DEVAD,
 				MDIO_AN_REG_ADV_PAUSE, &ld_pause);
 		bnx2x_cl45_read(bp, phy,
 				MDIO_AN_DEVAD,
 				MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
 		pause_result = (ld_pause &
 				MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
 		pause_result |= (lp_pause &
@@ -3854,90 +3847,82 @@ static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy,
 			   pause_result);
 	}
 }
-
-static void bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
+static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
 					    struct bnx2x_phy *phy,
 					    u8 port)
 {
+	u32 count = 0;
+	u16 fw_ver1, fw_msgout;
+	u8 rc = 0;
+
 	/* Boot port from external ROM */
 	/* EDC grst */
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
 			 MDIO_PMA_REG_GEN_CTRL,
 			 0x0001);
 
 	/* ucode reboot and rst */
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
 			 MDIO_PMA_REG_GEN_CTRL,
 			 0x008c);
 
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
 			 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
 
 	/* Reset internal microprocessor */
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
 			 MDIO_PMA_REG_GEN_CTRL,
 			 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
 
 	/* Release srst bit */
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
 			 MDIO_PMA_REG_GEN_CTRL,
 			 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
 
-	/* wait for 120ms for code download via SPI port */
-	msleep(120);
+	/* Delay 100ms per the PHY specifications */
+	msleep(100);
 
-	/* Clear ser_boot_ctl bit */
-	bnx2x_cl45_write(bp, phy,
-			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
-	bnx2x_save_bcm_spirom_ver(bp, phy, port);
-}
+	/* 8073 sometimes taking longer to download */
+	do {
+		count++;
+		if (count > 300) {
+			DP(NETIF_MSG_LINK,
+			   "bnx2x_8073_8727_external_rom_boot port %x:"
+			   "Download failed. fw version = 0x%x\n",
+			   port, fw_ver1);
+			rc = -EINVAL;
+			break;
+		}
 
-static void bnx2x_8073_set_xaui_low_power_mode(struct bnx2x *bp,
-					       struct bnx2x_phy *phy)
-{
-	u16 val;
-	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val);
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
 
-	if (val == 0) {
-		/* Mustn't set low power mode in 8073 A0 */
-		return;
-	}
+		msleep(1);
+	} while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
+		 ((fw_msgout & 0xff) != 0x03 && (phy->type ==
+		  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
 
-	/* Disable PLL sequencer (use read-modify-write to clear bit 13) */
-	bnx2x_cl45_read(bp, phy,
-			MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
-	val &= ~(1<<13);
-	bnx2x_cl45_write(bp, phy,
-			 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
+	/* Clear ser_boot_ctl bit */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+	bnx2x_save_bcm_spirom_ver(bp, phy, port);
 
-	/* PLL controls */
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805E, 0x1077);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805D, 0x0000);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805C, 0x030B);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805B, 0x1240);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805A, 0x2490);
+	DP(NETIF_MSG_LINK,
+	   "bnx2x_8073_8727_external_rom_boot port %x:"
+	   "Download complete. fw version = 0x%x\n",
+	   port, fw_ver1);
 
-	/* Tx Controls */
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A7, 0x0C74);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A6, 0x9041);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A5, 0x4640);
-
-	/* Rx Controls */
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FE, 0x01C4);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FD, 0x9249);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FC, 0x2015);
-
-	/* Enable PLL sequencer (use read-modify-write to set bit 13) */
-	bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
-	val |= (1<<13);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
+	return rc;
 }
 
@@ -3950,8 +3935,8 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
 
 	/* Read 8073 HW revision*/
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD,
 			MDIO_PMA_REG_8073_CHIP_REV, &val);
 
 	if (val != 1) {
 		/* No need to workaround in 8073 A1 */
@@ -3959,8 +3944,8 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
 	}
 
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD,
 			MDIO_PMA_REG_ROM_VER2, &val);
 
 	/* SNR should be applied only for version 0x102 */
 	if (val != 0x102)
@@ -3974,8 +3959,8 @@ static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
 	u16 val, cnt, cnt1 ;
 
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD,
 			MDIO_PMA_REG_8073_CHIP_REV, &val);
 
 	if (val > 0) {
 		/* No need to workaround in 8073 A1 */
@@ -3983,26 +3968,32 @@ static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
 	}
 	/* XAUI workaround in 8073 A0: */
 
-	/* After loading the boot ROM and restarting Autoneg,
-	   poll Dev1, Reg $C820: */
+	/*
+	 * After loading the boot ROM and restarting Autoneg, poll
+	 * Dev1, Reg $C820:
+	 */
 
 	for (cnt = 0; cnt < 1000; cnt++) {
 		bnx2x_cl45_read(bp, phy,
 				MDIO_PMA_DEVAD,
 				MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
 				&val);
-		/* If bit [14] = 0 or bit [13] = 0, continue on with
-		   system initialization (XAUI work-around not required,
-		   as these bits indicate 2.5G or 1G link up). */
+		/*
+		 * If bit [14] = 0 or bit [13] = 0, continue on with
+		 * system initialization (XAUI work-around not required, as
+		 * these bits indicate 2.5G or 1G link up).
+		 */
 		if (!(val & (1<<14)) || !(val & (1<<13))) {
 			DP(NETIF_MSG_LINK, "XAUI work-around not required\n");
 			return 0;
 		} else if (!(val & (1<<15))) {
-			DP(NETIF_MSG_LINK, "clc bit 15 went off\n");
-			/* If bit 15 is 0, then poll Dev1, Reg $C841 until
-			   it's MSB (bit 15) goes to 1 (indicating that the
-			   XAUI workaround has completed),
-			   then continue on with system initialization.*/
+			DP(NETIF_MSG_LINK, "bit 15 went off\n");
+			/*
+			 * If bit 15 is 0, then poll Dev1, Reg $C841 until it's
+			 * MSB (bit15) goes to 1 (indicating that the XAUI
+			 * workaround has completed), then continue on with
+			 * system initialization.
+			 */
 			for (cnt1 = 0; cnt1 < 1000; cnt1++) {
 				bnx2x_cl45_read(bp, phy,
 						MDIO_PMA_DEVAD,
@@ -4085,10 +4076,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
 		gpio_port = params->port;
 	/* Restore normal power mode*/
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
 
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
 
 	/* enable LASI */
 	bnx2x_cl45_write(bp, phy,
@@ -4098,8 +4089,6 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
 
 	bnx2x_8073_set_pause_cl37(params, phy, vars);
 
-	bnx2x_8073_set_xaui_low_power_mode(bp, phy);
-
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
 
@@ -4108,6 +4097,21 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
 
 	DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
 
+	/* Swap polarity if required - Must be done only in non-1G mode */
+	if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+		/* Configure the 8073 to swap _P and _N of the KR lines */
+		DP(NETIF_MSG_LINK, "Swapping polarity for the 8073\n");
+		/* 10G Rx/Tx and 1G Tx signal polarity swap */
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL,
+				 (val | (3<<9)));
+	}
+
+
 	/* Enable CL37 BAM */
 	if (REG_RD(bp, params->shmem_base +
 		   offsetof(struct shmem_region, dev_info.
@@ -4135,8 +4139,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
 			val = (1<<7);
 		} else if (phy->req_line_speed == SPEED_2500) {
 			val = (1<<5);
-			/* Note that 2.5G works only
-			   when used with 1G advertisment */
+			/*
+			 * Note that 2.5G works only when used with 1G
+			 * advertisment
+			 */
 		} else
 			val = (1<<5);
 	} else {
@@ -4145,8 +4151,7 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
 		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
 			val |= (1<<7);
 
-		/* Note that 2.5G works only when
-		   used with 1G advertisment */
+		/* Note that 2.5G works only when used with 1G advertisment */
 		if (phy->speed_cap_mask &
 		    (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
 		     PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
@@ -4186,9 +4191,11 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
 	/* Add support for CL37 (passive mode) III */
 	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
 
-	/* The SNR will improve about 2db by changing
-	   BW and FEE main tap. Rest commands are executed
-	   after link is up*/
+	/*
+	 * The SNR will improve about 2db by changing BW and FEE main
+	 * tap. Rest commands are executed after link is up
+	 * Change FFE main cursor to 5 in EDC register
+	 */
 	if (bnx2x_8073_is_snr_needed(bp, phy))
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
@@ -4272,12 +4279,11 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
 
 	link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
 	if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
-		/* The SNR will improve about 2dbby
-		   changing the BW and FEE main tap.*/
-		/* The 1st write to change FFE main
-		   tap is set before restart AN */
-		/* Change PLL Bandwidth in EDC
-		   register */
+		/*
+		 * The SNR will improve about 2dbby changing the BW and FEE main
+		 * tap. The 1st write to change FFE main tap is set before
+		 * restart AN. Change PLL Bandwidth in EDC register
+		 */
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH,
 				 0x26BC);
@@ -4314,8 +4320,32 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
 	}
 
 	if (link_up) {
+		/* Swap polarity if required */
+		if (params->lane_config &
+		    PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+			/* Configure the 8073 to swap P and N of the KR lines */
+			bnx2x_cl45_read(bp, phy,
+					MDIO_XS_DEVAD,
+					MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
+			/*
+			 * Set bit 3 to invert Rx in 1G mode and clear this bit
+			 * when it`s in 10G mode.
+			 */
+			if (vars->line_speed == SPEED_1000) {
+				DP(NETIF_MSG_LINK, "Swapping 1G polarity for"
+					      "the 8073\n");
+				val1 |= (1<<3);
+			} else
+				val1 &= ~(1<<3);
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_XS_DEVAD,
+					 MDIO_XS_REG_8073_RX_CTRL_PCIE,
+					 val1);
+		}
 		bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
 		bnx2x_8073_resolve_fc(phy, params, vars);
+		vars->duplex = DUPLEX_FULL;
 	}
 	return link_up;
 }
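The new polarity block in bnx2x_8073_read_status() is a read-modify-write of one bit: bit 3 of the Rx control register is set at 1G and must be clear at 10G. A tiny standalone sketch of that bit manipulation; the helper and the sample register values are illustrative, only the bit position comes from the hunk.

#include <stdio.h>

#define RX_INVERT_1G (1u << 3) /* bit 3, per the hunk above */

static unsigned short adjust_rx_polarity(unsigned short reg, int speed_mbps)
{
	if (speed_mbps == 1000)
		reg |= RX_INVERT_1G;  /* invert Rx in 1G mode */
	else
		reg &= ~RX_INVERT_1G; /* must be clear in 10G mode */
	return reg;
}

int main(void)
{
	printf("1G : 0x%04x\n", adjust_rx_polarity(0x0000, 1000));
	printf("10G: 0x%04x\n", adjust_rx_polarity(0x0008, 10000));
	return 0;
}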
@@ -4332,8 +4362,8 @@ static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
 	DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
 	   gpio_port);
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 		       MISC_REGISTERS_GPIO_OUTPUT_LOW,
 		       gpio_port);
 }
 
 /******************************************************************/
@@ -4347,11 +4377,11 @@ static u8 bnx2x_8705_config_init(struct bnx2x_phy *phy,
 	DP(NETIF_MSG_LINK, "init 8705\n");
 	/* Restore normal power mode*/
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
 	/* HW reset */
 	bnx2x_ext_phy_hw_reset(bp, params->port);
 	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
-	bnx2x_wait_reset_complete(bp, phy);
+	bnx2x_wait_reset_complete(bp, phy, params);
 
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288);
@@ -4402,35 +4432,79 @@ static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
 /******************************************************************/
 /*			SFP+ module Section			  */
 /******************************************************************/
-static void bnx2x_sfp_set_transmitter(struct bnx2x *bp,
+static u8 bnx2x_get_gpio_port(struct link_params *params)
+{
+	u8 gpio_port;
+	u32 swap_val, swap_override;
+	struct bnx2x *bp = params->bp;
+	if (CHIP_IS_E2(bp))
+		gpio_port = BP_PATH(bp);
+	else
+		gpio_port = params->port;
+	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+	return gpio_port ^ (swap_val && swap_override);
+}
+static void bnx2x_sfp_set_transmitter(struct link_params *params,
 				      struct bnx2x_phy *phy,
-				      u8 port,
 				      u8 tx_en)
 {
 	u16 val;
+	u8 port = params->port;
+	struct bnx2x *bp = params->bp;
+	u32 tx_en_mode;
 
-	DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x\n",
-	   tx_en, port);
 	/* Disable/Enable transmitter ( TX laser of the SFP+ module.)*/
-	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD,
-			MDIO_PMA_REG_PHY_IDENTIFIER,
-			&val);
+	tx_en_mode = REG_RD(bp, params->shmem_base +
+			    offsetof(struct shmem_region,
+				     dev_info.port_hw_config[port].sfp_ctrl)) &
+		PORT_HW_CFG_TX_LASER_MASK;
+	DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x "
+	   "mode = %x\n", tx_en, port, tx_en_mode);
+	switch (tx_en_mode) {
+	case PORT_HW_CFG_TX_LASER_MDIO:
 
-	if (tx_en)
-		val &= ~(1<<15);
-	else
-		val |= (1<<15);
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_PHY_IDENTIFIER,
+				&val);
 
-	bnx2x_cl45_write(bp, phy,
-			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_PHY_IDENTIFIER,
-			 val);
+		if (tx_en)
+			val &= ~(1<<15);
+		else
+			val |= (1<<15);
+
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_PHY_IDENTIFIER,
+				 val);
+		break;
+	case PORT_HW_CFG_TX_LASER_GPIO0:
+	case PORT_HW_CFG_TX_LASER_GPIO1:
+	case PORT_HW_CFG_TX_LASER_GPIO2:
+	case PORT_HW_CFG_TX_LASER_GPIO3:
+	{
+		u16 gpio_pin;
+		u8 gpio_port, gpio_mode;
+		if (tx_en)
+			gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH;
+		else
+			gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW;
+
+		gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0;
+		gpio_port = bnx2x_get_gpio_port(params);
+		bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+		break;
+	}
+	default:
+		DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode);
+		break;
+	}
 }
 
 static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 					    struct link_params *params,
 					    u16 addr, u8 byte_cnt, u8 *o_buf)
 {
 	struct bnx2x *bp = params->bp;
 	u16 val = 0;
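A subtlety in the new bnx2x_get_gpio_port() helper above: it XORs the port index with swap_val && swap_override. Since && yields 0 or 1, the port flips only when both the port-swap strap and its override are non-zero. A tiny demonstration with hypothetical strap values, no register reads:

#include <stdio.h>

static unsigned char effective_gpio_port(unsigned char port,
					 unsigned int swap_val,
					 unsigned int swap_override)
{
	/* && collapses the two straps to 0 or 1 before the XOR */
	return port ^ (swap_val && swap_override);
}

int main(void)
{
	printf("%u\n", effective_gpio_port(0, 1, 1)); /* both set: 0 -> 1 */
	printf("%u\n", effective_gpio_port(0, 1, 0)); /* no override: stays 0 */
	printf("%u\n", effective_gpio_port(1, 0, 1)); /* no swap: stays 1 */
	return 0;
}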
@@ -4443,23 +4517,23 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 	/* Set the read command byte count */
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
 			 (byte_cnt | 0xa000));
 
 	/* Set the read command address */
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
 			 addr);
 
 	/* Activate read command */
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
 			 0x2c0f);
 
 	/* Wait up to 500us for command complete status */
 	for (i = 0; i < 100; i++) {
 		bnx2x_cl45_read(bp, phy,
 				MDIO_PMA_DEVAD,
 				MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
 		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
 		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
 			break;
@@ -4477,15 +4551,15 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 	/* Read the buffer */
 	for (i = 0; i < byte_cnt; i++) {
 		bnx2x_cl45_read(bp, phy,
 			       MDIO_PMA_DEVAD,
 			       MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
 		o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
 	}
 
 	for (i = 0; i < 100; i++) {
 		bnx2x_cl45_read(bp, phy,
 			       MDIO_PMA_DEVAD,
 			       MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
 		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
 		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
 			return 0;
@@ -4496,7 +4570,7 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 
 static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 					    struct link_params *params,
 					    u16 addr, u8 byte_cnt, u8 *o_buf)
 {
 	struct bnx2x *bp = params->bp;
 	u16 val, i;
@@ -4509,41 +4583,43 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 
 	/* Need to read from 1.8000 to clear it */
 	bnx2x_cl45_read(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
 		       &val);
 
 	/* Set the read command byte count */
 	bnx2x_cl45_write(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
 		       ((byte_cnt < 2) ? 2 : byte_cnt));
 
 	/* Set the read command address */
 	bnx2x_cl45_write(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
 		       addr);
 	/* Set the destination address */
 	bnx2x_cl45_write(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       0x8004,
 		       MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
 
 	/* Activate read command */
 	bnx2x_cl45_write(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
 		       0x8002);
-	/* Wait appropriate time for two-wire command to finish before
-	   polling the status register */
+	/*
+	 * Wait appropriate time for two-wire command to finish before
+	 * polling the status register
+	 */
 	msleep(1);
 
 	/* Wait up to 500us for command complete status */
 	for (i = 0; i < 100; i++) {
 		bnx2x_cl45_read(bp, phy,
 			       MDIO_PMA_DEVAD,
 			       MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
 		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
 		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
 			break;
@@ -4555,21 +4631,21 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 		DP(NETIF_MSG_LINK,
 			 "Got bad status 0x%x when reading from SFP+ EEPROM\n",
 			 (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
-		return -EINVAL;
+		return -EFAULT;
 	}
 
 	/* Read the buffer */
 	for (i = 0; i < byte_cnt; i++) {
 		bnx2x_cl45_read(bp, phy,
 			       MDIO_PMA_DEVAD,
 			       MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
 		o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
 	}
 
 	for (i = 0; i < 100; i++) {
 		bnx2x_cl45_read(bp, phy,
 			       MDIO_PMA_DEVAD,
 			       MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
 		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
 		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
 			return 0;
@@ -4579,22 +4655,22 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 	return -EINVAL;
 }
 
-static u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 				struct link_params *params, u16 addr,
 				u8 byte_cnt, u8 *o_buf)
 {
 	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
 		return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
 							 byte_cnt, o_buf);
 	else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
 		return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
 							 byte_cnt, o_buf);
 	return -EINVAL;
 }
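A usage sketch for the newly exported reader, following the vendor-name access that bnx2x_verify_sfp_module performs with the same constants:

	/* Sketch: read and terminate the SFP+ vendor name via the API */
	u8 vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE + 1];

	if (bnx2x_read_sfp_module_eeprom(phy, params,
					 SFP_EEPROM_VENDOR_NAME_ADDR,
					 SFP_EEPROM_VENDOR_NAME_SIZE,
					 vendor_name) == 0)
		vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';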
 
 static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
 			     struct link_params *params,
 			     u16 *edc_mode)
 {
 	struct bnx2x *bp = params->bp;
 	u8 val, check_limiting_mode = 0;
@@ -4615,8 +4691,10 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
 	{
 		u8 copper_module_type;
 
-		/* Check if its active cable( includes SFP+ module)
-		   of passive cable*/
+		/*
+		 * Check if its active cable (includes SFP+ module)
+		 * of passive cable
+		 */
 		if (bnx2x_read_sfp_module_eeprom(phy,
 					       params,
 					       SFP_EEPROM_FC_TX_TECH_ADDR,
@@ -4675,8 +4753,10 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
 	DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
 	return 0;
 }
-/* This function read the relevant field from the module ( SFP+ ),
-   and verify it is compliant with this board */
+/*
+ * This function read the relevant field from the module (SFP+), and verify it
+ * is compliant with this board
+ */
 static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
 				  struct link_params *params)
 {
@@ -4725,24 +4805,24 @@ static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
 	/* format the warning message */
 	if (bnx2x_read_sfp_module_eeprom(phy,
 				       params,
 				       SFP_EEPROM_VENDOR_NAME_ADDR,
 				       SFP_EEPROM_VENDOR_NAME_SIZE,
 				       (u8 *)vendor_name))
 		vendor_name[0] = '\0';
 	else
 		vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
 	if (bnx2x_read_sfp_module_eeprom(phy,
 				       params,
 				       SFP_EEPROM_PART_NO_ADDR,
 				       SFP_EEPROM_PART_NO_SIZE,
 				       (u8 *)vendor_pn))
 		vendor_pn[0] = '\0';
 	else
 		vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
 
-	netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected,"
+	netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected,"
 		   " Port %d from %s part number %s\n",
 		   params->port, vendor_name, vendor_pn);
 	phy->flags |= FLAGS_SFP_NOT_APPROVED;
 	return -EINVAL;
 }
@@ -4754,8 +4834,11 @@ static u8 bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
 	u8 val;
 	struct bnx2x *bp = params->bp;
 	u16 timeout;
-	/* Initialization time after hot-plug may take up to 300ms for some
-	   phys type ( e.g. JDSU ) */
+	/*
+	 * Initialization time after hot-plug may take up to 300ms for
+	 * some phys type ( e.g. JDSU )
+	 */
+
 	for (timeout = 0; timeout < 60; timeout++) {
 		if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
 		    == 0) {
@@ -4774,16 +4857,14 @@ static void bnx2x_8727_power_module(struct bnx2x *bp,
 	/* Make sure GPIOs are not using for LED mode */
 	u16 val;
 	/*
-	 * In the GPIO register, bit 4 is use to detemine if the GPIOs are
+	 * In the GPIO register, bit 4 is use to determine if the GPIOs are
 	 * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
 	 * output
 	 * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0
 	 * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1
 	 * where the 1st bit is the over-current(only input), and 2nd bit is
 	 * for power( only output )
-	 */
-
-	/*
+	 *
 	 * In case of NOC feature is disabled and power is up, set GPIO control
 	 * as input to enable listening of over-current indication
 	 */
@@ -4812,15 +4893,14 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
 	u16 cur_limiting_mode;
 
 	bnx2x_cl45_read(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_ROM_VER2,
 		       &cur_limiting_mode);
 	DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n",
 		 cur_limiting_mode);
 
 	if (edc_mode == EDC_MODE_LIMITING) {
-		DP(NETIF_MSG_LINK,
-			 "Setting LIMITING MODE\n");
+		DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n");
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_PMA_DEVAD,
 				 MDIO_PMA_REG_ROM_VER2,
@@ -4829,62 +4909,63 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
 
 	DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
 
-	/* Changing to LRM mode takes quite few seconds.
-	   So do it only if current mode is limiting
-	   ( default is LRM )*/
+	/*
+	 * Changing to LRM mode takes quite few seconds. So do it only
+	 * if current mode is limiting (default is LRM)
+	 */
 	if (cur_limiting_mode != EDC_MODE_LIMITING)
 		return 0;
 
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
 			 MDIO_PMA_REG_LRM_MODE,
 			 0);
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
 			 MDIO_PMA_REG_ROM_VER2,
 			 0x128);
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
 			 MDIO_PMA_REG_MISC_CTRL0,
 			 0x4008);
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
 			 MDIO_PMA_REG_LRM_MODE,
 			 0xaaaa);
 	}
 	return 0;
 }
 
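The 8727 variant that follows packs the EDC mode into the low byte of ROM_VER2 while preserving the high byte; a condensed sketch of that read-modify-write, using only expressions from the function below:

	/* Sketch: keep the MSB of ROM_VER2, place edc_mode in the LSB */
	u16 rom_ver2_new = (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff);

	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER2,
			 rom_ver2_new);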
 static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
 				       struct bnx2x_phy *phy,
 				       u16 edc_mode)
 {
 	u16 phy_identifier;
 	u16 rom_ver2_val;
 	bnx2x_cl45_read(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_PHY_IDENTIFIER,
 		       &phy_identifier);
 
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
 			 MDIO_PMA_REG_PHY_IDENTIFIER,
 			 (phy_identifier & ~(1<<9)));
 
 	bnx2x_cl45_read(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_ROM_VER2,
 		       &rom_ver2_val);
 	/* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
 			 MDIO_PMA_REG_ROM_VER2,
 			 (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
 
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
 			 MDIO_PMA_REG_PHY_IDENTIFIER,
 			 (phy_identifier | (1<<9)));
 
 	return 0;
 }
@@ -4897,11 +4978,11 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
 
 	switch (action) {
 	case DISABLE_TX:
-		bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+		bnx2x_sfp_set_transmitter(params, phy, 0);
 		break;
 	case ENABLE_TX:
 		if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
-			bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
+			bnx2x_sfp_set_transmitter(params, phy, 1);
 		break;
 	default:
 		DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
@@ -4910,6 +4991,38 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
 	}
 }
 
+static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
+					   u8 gpio_mode)
+{
+	struct bnx2x *bp = params->bp;
+
+	u32 fault_led_gpio = REG_RD(bp, params->shmem_base +
+			    offsetof(struct shmem_region,
+			dev_info.port_hw_config[params->port].sfp_ctrl)) &
+		PORT_HW_CFG_FAULT_MODULE_LED_MASK;
+	switch (fault_led_gpio) {
+	case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED:
+		return;
+	case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0:
+	case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1:
+	case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2:
+	case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3:
+	{
+		u8 gpio_port = bnx2x_get_gpio_port(params);
+		u16 gpio_pin = fault_led_gpio -
+			PORT_HW_CFG_FAULT_MODULE_LED_GPIO0;
+		DP(NETIF_MSG_LINK, "Set fault module-detected led "
+				   "pin %x port %x mode %x\n",
+			       gpio_pin, gpio_port, gpio_mode);
+		bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+	}
+	break;
+	default:
+		DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n",
+			 fault_led_gpio);
+	}
+}
+
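A usage sketch of the new helper, matching the two call sites this patch adds in bnx2x_sfp_module_detection:

	/* Sketch: light the fault LED for a rejected module, clear it
	 * otherwise */
	if (bnx2x_verify_sfp_module(phy, params) != 0)
		bnx2x_set_sfp_module_fault_led(params,
					       MISC_REGISTERS_GPIO_HIGH);
	else
		bnx2x_set_sfp_module_fault_led(params,
					       MISC_REGISTERS_GPIO_LOW);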
 static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
 				     struct link_params *params)
 {
@@ -4927,15 +5040,14 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
 	if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
 		DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
 		return -EINVAL;
-	} else if (bnx2x_verify_sfp_module(phy, params) !=
-		   0) {
+	} else if (bnx2x_verify_sfp_module(phy, params) != 0) {
 		/* check SFP+ module compatibility */
 		DP(NETIF_MSG_LINK, "Module verification failed!!\n");
 		rc = -EINVAL;
 		/* Turn on fault module-detected led */
-		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-			       MISC_REGISTERS_GPIO_HIGH,
-			       params->port);
+		bnx2x_set_sfp_module_fault_led(params,
+					       MISC_REGISTERS_GPIO_HIGH);
+
 		if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) &&
 		    ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
 		     PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) {
@@ -4946,18 +5058,17 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
 		}
 	} else {
 		/* Turn off fault module-detected led */
-		DP(NETIF_MSG_LINK, "Turn off fault module-detected led\n");
-		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-			       MISC_REGISTERS_GPIO_LOW,
-			       params->port);
+		bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
 	}
 
 	/* power up the SFP module */
 	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
 		bnx2x_8727_power_module(bp, phy, 1);
 
-	/* Check and set limiting mode / LRM mode on 8726.
-	   On 8727 it is done automatically */
+	/*
+	 * Check and set limiting mode / LRM mode on 8726. On 8727 it
+	 * is done automatically
+	 */
 	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
 		bnx2x_8726_set_limiting_mode(bp, phy, edc_mode);
 	else
@@ -4969,9 +5080,9 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
 	if (rc == 0 ||
 	    (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
 	    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
-		bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
+		bnx2x_sfp_set_transmitter(params, phy, 1);
 	else
-		bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+		bnx2x_sfp_set_transmitter(params, phy, 0);
 
 	return rc;
 }
@@ -4984,11 +5095,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
 	u8 port = params->port;
 
 	/* Set valid module led off */
-	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-		       MISC_REGISTERS_GPIO_HIGH,
-		       params->port);
+	bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);
 
-	/* Get current gpio val refelecting module plugged in / out*/
+	/* Get current gpio val reflecting module plugged in / out*/
 	gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port);
 
 	/* Call the handling function in case module is detected */
@@ -5004,18 +5113,20 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
 		DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
 	} else {
 		u32 val = REG_RD(bp, params->shmem_base +
 				 offsetof(struct shmem_region, dev_info.
 					  port_feature_config[params->port].
 					  config));
 
 		bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
 				   MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
 				   port);
-		/* Module was plugged out. */
-		/* Disable transmit for this module */
+		/*
+		 * Module was plugged out.
+		 * Disable transmit for this module
+		 */
 		if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
 		    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
-			bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+			bnx2x_sfp_set_transmitter(params, phy, 0);
 	}
 }
 
@@ -5051,9 +5162,9 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
 
 	DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
 			" link_status 0x%x\n", rx_sd, pcs_status, val2);
-	/* link is up if both bit 0 of pmd_rx_sd and
-	 * bit 0 of pcs_status are set, or if the autoneg bit
-	 * 1 is set
+	/*
+	 * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
+	 * are set, or if the autoneg bit 1 is set
 	 */
 	link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
 	if (link_up) {
@@ -5062,6 +5173,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
 		else
 			vars->line_speed = SPEED_10000;
 		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+		vars->duplex = DUPLEX_FULL;
 	}
 	return link_up;
 }
@@ -5073,14 +5185,15 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
 				 struct link_params *params,
 				 struct link_vars *vars)
 {
-	u16 cnt, val;
+	u32 tx_en_mode;
+	u16 cnt, val, tmp1;
 	struct bnx2x *bp = params->bp;
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
 	/* HW reset */
 	bnx2x_ext_phy_hw_reset(bp, params->port);
 	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
-	bnx2x_wait_reset_complete(bp, phy);
+	bnx2x_wait_reset_complete(bp, phy, params);
 
 	/* Wait until fw is loaded */
 	for (cnt = 0; cnt < 100; cnt++) {
@@ -5147,6 +5260,26 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
 				 0x0004);
 	}
 	bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
+
+	/*
+	 * If TX Laser is controlled by GPIO_0, do not let PHY go into low
+	 * power mode, if TX Laser is disabled
+	 */
+
+	tx_en_mode = REG_RD(bp, params->shmem_base +
+			    offsetof(struct shmem_region,
+				dev_info.port_hw_config[params->port].sfp_ctrl))
+			& PORT_HW_CFG_TX_LASER_MASK;
+
+	if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+		DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+		bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1);
+		tmp1 |= 0x1;
+		bnx2x_cl45_write(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1);
+	}
+
 	return 0;
 }
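The same sfp_ctrl decode now appears in both bnx2x_8706_config_init and bnx2x_8727_config_init; a sketch of how it could be factored out (the helper name is hypothetical, the expression is the patch's own):

	/* Hypothetical helper: per-port TX laser control mode from shmem */
	static u32 bnx2x_get_tx_laser_mode(struct bnx2x *bp,
					   struct link_params *params)
	{
		return REG_RD(bp, params->shmem_base +
			      offsetof(struct shmem_region,
				       dev_info.port_hw_config[params->port].
				       sfp_ctrl)) &
		       PORT_HW_CFG_TX_LASER_MASK;
	}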
 
@@ -5181,26 +5314,26 @@ static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
 
 	/* Set soft reset */
 	bnx2x_cl45_write(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_GEN_CTRL,
 		       MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
 
 	bnx2x_cl45_write(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_MISC_CTRL1, 0x0001);
 
 	bnx2x_cl45_write(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_GEN_CTRL,
 		       MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
 
 	/* wait for 150ms for microcode load */
 	msleep(150);
 
 	/* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
 	bnx2x_cl45_write(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_MISC_CTRL1, 0x0000);
 
 	msleep(200);
 	bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
@@ -5235,23 +5368,18 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
 	u32 val;
 	u32 swap_val, swap_override, aeu_gpio_mask, offset;
 	DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
-	/* Restore normal power mode*/
-	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
-
-	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
 
 	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
-	bnx2x_wait_reset_complete(bp, phy);
+	bnx2x_wait_reset_complete(bp, phy, params);
 
 	bnx2x_8726_external_rom_boot(phy, params);
 
-	/* Need to call module detected on initialization since
-	   the module detection triggered by actual module
-	   insertion might occur before driver is loaded, and when
-	   driver is loaded, it reset all registers, including the
-	   transmitter */
+	/*
+	 * Need to call module detected on initialization since the module
+	 * detection triggered by actual module insertion might occur before
+	 * driver is loaded, and when driver is loaded, it reset all
+	 * registers, including the transmitter
+	 */
 	bnx2x_sfp_module_detection(phy, params);
 
 	if (phy->req_line_speed == SPEED_1000) {
@@ -5284,8 +5412,10 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
 				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
-		/* Enable RX-ALARM control to receive
-		   interrupt for 1G speed change */
+		/*
+		 * Enable RX-ALARM control to receive interrupt for 1G speed
+		 * change
+		 */
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x4);
 		bnx2x_cl45_write(bp, phy,
@@ -5317,7 +5447,7 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
 
 	/* Set GPIO3 to trigger SFP+ module insertion/removal */
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
 		       MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
 
 	/* The GPIO should be swapped if the swap register is set and active */
 	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
@@ -5408,7 +5538,7 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
 				struct link_params *params) {
 	u32 swap_val, swap_override;
 	u8 port;
-	/**
+	/*
 	 * The PHY reset is controlled by GPIO 1. Fake the port number
 	 * to cancel the swap done in set_gpio()
 	 */
@@ -5417,20 +5547,21 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
 	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
 	port = (swap_val && swap_override) ^ 1;
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 }
 
 static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
 				 struct link_params *params,
 				 struct link_vars *vars)
 {
-	u16 tmp1, val, mod_abs;
+	u32 tx_en_mode;
+	u16 tmp1, val, mod_abs, tmp2;
 	u16 rx_alarm_ctrl_val;
 	u16 lasi_ctrl_val;
 	struct bnx2x *bp = params->bp;
 	/* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
 
-	bnx2x_wait_reset_complete(bp, phy);
+	bnx2x_wait_reset_complete(bp, phy, params);
 	rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
 	lasi_ctrl_val = 0x0004;
 
@@ -5443,14 +5574,17 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
 	bnx2x_cl45_write(bp, phy,
 		MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, lasi_ctrl_val);
 
-	/* Initially configure MOD_ABS to interrupt when
-	   module is presence( bit 8) */
+	/*
+	 * Initially configure MOD_ABS to interrupt when module is
+	 * presence( bit 8)
+	 */
 	bnx2x_cl45_read(bp, phy,
 		       MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
-	/* Set EDC off by setting OPTXLOS signal input to low
-	   (bit 9).
-	   When the EDC is off it locks onto a reference clock and
-	   avoids becoming 'lost'.*/
+	/*
+	 * Set EDC off by setting OPTXLOS signal input to low (bit 9).
+	 * When the EDC is off it locks onto a reference clock and avoids
+	 * becoming 'lost'
+	 */
 	mod_abs &= ~(1<<8);
 	if (!(phy->flags & FLAGS_NOC))
 		mod_abs &= ~(1<<9);
@@ -5465,7 +5599,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
 	if (phy->flags & FLAGS_NOC)
 		val |= (3<<5);
 
-	/**
+	/*
 	 * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
 	 * status which reflect SFP+ module over-current
 	 */
@@ -5492,7 +5626,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
 	DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
-	/**
+	/*
 	 * Power down the XAUI until link is up in case of dual-media
 	 * and 1G
 	 */
@@ -5518,7 +5652,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
 	} else {
-		/**
+		/*
 		 * Since the 8727 has only single reset pin, need to set the 10G
 		 * registers although it is default
 		 */
@@ -5534,7 +5668,8 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
 				 0x0008);
 	}
 
-	/* Set 2-wire transfer rate of SFP+ module EEPROM
+	/*
+	 * Set 2-wire transfer rate of SFP+ module EEPROM
 	 * to 100Khz since some DACs(direct attached cables) do
 	 * not work at 400Khz.
 	 */
@@ -5557,6 +5692,26 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
 				 phy->tx_preemphasis[1]);
 	}
 
+	/*
+	 * If TX Laser is controlled by GPIO_0, do not let PHY go into low
+	 * power mode, if TX Laser is disabled
+	 */
+	tx_en_mode = REG_RD(bp, params->shmem_base +
+			    offsetof(struct shmem_region,
+				dev_info.port_hw_config[params->port].sfp_ctrl))
+			& PORT_HW_CFG_TX_LASER_MASK;
+
+	if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+
+		DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+		bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2);
+		tmp2 |= 0x1000;
+		tmp2 &= 0xFFEF;
+		bnx2x_cl45_write(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
+	}
+
 	return 0;
 }
 
@@ -5570,46 +5725,49 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
 					  port_feature_config[params->port].
 					  config));
 	bnx2x_cl45_read(bp, phy,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
 	if (mod_abs & (1<<8)) {
 
 		/* Module is absent */
 		DP(NETIF_MSG_LINK, "MOD_ABS indication "
 			    "show module is absent\n");
 
-		/* 1. Set mod_abs to detect next module
-		      presence event
-		   2. Set EDC off by setting OPTXLOS signal input to low
-		      (bit 9).
-		      When the EDC is off it locks onto a reference clock and
-		      avoids becoming 'lost'.*/
+		/*
+		 * 1. Set mod_abs to detect next module
+		 *    presence event
+		 * 2. Set EDC off by setting OPTXLOS signal input to low
+		 *    (bit 9).
+		 *    When the EDC is off it locks onto a reference clock and
+		 *    avoids becoming 'lost'.
+		 */
 		mod_abs &= ~(1<<8);
 		if (!(phy->flags & FLAGS_NOC))
 			mod_abs &= ~(1<<9);
 		bnx2x_cl45_write(bp, phy,
 				MDIO_PMA_DEVAD,
 				MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
 
-		/* Clear RX alarm since it stays up as long as
-		   the mod_abs wasn't changed */
+		/*
+		 * Clear RX alarm since it stays up as long as
+		 * the mod_abs wasn't changed
+		 */
 		bnx2x_cl45_read(bp, phy,
 			       MDIO_PMA_DEVAD,
 			       MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
 
 	} else {
 		/* Module is present */
 		DP(NETIF_MSG_LINK, "MOD_ABS indication "
 			    "show module is present\n");
-		/* First thing, disable transmitter,
-		   and if the module is ok, the
-		   module_detection will enable it*/
-
-		/* 1. Set mod_abs to detect next module
-		      absent event ( bit 8)
-		   2. Restore the default polarity of the OPRXLOS signal and
-		      this signal will then correctly indicate the presence or
-		      absence of the Rx signal. (bit 9) */
+		/*
+		 * First disable transmitter, and if the module is ok, the
+		 * module_detection will enable it
+		 * 1. Set mod_abs to detect next module absent event ( bit 8)
+		 * 2. Restore the default polarity of the OPRXLOS signal and
+		 *    this signal will then correctly indicate the presence or
+		 *    absence of the Rx signal. (bit 9)
+		 */
 		mod_abs |= (1<<8);
 		if (!(phy->flags & FLAGS_NOC))
 			mod_abs |= (1<<9);
@@ -5617,10 +5775,12 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
 				MDIO_PMA_DEVAD,
 				MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
 
-		/* Clear RX alarm since it stays up as long as
-		   the mod_abs wasn't changed. This is need to be done
-		   before calling the module detection, otherwise it will clear
-		   the link update alarm */
+		/*
+		 * Clear RX alarm since it stays up as long as the mod_abs
+		 * wasn't changed. This is need to be done before calling the
+		 * module detection, otherwise it will clear the link update
+		 * alarm
+		 */
 		bnx2x_cl45_read(bp, phy,
 			       MDIO_PMA_DEVAD,
 			       MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
@@ -5628,7 +5788,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
 
 		if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
 		    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
-			bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+			bnx2x_sfp_set_transmitter(params, phy, 0);
 
 		if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
 			bnx2x_sfp_module_detection(phy, params);
@@ -5637,9 +5797,8 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
 	}
 
 	DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
 		 rx_alarm_status);
-	/* No need to check link status in case of
-	   module plugged in/out */
+	/* No need to check link status in case of module plugged in/out */
 }
 
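The MOD_ABS handler above arms bits 8 and 9 of the PHY identifier in opposite directions depending on module state; a condensed sketch of the two states (module_absent is a hypothetical local standing in for the mod_abs bit-8 test):

	/* Sketch: arm detection of the opposite hot-plug event each time */
	if (module_absent) {
		mod_abs &= ~(1<<8);		/* detect next insertion */
		if (!(phy->flags & FLAGS_NOC))
			mod_abs &= ~(1<<9);	/* EDC off, lock to ref clock */
	} else {
		mod_abs |= (1<<8);		/* detect next removal */
		if (!(phy->flags & FLAGS_NOC))
			mod_abs |= (1<<9);	/* restore OPRXLOS polarity */
	}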
 static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
@@ -5675,7 +5834,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
 
-	/**
+	/*
 	 * If a module is present and there is need to check
 	 * for over current
 	 */
@@ -5695,12 +5854,8 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
5695 " Please remove the SFP+ module and" 5854 " Please remove the SFP+ module and"
5696 " restart the system to clear this" 5855 " restart the system to clear this"
5697 " error.\n", 5856 " error.\n",
5698 params->port); 5857 params->port);
5699 5858 /* Disable all RX_ALARMs except for mod_abs */
5700 /*
5701 * Disable all RX_ALARMs except for
5702 * mod_abs
5703 */
5704 bnx2x_cl45_write(bp, phy, 5859 bnx2x_cl45_write(bp, phy,
5705 MDIO_PMA_DEVAD, 5860 MDIO_PMA_DEVAD,
5706 MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5)); 5861 MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5));
@@ -5743,11 +5898,15 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
 			MDIO_PMA_DEVAD,
 			MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
 
-	/* Bits 0..2 --> speed detected,
-	   bits 13..15--> link is down */
+	/*
+	 * Bits 0..2 --> speed detected,
+	 * Bits 13..15--> link is down
+	 */
 	if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
 		link_up = 1;
 		vars->line_speed = SPEED_10000;
+		DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
+			   params->port);
 	} else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
 		link_up = 1;
 		vars->line_speed = SPEED_1000;
@@ -5758,15 +5917,18 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
 		DP(NETIF_MSG_LINK, "port %x: External link is down\n",
 			   params->port);
 	}
-	if (link_up)
+	if (link_up) {
 		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+		vars->duplex = DUPLEX_FULL;
+		DP(NETIF_MSG_LINK, "duplex = 0x%x\n", vars->duplex);
+	}
 
 	if ((DUAL_MEDIA(params)) &&
 	    (phy->req_line_speed == SPEED_1000)) {
 		bnx2x_cl45_read(bp, phy,
 				MDIO_PMA_DEVAD,
 				MDIO_PMA_REG_8727_PCS_GP, &val1);
-		/**
+		/*
 		 * In case of dual-media board and 1G, power up the XAUI side,
 		 * otherwise power it down. For 10G it is done automatically
 		 */
@@ -5786,7 +5948,7 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
 {
 	struct bnx2x *bp = params->bp;
 	/* Disable Transmitter */
-	bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+	bnx2x_sfp_set_transmitter(params, phy, 0);
 	/* Clear LASI */
 	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0);
 
@@ -5798,19 +5960,23 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
 static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
 					    struct link_params *params)
 {
-	u16 val, fw_ver1, fw_ver2, cnt;
+	u16 val, fw_ver1, fw_ver2, cnt, adj;
 	struct bnx2x *bp = params->bp;
 
+	adj = 0;
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+		adj = -1;
+
 	/* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/
 	/* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0014);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, 0x0000);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, 0x0300);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x0009);
 
 	for (cnt = 0; cnt < 100; cnt++) {
-		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
 		if (val & 1)
 			break;
 		udelay(5);
@@ -5824,11 +5990,11 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
 
 
 	/* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0000);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x000A);
 	for (cnt = 0; cnt < 100; cnt++) {
-		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
 		if (val & 1)
 			break;
 		udelay(5);
@@ -5841,9 +6007,9 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
 	}
 
 	/* lower 16 bits of the register SPI_FW_STATUS */
-	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
+	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, &fw_ver1);
 	/* upper 16 bits of register SPI_FW_STATUS */
-	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
+	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, &fw_ver2);
 
 	bnx2x_save_spirom_version(bp, params->port, (fw_ver2<<16) | fw_ver1,
 				  phy->ver_addr);
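Both SPI_BRIDGE accesses above share the same MDIO2ARM shape: program the 0xA819-0xA81C window, kick 0xA817, then poll bit 0 of 0xA818. A sketch of the completion poll as a helper (name is hypothetical; registers, adj, and the u8/-EINVAL return idiom are those used above):

	/* Hypothetical helper for the 0xA818 completion poll used twice */
	static u8 bnx2x_848xx_mdio2arm_wait(struct bnx2x *bp,
					    struct bnx2x_phy *phy, u16 adj)
	{
		u16 val, cnt;

		for (cnt = 0; cnt < 100; cnt++) {
			bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
					0xA818 + adj, &val);
			if (val & 1)
				return 0;
			udelay(5);
		}
		return -EINVAL;
	}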
@@ -5852,33 +6018,53 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
 static void bnx2x_848xx_set_led(struct bnx2x *bp,
 				struct bnx2x_phy *phy)
 {
-	u16 val;
+	u16 val, adj;
+
+	adj = 0;
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+		adj = -1;
 
 	/* PHYC_CTL_LED_CTL */
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD,
-			MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+			MDIO_PMA_REG_8481_LINK_SIGNAL + adj, &val);
 	val &= 0xFE00;
 	val |= 0x0092;
 
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LINK_SIGNAL, val);
+			 MDIO_PMA_REG_8481_LINK_SIGNAL + adj, val);
 
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LED1_MASK,
+			 MDIO_PMA_REG_8481_LED1_MASK + adj,
 			 0x80);
 
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LED2_MASK,
+			 MDIO_PMA_REG_8481_LED2_MASK + adj,
 			 0x18);
 
+	/* Select activity source by Tx and Rx, as suggested by PHY AE */
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LED3_MASK,
-			 0x0040);
+			 MDIO_PMA_REG_8481_LED3_MASK + adj,
+			 0x0006);
+
+	/* Select the closest activity blink rate to that in 10/100/1000 */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_8481_LED3_BLINK + adj,
+			 0);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, &val);
+	val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, val);
 
 	/* 'Interrupt Mask' */
 	bnx2x_cl45_write(bp, phy,
@@ -5892,7 +6078,11 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
 {
 	struct bnx2x *bp = params->bp;
 	u16 autoneg_val, an_1000_val, an_10_100_val;
-
+	/*
+	 * This phy uses the NIG latch mechanism since link indication
+	 * arrives through its LED4 and not via its LASI signal, so we
+	 * get steady signal instead of clear on read
+	 */
 	bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
 		      1 << NIG_LATCH_BC_ENABLE_MI_INT);
 
@@ -6017,11 +6207,11 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
 	struct bnx2x *bp = params->bp;
 	/* Restore normal power mode*/
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
 
 	/* HW reset */
 	bnx2x_ext_phy_hw_reset(bp, params->port);
-	bnx2x_wait_reset_complete(bp, phy);
+	bnx2x_wait_reset_complete(bp, phy, params);
 
 	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
 	return bnx2x_848xx_cmn_config_init(phy, params, vars);
@@ -6033,12 +6223,15 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 {
 	struct bnx2x *bp = params->bp;
 	u8 port, initialize = 1;
-	u16 val;
+	u16 val, adj;
 	u16 temp;
-	u32 actual_phy_selection;
+	u32 actual_phy_selection, cms_enable;
 	u8 rc = 0;
 
 	/* This is just for MDIO_CTL_REG_84823_MEDIA register. */
+	adj = 0;
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+		adj = 3;
 
 	msleep(1);
 	if (CHIP_IS_E2(bp))
@@ -6048,11 +6241,12 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
 		       MISC_REGISTERS_GPIO_OUTPUT_HIGH,
 		       port);
-	bnx2x_wait_reset_complete(bp, phy);
+	bnx2x_wait_reset_complete(bp, phy, params);
 	/* Wait for GPHY to come out of reset */
 	msleep(50);
-	/* BCM84823 requires that XGXS links up first @ 10G for normal
-	   behavior */
+	/*
+	 * BCM84823 requires that XGXS links up first @ 10G for normal behavior
+	 */
 	temp = vars->line_speed;
 	vars->line_speed = SPEED_10000;
 	bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
@@ -6062,7 +6256,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 	/* Set dual-media configuration according to configuration */
 
 	bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-			MDIO_CTL_REG_84823_MEDIA, &val);
+			MDIO_CTL_REG_84823_MEDIA + adj, &val);
 	val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
 		 MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
 		 MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
@@ -6095,7 +6289,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 		val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
 
 	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-			 MDIO_CTL_REG_84823_MEDIA, val);
+			 MDIO_CTL_REG_84823_MEDIA + adj, val);
 	DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
 		   params->multi_phy_config, val);
 
@@ -6103,29 +6297,50 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
6103 rc = bnx2x_848xx_cmn_config_init(phy, params, vars); 6297 rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
6104 else 6298 else
6105 bnx2x_save_848xx_spirom_version(phy, params); 6299 bnx2x_save_848xx_spirom_version(phy, params);
6300 cms_enable = REG_RD(bp, params->shmem_base +
6301 offsetof(struct shmem_region,
6302 dev_info.port_hw_config[params->port].default_cfg)) &
6303 PORT_HW_CFG_ENABLE_CMS_MASK;
6304
6305 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
6306 MDIO_CTL_REG_84823_USER_CTRL_REG, &val);
6307 if (cms_enable)
6308 val |= MDIO_CTL_REG_84823_USER_CTRL_CMS;
6309 else
6310 val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS;
6311 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
6312 MDIO_CTL_REG_84823_USER_CTRL_REG, val);
6313
6314
6106 return rc; 6315 return rc;
6107} 6316}
6108 6317
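The CMS hunk above is the driver's usual Clause 45 read-modify-write idiom: read the user-control register, set or clear one bit according to the shmem default_cfg, and write it back. A minimal sketch of that idiom; cl45_update_bit and its enable flag are hypothetical names, while bnx2x_cl45_read/write are the driver's own accessors:

	/* Sketch only: set/clear one bit in a CL45 register, as the CMS
	 * code above does with MDIO_CTL_REG_84823_USER_CTRL_CMS. */
	static void cl45_update_bit(struct bnx2x *bp, struct bnx2x_phy *phy,
				    u8 devad, u16 reg, u16 bit, int enable)
	{
		u16 val;

		bnx2x_cl45_read(bp, phy, devad, reg, &val);
		if (enable)
			val |= bit;
		else
			val &= ~bit;
		bnx2x_cl45_write(bp, phy, devad, reg, val);
	}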
6109static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, 6318static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
6110 struct link_params *params, 6319 struct link_params *params,
6111 struct link_vars *vars) 6320 struct link_vars *vars)
6112{ 6321{
6113 struct bnx2x *bp = params->bp; 6322 struct bnx2x *bp = params->bp;
6114 u16 val, val1, val2; 6323 u16 val, val1, val2, adj;
6115 u8 link_up = 0; 6324 u8 link_up = 0;
6116 6325
6326 /* Reg offset adjustment for 84833 */
6327 adj = 0;
6328 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
6329 adj = -1;
6330
6117 /* Check 10G-BaseT link status */ 6331 /* Check 10G-BaseT link status */
6118 /* Check PMD signal ok */ 6332 /* Check PMD signal ok */
6119 bnx2x_cl45_read(bp, phy, 6333 bnx2x_cl45_read(bp, phy,
6120 MDIO_AN_DEVAD, 0xFFFA, &val1); 6334 MDIO_AN_DEVAD, 0xFFFA, &val1);
6121 bnx2x_cl45_read(bp, phy, 6335 bnx2x_cl45_read(bp, phy,
6122 MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL, 6336 MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL + adj,
6123 &val2); 6337 &val2);
6124 DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2); 6338 DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
6125 6339
6126 /* Check link 10G */ 6340 /* Check link 10G */
6127 if (val2 & (1<<11)) { 6341 if (val2 & (1<<11)) {
6128 vars->line_speed = SPEED_10000; 6342 vars->line_speed = SPEED_10000;
6343 vars->duplex = DUPLEX_FULL;
6129 link_up = 1; 6344 link_up = 1;
6130 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); 6345 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
6131 } else { /* Check Legacy speed link */ 6346 } else { /* Check Legacy speed link */
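Both 848x3 hunks above use the same trick for the BCM84833: the chip is driven through the BCM84823 code paths, but a few of its registers sit at shifted MDIO addresses, so a per-chip adj is added to the 84823 address (+3 for the MEDIA register in config_init, adj = -1 for PMD_SIGNAL in read_status). Since adj is declared u16, adj = -1 wraps to 0xffff and reg + adj lands one address back purely through 16-bit truncation. A sketch of the pattern with the offset made explicit (reg_848xx is a hypothetical helper name):

	/* Sketch: resolve an 84823 register address for the 84833 case.
	 * The driver keeps adj in a u16, so "adj = -1" relies on 16-bit
	 * wraparound when the sum is truncated back to u16. */
	static u16 reg_848xx(struct bnx2x_phy *phy, u16 reg_84823, u16 adj_84833)
	{
		if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
			return reg_84823 + adj_84833;
		return reg_84823;
	}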
@@ -6203,9 +6418,9 @@ static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
6203 struct link_params *params) 6418 struct link_params *params)
6204{ 6419{
6205 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, 6420 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
6206 MISC_REGISTERS_GPIO_OUTPUT_LOW, 0); 6421 MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
6207 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, 6422 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
6208 MISC_REGISTERS_GPIO_OUTPUT_LOW, 1); 6423 MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
6209} 6424}
6210 6425
6211static void bnx2x_8481_link_reset(struct bnx2x_phy *phy, 6426static void bnx2x_8481_link_reset(struct bnx2x_phy *phy,
@@ -6227,8 +6442,8 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
6227 else 6442 else
6228 port = params->port; 6443 port = params->port;
6229 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, 6444 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6230 MISC_REGISTERS_GPIO_OUTPUT_LOW, 6445 MISC_REGISTERS_GPIO_OUTPUT_LOW,
6231 port); 6446 port);
6232} 6447}
6233 6448
6234static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, 6449static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
@@ -6283,24 +6498,24 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
6283 6498
6284 /* Set LED masks */ 6499 /* Set LED masks */
6285 bnx2x_cl45_write(bp, phy, 6500 bnx2x_cl45_write(bp, phy,
6286 MDIO_PMA_DEVAD, 6501 MDIO_PMA_DEVAD,
6287 MDIO_PMA_REG_8481_LED1_MASK, 6502 MDIO_PMA_REG_8481_LED1_MASK,
6288 0x0); 6503 0x0);
6289 6504
6290 bnx2x_cl45_write(bp, phy, 6505 bnx2x_cl45_write(bp, phy,
6291 MDIO_PMA_DEVAD, 6506 MDIO_PMA_DEVAD,
6292 MDIO_PMA_REG_8481_LED2_MASK, 6507 MDIO_PMA_REG_8481_LED2_MASK,
6293 0x0); 6508 0x0);
6294 6509
6295 bnx2x_cl45_write(bp, phy, 6510 bnx2x_cl45_write(bp, phy,
6296 MDIO_PMA_DEVAD, 6511 MDIO_PMA_DEVAD,
6297 MDIO_PMA_REG_8481_LED3_MASK, 6512 MDIO_PMA_REG_8481_LED3_MASK,
6298 0x0); 6513 0x0);
6299 6514
6300 bnx2x_cl45_write(bp, phy, 6515 bnx2x_cl45_write(bp, phy,
6301 MDIO_PMA_DEVAD, 6516 MDIO_PMA_DEVAD,
6302 MDIO_PMA_REG_8481_LED5_MASK, 6517 MDIO_PMA_REG_8481_LED5_MASK,
6303 0x20); 6518 0x20);
6304 6519
6305 } else { 6520 } else {
6306 bnx2x_cl45_write(bp, phy, 6521 bnx2x_cl45_write(bp, phy,
@@ -6324,35 +6539,35 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
6324 val |= 0x2492; 6539 val |= 0x2492;
6325 6540
6326 bnx2x_cl45_write(bp, phy, 6541 bnx2x_cl45_write(bp, phy,
6327 MDIO_PMA_DEVAD, 6542 MDIO_PMA_DEVAD,
6328 MDIO_PMA_REG_8481_LINK_SIGNAL, 6543 MDIO_PMA_REG_8481_LINK_SIGNAL,
6329 val); 6544 val);
6330 6545
6331 /* Set LED masks */ 6546 /* Set LED masks */
6332 bnx2x_cl45_write(bp, phy, 6547 bnx2x_cl45_write(bp, phy,
6333 MDIO_PMA_DEVAD, 6548 MDIO_PMA_DEVAD,
6334 MDIO_PMA_REG_8481_LED1_MASK, 6549 MDIO_PMA_REG_8481_LED1_MASK,
6335 0x0); 6550 0x0);
6336 6551
6337 bnx2x_cl45_write(bp, phy, 6552 bnx2x_cl45_write(bp, phy,
6338 MDIO_PMA_DEVAD, 6553 MDIO_PMA_DEVAD,
6339 MDIO_PMA_REG_8481_LED2_MASK, 6554 MDIO_PMA_REG_8481_LED2_MASK,
6340 0x20); 6555 0x20);
6341 6556
6342 bnx2x_cl45_write(bp, phy, 6557 bnx2x_cl45_write(bp, phy,
6343 MDIO_PMA_DEVAD, 6558 MDIO_PMA_DEVAD,
6344 MDIO_PMA_REG_8481_LED3_MASK, 6559 MDIO_PMA_REG_8481_LED3_MASK,
6345 0x20); 6560 0x20);
6346 6561
6347 bnx2x_cl45_write(bp, phy, 6562 bnx2x_cl45_write(bp, phy,
6348 MDIO_PMA_DEVAD, 6563 MDIO_PMA_DEVAD,
6349 MDIO_PMA_REG_8481_LED5_MASK, 6564 MDIO_PMA_REG_8481_LED5_MASK,
6350 0x0); 6565 0x0);
6351 } else { 6566 } else {
6352 bnx2x_cl45_write(bp, phy, 6567 bnx2x_cl45_write(bp, phy,
6353 MDIO_PMA_DEVAD, 6568 MDIO_PMA_DEVAD,
6354 MDIO_PMA_REG_8481_LED1_MASK, 6569 MDIO_PMA_REG_8481_LED1_MASK,
6355 0x20); 6570 0x20);
6356 } 6571 }
6357 break; 6572 break;
6358 6573
@@ -6370,9 +6585,9 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
6370 &val); 6585 &val);
6371 6586
6372 if (!((val & 6587 if (!((val &
6373 MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK) 6588 MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
6374 >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)){ 6589 >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) {
6375 DP(NETIF_MSG_LINK, "Seting LINK_SIGNAL\n"); 6590 DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n");
6376 bnx2x_cl45_write(bp, phy, 6591 bnx2x_cl45_write(bp, phy,
6377 MDIO_PMA_DEVAD, 6592 MDIO_PMA_DEVAD,
6378 MDIO_PMA_REG_8481_LINK_SIGNAL, 6593 MDIO_PMA_REG_8481_LINK_SIGNAL,
@@ -6381,30 +6596,42 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
6381 6596
6382 /* Set LED masks */ 6597 /* Set LED masks */
6383 bnx2x_cl45_write(bp, phy, 6598 bnx2x_cl45_write(bp, phy,
6384 MDIO_PMA_DEVAD, 6599 MDIO_PMA_DEVAD,
6385 MDIO_PMA_REG_8481_LED1_MASK, 6600 MDIO_PMA_REG_8481_LED1_MASK,
6386 0x10); 6601 0x10);
6387 6602
6388 bnx2x_cl45_write(bp, phy, 6603 bnx2x_cl45_write(bp, phy,
6389 MDIO_PMA_DEVAD, 6604 MDIO_PMA_DEVAD,
6390 MDIO_PMA_REG_8481_LED2_MASK, 6605 MDIO_PMA_REG_8481_LED2_MASK,
6391 0x80); 6606 0x80);
6392 6607
6393 bnx2x_cl45_write(bp, phy, 6608 bnx2x_cl45_write(bp, phy,
6394 MDIO_PMA_DEVAD, 6609 MDIO_PMA_DEVAD,
6395 MDIO_PMA_REG_8481_LED3_MASK, 6610 MDIO_PMA_REG_8481_LED3_MASK,
6396 0x98); 6611 0x98);
6397 6612
6398 bnx2x_cl45_write(bp, phy, 6613 bnx2x_cl45_write(bp, phy,
6399 MDIO_PMA_DEVAD, 6614 MDIO_PMA_DEVAD,
6400 MDIO_PMA_REG_8481_LED5_MASK, 6615 MDIO_PMA_REG_8481_LED5_MASK,
6401 0x40); 6616 0x40);
6402 6617
6403 } else { 6618 } else {
6404 bnx2x_cl45_write(bp, phy, 6619 bnx2x_cl45_write(bp, phy,
6405 MDIO_PMA_DEVAD, 6620 MDIO_PMA_DEVAD,
6406 MDIO_PMA_REG_8481_LED1_MASK, 6621 MDIO_PMA_REG_8481_LED1_MASK,
6407 0x80); 6622 0x80);
6623
6624 /* Tell LED3 to blink on source */
6625 bnx2x_cl45_read(bp, phy,
6626 MDIO_PMA_DEVAD,
6627 MDIO_PMA_REG_8481_LINK_SIGNAL,
6628 &val);
6629 val &= ~(7<<6);
6630 val |= (1<<6); /* A83B[8:6]= 1 */
6631 bnx2x_cl45_write(bp, phy,
6632 MDIO_PMA_DEVAD,
6633 MDIO_PMA_REG_8481_LINK_SIGNAL,
6634 val);
6408 } 6635 }
6409 break; 6636 break;
6410 } 6637 }
@@ -6431,10 +6658,10 @@ static u8 bnx2x_7101_config_init(struct bnx2x_phy *phy,
6431 6658
6432 /* Restore normal power mode*/ 6659 /* Restore normal power mode*/
6433 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 6660 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
6434 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); 6661 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
6435 /* HW reset */ 6662 /* HW reset */
6436 bnx2x_ext_phy_hw_reset(bp, params->port); 6663 bnx2x_ext_phy_hw_reset(bp, params->port);
6437 bnx2x_wait_reset_complete(bp, phy); 6664 bnx2x_wait_reset_complete(bp, phy, params);
6438 6665
6439 bnx2x_cl45_write(bp, phy, 6666 bnx2x_cl45_write(bp, phy,
6440 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1); 6667 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1);
@@ -6481,14 +6708,13 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
6481 DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n", 6708 DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
6482 val2, val1); 6709 val2, val1);
6483 link_up = ((val1 & 4) == 4); 6710 link_up = ((val1 & 4) == 4);
6484 /* if link is up 6711 /* if link is up print the AN outcome of the SFX7101 PHY */
6485 * print the AN outcome of the SFX7101 PHY
6486 */
6487 if (link_up) { 6712 if (link_up) {
6488 bnx2x_cl45_read(bp, phy, 6713 bnx2x_cl45_read(bp, phy,
6489 MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS, 6714 MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
6490 &val2); 6715 &val2);
6491 vars->line_speed = SPEED_10000; 6716 vars->line_speed = SPEED_10000;
6717 vars->duplex = DUPLEX_FULL;
6492 DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n", 6718 DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n",
6493 val2, (val2 & (1<<14))); 6719 val2, (val2 & (1<<14)));
6494 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); 6720 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
@@ -6516,20 +6742,20 @@ void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy)
6516 u16 val, cnt; 6742 u16 val, cnt;
6517 6743
6518 bnx2x_cl45_read(bp, phy, 6744 bnx2x_cl45_read(bp, phy,
6519 MDIO_PMA_DEVAD, 6745 MDIO_PMA_DEVAD,
6520 MDIO_PMA_REG_7101_RESET, &val); 6746 MDIO_PMA_REG_7101_RESET, &val);
6521 6747
6522 for (cnt = 0; cnt < 10; cnt++) { 6748 for (cnt = 0; cnt < 10; cnt++) {
6523 msleep(50); 6749 msleep(50);
6524 /* Writes a self-clearing reset */ 6750 /* Writes a self-clearing reset */
6525 bnx2x_cl45_write(bp, phy, 6751 bnx2x_cl45_write(bp, phy,
6526 MDIO_PMA_DEVAD, 6752 MDIO_PMA_DEVAD,
6527 MDIO_PMA_REG_7101_RESET, 6753 MDIO_PMA_REG_7101_RESET,
6528 (val | (1<<15))); 6754 (val | (1<<15)));
6529 /* Wait for clear */ 6755 /* Wait for clear */
6530 bnx2x_cl45_read(bp, phy, 6756 bnx2x_cl45_read(bp, phy,
6531 MDIO_PMA_DEVAD, 6757 MDIO_PMA_DEVAD,
6532 MDIO_PMA_REG_7101_RESET, &val); 6758 MDIO_PMA_REG_7101_RESET, &val);
6533 6759
6534 if ((val & (1<<15)) == 0) 6760 if ((val & (1<<15)) == 0)
6535 break; 6761 break;
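bnx2x_sfx7101_sp_sw_reset above is the standard self-clearing-reset idiom: write the reset bit, re-read the same register until the hardware clears it, and bound the loop with a retry count so a dead PHY cannot hang the caller. A condensed sketch of the idiom (helper name and error code are illustrative):

	/* Sketch: write a self-clearing reset bit (bit 15) and poll it,
	 * mirroring the loop structure above. */
	static int cl45_reset_and_wait(struct bnx2x *bp, struct bnx2x_phy *phy,
				       u8 devad, u16 reg)
	{
		u16 val, cnt;

		bnx2x_cl45_read(bp, phy, devad, reg, &val);
		for (cnt = 0; cnt < 10; cnt++) {
			msleep(50);
			bnx2x_cl45_write(bp, phy, devad, reg, val | (1 << 15));
			bnx2x_cl45_read(bp, phy, devad, reg, &val);
			if (!(val & (1 << 15)))
				return 0;	/* reset self-cleared */
		}
		return -ETIMEDOUT;	/* bit still set after ~500ms */
	}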
@@ -6540,10 +6766,10 @@ static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy,
6540 struct link_params *params) { 6766 struct link_params *params) {
6541 /* Low power mode is controlled by GPIO 2 */ 6767 /* Low power mode is controlled by GPIO 2 */
6542 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2, 6768 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2,
6543 MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); 6769 MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
6544 /* The PHY reset is controlled by GPIO 1 */ 6770 /* The PHY reset is controlled by GPIO 1 */
6545 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, 6771 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
6546 MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); 6772 MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
6547} 6773}
6548 6774
6549static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy, 6775static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
@@ -6585,9 +6811,9 @@ static struct bnx2x_phy phy_null = {
6585 .supported = 0, 6811 .supported = 0,
6586 .media_type = ETH_PHY_NOT_PRESENT, 6812 .media_type = ETH_PHY_NOT_PRESENT,
6587 .ver_addr = 0, 6813 .ver_addr = 0,
6588 .req_flow_ctrl = 0, 6814 .req_flow_ctrl = 0,
6589 .req_line_speed = 0, 6815 .req_line_speed = 0,
6590 .speed_cap_mask = 0, 6816 .speed_cap_mask = 0,
6591 .req_duplex = 0, 6817 .req_duplex = 0,
6592 .rsrv = 0, 6818 .rsrv = 0,
6593 .config_init = (config_init_t)NULL, 6819 .config_init = (config_init_t)NULL,
@@ -6622,8 +6848,8 @@ static struct bnx2x_phy phy_serdes = {
6622 .media_type = ETH_PHY_UNSPECIFIED, 6848 .media_type = ETH_PHY_UNSPECIFIED,
6623 .ver_addr = 0, 6849 .ver_addr = 0,
6624 .req_flow_ctrl = 0, 6850 .req_flow_ctrl = 0,
6625 .req_line_speed = 0, 6851 .req_line_speed = 0,
6626 .speed_cap_mask = 0, 6852 .speed_cap_mask = 0,
6627 .req_duplex = 0, 6853 .req_duplex = 0,
6628 .rsrv = 0, 6854 .rsrv = 0,
6629 .config_init = (config_init_t)bnx2x_init_serdes, 6855 .config_init = (config_init_t)bnx2x_init_serdes,
@@ -6659,8 +6885,8 @@ static struct bnx2x_phy phy_xgxs = {
6659 .media_type = ETH_PHY_UNSPECIFIED, 6885 .media_type = ETH_PHY_UNSPECIFIED,
6660 .ver_addr = 0, 6886 .ver_addr = 0,
6661 .req_flow_ctrl = 0, 6887 .req_flow_ctrl = 0,
6662 .req_line_speed = 0, 6888 .req_line_speed = 0,
6663 .speed_cap_mask = 0, 6889 .speed_cap_mask = 0,
6664 .req_duplex = 0, 6890 .req_duplex = 0,
6665 .rsrv = 0, 6891 .rsrv = 0,
6666 .config_init = (config_init_t)bnx2x_init_xgxs, 6892 .config_init = (config_init_t)bnx2x_init_xgxs,
@@ -6690,8 +6916,8 @@ static struct bnx2x_phy phy_7101 = {
6690 .media_type = ETH_PHY_BASE_T, 6916 .media_type = ETH_PHY_BASE_T,
6691 .ver_addr = 0, 6917 .ver_addr = 0,
6692 .req_flow_ctrl = 0, 6918 .req_flow_ctrl = 0,
6693 .req_line_speed = 0, 6919 .req_line_speed = 0,
6694 .speed_cap_mask = 0, 6920 .speed_cap_mask = 0,
6695 .req_duplex = 0, 6921 .req_duplex = 0,
6696 .rsrv = 0, 6922 .rsrv = 0,
6697 .config_init = (config_init_t)bnx2x_7101_config_init, 6923 .config_init = (config_init_t)bnx2x_7101_config_init,
@@ -6721,9 +6947,9 @@ static struct bnx2x_phy phy_8073 = {
6721 SUPPORTED_Asym_Pause), 6947 SUPPORTED_Asym_Pause),
6722 .media_type = ETH_PHY_UNSPECIFIED, 6948 .media_type = ETH_PHY_UNSPECIFIED,
6723 .ver_addr = 0, 6949 .ver_addr = 0,
6724 .req_flow_ctrl = 0, 6950 .req_flow_ctrl = 0,
6725 .req_line_speed = 0, 6951 .req_line_speed = 0,
6726 .speed_cap_mask = 0, 6952 .speed_cap_mask = 0,
6727 .req_duplex = 0, 6953 .req_duplex = 0,
6728 .rsrv = 0, 6954 .rsrv = 0,
6729 .config_init = (config_init_t)bnx2x_8073_config_init, 6955 .config_init = (config_init_t)bnx2x_8073_config_init,
@@ -6932,6 +7158,43 @@ static struct bnx2x_phy phy_84823 = {
6932 .phy_specific_func = (phy_specific_func_t)NULL 7158 .phy_specific_func = (phy_specific_func_t)NULL
6933}; 7159};
6934 7160
7161static struct bnx2x_phy phy_84833 = {
7162 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
7163 .addr = 0xff,
7164 .flags = FLAGS_FAN_FAILURE_DET_REQ |
7165 FLAGS_REARM_LATCH_SIGNAL,
7166 .def_md_devad = 0,
7167 .reserved = 0,
7168 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
7169 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
7170 .mdio_ctrl = 0,
7171 .supported = (SUPPORTED_10baseT_Half |
7172 SUPPORTED_10baseT_Full |
7173 SUPPORTED_100baseT_Half |
7174 SUPPORTED_100baseT_Full |
7175 SUPPORTED_1000baseT_Full |
7176 SUPPORTED_10000baseT_Full |
7177 SUPPORTED_TP |
7178 SUPPORTED_Autoneg |
7179 SUPPORTED_Pause |
7180 SUPPORTED_Asym_Pause),
7181 .media_type = ETH_PHY_BASE_T,
7182 .ver_addr = 0,
7183 .req_flow_ctrl = 0,
7184 .req_line_speed = 0,
7185 .speed_cap_mask = 0,
7186 .req_duplex = 0,
7187 .rsrv = 0,
7188 .config_init = (config_init_t)bnx2x_848x3_config_init,
7189 .read_status = (read_status_t)bnx2x_848xx_read_status,
7190 .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
7191 .config_loopback = (config_loopback_t)NULL,
7192 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
7193 .hw_reset = (hw_reset_t)NULL,
7194 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
7195 .phy_specific_func = (phy_specific_func_t)NULL
7196};
7197

6935/*****************************************************************/ 7198/*****************************************************************/
6936/* */ 7199/* */
6937/* Populate the phy accordingly. Main function: bnx2x_populate_phy */ 7200/* Populate the phy accordingly. Main function: bnx2x_populate_phy */
@@ -6945,7 +7208,7 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
6945 /* Get the 4 lanes xgxs config rx and tx */ 7208 /* Get the 4 lanes xgxs config rx and tx */
6946 u32 rx = 0, tx = 0, i; 7209 u32 rx = 0, tx = 0, i;
6947 for (i = 0; i < 2; i++) { 7210 for (i = 0; i < 2; i++) {
6948 /** 7211 /*
6949 * INT_PHY and EXT_PHY1 share the same value location in the 7212 * INT_PHY and EXT_PHY1 share the same value location in the
6950 * shmem. When num_phys is greater than 1, then this value 7213 * shmem. When num_phys is greater than 1, then this value
6951 * applies only to EXT_PHY1 7214 * applies only to EXT_PHY1
@@ -6953,19 +7216,19 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
6953 if (phy_index == INT_PHY || phy_index == EXT_PHY1) { 7216 if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
6954 rx = REG_RD(bp, shmem_base + 7217 rx = REG_RD(bp, shmem_base +
6955 offsetof(struct shmem_region, 7218 offsetof(struct shmem_region,
6956 dev_info.port_hw_config[port].xgxs_config_rx[i<<1])); 7219 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
6957 7220
6958 tx = REG_RD(bp, shmem_base + 7221 tx = REG_RD(bp, shmem_base +
6959 offsetof(struct shmem_region, 7222 offsetof(struct shmem_region,
6960 dev_info.port_hw_config[port].xgxs_config_tx[i<<1])); 7223 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
6961 } else { 7224 } else {
6962 rx = REG_RD(bp, shmem_base + 7225 rx = REG_RD(bp, shmem_base +
6963 offsetof(struct shmem_region, 7226 offsetof(struct shmem_region,
6964 dev_info.port_hw_config[port].xgxs_config2_rx[i<<1])); 7227 dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
6965 7228
6966 tx = REG_RD(bp, shmem_base + 7229 tx = REG_RD(bp, shmem_base +
6967 offsetof(struct shmem_region, 7230 offsetof(struct shmem_region,
6968 dev_info.port_hw_config[port].xgxs_config2_rx[i<<1])); 7231 dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
6969 } 7232 }
6970 7233
6971 phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff); 7234 phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
@@ -7085,6 +7348,9 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
7085 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823: 7348 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
7086 *phy = phy_84823; 7349 *phy = phy_84823;
7087 break; 7350 break;
7351 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
7352 *phy = phy_84833;
7353 break;
7088 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: 7354 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7089 *phy = phy_7101; 7355 *phy = phy_7101;
7090 break; 7356 break;
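Supporting the BCM84833 takes exactly two pieces, both visible above: a static struct bnx2x_phy descriptor (phy_84833) filling in the ops table, and this switch case in bnx2x_populate_ext_phy() that copies the descriptor into the per-port slot. Once populated, the link code only ever calls through the ops pointers; a sketch of that dispatch (rc handling elided, phy is the populated copy):

	/* Sketch: a populated descriptor is driven via its ops table. */
	if (phy->config_init)
		rc = phy->config_init(phy, params, vars);
	if (phy->read_status)
		link_up = phy->read_status(phy, params, vars);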
@@ -7099,21 +7365,21 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
7099 phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config); 7365 phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
7100 bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index); 7366 bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
7101 7367
7102 /** 7368 /*
7103 * The shmem address of the phy version is located on different 7369 * The shmem address of the phy version is located on different
7104 * structures. In case this structure is too old, do not set 7370 * structures. In case this structure is too old, do not set
7105 * the address 7371 * the address
7106 */ 7372 */
7107 config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region, 7373 config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region,
7108 dev_info.shared_hw_config.config2)); 7374 dev_info.shared_hw_config.config2));
7109 if (phy_index == EXT_PHY1) { 7375 if (phy_index == EXT_PHY1) {
7110 phy->ver_addr = shmem_base + offsetof(struct shmem_region, 7376 phy->ver_addr = shmem_base + offsetof(struct shmem_region,
7111 port_mb[port].ext_phy_fw_version); 7377 port_mb[port].ext_phy_fw_version);
7112 7378
7113 /* Check specific mdc mdio settings */ 7379 /* Check specific mdc mdio settings */
7114 if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK) 7380 if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
7115 mdc_mdio_access = config2 & 7381 mdc_mdio_access = config2 &
7116 SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK; 7382 SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
7117 } else { 7383 } else {
7118 u32 size = REG_RD(bp, shmem2_base); 7384 u32 size = REG_RD(bp, shmem2_base);
7119 7385
@@ -7132,7 +7398,7 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
7132 } 7398 }
7133 phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port); 7399 phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
7134 7400
7135 /** 7401 /*
7136 * In case mdc/mdio_access of the external phy is different than the 7402 * In case mdc/mdio_access of the external phy is different than the
7137 * mdc/mdio access of the XGXS, a HW lock must be taken in each access 7403 * mdc/mdio access of the XGXS, a HW lock must be taken in each access
7138 * to prevent one port from interfering with another port's CL45 operations. 7404 * to prevent one port from interfering with another port's CL45 operations.
@@ -7167,18 +7433,20 @@ static void bnx2x_phy_def_cfg(struct link_params *params,
7167 /* Populate the default phy configuration for MF mode */ 7433 /* Populate the default phy configuration for MF mode */
7168 if (phy_index == EXT_PHY2) { 7434 if (phy_index == EXT_PHY2) {
7169 link_config = REG_RD(bp, params->shmem_base + 7435 link_config = REG_RD(bp, params->shmem_base +
7170 offsetof(struct shmem_region, dev_info. 7436 offsetof(struct shmem_region, dev_info.
7171 port_feature_config[params->port].link_config2)); 7437 port_feature_config[params->port].link_config2));
7172 phy->speed_cap_mask = REG_RD(bp, params->shmem_base + 7438 phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
7173 offsetof(struct shmem_region, dev_info. 7439 offsetof(struct shmem_region,
7440 dev_info.
7174 port_hw_config[params->port].speed_capability_mask2)); 7441 port_hw_config[params->port].speed_capability_mask2));
7175 } else { 7442 } else {
7176 link_config = REG_RD(bp, params->shmem_base + 7443 link_config = REG_RD(bp, params->shmem_base +
7177 offsetof(struct shmem_region, dev_info. 7444 offsetof(struct shmem_region, dev_info.
7178 port_feature_config[params->port].link_config)); 7445 port_feature_config[params->port].link_config));
7179 phy->speed_cap_mask = REG_RD(bp, params->shmem_base + 7446 phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
7180 offsetof(struct shmem_region, dev_info. 7447 offsetof(struct shmem_region,
7181 port_hw_config[params->port].speed_capability_mask)); 7448 dev_info.
7449 port_hw_config[params->port].speed_capability_mask));
7182 } 7450 }
7183 DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask" 7451 DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask"
7184 " 0x%x\n", phy_index, link_config, phy->speed_cap_mask); 7452 " 0x%x\n", phy_index, link_config, phy->speed_cap_mask);
@@ -7325,7 +7593,7 @@ static void set_phy_vars(struct link_params *params)
7325 else if (phy_index == EXT_PHY2) 7593 else if (phy_index == EXT_PHY2)
7326 actual_phy_idx = EXT_PHY1; 7594 actual_phy_idx = EXT_PHY1;
7327 } 7595 }
7328 params->phy[actual_phy_idx].req_flow_ctrl = 7596 params->phy[actual_phy_idx].req_flow_ctrl =
7329 params->req_flow_ctrl[link_cfg_idx]; 7597 params->req_flow_ctrl[link_cfg_idx];
7330 7598
7331 params->phy[actual_phy_idx].req_line_speed = 7599 params->phy[actual_phy_idx].req_line_speed =
@@ -7378,57 +7646,6 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
7378 set_phy_vars(params); 7646 set_phy_vars(params);
7379 7647
7380 DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys); 7648 DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
7381 if (CHIP_REV_IS_FPGA(bp)) {
7382
7383 vars->link_up = 1;
7384 vars->line_speed = SPEED_10000;
7385 vars->duplex = DUPLEX_FULL;
7386 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7387 vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
7388 /* enable on E1.5 FPGA */
7389 if (CHIP_IS_E1H(bp)) {
7390 vars->flow_ctrl |=
7391 (BNX2X_FLOW_CTRL_TX |
7392 BNX2X_FLOW_CTRL_RX);
7393 vars->link_status |=
7394 (LINK_STATUS_TX_FLOW_CONTROL_ENABLED |
7395 LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
7396 }
7397
7398 bnx2x_emac_enable(params, vars, 0);
7399 if (!(CHIP_IS_E2(bp)))
7400 bnx2x_pbf_update(params, vars->flow_ctrl,
7401 vars->line_speed);
7402 /* disable drain */
7403 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
7404
7405 /* update shared memory */
7406 bnx2x_update_mng(params, vars->link_status);
7407
7408 return 0;
7409
7410 } else
7411 if (CHIP_REV_IS_EMUL(bp)) {
7412
7413 vars->link_up = 1;
7414 vars->line_speed = SPEED_10000;
7415 vars->duplex = DUPLEX_FULL;
7416 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7417 vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
7418
7419 bnx2x_bmac_enable(params, vars, 0);
7420
7421 bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed);
7422 /* Disable drain */
7423 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
7424 + params->port*4, 0);
7425
7426 /* update shared memory */
7427 bnx2x_update_mng(params, vars->link_status);
7428
7429 return 0;
7430
7431 } else
7432 if (params->loopback_mode == LOOPBACK_BMAC) { 7649 if (params->loopback_mode == LOOPBACK_BMAC) {
7433 7650
7434 vars->link_up = 1; 7651 vars->link_up = 1;
@@ -7444,8 +7661,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
7444 /* set bmac loopback */ 7661 /* set bmac loopback */
7445 bnx2x_bmac_enable(params, vars, 1); 7662 bnx2x_bmac_enable(params, vars, 1);
7446 7663
7447 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + 7664 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
7448 params->port*4, 0);
7449 7665
7450 } else if (params->loopback_mode == LOOPBACK_EMAC) { 7666 } else if (params->loopback_mode == LOOPBACK_EMAC) {
7451 7667
@@ -7461,8 +7677,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
7461 /* set bmac loopback */ 7677 /* set bmac loopback */
7462 bnx2x_emac_enable(params, vars, 1); 7678 bnx2x_emac_enable(params, vars, 1);
7463 bnx2x_emac_program(params, vars); 7679 bnx2x_emac_program(params, vars);
7464 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + 7680 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
7465 params->port*4, 0);
7466 7681
7467 } else if ((params->loopback_mode == LOOPBACK_XGXS) || 7682 } else if ((params->loopback_mode == LOOPBACK_XGXS) ||
7468 (params->loopback_mode == LOOPBACK_EXT_PHY)) { 7683 (params->loopback_mode == LOOPBACK_EXT_PHY)) {
@@ -7485,8 +7700,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
7485 bnx2x_emac_program(params, vars); 7700 bnx2x_emac_program(params, vars);
7486 bnx2x_emac_enable(params, vars, 0); 7701 bnx2x_emac_enable(params, vars, 0);
7487 } else 7702 } else
7488 bnx2x_bmac_enable(params, vars, 0); 7703 bnx2x_bmac_enable(params, vars, 0);
7489
7490 if (params->loopback_mode == LOOPBACK_XGXS) { 7704 if (params->loopback_mode == LOOPBACK_XGXS) {
7491 /* set 10G XGXS loopback */ 7705 /* set 10G XGXS loopback */
7492 params->phy[INT_PHY].config_loopback( 7706 params->phy[INT_PHY].config_loopback(
@@ -7504,9 +7718,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
7504 params); 7718 params);
7505 } 7719 }
7506 } 7720 }
7507 7721 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
7508 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
7509 params->port*4, 0);
7510 7722
7511 bnx2x_set_led(params, vars, 7723 bnx2x_set_led(params, vars,
7512 LED_MODE_OPER, vars->line_speed); 7724 LED_MODE_OPER, vars->line_speed);
@@ -7525,7 +7737,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
7525 return 0; 7737 return 0;
7526} 7738}
7527u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars, 7739u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
7528 u8 reset_ext_phy) 7740 u8 reset_ext_phy)
7529{ 7741{
7530 struct bnx2x *bp = params->bp; 7742 struct bnx2x *bp = params->bp;
7531 u8 phy_index, port = params->port, clear_latch_ind = 0; 7743 u8 phy_index, port = params->port, clear_latch_ind = 0;
@@ -7534,10 +7746,10 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
7534 vars->link_status = 0; 7746 vars->link_status = 0;
7535 bnx2x_update_mng(params, vars->link_status); 7747 bnx2x_update_mng(params, vars->link_status);
7536 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 7748 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
7537 (NIG_MASK_XGXS0_LINK_STATUS | 7749 (NIG_MASK_XGXS0_LINK_STATUS |
7538 NIG_MASK_XGXS0_LINK10G | 7750 NIG_MASK_XGXS0_LINK10G |
7539 NIG_MASK_SERDES0_LINK_STATUS | 7751 NIG_MASK_SERDES0_LINK_STATUS |
7540 NIG_MASK_MI_INT)); 7752 NIG_MASK_MI_INT));
7541 7753
7542 /* activate nig drain */ 7754 /* activate nig drain */
7543 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); 7755 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
@@ -7605,10 +7817,13 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7605 struct bnx2x_phy phy[PORT_MAX]; 7817 struct bnx2x_phy phy[PORT_MAX];
7606 struct bnx2x_phy *phy_blk[PORT_MAX]; 7818 struct bnx2x_phy *phy_blk[PORT_MAX];
7607 u16 val; 7819 u16 val;
7608 s8 port; 7820 s8 port = 0;
7609 s8 port_of_path = 0; 7821 s8 port_of_path = 0;
7610 7822 u32 swap_val, swap_override;
7611 bnx2x_ext_phy_hw_reset(bp, 0); 7823 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7824 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7825 port ^= (swap_val && swap_override);
7826 bnx2x_ext_phy_hw_reset(bp, port);
7612 /* PART1 - Reset both phys */ 7827 /* PART1 - Reset both phys */
7613 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7828 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
7614 u32 shmem_base, shmem2_base; 7829 u32 shmem_base, shmem2_base;
@@ -7633,21 +7848,22 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7633 /* disable attentions */ 7848 /* disable attentions */
7634 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + 7849 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
7635 port_of_path*4, 7850 port_of_path*4,
7636 (NIG_MASK_XGXS0_LINK_STATUS | 7851 (NIG_MASK_XGXS0_LINK_STATUS |
7637 NIG_MASK_XGXS0_LINK10G | 7852 NIG_MASK_XGXS0_LINK10G |
7638 NIG_MASK_SERDES0_LINK_STATUS | 7853 NIG_MASK_SERDES0_LINK_STATUS |
7639 NIG_MASK_MI_INT)); 7854 NIG_MASK_MI_INT));
7640 7855
7641 /* Need to take the phy out of low power mode in order 7856 /* Need to take the phy out of low power mode in order
7642 to write to its registers */ 7857 to write to its registers */
7643 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 7858 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
7644 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); 7859 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
7860 port);
7645 7861
7646 /* Reset the phy */ 7862 /* Reset the phy */
7647 bnx2x_cl45_write(bp, &phy[port], 7863 bnx2x_cl45_write(bp, &phy[port],
7648 MDIO_PMA_DEVAD, 7864 MDIO_PMA_DEVAD,
7649 MDIO_PMA_REG_CTRL, 7865 MDIO_PMA_REG_CTRL,
7650 1<<15); 7866 1<<15);
7651 } 7867 }
7652 7868
7653 /* Add delay of 150ms after reset */ 7869 /* Add delay of 150ms after reset */
@@ -7663,7 +7879,6 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7663 7879
7664 /* PART2 - Download firmware to both phys */ 7880 /* PART2 - Download firmware to both phys */
7665 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7881 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
7666 u16 fw_ver1;
7667 if (CHIP_IS_E2(bp)) 7882 if (CHIP_IS_E2(bp))
7668 port_of_path = 0; 7883 port_of_path = 0;
7669 else 7884 else
@@ -7671,34 +7886,26 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7671 7886
7672 DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n", 7887 DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
7673 phy_blk[port]->addr); 7888 phy_blk[port]->addr);
7674 bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], 7889 if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
7675 port_of_path); 7890 port_of_path))
7676
7677 bnx2x_cl45_read(bp, phy_blk[port],
7678 MDIO_PMA_DEVAD,
7679 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
7680 if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
7681 DP(NETIF_MSG_LINK,
7682 "bnx2x_8073_common_init_phy port %x:"
7683 "Download failed. fw version = 0x%x\n",
7684 port, fw_ver1);
7685 return -EINVAL; 7891 return -EINVAL;
7686 }
7687 7892
7688 /* Only set bit 10 = 1 (Tx power down) */ 7893 /* Only set bit 10 = 1 (Tx power down) */
7689 bnx2x_cl45_read(bp, phy_blk[port], 7894 bnx2x_cl45_read(bp, phy_blk[port],
7690 MDIO_PMA_DEVAD, 7895 MDIO_PMA_DEVAD,
7691 MDIO_PMA_REG_TX_POWER_DOWN, &val); 7896 MDIO_PMA_REG_TX_POWER_DOWN, &val);
7692 7897
7693 /* Phase1 of TX_POWER_DOWN reset */ 7898 /* Phase1 of TX_POWER_DOWN reset */
7694 bnx2x_cl45_write(bp, phy_blk[port], 7899 bnx2x_cl45_write(bp, phy_blk[port],
7695 MDIO_PMA_DEVAD, 7900 MDIO_PMA_DEVAD,
7696 MDIO_PMA_REG_TX_POWER_DOWN, 7901 MDIO_PMA_REG_TX_POWER_DOWN,
7697 (val | 1<<10)); 7902 (val | 1<<10));
7698 } 7903 }
7699 7904
7700 /* Toggle Transmitter: Power down and then up with 600ms 7905 /*
7701 delay between */ 7906 * Toggle Transmitter: Power down and then up with 600ms delay
7907 * between
7908 */
7702 msleep(600); 7909 msleep(600);
7703 7910
7704 /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */ 7911 /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
@@ -7706,25 +7913,25 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7706 /* Phase2 of POWER_DOWN_RESET */ 7913 /* Phase2 of POWER_DOWN_RESET */
7707 /* Release bit 10 (Release Tx power down) */ 7914 /* Release bit 10 (Release Tx power down) */
7708 bnx2x_cl45_read(bp, phy_blk[port], 7915 bnx2x_cl45_read(bp, phy_blk[port],
7709 MDIO_PMA_DEVAD, 7916 MDIO_PMA_DEVAD,
7710 MDIO_PMA_REG_TX_POWER_DOWN, &val); 7917 MDIO_PMA_REG_TX_POWER_DOWN, &val);
7711 7918
7712 bnx2x_cl45_write(bp, phy_blk[port], 7919 bnx2x_cl45_write(bp, phy_blk[port],
7713 MDIO_PMA_DEVAD, 7920 MDIO_PMA_DEVAD,
7714 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10)))); 7921 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
7715 msleep(15); 7922 msleep(15);
7716 7923
7717 /* Read modify write the SPI-ROM version select register */ 7924 /* Read modify write the SPI-ROM version select register */
7718 bnx2x_cl45_read(bp, phy_blk[port], 7925 bnx2x_cl45_read(bp, phy_blk[port],
7719 MDIO_PMA_DEVAD, 7926 MDIO_PMA_DEVAD,
7720 MDIO_PMA_REG_EDC_FFE_MAIN, &val); 7927 MDIO_PMA_REG_EDC_FFE_MAIN, &val);
7721 bnx2x_cl45_write(bp, phy_blk[port], 7928 bnx2x_cl45_write(bp, phy_blk[port],
7722 MDIO_PMA_DEVAD, 7929 MDIO_PMA_DEVAD,
7723 MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12))); 7930 MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
7724 7931
7725 /* set GPIO2 back to LOW */ 7932 /* set GPIO2 back to LOW */
7726 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 7933 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
7727 MISC_REGISTERS_GPIO_OUTPUT_LOW, port); 7934 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
7728 } 7935 }
7729 return 0; 7936 return 0;
7730} 7937}
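The firmware-download loop above also changes shape: the open-coded fw_ver1 check after bnx2x_8073_8727_external_rom_boot() is deleted and the call's return value is tested instead, so the verification lives in one place for both the 8073 and 8727 init paths. Judging from the removed lines, the moved check looks roughly like this (its exact placement inside the callee is assumed; the magic values come from the deleted caller-side code):

	/* Sketch: post-download verification now owned by
	 * bnx2x_8073_8727_external_rom_boot(); fw versions 0 and 0x4321
	 * still indicate a failed SPI-ROM load. */
	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
			MDIO_PMA_REG_ROM_VER1, &fw_ver1);
	if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
		DP(NETIF_MSG_LINK, "Download failed. fw version = 0x%x\n",
		   fw_ver1);
		return -EINVAL;
	}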
@@ -7771,32 +7978,90 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
7771 7978
7772 /* Set fault module detected LED on */ 7979 /* Set fault module detected LED on */
7773 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, 7980 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
7774 MISC_REGISTERS_GPIO_HIGH, 7981 MISC_REGISTERS_GPIO_HIGH,
7775 port); 7982 port);
7776 } 7983 }
7777 7984
7778 return 0; 7985 return 0;
7779} 7986}
7987static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base,
7988 u8 *io_gpio, u8 *io_port)
7989{
7990
7991 u32 phy_gpio_reset = REG_RD(bp, shmem_base +
7992 offsetof(struct shmem_region,
7993 dev_info.port_hw_config[PORT_0].default_cfg));
7994 switch (phy_gpio_reset) {
7995 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0:
7996 *io_gpio = 0;
7997 *io_port = 0;
7998 break;
7999 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0:
8000 *io_gpio = 1;
8001 *io_port = 0;
8002 break;
8003 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0:
8004 *io_gpio = 2;
8005 *io_port = 0;
8006 break;
8007 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0:
8008 *io_gpio = 3;
8009 *io_port = 0;
8010 break;
8011 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1:
8012 *io_gpio = 0;
8013 *io_port = 1;
8014 break;
8015 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1:
8016 *io_gpio = 1;
8017 *io_port = 1;
8018 break;
8019 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1:
8020 *io_gpio = 2;
8021 *io_port = 1;
8022 break;
8023 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1:
8024 *io_gpio = 3;
8025 *io_port = 1;
8026 break;
8027 default:
8028 /* Don't override the io_gpio and io_port */
8029 break;
8030 }
8031}
7780static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, 8032static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
7781 u32 shmem_base_path[], 8033 u32 shmem_base_path[],
7782 u32 shmem2_base_path[], u8 phy_index, 8034 u32 shmem2_base_path[], u8 phy_index,
7783 u32 chip_id) 8035 u32 chip_id)
7784{ 8036{
7785 s8 port; 8037 s8 port, reset_gpio;
7786 u32 swap_val, swap_override; 8038 u32 swap_val, swap_override;
7787 struct bnx2x_phy phy[PORT_MAX]; 8039 struct bnx2x_phy phy[PORT_MAX];
7788 struct bnx2x_phy *phy_blk[PORT_MAX]; 8040 struct bnx2x_phy *phy_blk[PORT_MAX];
7789 s8 port_of_path; 8041 s8 port_of_path;
7790 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); 8042 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7791 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); 8043 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7792 8044
8045 reset_gpio = MISC_REGISTERS_GPIO_1;
7793 port = 1; 8046 port = 1;
7794 8047
7795 bnx2x_ext_phy_hw_reset(bp, port ^ (swap_val && swap_override)); 8048 /*
8049 * Retrieve the reset gpio/port which control the reset.
8050 * Default is GPIO1, PORT1
8051 */
8052 bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
8053 (u8 *)&reset_gpio, (u8 *)&port);
7796 8054
7797 /* Calculate the port based on port swap */ 8055 /* Calculate the port based on port swap */
7798 port ^= (swap_val && swap_override); 8056 port ^= (swap_val && swap_override);
7799 8057
8058 /* Initiate PHY reset*/
8059 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
8060 port);
8061 msleep(1);
8062 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
8063 port);
8064
7800 msleep(5); 8065 msleep(5);
7801 8066
7802 /* PART1 - Reset both phys */ 8067 /* PART1 - Reset both phys */
@@ -7832,9 +8097,7 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
7832 8097
7833 /* Reset the phy */ 8098 /* Reset the phy */
7834 bnx2x_cl45_write(bp, &phy[port], 8099 bnx2x_cl45_write(bp, &phy[port],
7835 MDIO_PMA_DEVAD, 8100 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
7836 MDIO_PMA_REG_CTRL,
7837 1<<15);
7838 } 8101 }
7839 8102
7840 /* Add delay of 150ms after reset */ 8103 /* Add delay of 150ms after reset */
@@ -7848,27 +8111,17 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
7848 } 8111 }
7849 /* PART2 - Download firmware to both phys */ 8112 /* PART2 - Download firmware to both phys */
7850 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 8113 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
7851 u16 fw_ver1; 8114 if (CHIP_IS_E2(bp))
7852 if (CHIP_IS_E2(bp))
7853 port_of_path = 0; 8115 port_of_path = 0;
7854 else 8116 else
7855 port_of_path = port; 8117 port_of_path = port;
7856 DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n", 8118 DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
7857 phy_blk[port]->addr); 8119 phy_blk[port]->addr);
7858 bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], 8120 if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
7859 port_of_path); 8121 port_of_path))
7860 bnx2x_cl45_read(bp, phy_blk[port],
7861 MDIO_PMA_DEVAD,
7862 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
7863 if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
7864 DP(NETIF_MSG_LINK,
7865 "bnx2x_8727_common_init_phy port %x:"
7866 "Download failed. fw version = 0x%x\n",
7867 port, fw_ver1);
7868 return -EINVAL; 8122 return -EINVAL;
7869 }
7870 }
7871 8123
8124 }
7872 return 0; 8125 return 0;
7873} 8126}
7874 8127
@@ -7893,8 +8146,10 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
7893 break; 8146 break;
7894 8147
7895 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 8148 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7896 /* GPIO1 affects both ports, so there's a need to pull 8149 /*
7897 it for a single port alone */ 8150 * GPIO1 affects both ports, so there's a need to pull
8151 * it for a single port alone
8152 */
7898 rc = bnx2x_8726_common_init_phy(bp, shmem_base_path, 8153 rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
7899 shmem2_base_path, 8154 shmem2_base_path,
7900 phy_index, chip_id); 8155 phy_index, chip_id);
@@ -7904,11 +8159,15 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
7904 break; 8159 break;
7905 default: 8160 default:
7906 DP(NETIF_MSG_LINK, 8161 DP(NETIF_MSG_LINK,
7907 "bnx2x_common_init_phy: ext_phy 0x%x not required\n", 8162 "ext_phy 0x%x common init not required\n",
7908 ext_phy_type); 8163 ext_phy_type);
7909 break; 8164 break;
7910 } 8165 }
7911 8166
8167 if (rc != 0)
8168 netdev_err(bp->dev, "Warning: PHY was not initialized,"
8169 " Port %d\n",
8170 0);
7912 return rc; 8171 return rc;
7913} 8172}
7914 8173
@@ -7916,12 +8175,20 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
7916 u32 shmem2_base_path[], u32 chip_id) 8175 u32 shmem2_base_path[], u32 chip_id)
7917{ 8176{
7918 u8 rc = 0; 8177 u8 rc = 0;
8178 u32 phy_ver;
7919 u8 phy_index; 8179 u8 phy_index;
7920 u32 ext_phy_type, ext_phy_config; 8180 u32 ext_phy_type, ext_phy_config;
7921 DP(NETIF_MSG_LINK, "Begin common phy init\n"); 8181 DP(NETIF_MSG_LINK, "Begin common phy init\n");
7922 8182
7923 if (CHIP_REV_IS_EMUL(bp)) 8183 /* Check if common init was already done */
8184 phy_ver = REG_RD(bp, shmem_base_path[0] +
8185 offsetof(struct shmem_region,
8186 port_mb[PORT_0].ext_phy_fw_version));
8187 if (phy_ver) {
8188 DP(NETIF_MSG_LINK, "Not doing common init; phy ver is 0x%x\n",
8189 phy_ver);
7924 return 0; 8190 return 0;
8191 }
7925 8192
7926 /* Read the ext_phy_type for arbitrary port(0) */ 8193 /* Read the ext_phy_type for arbitrary port(0) */
7927 for (phy_index = EXT_PHY1; phy_index < MAX_PHYS; 8194 for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index bedab1a942c4..92f36b6950dc 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -1,4 +1,4 @@
1/* Copyright 2008-2010 Broadcom Corporation 1/* Copyright 2008-2011 Broadcom Corporation
2 * 2 *
3 * Unless you and Broadcom execute a separate written software license 3 * Unless you and Broadcom execute a separate written software license
4 * agreement governing use of this software, this software is licensed to you 4 * agreement governing use of this software, this software is licensed to you
@@ -33,7 +33,7 @@
33#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH 33#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH
34#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE 34#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE
35 35
36#define SPEED_AUTO_NEG 0 36#define SPEED_AUTO_NEG 0
37#define SPEED_12000 12000 37#define SPEED_12000 12000
38#define SPEED_12500 12500 38#define SPEED_12500 12500
39#define SPEED_13000 13000 39#define SPEED_13000 13000
@@ -44,8 +44,8 @@
44#define SFP_EEPROM_VENDOR_NAME_SIZE 16 44#define SFP_EEPROM_VENDOR_NAME_SIZE 16
45#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25 45#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25
46#define SFP_EEPROM_VENDOR_OUI_SIZE 3 46#define SFP_EEPROM_VENDOR_OUI_SIZE 3
47#define SFP_EEPROM_PART_NO_ADDR 0x28 47#define SFP_EEPROM_PART_NO_ADDR 0x28
48#define SFP_EEPROM_PART_NO_SIZE 16 48#define SFP_EEPROM_PART_NO_SIZE 16
49#define PWR_FLT_ERR_MSG_LEN 250 49#define PWR_FLT_ERR_MSG_LEN 250
50 50
51#define XGXS_EXT_PHY_TYPE(ext_phy_config) \ 51#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
@@ -62,7 +62,7 @@
62#define SINGLE_MEDIA(params) (params->num_phys == 2) 62#define SINGLE_MEDIA(params) (params->num_phys == 2)
63/* Dual Media board contains two external phy with different media */ 63/* Dual Media board contains two external phy with different media */
64#define DUAL_MEDIA(params) (params->num_phys == 3) 64#define DUAL_MEDIA(params) (params->num_phys == 3)
65#define FW_PARAM_MDIO_CTRL_OFFSET 16 65#define FW_PARAM_MDIO_CTRL_OFFSET 16
66#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \ 66#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
67 (phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET) 67 (phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
68 68
@@ -201,12 +201,14 @@ struct link_params {
201 201
202 /* Default / User Configuration */ 202 /* Default / User Configuration */
203 u8 loopback_mode; 203 u8 loopback_mode;
204#define LOOPBACK_NONE 0 204#define LOOPBACK_NONE 0
205#define LOOPBACK_EMAC 1 205#define LOOPBACK_EMAC 1
206#define LOOPBACK_BMAC 2 206#define LOOPBACK_BMAC 2
207#define LOOPBACK_XGXS 3 207#define LOOPBACK_XGXS 3
208#define LOOPBACK_EXT_PHY 4 208#define LOOPBACK_EXT_PHY 4
209#define LOOPBACK_EXT 5 209#define LOOPBACK_EXT 5
210#define LOOPBACK_UMAC 6
211#define LOOPBACK_XMAC 7
210 212
211 /* Device parameters */ 213 /* Device parameters */
212 u8 mac_addr[6]; 214 u8 mac_addr[6];
@@ -230,10 +232,11 @@ struct link_params {
230 /* Phy register parameter */ 232 /* Phy register parameter */
231 u32 chip_id; 233 u32 chip_id;
232 234
235 /* features */
233 u32 feature_config_flags; 236 u32 feature_config_flags;
234#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0) 237#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
235#define FEATURE_CONFIG_PFC_ENABLED (1<<1) 238#define FEATURE_CONFIG_PFC_ENABLED (1<<1)
236#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2) 239#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
237#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3) 240#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
238 /* Will be populated during common init */ 241 /* Will be populated during common init */
239 struct bnx2x_phy phy[MAX_PHYS]; 242 struct bnx2x_phy phy[MAX_PHYS];
@@ -334,6 +337,11 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
334/* Reset the external of SFX7101 */ 337/* Reset the external of SFX7101 */
335void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy); 338void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
336 339
340/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
341u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
342 struct link_params *params, u16 addr,
343 u8 byte_cnt, u8 *o_buf);
344
337void bnx2x_hw_reset_phy(struct link_params *params); 345void bnx2x_hw_reset_phy(struct link_params *params);
338 346
339/* Checks if HW lock is required for this phy/board type */ 347/* Checks if HW lock is required for this phy/board type */
@@ -379,7 +387,7 @@ void bnx2x_ets_disabled(struct link_params *params);
379 387
380/* Used to configure the ETS to BW limited */ 388/* Used to configure the ETS to BW limited */
381void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw, 389void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
382 const u32 cos1_bw); 390 const u32 cos1_bw);
383 391
384/* Used to configure the ETS to strict */ 392/* Used to configure the ETS to strict */
385u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos); 393u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
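This header now exports bnx2x_read_sfp_module_eeprom(), so callers outside bnx2x_link.c can query the module EEPROM directly. A usage sketch built only from the prototype and SFP_EEPROM_PART_NO_* constants above, assuming a zero return indicates success (the surrounding code suggests it, but the header does not say):

	/* Sketch: fetch the SFP+ part number via the new export. */
	u8 part_no[SFP_EEPROM_PART_NO_SIZE + 1] = { 0 };

	if (!bnx2x_read_sfp_module_eeprom(phy, params, SFP_EEPROM_PART_NO_ADDR,
					  SFP_EEPROM_PART_NO_SIZE, part_no))
		DP(NETIF_MSG_LINK, "SFP+ part number: %s\n", part_no);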
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 8cdcf5b39d1e..6c7745eee00d 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -586,7 +586,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
586 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); 586 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
587 587
588 /* lock the dmae channel */ 588 /* lock the dmae channel */
589 mutex_lock(&bp->dmae_mutex); 589 spin_lock_bh(&bp->dmae_lock);
590 590
591 /* reset completion */ 591 /* reset completion */
592 *wb_comp = 0; 592 *wb_comp = 0;
@@ -617,7 +617,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
617 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); 617 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
618 618
619unlock: 619unlock:
620 mutex_unlock(&bp->dmae_mutex); 620 spin_unlock_bh(&bp->dmae_lock);
621 return rc; 621 return rc;
622} 622}
623 623
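Swapping the DMAE mutex for spin_lock_bh() above changes the allowed calling contexts: a mutex may sleep, so it cannot be taken from softirq context, while a BH-disabling spinlock can serialize the single DMAE completion channel against bottom halves. The matching init-site change is assumed to be:

	/* Sketch: the spinlock replacing dmae_mutex must be initialized
	 * once at setup (field name taken from the hunk above). */
	spin_lock_init(&bp->dmae_lock);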
@@ -1397,7 +1397,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1397 } 1397 }
1398 1398
1399 smp_mb__before_atomic_inc(); 1399 smp_mb__before_atomic_inc();
1400 atomic_inc(&bp->spq_left); 1400 atomic_inc(&bp->cq_spq_left);
1401 /* push the change in fp->state and towards the memory */ 1401 /* push the change in fp->state and towards the memory */
1402 smp_wmb(); 1402 smp_wmb();
1403 1403
@@ -2301,15 +2301,10 @@ static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2301 /* accept matched ucast */ 2301 /* accept matched ucast */
2302 drop_all_ucast = 0; 2302 drop_all_ucast = 0;
2303 } 2303 }
2304 if (filters & BNX2X_ACCEPT_MULTICAST) { 2304 if (filters & BNX2X_ACCEPT_MULTICAST)
2305 /* accept matched mcast */ 2305 /* accept matched mcast */
2306 drop_all_mcast = 0; 2306 drop_all_mcast = 0;
2307 if (IS_MF_SI(bp)) 2307
2308 /* since mcast addresses won't arrive with ovlan,
2309 * fw needs to accept all of them in
2310 * switch-independent mode */
2311 accp_all_mcast = 1;
2312 }
2313 if (filters & BNX2X_ACCEPT_ALL_UNICAST) { 2308 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2314 /* accept all mcast */ 2309 /* accept all mcast */
2315 drop_all_ucast = 0; 2310 drop_all_ucast = 0;
@@ -2478,8 +2473,14 @@ static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2478 rxq_init->sge_map = fp->rx_sge_mapping; 2473 rxq_init->sge_map = fp->rx_sge_mapping;
2479 rxq_init->rcq_map = fp->rx_comp_mapping; 2474 rxq_init->rcq_map = fp->rx_comp_mapping;
2480 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE; 2475 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2481 rxq_init->mtu = bp->dev->mtu; 2476
2482 rxq_init->buf_sz = bp->rx_buf_size; 2477 /* Always use mini-jumbo MTU for FCoE L2 ring */
2478 if (IS_FCOE_FP(fp))
2479 rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2480 else
2481 rxq_init->mtu = bp->dev->mtu;
2482
2483 rxq_init->buf_sz = fp->rx_buf_size;
2483 rxq_init->cl_qzone_id = fp->cl_qzone_id; 2484 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2484 rxq_init->cl_id = fp->cl_id; 2485 rxq_init->cl_id = fp->cl_id;
2485 rxq_init->spcl_id = fp->cl_id; 2486 rxq_init->spcl_id = fp->cl_id;
@@ -2731,11 +2732,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2731 2732
2732 spin_lock_bh(&bp->spq_lock); 2733 spin_lock_bh(&bp->spq_lock);
2733 2734
2734 if (!atomic_read(&bp->spq_left)) { 2735 if (common) {
2735 BNX2X_ERR("BUG! SPQ ring full!\n"); 2736 if (!atomic_read(&bp->eq_spq_left)) {
2736 spin_unlock_bh(&bp->spq_lock); 2737 BNX2X_ERR("BUG! EQ ring full!\n");
2737 bnx2x_panic(); 2738 spin_unlock_bh(&bp->spq_lock);
2738 return -EBUSY; 2739 bnx2x_panic();
2740 return -EBUSY;
2741 }
2742 } else if (!atomic_read(&bp->cq_spq_left)) {
2743 BNX2X_ERR("BUG! SPQ ring full!\n");
2744 spin_unlock_bh(&bp->spq_lock);
2745 bnx2x_panic();
2746 return -EBUSY;
2739 } 2747 }
2740 2748
2741 spe = bnx2x_sp_get_next(bp); 2749 spe = bnx2x_sp_get_next(bp);
@@ -2766,20 +2774,26 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2766 spe->data.update_data_addr.lo = cpu_to_le32(data_lo); 2774 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2767 2775
2768 /* stats ramrod has its own slot on the spq */ 2776 /* stats ramrod has its own slot on the spq */
2769 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) 2777 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
2770 /* It's ok if the actual decrement is issued towards the memory 2778 /* It's ok if the actual decrement is issued towards the memory
2771 * somewhere between the spin_lock and spin_unlock. Thus no 2779 * somewhere between the spin_lock and spin_unlock. Thus no
2772 * more explicit memory barrier is needed. 2780 * more explicit memory barrier is needed.
2773 */ 2781 */
2774 atomic_dec(&bp->spq_left); 2782 if (common)
2783 atomic_dec(&bp->eq_spq_left);
2784 else
2785 atomic_dec(&bp->cq_spq_left);
2786 }
2787
2775 2788
2776 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/, 2789 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2777 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) " 2790 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2778 "type(0x%x) left %x\n", 2791 "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
2779 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), 2792 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2780 (u32)(U64_LO(bp->spq_mapping) + 2793 (u32)(U64_LO(bp->spq_mapping) +
2781 (void *)bp->spq_prod_bd - (void *)bp->spq), command, 2794 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2782 HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left)); 2795 HW_CID(bp, cid), data_hi, data_lo, type,
2796 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
2783 2797
2784 bnx2x_sp_prod_update(bp); 2798 bnx2x_sp_prod_update(bp);
2785 spin_unlock_bh(&bp->spq_lock); 2799 spin_unlock_bh(&bp->spq_lock);
@@ -3691,8 +3705,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
3691 sw_cons = bp->eq_cons; 3705 sw_cons = bp->eq_cons;
3692 sw_prod = bp->eq_prod; 3706 sw_prod = bp->eq_prod;
3693 3707
3694 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n", 3708 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->cq_spq_left %u\n",
3695 hw_cons, sw_cons, atomic_read(&bp->spq_left)); 3709 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
3696 3710
3697 for (; sw_cons != hw_cons; 3711 for (; sw_cons != hw_cons;
3698 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { 3712 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
@@ -3757,13 +3771,15 @@ static void bnx2x_eq_int(struct bnx2x *bp)
3757 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): 3771 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3758 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): 3772 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3759 DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); 3773 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3760 bp->set_mac_pending = 0; 3774 if (elem->message.data.set_mac_event.echo)
3775 bp->set_mac_pending = 0;
3761 break; 3776 break;
3762 3777
3763 case (EVENT_RING_OPCODE_SET_MAC | 3778 case (EVENT_RING_OPCODE_SET_MAC |
3764 BNX2X_STATE_CLOSING_WAIT4_HALT): 3779 BNX2X_STATE_CLOSING_WAIT4_HALT):
3765 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n"); 3780 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3766 bp->set_mac_pending = 0; 3781 if (elem->message.data.set_mac_event.echo)
3782 bp->set_mac_pending = 0;
3767 break; 3783 break;
3768 default: 3784 default:
3769 /* unknown event log error and continue */ 3785 /* unknown event log error and continue */
@@ -3775,7 +3791,7 @@ next_spqe:
3775 } /* for */ 3791 } /* for */
3776 3792
3777 smp_mb__before_atomic_inc(); 3793 smp_mb__before_atomic_inc();
3778 atomic_add(spqe_cnt, &bp->spq_left); 3794 atomic_add(spqe_cnt, &bp->eq_spq_left);
3779 3795
3780 bp->eq_cons = sw_cons; 3796 bp->eq_cons = sw_cons;
3781 bp->eq_prod = sw_prod; 3797 bp->eq_prod = sw_prod;
@@ -4208,7 +4224,7 @@ void bnx2x_update_coalesce(struct bnx2x *bp)
4208static void bnx2x_init_sp_ring(struct bnx2x *bp) 4224static void bnx2x_init_sp_ring(struct bnx2x *bp)
4209{ 4225{
4210 spin_lock_init(&bp->spq_lock); 4226 spin_lock_init(&bp->spq_lock);
4211 atomic_set(&bp->spq_left, MAX_SPQ_PENDING); 4227 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
4212 4228
4213 bp->spq_prod_idx = 0; 4229 bp->spq_prod_idx = 0;
4214 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; 4230 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
@@ -4233,9 +4249,12 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
4233 bp->eq_cons = 0; 4249 bp->eq_cons = 0;
4234 bp->eq_prod = NUM_EQ_DESC; 4250 bp->eq_prod = NUM_EQ_DESC;
4235 bp->eq_cons_sb = BNX2X_EQ_INDEX; 4251 bp->eq_cons_sb = BNX2X_EQ_INDEX;
4252 /* we want a warning message before it gets rough... */
4253 atomic_set(&bp->eq_spq_left,
4254 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
4236} 4255}
4237 4256
4238static void bnx2x_init_ind_table(struct bnx2x *bp) 4257void bnx2x_push_indir_table(struct bnx2x *bp)
4239{ 4258{
4240 int func = BP_FUNC(bp); 4259 int func = BP_FUNC(bp);
4241 int i; 4260 int i;
@@ -4243,13 +4262,20 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
4243 if (bp->multi_mode == ETH_RSS_MODE_DISABLED) 4262 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4244 return; 4263 return;
4245 4264
4246 DP(NETIF_MSG_IFUP,
4247 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4248 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) 4265 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4249 REG_WR8(bp, BAR_TSTRORM_INTMEM + 4266 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4250 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i, 4267 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4251 bp->fp->cl_id + (i % (bp->num_queues - 4268 bp->fp->cl_id + bp->rx_indir_table[i]);
4252 NONE_ETH_CONTEXT_USE))); 4269}
4270
4271static void bnx2x_init_ind_table(struct bnx2x *bp)
4272{
4273 int i;
4274
4275 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4276 bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
4277
4278 bnx2x_push_indir_table(bp);
4253} 4279}
4254 4280
4255void bnx2x_set_storm_rx_mode(struct bnx2x *bp) 4281void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
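
The refactoring above splits RSS setup into two steps: bnx2x_init_ind_table() now fills a host-side rx_indir_table mapping each hash bucket to an RX queue round-robin, and bnx2x_push_indir_table() writes that table into TSTORM memory, so it can later be reprogrammed without recomputing it. A standalone sketch of the round-robin fill; the table size follows TSTORM_INDIRECTION_TABLE_SIZE (assumed 128 here) and the queue count is an example value:

#include <stdio.h>

#define TABLE_SIZE 128			/* TSTORM_INDIRECTION_TABLE_SIZE, assumed */

int main(void)
{
	unsigned char tbl[TABLE_SIZE];
	int num_queues = 4;		/* stand-in for BNX2X_NUM_ETH_QUEUES(bp) */
	int i;

	for (i = 0; i < TABLE_SIZE; i++)
		tbl[i] = i % num_queues;	/* bucket -> queue, round-robin */

	for (i = 0; i < 8; i++)
		printf("bucket %d -> queue %d\n", i, tbl[i]);
	return 0;
}
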
@@ -4281,9 +4307,12 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4281 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | 4307 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4282 BNX2X_ACCEPT_MULTICAST; 4308 BNX2X_ACCEPT_MULTICAST;
4283#ifdef BCM_CNIC 4309#ifdef BCM_CNIC
4284 cl_id = bnx2x_fcoe(bp, cl_id); 4310 if (!NO_FCOE(bp)) {
4285 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | 4311 cl_id = bnx2x_fcoe(bp, cl_id);
4286 BNX2X_ACCEPT_MULTICAST); 4312 bnx2x_rxq_set_mac_filters(bp, cl_id,
4313 BNX2X_ACCEPT_UNICAST |
4314 BNX2X_ACCEPT_MULTICAST);
4315 }
4287#endif 4316#endif
4288 break; 4317 break;
4289 4318
@@ -4291,18 +4320,29 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4291 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | 4320 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4292 BNX2X_ACCEPT_ALL_MULTICAST; 4321 BNX2X_ACCEPT_ALL_MULTICAST;
4293#ifdef BCM_CNIC 4322#ifdef BCM_CNIC
4294 cl_id = bnx2x_fcoe(bp, cl_id); 4323 /*
4295 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | 4324 * Prevent duplication of multicast packets by configuring FCoE
4296 BNX2X_ACCEPT_MULTICAST); 4325 * L2 Client to receive only matched unicast frames.
4326 */
4327 if (!NO_FCOE(bp)) {
4328 cl_id = bnx2x_fcoe(bp, cl_id);
4329 bnx2x_rxq_set_mac_filters(bp, cl_id,
4330 BNX2X_ACCEPT_UNICAST);
4331 }
4297#endif 4332#endif
4298 break; 4333 break;
4299 4334
4300 case BNX2X_RX_MODE_PROMISC: 4335 case BNX2X_RX_MODE_PROMISC:
4301 def_q_filters |= BNX2X_PROMISCUOUS_MODE; 4336 def_q_filters |= BNX2X_PROMISCUOUS_MODE;
4302#ifdef BCM_CNIC 4337#ifdef BCM_CNIC
4303 cl_id = bnx2x_fcoe(bp, cl_id); 4338 /*
4304 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | 4339 * Prevent packets duplication by configuring DROP_ALL for FCoE
4305 BNX2X_ACCEPT_MULTICAST); 4340 * L2 Client.
4341 */
4342 if (!NO_FCOE(bp)) {
4343 cl_id = bnx2x_fcoe(bp, cl_id);
4344 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4345 }
4306#endif 4346#endif
4307 /* pass management unicast packets as well */ 4347 /* pass management unicast packets as well */
4308 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; 4348 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
@@ -5296,10 +5336,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
5296 } 5336 }
5297 } 5337 }
5298 5338
5299 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
5300 bp->common.shmem_base,
5301 bp->common.shmem2_base);
5302
5303 bnx2x_setup_fan_failure_detection(bp); 5339 bnx2x_setup_fan_failure_detection(bp);
5304 5340
5305 /* clear PXP2 attentions */ 5341 /* clear PXP2 attentions */
@@ -5503,9 +5539,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
5503 5539
5504 bnx2x_init_block(bp, MCP_BLOCK, init_stage); 5540 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
5505 bnx2x_init_block(bp, DMAE_BLOCK, init_stage); 5541 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
5506 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
5507 bp->common.shmem_base,
5508 bp->common.shmem2_base);
5509 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base, 5542 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
5510 bp->common.shmem2_base, port)) { 5543 bp->common.shmem2_base, port)) {
5511 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 5544 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -5838,7 +5871,7 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5838 BP_ABS_FUNC(bp), load_code); 5871 BP_ABS_FUNC(bp), load_code);
5839 5872
5840 bp->dmae_ready = 0; 5873 bp->dmae_ready = 0;
5841 mutex_init(&bp->dmae_mutex); 5874 spin_lock_init(&bp->dmae_lock);
5842 rc = bnx2x_gunzip_init(bp); 5875 rc = bnx2x_gunzip_init(bp);
5843 if (rc) 5876 if (rc)
5844 return rc; 5877 return rc;
@@ -5990,6 +6023,8 @@ void bnx2x_free_mem(struct bnx2x *bp)
5990 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, 6023 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5991 BCM_PAGE_SIZE * NUM_EQ_PAGES); 6024 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5992 6025
6026 BNX2X_FREE(bp->rx_indir_table);
6027
5993#undef BNX2X_PCI_FREE 6028#undef BNX2X_PCI_FREE
5994#undef BNX2X_KFREE 6029#undef BNX2X_KFREE
5995} 6030}
@@ -6120,6 +6155,9 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
6120 /* EQ */ 6155 /* EQ */
6121 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping, 6156 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
6122 BCM_PAGE_SIZE * NUM_EQ_PAGES); 6157 BCM_PAGE_SIZE * NUM_EQ_PAGES);
6158
6159 BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
6160 TSTORM_INDIRECTION_TABLE_SIZE);
6123 return 0; 6161 return 0;
6124 6162
6125alloc_mem_err: 6163alloc_mem_err:
@@ -6173,12 +6211,14 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
6173 int ramrod_flags = WAIT_RAMROD_COMMON; 6211 int ramrod_flags = WAIT_RAMROD_COMMON;
6174 6212
6175 bp->set_mac_pending = 1; 6213 bp->set_mac_pending = 1;
6176 smp_wmb();
6177 6214
6178 config->hdr.length = 1; 6215 config->hdr.length = 1;
6179 config->hdr.offset = cam_offset; 6216 config->hdr.offset = cam_offset;
6180 config->hdr.client_id = 0xff; 6217 config->hdr.client_id = 0xff;
6181 config->hdr.reserved1 = 0; 6218 /* Mark the single MAC configuration ramrod as opposed to a
 6219 * UC/MC list configuration.
6220 */
6221 config->hdr.echo = 1;
6182 6222
6183 /* primary MAC */ 6223 /* primary MAC */
6184 config->config_table[0].msb_mac_addr = 6224 config->config_table[0].msb_mac_addr =
@@ -6210,6 +6250,8 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
6210 config->config_table[0].middle_mac_addr, 6250 config->config_table[0].middle_mac_addr,
6211 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec); 6251 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
6212 6252
6253 mb();
6254
6213 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, 6255 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6214 U64_HI(bnx2x_sp_mapping(bp, mac_config)), 6256 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6215 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1); 6257 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
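
The hdr.echo field set above takes over from reserved1 as a marker: a single-MAC ramrod sets echo=1, so the EQ handler (the set_mac_event.echo check in the first hunk of this file) knows to clear bp->set_mac_pending, while UC/MC list ramrods set echo=0 and are not tracked through that flag. A toy model of the convention, not the driver code itself:

#include <stdio.h>

struct ramrod {
	int echo;	/* 1: completion clears set_mac_pending */
};

static int set_mac_pending;

static void post(struct ramrod *r, int tracked)
{
	r->echo = tracked;
	if (tracked)
		set_mac_pending = 1;
}

static void on_completion(const struct ramrod *r)
{
	if (r->echo)	/* untracked ramrods leave the flag alone */
		set_mac_pending = 0;
}

int main(void)
{
	struct ramrod single, list;

	post(&single, 1);
	post(&list, 0);
	on_completion(&list);
	printf("after list completion: pending=%d\n", set_mac_pending);
	on_completion(&single);
	printf("after single completion: pending=%d\n", set_mac_pending);
	return 0;
}
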
@@ -6274,20 +6316,15 @@ static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
6274 if (CHIP_IS_E1H(bp)) 6316 if (CHIP_IS_E1H(bp))
6275 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp); 6317 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6276 else if (CHIP_MODE_IS_4_PORT(bp)) 6318 else if (CHIP_MODE_IS_4_PORT(bp))
6277 return BP_FUNC(bp) * 32 + rel_offset; 6319 return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
6278 else 6320 else
6279 return BP_VN(bp) * 32 + rel_offset; 6321 return E2_FUNC_MAX * rel_offset + BP_VN(bp);
6280} 6322}
6281 6323
6282/** 6324/**
6283 * LLH CAM line allocations: currently only iSCSI and ETH macs are 6325 * LLH CAM line allocations: currently only iSCSI and ETH macs are
6284 * relevant. In addition, current implementation is tuned for a 6326 * relevant. In addition, current implementation is tuned for a
6285 * single ETH MAC. 6327 * single ETH MAC.
6286 *
6287 * When multiple unicast ETH MACs PF configuration in switch
6288 * independent mode is required (NetQ, multiple netdev MACs,
6289 * etc.), consider better utilisation of 16 per function MAC
6290 * entries in the LLH memory.
6291 */ 6328 */
6292enum { 6329enum {
6293 LLH_CAM_ISCSI_ETH_LINE = 0, 6330 LLH_CAM_ISCSI_ETH_LINE = 0,
@@ -6362,14 +6399,37 @@ void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6362 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1); 6399 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6363 } 6400 }
6364} 6401}
6365static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset) 6402
6403static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
6404{
6405 return CHIP_REV_IS_SLOW(bp) ?
6406 (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
6407 (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
6408}
6409
6410/* set mc list, do not wait as wait implies sleep and
6411 * set_rx_mode can be invoked from non-sleepable context.
6412 *
6413 * Instead we use the same ramrod data buffer each time we need
6414 * to configure a list of addresses, and use the fact that the
6415 * list of MACs is changed in an incremental way and that the
6416 * function is called under the netif_addr_lock. A temporary
6417 * inconsistent CAM configuration (possible in case of a very fast
6418 * sequence of add/del/add on the host side) will shortly be
6419 * restored by the handler of the last ramrod.
6420 */
6421static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
6366{ 6422{
6367 int i = 0, old; 6423 int i = 0, old;
6368 struct net_device *dev = bp->dev; 6424 struct net_device *dev = bp->dev;
6425 u8 offset = bnx2x_e1_cam_mc_offset(bp);
6369 struct netdev_hw_addr *ha; 6426 struct netdev_hw_addr *ha;
6370 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config); 6427 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6371 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config); 6428 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6372 6429
6430 if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
6431 return -EINVAL;
6432
6373 netdev_for_each_mc_addr(ha, dev) { 6433 netdev_for_each_mc_addr(ha, dev) {
6374 /* copy mac */ 6434 /* copy mac */
6375 config_cmd->config_table[i].msb_mac_addr = 6435 config_cmd->config_table[i].msb_mac_addr =
@@ -6410,32 +6470,47 @@ static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
6410 } 6470 }
6411 } 6471 }
6412 6472
6473 wmb();
6474
6413 config_cmd->hdr.length = i; 6475 config_cmd->hdr.length = i;
6414 config_cmd->hdr.offset = offset; 6476 config_cmd->hdr.offset = offset;
6415 config_cmd->hdr.client_id = 0xff; 6477 config_cmd->hdr.client_id = 0xff;
6416 config_cmd->hdr.reserved1 = 0; 6478 /* Mark that this ramrod doesn't use bp->set_mac_pending for
6479 * synchronization.
6480 */
6481 config_cmd->hdr.echo = 0;
6417 6482
6418 bp->set_mac_pending = 1; 6483 mb();
6419 smp_wmb();
6420 6484
6421 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, 6485 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6422 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1); 6486 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6423} 6487}
6424static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp) 6488
6489void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
6425{ 6490{
6426 int i; 6491 int i;
6427 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config); 6492 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6428 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config); 6493 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6429 int ramrod_flags = WAIT_RAMROD_COMMON; 6494 int ramrod_flags = WAIT_RAMROD_COMMON;
6495 u8 offset = bnx2x_e1_cam_mc_offset(bp);
6430 6496
6431 bp->set_mac_pending = 1; 6497 for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
6432 smp_wmb();
6433
6434 for (i = 0; i < config_cmd->hdr.length; i++)
6435 SET_FLAG(config_cmd->config_table[i].flags, 6498 SET_FLAG(config_cmd->config_table[i].flags,
6436 MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 6499 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6437 T_ETH_MAC_COMMAND_INVALIDATE); 6500 T_ETH_MAC_COMMAND_INVALIDATE);
6438 6501
6502 wmb();
6503
6504 config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
6505 config_cmd->hdr.offset = offset;
6506 config_cmd->hdr.client_id = 0xff;
6507 /* We'll wait for a completion this time... */
6508 config_cmd->hdr.echo = 1;
6509
6510 bp->set_mac_pending = 1;
6511
6512 mb();
6513
6439 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, 6514 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6440 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1); 6515 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6441 6516
@@ -6445,6 +6520,44 @@ static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
6445 6520
6446} 6521}
6447 6522
6523/* Accept one or more multicasts */
6524static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
6525{
6526 struct net_device *dev = bp->dev;
6527 struct netdev_hw_addr *ha;
6528 u32 mc_filter[MC_HASH_SIZE];
6529 u32 crc, bit, regidx;
6530 int i;
6531
6532 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6533
6534 netdev_for_each_mc_addr(ha, dev) {
6535 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
6536 bnx2x_mc_addr(ha));
6537
6538 crc = crc32c_le(0, bnx2x_mc_addr(ha),
6539 ETH_ALEN);
6540 bit = (crc >> 24) & 0xff;
6541 regidx = bit >> 5;
6542 bit &= 0x1f;
6543 mc_filter[regidx] |= (1 << bit);
6544 }
6545
6546 for (i = 0; i < MC_HASH_SIZE; i++)
6547 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6548 mc_filter[i]);
6549
6550 return 0;
6551}
6552
6553void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
6554{
6555 int i;
6556
6557 for (i = 0; i < MC_HASH_SIZE; i++)
6558 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6559}
6560
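
bnx2x_set_e1h_mc_list() above does not touch the CAM at all: it hashes each multicast address with crc32c and sets one of 256 filter bits spread across eight 32-bit MC_HASH registers. A userspace sketch of the bit placement, with an invented constant standing in for a crc32c_le() result:

#include <stdint.h>
#include <stdio.h>

#define MC_HASH_SIZE 8

static void set_mc_hash_bit(uint32_t mc_filter[MC_HASH_SIZE], uint32_t crc)
{
	uint32_t bit = (crc >> 24) & 0xff;	/* top byte picks 1 of 256 bits */
	uint32_t regidx = bit >> 5;		/* which 32-bit register */

	bit &= 0x1f;				/* bit within that register */
	mc_filter[regidx] |= 1u << bit;
}

int main(void)
{
	uint32_t mc_filter[MC_HASH_SIZE] = { 0 };
	unsigned int idx = 0xde >> 5;		/* register 6 for this value */

	set_mc_hash_bit(mc_filter, 0xdeadbeefu);	/* pretend crc32c */
	printf("reg %u = 0x%08x\n", idx, mc_filter[idx]);	/* 0x40000000 */
	return 0;
}
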
6448#ifdef BCM_CNIC 6561#ifdef BCM_CNIC
6449/** 6562/**
 6450 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH 6563 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
@@ -6463,12 +6576,13 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
6463 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID + 6576 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
6464 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE; 6577 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
6465 u32 cl_bit_vec = (1 << iscsi_l2_cl_id); 6578 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
6579 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
6466 6580
6467 /* Send a SET_MAC ramrod */ 6581 /* Send a SET_MAC ramrod */
6468 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec, 6582 bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
6469 cam_offset, 0); 6583 cam_offset, 0);
6470 6584
6471 bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE); 6585 bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
6472 6586
6473 return 0; 6587 return 0;
6474} 6588}
@@ -7110,20 +7224,15 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
7110 /* Give HW time to discard old tx messages */ 7224 /* Give HW time to discard old tx messages */
7111 msleep(1); 7225 msleep(1);
7112 7226
7113 if (CHIP_IS_E1(bp)) { 7227 bnx2x_set_eth_mac(bp, 0);
7114 /* invalidate mc list,
7115 * wait and poll (interrupts are off)
7116 */
7117 bnx2x_invlidate_e1_mc_list(bp);
7118 bnx2x_set_eth_mac(bp, 0);
7119
7120 } else {
7121 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7122 7228
7123 bnx2x_set_eth_mac(bp, 0); 7229 bnx2x_invalidate_uc_list(bp);
7124 7230
7125 for (i = 0; i < MC_HASH_SIZE; i++) 7231 if (CHIP_IS_E1(bp))
7126 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0); 7232 bnx2x_invalidate_e1_mc_list(bp);
7233 else {
7234 bnx2x_invalidate_e1h_mc_list(bp);
7235 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7127 } 7236 }
7128 7237
7129#ifdef BCM_CNIC 7238#ifdef BCM_CNIC
@@ -8379,13 +8488,60 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8379 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) 8488 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8380 bp->mdio.prtad = 8489 bp->mdio.prtad =
8381 XGXS_EXT_PHY_ADDR(ext_phy_config); 8490 XGXS_EXT_PHY_ADDR(ext_phy_config);
8491
8492 /*
8493 * Check if hw lock is required to access MDC/MDIO bus to the PHY(s)
8494 * In MF mode, it is set to cover self test cases
8495 */
8496 if (IS_MF(bp))
8497 bp->port.need_hw_lock = 1;
8498 else
8499 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
8500 bp->common.shmem_base,
8501 bp->common.shmem2_base);
8382} 8502}
8383 8503
8504#ifdef BCM_CNIC
8505static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
8506{
8507 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
8508 drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
8509 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
8510 drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
8511
8512 /* Get the number of maximum allowed iSCSI and FCoE connections */
8513 bp->cnic_eth_dev.max_iscsi_conn =
8514 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
8515 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
8516
8517 bp->cnic_eth_dev.max_fcoe_conn =
8518 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
8519 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
8520
8521 BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
8522 bp->cnic_eth_dev.max_iscsi_conn,
8523 bp->cnic_eth_dev.max_fcoe_conn);
8524
 8525 /* If maximum allowed number of connections is zero -
8526 * disable the feature.
8527 */
8528 if (!bp->cnic_eth_dev.max_iscsi_conn)
8529 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8530
8531 if (!bp->cnic_eth_dev.max_fcoe_conn)
8532 bp->flags |= NO_FCOE_FLAG;
8533}
8534#endif
8535
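
The license words read by bnx2x_get_cnic_info() above are stored obfuscated in shared memory, which is why each SHMEM_RD() result is XORed with FW_ENCODE_32BIT_PATTERN before the mask/shift extraction. A sketch of the decode; the raw word, mask and shift are invented, and only the XOR step mirrors the code above:

#include <stdint.h>
#include <stdio.h>

#define FW_ENCODE_32BIT_PATTERN	0x1e1e1e1e	/* assumed pattern value */
#define INIT_CONN_MASK		0xffff		/* hypothetical field mask */
#define INIT_CONN_SHIFT		0

int main(void)
{
	uint32_t raw = 0x1e1e1e5e;		/* pretend SHMEM license word */
	uint32_t decoded = FW_ENCODE_32BIT_PATTERN ^ raw;
	uint32_t max_conn = (decoded & INIT_CONN_MASK) >> INIT_CONN_SHIFT;

	printf("max connections: %u\n", max_conn);	/* 64 */
	return 0;
}
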
8384static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) 8536static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8385{ 8537{
8386 u32 val, val2; 8538 u32 val, val2;
8387 int func = BP_ABS_FUNC(bp); 8539 int func = BP_ABS_FUNC(bp);
8388 int port = BP_PORT(bp); 8540 int port = BP_PORT(bp);
8541#ifdef BCM_CNIC
8542 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
8543 u8 *fip_mac = bp->fip_mac;
8544#endif
8389 8545
8390 if (BP_NOMCP(bp)) { 8546 if (BP_NOMCP(bp)) {
8391 BNX2X_ERROR("warning: random MAC workaround active\n"); 8547 BNX2X_ERROR("warning: random MAC workaround active\n");
@@ -8398,7 +8554,9 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8398 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); 8554 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8399 8555
8400#ifdef BCM_CNIC 8556#ifdef BCM_CNIC
 8401 /* iSCSI NPAR MAC */ 8557 /* iSCSI and FCoE NPAR MACs: if there is neither an iSCSI nor
 8558 * an FCoE MAC then the appropriate feature should be disabled.
8559 */
8402 if (IS_MF_SI(bp)) { 8560 if (IS_MF_SI(bp)) {
8403 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); 8561 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
8404 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { 8562 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
@@ -8406,8 +8564,39 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8406 iscsi_mac_addr_upper); 8564 iscsi_mac_addr_upper);
8407 val = MF_CFG_RD(bp, func_ext_config[func]. 8565 val = MF_CFG_RD(bp, func_ext_config[func].
8408 iscsi_mac_addr_lower); 8566 iscsi_mac_addr_lower);
8409 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2); 8567 BNX2X_DEV_INFO("Read iSCSI MAC: "
8410 } 8568 "0x%x:0x%04x\n", val2, val);
8569 bnx2x_set_mac_buf(iscsi_mac, val, val2);
8570
8571 /* Disable iSCSI OOO if MAC configuration is
8572 * invalid.
8573 */
8574 if (!is_valid_ether_addr(iscsi_mac)) {
8575 bp->flags |= NO_ISCSI_OOO_FLAG |
8576 NO_ISCSI_FLAG;
8577 memset(iscsi_mac, 0, ETH_ALEN);
8578 }
8579 } else
8580 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8581
8582 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
8583 val2 = MF_CFG_RD(bp, func_ext_config[func].
8584 fcoe_mac_addr_upper);
8585 val = MF_CFG_RD(bp, func_ext_config[func].
8586 fcoe_mac_addr_lower);
8587 BNX2X_DEV_INFO("Read FCoE MAC to "
8588 "0x%x:0x%04x\n", val2, val);
8589 bnx2x_set_mac_buf(fip_mac, val, val2);
8590
8591 /* Disable FCoE if MAC configuration is
8592 * invalid.
8593 */
8594 if (!is_valid_ether_addr(fip_mac)) {
8595 bp->flags |= NO_FCOE_FLAG;
8596 memset(bp->fip_mac, 0, ETH_ALEN);
8597 }
8598 } else
8599 bp->flags |= NO_FCOE_FLAG;
8411 } 8600 }
8412#endif 8601#endif
8413 } else { 8602 } else {
@@ -8421,7 +8610,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8421 iscsi_mac_upper); 8610 iscsi_mac_upper);
8422 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. 8611 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
8423 iscsi_mac_lower); 8612 iscsi_mac_lower);
8424 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2); 8613 bnx2x_set_mac_buf(iscsi_mac, val, val2);
8425#endif 8614#endif
8426 } 8615 }
8427 8616
@@ -8429,14 +8618,12 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8429 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); 8618 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8430 8619
8431#ifdef BCM_CNIC 8620#ifdef BCM_CNIC
 8432 /* Inform the upper layers about FCoE MAC */ 8621 /* Set the FCoE MAC in modes other than MF_SI */
8433 if (!CHIP_IS_E1x(bp)) { 8622 if (!CHIP_IS_E1x(bp)) {
8434 if (IS_MF_SD(bp)) 8623 if (IS_MF_SD(bp))
8435 memcpy(bp->fip_mac, bp->dev->dev_addr, 8624 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
8436 sizeof(bp->fip_mac)); 8625 else if (!IS_MF(bp))
8437 else 8626 memcpy(fip_mac, iscsi_mac, ETH_ALEN);
8438 memcpy(bp->fip_mac, bp->iscsi_mac,
8439 sizeof(bp->fip_mac));
8440 } 8627 }
8441#endif 8628#endif
8442} 8629}
@@ -8599,6 +8786,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8599 /* Get MAC addresses */ 8786 /* Get MAC addresses */
8600 bnx2x_get_mac_hwinfo(bp); 8787 bnx2x_get_mac_hwinfo(bp);
8601 8788
8789#ifdef BCM_CNIC
8790 bnx2x_get_cnic_info(bp);
8791#endif
8792
8602 return rc; 8793 return rc;
8603} 8794}
8604 8795
@@ -8813,12 +9004,197 @@ static int bnx2x_close(struct net_device *dev)
8813 return 0; 9004 return 0;
8814} 9005}
8815 9006
9007#define E1_MAX_UC_LIST 29
9008#define E1H_MAX_UC_LIST 30
9009#define E2_MAX_UC_LIST 14
9010static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
9011{
9012 if (CHIP_IS_E1(bp))
9013 return E1_MAX_UC_LIST;
9014 else if (CHIP_IS_E1H(bp))
9015 return E1H_MAX_UC_LIST;
9016 else
9017 return E2_MAX_UC_LIST;
9018}
9019
9020
9021static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
9022{
9023 if (CHIP_IS_E1(bp))
9024 /* CAM Entries for Port0:
9025 * 0 - prim ETH MAC
9026 * 1 - BCAST MAC
9027 * 2 - iSCSI L2 ring ETH MAC
9028 * 3-31 - UC MACs
9029 *
9030 * Port1 entries are allocated the same way starting from
9031 * entry 32.
9032 */
9033 return 3 + 32 * BP_PORT(bp);
9034 else if (CHIP_IS_E1H(bp)) {
9035 /* CAM Entries:
9036 * 0-7 - prim ETH MAC for each function
9037 * 8-15 - iSCSI L2 ring ETH MAC for each function
 9038 * 16-255 - UC MAC lists for each function
9039 *
9040 * Remark: There is no FCoE support for E1H, thus FCoE related
9041 * MACs are not considered.
9042 */
9043 return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
9044 bnx2x_max_uc_list(bp) * BP_FUNC(bp);
9045 } else {
9046 /* CAM Entries (there is a separate CAM per engine):
 9047 * 0-3 - prim ETH MAC for each function
9048 * 4-7 - iSCSI L2 ring ETH MAC for each function
9049 * 8-11 - FIP ucast L2 MAC for each function
9050 * 12-15 - ALL_ENODE_MACS mcast MAC for each function
 9051 * 16-71 - UC MAC lists for each function
9052 */
9053 u8 func_idx =
9054 (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
9055
9056 return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
9057 bnx2x_max_uc_list(bp) * func_idx;
9058 }
9059}
9060
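
For the E1 case spelled out in the comment above, the arithmetic is a 32-entry CAM block per port with the first three entries reserved for the primary ETH, broadcast and iSCSI MACs. A standalone sketch of just that case:

#include <stdio.h>

static int e1_uc_cam_offset(int port)
{
	return 3 + 32 * port;	/* entries 0-2 of each block are reserved */
}

int main(void)
{
	printf("port0 UC list starts at CAM entry %d\n", e1_uc_cam_offset(0));
	printf("port1 UC list starts at CAM entry %d\n", e1_uc_cam_offset(1));
	return 0;
}
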
9061/* set uc list, do not wait as wait implies sleep and
9062 * set_rx_mode can be invoked from non-sleepable context.
9063 *
9064 * Instead we use the same ramrod data buffer each time we need
9065 * to configure a list of addresses, and use the fact that the
9066 * list of MACs is changed in an incremental way and that the
9067 * function is called under the netif_addr_lock. A temporary
9068 * inconsistent CAM configuration (possible in case of very fast
9069 * sequence of add/del/add on the host side) will shortly be
9070 * restored by the handler of the last ramrod.
9071 */
9072static int bnx2x_set_uc_list(struct bnx2x *bp)
9073{
9074 int i = 0, old;
9075 struct net_device *dev = bp->dev;
9076 u8 offset = bnx2x_uc_list_cam_offset(bp);
9077 struct netdev_hw_addr *ha;
9078 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
9079 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
9080
9081 if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
9082 return -EINVAL;
9083
9084 netdev_for_each_uc_addr(ha, dev) {
9085 /* copy mac */
9086 config_cmd->config_table[i].msb_mac_addr =
9087 swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
9088 config_cmd->config_table[i].middle_mac_addr =
9089 swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
9090 config_cmd->config_table[i].lsb_mac_addr =
9091 swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
9092
9093 config_cmd->config_table[i].vlan_id = 0;
9094 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
9095 config_cmd->config_table[i].clients_bit_vector =
9096 cpu_to_le32(1 << BP_L_ID(bp));
9097
9098 SET_FLAG(config_cmd->config_table[i].flags,
9099 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
9100 T_ETH_MAC_COMMAND_SET);
9101
9102 DP(NETIF_MSG_IFUP,
9103 "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
9104 config_cmd->config_table[i].msb_mac_addr,
9105 config_cmd->config_table[i].middle_mac_addr,
9106 config_cmd->config_table[i].lsb_mac_addr);
9107
9108 i++;
9109
9110 /* Set uc MAC in NIG */
9111 bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
9112 LLH_CAM_ETH_LINE + i);
9113 }
9114 old = config_cmd->hdr.length;
9115 if (old > i) {
9116 for (; i < old; i++) {
9117 if (CAM_IS_INVALID(config_cmd->
9118 config_table[i])) {
9119 /* already invalidated */
9120 break;
9121 }
9122 /* invalidate */
9123 SET_FLAG(config_cmd->config_table[i].flags,
9124 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
9125 T_ETH_MAC_COMMAND_INVALIDATE);
9126 }
9127 }
9128
9129 wmb();
9130
9131 config_cmd->hdr.length = i;
9132 config_cmd->hdr.offset = offset;
9133 config_cmd->hdr.client_id = 0xff;
9134 /* Mark that this ramrod doesn't use bp->set_mac_pending for
9135 * synchronization.
9136 */
9137 config_cmd->hdr.echo = 0;
9138
9139 mb();
9140
9141 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
9142 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
9143
9144}
9145
9146void bnx2x_invalidate_uc_list(struct bnx2x *bp)
9147{
9148 int i;
9149 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
9150 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
9151 int ramrod_flags = WAIT_RAMROD_COMMON;
9152 u8 offset = bnx2x_uc_list_cam_offset(bp);
9153 u8 max_list_size = bnx2x_max_uc_list(bp);
9154
9155 for (i = 0; i < max_list_size; i++) {
9156 SET_FLAG(config_cmd->config_table[i].flags,
9157 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
9158 T_ETH_MAC_COMMAND_INVALIDATE);
9159 bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
9160 }
9161
9162 wmb();
9163
9164 config_cmd->hdr.length = max_list_size;
9165 config_cmd->hdr.offset = offset;
9166 config_cmd->hdr.client_id = 0xff;
9167 /* We'll wait for a completion this time... */
9168 config_cmd->hdr.echo = 1;
9169
9170 bp->set_mac_pending = 1;
9171
9172 mb();
9173
9174 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
9175 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
9176
9177 /* Wait for a completion */
9178 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
9179 ramrod_flags);
9180
9181}
9182
9183static inline int bnx2x_set_mc_list(struct bnx2x *bp)
9184{
9185 /* some multicasts */
9186 if (CHIP_IS_E1(bp)) {
9187 return bnx2x_set_e1_mc_list(bp);
9188 } else { /* E1H and newer */
9189 return bnx2x_set_e1h_mc_list(bp);
9190 }
9191}
9192
8816/* called with netif_tx_lock from dev_mcast.c */ 9193/* called with netif_tx_lock from dev_mcast.c */
8817void bnx2x_set_rx_mode(struct net_device *dev) 9194void bnx2x_set_rx_mode(struct net_device *dev)
8818{ 9195{
8819 struct bnx2x *bp = netdev_priv(dev); 9196 struct bnx2x *bp = netdev_priv(dev);
8820 u32 rx_mode = BNX2X_RX_MODE_NORMAL; 9197 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8821 int port = BP_PORT(bp);
8822 9198
8823 if (bp->state != BNX2X_STATE_OPEN) { 9199 if (bp->state != BNX2X_STATE_OPEN) {
8824 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); 9200 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
@@ -8829,47 +9205,16 @@ void bnx2x_set_rx_mode(struct net_device *dev)
8829 9205
8830 if (dev->flags & IFF_PROMISC) 9206 if (dev->flags & IFF_PROMISC)
8831 rx_mode = BNX2X_RX_MODE_PROMISC; 9207 rx_mode = BNX2X_RX_MODE_PROMISC;
8832 else if ((dev->flags & IFF_ALLMULTI) || 9208 else if (dev->flags & IFF_ALLMULTI)
8833 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
8834 CHIP_IS_E1(bp)))
8835 rx_mode = BNX2X_RX_MODE_ALLMULTI; 9209 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8836 else { /* some multicasts */ 9210 else {
8837 if (CHIP_IS_E1(bp)) { 9211 /* some multicasts */
8838 /* 9212 if (bnx2x_set_mc_list(bp))
8839 * set mc list, do not wait as wait implies sleep 9213 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8840 * and set_rx_mode can be invoked from non-sleepable
8841 * context
8842 */
8843 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
8844 BNX2X_MAX_EMUL_MULTI*(1 + port) :
8845 BNX2X_MAX_MULTICAST*(1 + port));
8846
8847 bnx2x_set_e1_mc_list(bp, offset);
8848 } else { /* E1H */
8849 /* Accept one or more multicasts */
8850 struct netdev_hw_addr *ha;
8851 u32 mc_filter[MC_HASH_SIZE];
8852 u32 crc, bit, regidx;
8853 int i;
8854
8855 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
8856
8857 netdev_for_each_mc_addr(ha, dev) {
8858 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
8859 bnx2x_mc_addr(ha));
8860
8861 crc = crc32c_le(0, bnx2x_mc_addr(ha),
8862 ETH_ALEN);
8863 bit = (crc >> 24) & 0xff;
8864 regidx = bit >> 5;
8865 bit &= 0x1f;
8866 mc_filter[regidx] |= (1 << bit);
8867 }
8868 9214
8869 for (i = 0; i < MC_HASH_SIZE; i++) 9215 /* some unicasts */
8870 REG_WR(bp, MC_HASH_OFFSET(bp, i), 9216 if (bnx2x_set_uc_list(bp))
8871 mc_filter[i]); 9217 rx_mode = BNX2X_RX_MODE_PROMISC;
8872 }
8873 } 9218 }
8874 9219
8875 bp->rx_mode = rx_mode; 9220 bp->rx_mode = rx_mode;
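
The rewritten receive-mode logic in this hunk degrades gracefully instead of pre-computing corner cases: a multicast list that cannot be programmed falls back to all-multicast, and a unicast list that overflows the CAM falls back to promiscuous mode. A minimal model of that decision flow; the limits are stand-ins (29 matches E1_MAX_UC_LIST above, the multicast cap is illustrative):

#include <stdio.h>

enum rx_mode { RX_NORMAL, RX_ALLMULTI, RX_PROMISC };

static int set_mc_list(int mc_count) { return mc_count > 64 ? -1 : 0; }
static int set_uc_list(int uc_count) { return uc_count > 29 ? -1 : 0; }

int main(void)
{
	enum rx_mode mode = RX_NORMAL;

	if (set_mc_list(100))	/* too many multicast addresses */
		mode = RX_ALLMULTI;
	if (set_uc_list(10))	/* unicast list fits, mode unchanged */
		mode = RX_PROMISC;

	printf("rx mode = %d\n", mode);	/* 1 == RX_ALLMULTI */
	return 0;
}
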
@@ -8950,7 +9295,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
8950 .ndo_stop = bnx2x_close, 9295 .ndo_stop = bnx2x_close,
8951 .ndo_start_xmit = bnx2x_start_xmit, 9296 .ndo_start_xmit = bnx2x_start_xmit,
8952 .ndo_select_queue = bnx2x_select_queue, 9297 .ndo_select_queue = bnx2x_select_queue,
8953 .ndo_set_multicast_list = bnx2x_set_rx_mode, 9298 .ndo_set_rx_mode = bnx2x_set_rx_mode,
8954 .ndo_set_mac_address = bnx2x_change_mac_addr, 9299 .ndo_set_mac_address = bnx2x_change_mac_addr,
8955 .ndo_validate_addr = eth_validate_addr, 9300 .ndo_validate_addr = eth_validate_addr,
8956 .ndo_do_ioctl = bnx2x_ioctl, 9301 .ndo_do_ioctl = bnx2x_ioctl,
@@ -9776,15 +10121,21 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
9776 HW_CID(bp, BNX2X_ISCSI_ETH_CID)); 10121 HW_CID(bp, BNX2X_ISCSI_ETH_CID));
9777 } 10122 }
9778 10123
9779 /* There may be not more than 8 L2 and COMMON SPEs and not more 10124 /* There may be not more than 8 L2 and not more than 8 L5 SPEs
9780 * than 8 L5 SPEs in the air. 10125 * We also check that the number of outstanding
10126 * COMMON ramrods is not more than the EQ and SPQ can
10127 * accommodate.
9781 */ 10128 */
9782 if ((type == NONE_CONNECTION_TYPE) || 10129 if (type == ETH_CONNECTION_TYPE) {
9783 (type == ETH_CONNECTION_TYPE)) { 10130 if (!atomic_read(&bp->cq_spq_left))
9784 if (!atomic_read(&bp->spq_left)) 10131 break;
10132 else
10133 atomic_dec(&bp->cq_spq_left);
10134 } else if (type == NONE_CONNECTION_TYPE) {
10135 if (!atomic_read(&bp->eq_spq_left))
9785 break; 10136 break;
9786 else 10137 else
9787 atomic_dec(&bp->spq_left); 10138 atomic_dec(&bp->eq_spq_left);
9788 } else if ((type == ISCSI_CONNECTION_TYPE) || 10139 } else if ((type == ISCSI_CONNECTION_TYPE) ||
9789 (type == FCOE_CONNECTION_TYPE)) { 10140 (type == FCOE_CONNECTION_TYPE)) {
9790 if (bp->cnic_spq_pending >= 10141 if (bp->cnic_spq_pending >=
@@ -9862,7 +10213,8 @@ static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9862 int rc = 0; 10213 int rc = 0;
9863 10214
9864 mutex_lock(&bp->cnic_mutex); 10215 mutex_lock(&bp->cnic_mutex);
9865 c_ops = bp->cnic_ops; 10216 c_ops = rcu_dereference_protected(bp->cnic_ops,
10217 lockdep_is_held(&bp->cnic_mutex));
9866 if (c_ops) 10218 if (c_ops)
9867 rc = c_ops->cnic_ctl(bp->cnic_data, ctl); 10219 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9868 mutex_unlock(&bp->cnic_mutex); 10220 mutex_unlock(&bp->cnic_mutex);
@@ -9976,7 +10328,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
9976 int count = ctl->data.credit.credit_count; 10328 int count = ctl->data.credit.credit_count;
9977 10329
9978 smp_mb__before_atomic_inc(); 10330 smp_mb__before_atomic_inc();
9979 atomic_add(count, &bp->spq_left); 10331 atomic_add(count, &bp->cq_spq_left);
9980 smp_mb__after_atomic_inc(); 10332 smp_mb__after_atomic_inc();
9981 break; 10333 break;
9982 } 10334 }
@@ -10072,6 +10424,13 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
10072 struct bnx2x *bp = netdev_priv(dev); 10424 struct bnx2x *bp = netdev_priv(dev);
10073 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 10425 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10074 10426
10427 /* If both iSCSI and FCoE are disabled - return NULL in
10428 * order to indicate CNIC that it should not try to work
10429 * with this device.
10430 */
10431 if (NO_ISCSI(bp) && NO_FCOE(bp))
10432 return NULL;
10433
10075 cp->drv_owner = THIS_MODULE; 10434 cp->drv_owner = THIS_MODULE;
10076 cp->chip_id = CHIP_ID(bp); 10435 cp->chip_id = CHIP_ID(bp);
10077 cp->pdev = bp->pdev; 10436 cp->pdev = bp->pdev;
@@ -10092,6 +10451,15 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
10092 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE; 10451 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
10093 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID; 10452 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
10094 10453
10454 if (NO_ISCSI_OOO(bp))
10455 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
10456
10457 if (NO_ISCSI(bp))
10458 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
10459
10460 if (NO_FCOE(bp))
10461 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
10462
10095 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, " 10463 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
10096 "starting cid %d\n", 10464 "starting cid %d\n",
10097 cp->ctx_blk_size, 10465 cp->ctx_blk_size,
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index c939683e3d61..1c89f19a4425 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -6083,6 +6083,7 @@ The other bits are reserved and should be zero*/
6083#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808 6083#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808
6084#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e 6084#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e
6085#define MDIO_PMA_REG_8727_PCS_GP 0xc842 6085#define MDIO_PMA_REG_8727_PCS_GP 0xc842
6086#define MDIO_PMA_REG_8727_OPT_CFG_REG 0xc8e4
6086 6087
6087#define MDIO_AN_REG_8727_MISC_CTRL 0x8309 6088#define MDIO_AN_REG_8727_MISC_CTRL 0x8309
6088 6089
@@ -6194,7 +6195,11 @@ The other bits are reserved and should be zero*/
6194#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000 6195#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
6195#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100 6196#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
6196#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000 6197#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
6198#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005
6199#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
6197 6200
6201#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
6202#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
6198 6203
6199#define IGU_FUNC_BASE 0x0400 6204#define IGU_FUNC_BASE 0x0400
6200 6205
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 171782e2bb39..1024ae158227 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2470,6 +2470,10 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
2470 if (!(dev->flags & IFF_MASTER)) 2470 if (!(dev->flags & IFF_MASTER))
2471 goto out; 2471 goto out;
2472 2472
2473 skb = skb_share_check(skb, GFP_ATOMIC);
2474 if (!skb)
2475 goto out;
2476
2473 if (!pskb_may_pull(skb, sizeof(struct lacpdu))) 2477 if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
2474 goto out; 2478 goto out;
2475 2479
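
The same three-line guard is added to all three bonding receive handlers in this patch (bond_3ad here, bond_alb and bond_main below): a shared skb must be replaced by a private copy before pskb_may_pull() may modify it, and the frame is dropped if the copy fails. A userspace model of the pattern with a refcounted buffer; skb_share_check() itself handles cloning details not modelled here:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	int users;		/* models skb->users */
	char data[64];
};

/* Return a private copy if shared, the original otherwise; NULL on OOM. */
static struct buf *buf_share_check(struct buf *b)
{
	if (b->users > 1) {
		struct buf *copy = malloc(sizeof(*copy));

		if (!copy)
			return NULL;	/* caller drops the packet */
		memcpy(copy, b, sizeof(*copy));
		copy->users = 1;
		b->users--;		/* drop our ref on the shared one */
		return copy;
	}
	return b;
}

int main(void)
{
	struct buf shared = { .users = 2, .data = "hello" };
	struct buf *priv = buf_share_check(&shared);

	if (!priv)
		return 1;
	printf("private copy: %s (users=%d)\n", priv->data, priv->users);
	if (priv != &shared)
		free(priv);
	return 0;
}
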
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index f4e638c65129..5c6fba802f2b 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -326,6 +326,10 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
326 goto out; 326 goto out;
327 } 327 }
328 328
329 skb = skb_share_check(skb, GFP_ATOMIC);
330 if (!skb)
331 goto out;
332
329 if (!pskb_may_pull(skb, arp_hdr_len(bond_dev))) 333 if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
330 goto out; 334 goto out;
331 335
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b1025b85acf1..77e3c6a7176a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1372,8 +1372,8 @@ static int bond_compute_features(struct bonding *bond)
1372{ 1372{
1373 struct slave *slave; 1373 struct slave *slave;
1374 struct net_device *bond_dev = bond->dev; 1374 struct net_device *bond_dev = bond->dev;
1375 unsigned long features = bond_dev->features; 1375 u32 features = bond_dev->features;
1376 unsigned long vlan_features = 0; 1376 u32 vlan_features = 0;
1377 unsigned short max_hard_header_len = max((u16)ETH_HLEN, 1377 unsigned short max_hard_header_len = max((u16)ETH_HLEN,
1378 bond_dev->hard_header_len); 1378 bond_dev->hard_header_len);
1379 int i; 1379 int i;
@@ -1400,8 +1400,8 @@ static int bond_compute_features(struct bonding *bond)
1400 1400
1401done: 1401done:
1402 features |= (bond_dev->features & BOND_VLAN_FEATURES); 1402 features |= (bond_dev->features & BOND_VLAN_FEATURES);
1403 bond_dev->features = netdev_fix_features(features, NULL); 1403 bond_dev->features = netdev_fix_features(bond_dev, features);
1404 bond_dev->vlan_features = netdev_fix_features(vlan_features, NULL); 1404 bond_dev->vlan_features = netdev_fix_features(bond_dev, vlan_features);
1405 bond_dev->hard_header_len = max_hard_header_len; 1405 bond_dev->hard_header_len = max_hard_header_len;
1406 1406
1407 return 0; 1407 return 0;
@@ -1594,9 +1594,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1594 } 1594 }
1595 } 1595 }
1596 1596
1597 res = netdev_set_master(slave_dev, bond_dev); 1597 res = netdev_set_bond_master(slave_dev, bond_dev);
1598 if (res) { 1598 if (res) {
1599 pr_debug("Error %d calling netdev_set_master\n", res); 1599 pr_debug("Error %d calling netdev_set_bond_master\n", res);
1600 goto err_restore_mac; 1600 goto err_restore_mac;
1601 } 1601 }
1602 /* open the slave since the application closed it */ 1602 /* open the slave since the application closed it */
@@ -1812,7 +1812,7 @@ err_close:
1812 dev_close(slave_dev); 1812 dev_close(slave_dev);
1813 1813
1814err_unset_master: 1814err_unset_master:
1815 netdev_set_master(slave_dev, NULL); 1815 netdev_set_bond_master(slave_dev, NULL);
1816 1816
1817err_restore_mac: 1817err_restore_mac:
1818 if (!bond->params.fail_over_mac) { 1818 if (!bond->params.fail_over_mac) {
@@ -1992,7 +1992,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1992 netif_addr_unlock_bh(bond_dev); 1992 netif_addr_unlock_bh(bond_dev);
1993 } 1993 }
1994 1994
1995 netdev_set_master(slave_dev, NULL); 1995 netdev_set_bond_master(slave_dev, NULL);
1996 1996
1997#ifdef CONFIG_NET_POLL_CONTROLLER 1997#ifdef CONFIG_NET_POLL_CONTROLLER
1998 read_lock_bh(&bond->lock); 1998 read_lock_bh(&bond->lock);
@@ -2114,7 +2114,7 @@ static int bond_release_all(struct net_device *bond_dev)
2114 netif_addr_unlock_bh(bond_dev); 2114 netif_addr_unlock_bh(bond_dev);
2115 } 2115 }
2116 2116
2117 netdev_set_master(slave_dev, NULL); 2117 netdev_set_bond_master(slave_dev, NULL);
2118 2118
2119 /* close slave before restoring its mac address */ 2119 /* close slave before restoring its mac address */
2120 dev_close(slave_dev); 2120 dev_close(slave_dev);
@@ -2733,6 +2733,10 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
2733 if (!slave || !slave_do_arp_validate(bond, slave)) 2733 if (!slave || !slave_do_arp_validate(bond, slave))
2734 goto out_unlock; 2734 goto out_unlock;
2735 2735
2736 skb = skb_share_check(skb, GFP_ATOMIC);
2737 if (!skb)
2738 goto out_unlock;
2739
2736 if (!pskb_may_pull(skb, arp_hdr_len(dev))) 2740 if (!pskb_may_pull(skb, arp_hdr_len(dev)))
2737 goto out_unlock; 2741 goto out_unlock;
2738 2742
@@ -4653,6 +4657,8 @@ static const struct net_device_ops bond_netdev_ops = {
4653 .ndo_netpoll_cleanup = bond_netpoll_cleanup, 4657 .ndo_netpoll_cleanup = bond_netpoll_cleanup,
4654 .ndo_poll_controller = bond_poll_controller, 4658 .ndo_poll_controller = bond_poll_controller,
4655#endif 4659#endif
4660 .ndo_add_slave = bond_enslave,
4661 .ndo_del_slave = bond_release,
4656}; 4662};
4657 4663
4658static void bond_destructor(struct net_device *bond_dev) 4664static void bond_destructor(struct net_device *bond_dev)
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 8fd0174c5380..72bb0f6cc9bf 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1198,7 +1198,7 @@ static ssize_t bonding_store_carrier(struct device *d,
1198 bond->dev->name, new_value); 1198 bond->dev->name, new_value);
1199 } 1199 }
1200out: 1200out:
1201 return count; 1201 return ret;
1202} 1202}
1203static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR, 1203static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
1204 bonding_show_carrier, bonding_store_carrier); 1204 bonding_show_carrier, bonding_store_carrier);
@@ -1595,7 +1595,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
1595 } 1595 }
1596 } 1596 }
1597out: 1597out:
1598 return count; 1598 return ret;
1599} 1599}
1600static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR, 1600static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
1601 bonding_show_slaves_active, bonding_store_slaves_active); 1601 bonding_show_slaves_active, bonding_store_slaves_active);
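
Both store handlers fixed above already computed a negative errno into ret on bad input, but then returned count, so userspace writes that failed validation still looked successful. A minimal model of the corrected control flow (the parsing is invented):

#include <errno.h>
#include <stdio.h>

/* Models a sysfs store: bytes consumed on success, -errno on failure. */
static long store_flag(const char *buf, long count)
{
	long ret = count;
	int new_value = buf[0] - '0';

	if (new_value != 0 && new_value != 1)
		ret = -EINVAL;	/* the bug returned count here anyway */

	return ret;
}

int main(void)
{
	printf("store(\"1\") -> %ld\n", store_flag("1", 2));
	printf("store(\"7\") -> %ld\n", store_flag("7", 2));	/* -22 */
	return 0;
}
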
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index d5a9db60ade9..1d699e3df547 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -23,7 +23,7 @@ config CAN_SLCAN
23 23
24 As only the sending and receiving of CAN frames is implemented, this 24 As only the sending and receiving of CAN frames is implemented, this
25 driver should work with the (serial/USB) CAN hardware from: 25 driver should work with the (serial/USB) CAN hardware from:
26 www.canusb.com / www.can232.com / www.mictronic.com / www.canhack.de 26 www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
27 27
28 Userspace tools to attach the SLCAN line discipline (slcan_attach, 28 Userspace tools to attach the SLCAN line discipline (slcan_attach,
29 slcand) can be found in the can-utils at the SocketCAN SVN, see 29 slcand) can be found in the can-utils at the SocketCAN SVN, see
@@ -115,8 +115,12 @@ source "drivers/net/can/mscan/Kconfig"
115 115
116source "drivers/net/can/sja1000/Kconfig" 116source "drivers/net/can/sja1000/Kconfig"
117 117
118source "drivers/net/can/c_can/Kconfig"
119
118source "drivers/net/can/usb/Kconfig" 120source "drivers/net/can/usb/Kconfig"
119 121
122source "drivers/net/can/softing/Kconfig"
123
120config CAN_DEBUG_DEVICES 124config CAN_DEBUG_DEVICES
121 bool "CAN devices debugging messages" 125 bool "CAN devices debugging messages"
122 depends on CAN 126 depends on CAN
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 07ca159ba3f9..24ebfe8d758a 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -9,9 +9,11 @@ obj-$(CONFIG_CAN_DEV) += can-dev.o
9can-dev-y := dev.o 9can-dev-y := dev.o
10 10
11obj-y += usb/ 11obj-y += usb/
12obj-y += softing/
12 13
13obj-$(CONFIG_CAN_SJA1000) += sja1000/ 14obj-$(CONFIG_CAN_SJA1000) += sja1000/
14obj-$(CONFIG_CAN_MSCAN) += mscan/ 15obj-$(CONFIG_CAN_MSCAN) += mscan/
16obj-$(CONFIG_CAN_C_CAN) += c_can/
15obj-$(CONFIG_CAN_AT91) += at91_can.o 17obj-$(CONFIG_CAN_AT91) += at91_can.o
16obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o 18obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
17obj-$(CONFIG_CAN_MCP251X) += mcp251x.o 19obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 7ef83d06f7ed..57d2ffbbb433 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -2,7 +2,7 @@
2 * at91_can.c - CAN network driver for AT91 SoC CAN controller 2 * at91_can.c - CAN network driver for AT91 SoC CAN controller
3 * 3 *
4 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de> 4 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
5 * (C) 2008, 2009, 2010 by Marc Kleine-Budde <kernel@pengutronix.de> 5 * (C) 2008, 2009, 2010, 2011 by Marc Kleine-Budde <kernel@pengutronix.de>
6 * 6 *
7 * This software may be distributed under the terms of the GNU General 7 * This software may be distributed under the terms of the GNU General
8 * Public License ("GPL") version 2 as distributed in the 'COPYING' 8 * Public License ("GPL") version 2 as distributed in the 'COPYING'
@@ -30,6 +30,7 @@
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/netdevice.h> 31#include <linux/netdevice.h>
32#include <linux/platform_device.h> 32#include <linux/platform_device.h>
33#include <linux/rtnetlink.h>
33#include <linux/skbuff.h> 34#include <linux/skbuff.h>
34#include <linux/spinlock.h> 35#include <linux/spinlock.h>
35#include <linux/string.h> 36#include <linux/string.h>
@@ -40,22 +41,23 @@
40 41
41#include <mach/board.h> 42#include <mach/board.h>
42 43
43#define AT91_NAPI_WEIGHT 12 44#define AT91_NAPI_WEIGHT 11
44 45
45/* 46/*
46 * RX/TX Mailbox split 47 * RX/TX Mailbox split
47 * don't dare to touch 48 * don't dare to touch
48 */ 49 */
49#define AT91_MB_RX_NUM 12 50#define AT91_MB_RX_NUM 11
50#define AT91_MB_TX_SHIFT 2 51#define AT91_MB_TX_SHIFT 2
51 52
52#define AT91_MB_RX_FIRST 0 53#define AT91_MB_RX_FIRST 1
53#define AT91_MB_RX_LAST (AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1) 54#define AT91_MB_RX_LAST (AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1)
54 55
55#define AT91_MB_RX_MASK(i) ((1 << (i)) - 1) 56#define AT91_MB_RX_MASK(i) ((1 << (i)) - 1)
56#define AT91_MB_RX_SPLIT 8 57#define AT91_MB_RX_SPLIT 8
57#define AT91_MB_RX_LOW_LAST (AT91_MB_RX_SPLIT - 1) 58#define AT91_MB_RX_LOW_LAST (AT91_MB_RX_SPLIT - 1)
58#define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT)) 59#define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT) & \
60 ~AT91_MB_RX_MASK(AT91_MB_RX_FIRST))
59 61
60#define AT91_MB_TX_NUM (1 << AT91_MB_TX_SHIFT) 62#define AT91_MB_TX_NUM (1 << AT91_MB_TX_SHIFT)
61#define AT91_MB_TX_FIRST (AT91_MB_RX_LAST + 1) 63#define AT91_MB_TX_FIRST (AT91_MB_RX_LAST + 1)
@@ -168,6 +170,8 @@ struct at91_priv {
168 170
169 struct clk *clk; 171 struct clk *clk;
170 struct at91_can_data *pdata; 172 struct at91_can_data *pdata;
173
174 canid_t mb0_id;
171}; 175};
172 176
173static struct can_bittiming_const at91_bittiming_const = { 177static struct can_bittiming_const at91_bittiming_const = {
@@ -220,6 +224,18 @@ static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
220 set_mb_mode_prio(priv, mb, mode, 0); 224 set_mb_mode_prio(priv, mb, mode, 0);
221} 225}
222 226
227static inline u32 at91_can_id_to_reg_mid(canid_t can_id)
228{
229 u32 reg_mid;
230
231 if (can_id & CAN_EFF_FLAG)
232 reg_mid = (can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
233 else
234 reg_mid = (can_id & CAN_SFF_MASK) << 18;
235
236 return reg_mid;
237}
238
223/* 239/*
 224 * Switch transceiver on or off 240 * Switch transceiver on or off
225 */ 241 */
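
The helper factored out above encodes a CAN id for the mailbox ID register: an extended (29-bit) id keeps its bits and sets the MIDE flag, while a standard (11-bit) id is shifted into bits 28..18. A standalone sketch using the SocketCAN mask constants; AT91_MID_MIDE is assumed to be bit 29:

#include <stdint.h>
#include <stdio.h>

#define CAN_EFF_FLAG	0x80000000u
#define CAN_EFF_MASK	0x1fffffffu
#define CAN_SFF_MASK	0x000007ffu
#define AT91_MID_MIDE	(1u << 29)	/* extended-frame bit, assumed */

static uint32_t can_id_to_reg_mid(uint32_t can_id)
{
	if (can_id & CAN_EFF_FLAG)
		return (can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
	return (can_id & CAN_SFF_MASK) << 18;
}

int main(void)
{
	printf("SFF 0x7ff  -> 0x%08x\n", can_id_to_reg_mid(0x7ff));
	printf("EFF 0x1234 -> 0x%08x\n",
	       can_id_to_reg_mid(0x1234 | CAN_EFF_FLAG));
	return 0;
}
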
@@ -233,12 +249,22 @@ static void at91_setup_mailboxes(struct net_device *dev)
233{ 249{
234 struct at91_priv *priv = netdev_priv(dev); 250 struct at91_priv *priv = netdev_priv(dev);
235 unsigned int i; 251 unsigned int i;
252 u32 reg_mid;
236 253
237 /* 254 /*
238 * The first 12 mailboxes are used as a reception FIFO. The 255 * Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first
239 * last mailbox is configured with overwrite option. The 256 * mailbox is disabled. The next 11 mailboxes are used as a
240 * overwrite flag indicates a FIFO overflow. 257 * reception FIFO. The last mailbox is configured with
258 * overwrite option. The overwrite flag indicates a FIFO
259 * overflow.
241 */ 260 */
261 reg_mid = at91_can_id_to_reg_mid(priv->mb0_id);
262 for (i = 0; i < AT91_MB_RX_FIRST; i++) {
263 set_mb_mode(priv, i, AT91_MB_MODE_DISABLED);
264 at91_write(priv, AT91_MID(i), reg_mid);
265 at91_write(priv, AT91_MCR(i), 0x0); /* clear dlc */
266 }
267
242 for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++) 268 for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++)
243 set_mb_mode(priv, i, AT91_MB_MODE_RX); 269 set_mb_mode(priv, i, AT91_MB_MODE_RX);
244 set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR); 270 set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
@@ -254,7 +280,8 @@ static void at91_setup_mailboxes(struct net_device *dev)
254 set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0); 280 set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
255 281
256 /* Reset tx and rx helper pointers */ 282 /* Reset tx and rx helper pointers */
257 priv->tx_next = priv->tx_echo = priv->rx_next = 0; 283 priv->tx_next = priv->tx_echo = 0;
284 priv->rx_next = AT91_MB_RX_FIRST;
258} 285}
259 286
260static int at91_set_bittiming(struct net_device *dev) 287static int at91_set_bittiming(struct net_device *dev)
@@ -372,12 +399,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
372 netdev_err(dev, "BUG! TX buffer full when queue awake!\n"); 399 netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
373 return NETDEV_TX_BUSY; 400 return NETDEV_TX_BUSY;
374 } 401 }
375 402 reg_mid = at91_can_id_to_reg_mid(cf->can_id);
376 if (cf->can_id & CAN_EFF_FLAG)
377 reg_mid = (cf->can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
378 else
379 reg_mid = (cf->can_id & CAN_SFF_MASK) << 18;
380
381 reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) | 403 reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) |
382 (cf->can_dlc << 16) | AT91_MCR_MTCR; 404 (cf->can_dlc << 16) | AT91_MCR_MTCR;
383 405
@@ -539,27 +561,31 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
539 * 561 *
540 * Theory of Operation: 562 * Theory of Operation:
541 * 563 *
542 * 12 of the 16 mailboxes on the chip are reserved for RX. we split 564 * 11 of the 16 mailboxes on the chip are reserved for RX. we split
543 * them into 2 groups. The lower group holds 8 and upper 4 mailboxes. 565 * them into 2 groups. The lower group holds 7 and upper 4 mailboxes.
544 * 566 *
545 * Like it or not, but the chip always saves a received CAN message 567 * Like it or not, but the chip always saves a received CAN message
546 * into the first free mailbox it finds (starting with the 568 * into the first free mailbox it finds (starting with the
547 * lowest). This makes it very difficult to read the messages in the 569 * lowest). This makes it very difficult to read the messages in the
548 * right order from the chip. This is how we work around that problem: 570 * right order from the chip. This is how we work around that problem:
549 * 571 *
550 * The first message goes into mb nr. 0 and issues an interrupt. All 572 * The first message goes into mb nr. 1 and issues an interrupt. All
551 * rx ints are disabled in the interrupt handler and a napi poll is 573 * rx ints are disabled in the interrupt handler and a napi poll is
552 * scheduled. We read the mailbox, but do _not_ reenable the mb (to 574 * scheduled. We read the mailbox, but do _not_ reenable the mb (to
553 * receive another message). 575 * receive another message).
554 * 576 *
555 * lower mbxs upper 577 * lower mbxs upper
556 * ______^______ __^__ 578 * ____^______ __^__
557 * / \ / \ 579 * / \ / \
558 * +-+-+-+-+-+-+-+-++-+-+-+-+ 580 * +-+-+-+-+-+-+-+-++-+-+-+-+
559 * |x|x|x|x|x|x|x|x|| | | | | 581 * | |x|x|x|x|x|x|x|| | | | |
560 * +-+-+-+-+-+-+-+-++-+-+-+-+ 582 * +-+-+-+-+-+-+-+-++-+-+-+-+
561 * 0 0 0 0 0 0 0 0 0 0 1 1 \ mail 583 * 0 0 0 0 0 0 0 0 0 0 1 1 \ mail
562 * 0 1 2 3 4 5 6 7 8 9 0 1 / box 584 * 0 1 2 3 4 5 6 7 8 9 0 1 / box
585 * ^
586 * |
587 * \
588 * unused, due to chip bug
563 * 589 *
564 * The variable priv->rx_next points to the next mailbox to read a 590 * The variable priv->rx_next points to the next mailbox to read a
565 * message from. As long we're in the lower mailboxes we just read the 591 * message from. As long we're in the lower mailboxes we just read the
@@ -590,10 +616,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
590 "order of incoming frames cannot be guaranteed\n"); 616 "order of incoming frames cannot be guaranteed\n");
591 617
592 again: 618 again:
593 for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next); 619 for (mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, priv->rx_next);
594 mb < AT91_MB_RX_NUM && quota > 0; 620 mb < AT91_MB_RX_LAST + 1 && quota > 0;
595 reg_sr = at91_read(priv, AT91_SR), 621 reg_sr = at91_read(priv, AT91_SR),
596 mb = find_next_bit(addr, AT91_MB_RX_NUM, ++priv->rx_next)) { 622 mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, ++priv->rx_next)) {
597 at91_read_msg(dev, mb); 623 at91_read_msg(dev, mb);
598 624
599 /* reactivate mailboxes */ 625 /* reactivate mailboxes */
@@ -610,8 +636,8 @@ static int at91_poll_rx(struct net_device *dev, int quota)
610 636
611 /* upper group completed, look again in lower */ 637 /* upper group completed, look again in lower */
612 if (priv->rx_next > AT91_MB_RX_LOW_LAST && 638 if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
613 quota > 0 && mb >= AT91_MB_RX_NUM) { 639 quota > 0 && mb > AT91_MB_RX_LAST) {
614 priv->rx_next = 0; 640 priv->rx_next = AT91_MB_RX_FIRST;
615 goto again; 641 goto again;
616 } 642 }
617 643
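
A toy model of the scan order implied by the changed loop bounds: mailboxes are read from rx_next up to AT91_MB_RX_LAST, and once the upper group is drained the scan wraps back to AT91_MB_RX_FIRST (mailbox 1, since mailbox 0 is now disabled). The mailbox numbers are the ones from this patch:

#include <stdio.h>

#define MB_RX_FIRST 1	/* AT91_MB_RX_FIRST after this patch */
#define MB_RX_LAST  11	/* AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1 */

int main(void)
{
	int rx_next = 9;	/* pretend we stopped in the upper group */
	int mb;

	for (mb = rx_next; mb <= MB_RX_LAST; mb++)
		printf("read mailbox %d\n", mb);
	rx_next = MB_RX_FIRST;	/* wrap back to the lower group */
	printf("next scan starts at mailbox %d\n", rx_next);
	return 0;
}
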
@@ -1037,6 +1063,64 @@ static const struct net_device_ops at91_netdev_ops = {
1037 .ndo_start_xmit = at91_start_xmit, 1063 .ndo_start_xmit = at91_start_xmit,
1038}; 1064};
1039 1065
1066static ssize_t at91_sysfs_show_mb0_id(struct device *dev,
1067 struct device_attribute *attr, char *buf)
1068{
1069 struct at91_priv *priv = netdev_priv(to_net_dev(dev));
1070
1071 if (priv->mb0_id & CAN_EFF_FLAG)
1072 return snprintf(buf, PAGE_SIZE, "0x%08x\n", priv->mb0_id);
1073 else
1074 return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id);
1075}
1076
1077static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
1078 struct device_attribute *attr, const char *buf, size_t count)
1079{
1080 struct net_device *ndev = to_net_dev(dev);
1081 struct at91_priv *priv = netdev_priv(ndev);
1082 unsigned long can_id;
1083 ssize_t ret;
1084 int err;
1085
1086 rtnl_lock();
1087
1088 if (ndev->flags & IFF_UP) {
1089 ret = -EBUSY;
1090 goto out;
1091 }
1092
1093 err = strict_strtoul(buf, 0, &can_id);
1094 if (err) {
1095 ret = err;
1096 goto out;
1097 }
1098
1099 if (can_id & CAN_EFF_FLAG)
1100 can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
1101 else
1102 can_id &= CAN_SFF_MASK;
1103
1104 priv->mb0_id = can_id;
1105 ret = count;
1106
1107 out:
1108 rtnl_unlock();
1109 return ret;
1110}
1111
1112static DEVICE_ATTR(mb0_id, S_IWUSR | S_IRUGO,
1113 at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id);
1114
1115static struct attribute *at91_sysfs_attrs[] = {
1116 &dev_attr_mb0_id.attr,
1117 NULL,
1118};
1119
1120static struct attribute_group at91_sysfs_attr_group = {
1121 .attrs = at91_sysfs_attrs,
1122};
1123
1040static int __devinit at91_can_probe(struct platform_device *pdev) 1124static int __devinit at91_can_probe(struct platform_device *pdev)
1041{ 1125{
1042 struct net_device *dev; 1126 struct net_device *dev;
@@ -1082,6 +1166,7 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
1082 dev->netdev_ops = &at91_netdev_ops; 1166 dev->netdev_ops = &at91_netdev_ops;
1083 dev->irq = irq; 1167 dev->irq = irq;
1084 dev->flags |= IFF_ECHO; 1168 dev->flags |= IFF_ECHO;
1169 dev->sysfs_groups[0] = &at91_sysfs_attr_group;
1085 1170
1086 priv = netdev_priv(dev); 1171 priv = netdev_priv(dev);
1087 priv->can.clock.freq = clk_get_rate(clk); 1172 priv->can.clock.freq = clk_get_rate(clk);
@@ -1093,6 +1178,7 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
1093 priv->dev = dev; 1178 priv->dev = dev;
1094 priv->clk = clk; 1179 priv->clk = clk;
1095 priv->pdata = pdev->dev.platform_data; 1180 priv->pdata = pdev->dev.platform_data;
1181 priv->mb0_id = 0x7ff;
1096 1182
1097 netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT); 1183 netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT);
1098 1184
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
new file mode 100644
index 000000000000..ffb9773d102d
--- /dev/null
+++ b/drivers/net/can/c_can/Kconfig
@@ -0,0 +1,15 @@
1menuconfig CAN_C_CAN
2 tristate "Bosch C_CAN devices"
3 depends on CAN_DEV && HAS_IOMEM
4
5if CAN_C_CAN
6
7config CAN_C_CAN_PLATFORM
8 tristate "Generic Platform Bus based C_CAN driver"
9 ---help---
10 This driver adds support for the C_CAN chips connected to
11 the "platform bus" (Linux abstraction for directly to the
12 processor attached devices) which can be found on various
13 boards from ST Microelectronics (http://www.st.com)
14 like the SPEAr1310 and SPEAr320 evaluation boards.
15endif
diff --git a/drivers/net/can/c_can/Makefile b/drivers/net/can/c_can/Makefile
new file mode 100644
index 000000000000..9273f6d5c4b7
--- /dev/null
+++ b/drivers/net/can/c_can/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for the Bosch C_CAN controller drivers.
3#
4
5obj-$(CONFIG_CAN_C_CAN) += c_can.o
6obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o
7
8ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
new file mode 100644
index 000000000000..14050786218a
--- /dev/null
+++ b/drivers/net/can/c_can/c_can.c
@@ -0,0 +1,1158 @@
1/*
2 * CAN bus driver for Bosch C_CAN controller
3 *
4 * Copyright (C) 2010 ST Microelectronics
5 * Bhupesh Sharma <bhupesh.sharma@st.com>
6 *
7 * Borrowed heavily from the C_CAN driver originally written by:
8 * Copyright (C) 2007
9 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
10 * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
11 *
12 * TX and RX NAPI implementation has been borrowed from at91 CAN driver
13 * written by:
14 * Copyright
15 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
16 * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
17 *
18 * The Bosch C_CAN controller is compliant with CAN protocol version 2.0 part A and B.
19 * Bosch C_CAN user manual can be obtained from:
20 * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
21 * users_manual_c_can.pdf
22 *
23 * This file is licensed under the terms of the GNU General Public
24 * License version 2. This program is licensed "as is" without any
25 * warranty of any kind, whether express or implied.
26 */
27
28#include <linux/kernel.h>
29#include <linux/version.h>
30#include <linux/module.h>
31#include <linux/interrupt.h>
32#include <linux/delay.h>
33#include <linux/netdevice.h>
34#include <linux/if_arp.h>
35#include <linux/if_ether.h>
36#include <linux/list.h>
38#include <linux/io.h>
39
40#include <linux/can.h>
41#include <linux/can/dev.h>
42#include <linux/can/error.h>
43
44#include "c_can.h"
45
46/* control register */
47#define CONTROL_TEST BIT(7)
48#define CONTROL_CCE BIT(6)
49#define CONTROL_DISABLE_AR BIT(5)
50#define CONTROL_ENABLE_AR (0 << 5)
51#define CONTROL_EIE BIT(3)
52#define CONTROL_SIE BIT(2)
53#define CONTROL_IE BIT(1)
54#define CONTROL_INIT BIT(0)
55
56/* test register */
57#define TEST_RX BIT(7)
58#define TEST_TX1 BIT(6)
59#define TEST_TX2 BIT(5)
60#define TEST_LBACK BIT(4)
61#define TEST_SILENT BIT(3)
62#define TEST_BASIC BIT(2)
63
64/* status register */
65#define STATUS_BOFF BIT(7)
66#define STATUS_EWARN BIT(6)
67#define STATUS_EPASS BIT(5)
68#define STATUS_RXOK BIT(4)
69#define STATUS_TXOK BIT(3)
70
71/* error counter register */
72#define ERR_CNT_TEC_MASK 0xff
73#define ERR_CNT_TEC_SHIFT 0
74#define ERR_CNT_REC_SHIFT 8
75#define ERR_CNT_REC_MASK (0x7f << ERR_CNT_REC_SHIFT)
76#define ERR_CNT_RP_SHIFT 15
77#define ERR_CNT_RP_MASK (0x1 << ERR_CNT_RP_SHIFT)
78
79/* bit-timing register */
80#define BTR_BRP_MASK 0x3f
81#define BTR_BRP_SHIFT 0
82#define BTR_SJW_SHIFT 6
83#define BTR_SJW_MASK (0x3 << BTR_SJW_SHIFT)
84#define BTR_TSEG1_SHIFT 8
85#define BTR_TSEG1_MASK (0xf << BTR_TSEG1_SHIFT)
86#define BTR_TSEG2_SHIFT 12
87#define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT)
88
89/* brp extension register */
90#define BRP_EXT_BRPE_MASK 0x0f
91#define BRP_EXT_BRPE_SHIFT 0
92
93/* IFx command request */
94#define IF_COMR_BUSY BIT(15)
95
96/* IFx command mask */
97#define IF_COMM_WR BIT(7)
98#define IF_COMM_MASK BIT(6)
99#define IF_COMM_ARB BIT(5)
100#define IF_COMM_CONTROL BIT(4)
101#define IF_COMM_CLR_INT_PND BIT(3)
102#define IF_COMM_TXRQST BIT(2)
103#define IF_COMM_DATAA BIT(1)
104#define IF_COMM_DATAB BIT(0)
105#define IF_COMM_ALL (IF_COMM_MASK | IF_COMM_ARB | \
106 IF_COMM_CONTROL | IF_COMM_TXRQST | \
107 IF_COMM_DATAA | IF_COMM_DATAB)
108
109/* IFx arbitration */
110#define IF_ARB_MSGVAL BIT(15)
111#define IF_ARB_MSGXTD BIT(14)
112#define IF_ARB_TRANSMIT BIT(13)
113
114/* IFx message control */
115#define IF_MCONT_NEWDAT BIT(15)
116#define IF_MCONT_MSGLST BIT(14)
117#define IF_MCONT_CLR_MSGLST (0 << 14)
118#define IF_MCONT_INTPND BIT(13)
119#define IF_MCONT_UMASK BIT(12)
120#define IF_MCONT_TXIE BIT(11)
121#define IF_MCONT_RXIE BIT(10)
122#define IF_MCONT_RMTEN BIT(9)
123#define IF_MCONT_TXRQST BIT(8)
124#define IF_MCONT_EOB BIT(7)
125#define IF_MCONT_DLC_MASK 0xf
126
127/*
128 * IFx register masks:
129 * allow easy operation on 16-bit registers when the
130 * argument is 32-bit instead
131 */
132#define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF)
133#define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16)
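/*
 * Worked example of the two macros above:
 *   IFX_WRITE_LOW_16BIT(0x12345678)  == 0x5678
 *   IFX_WRITE_HIGH_16BIT(0x12345678) == 0x1234
 */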
134
135/* message object split */
136#define C_CAN_NO_OF_OBJECTS 32
137#define C_CAN_MSG_OBJ_RX_NUM 16
138#define C_CAN_MSG_OBJ_TX_NUM 16
139
140#define C_CAN_MSG_OBJ_RX_FIRST 1
141#define C_CAN_MSG_OBJ_RX_LAST (C_CAN_MSG_OBJ_RX_FIRST + \
142 C_CAN_MSG_OBJ_RX_NUM - 1)
143
144#define C_CAN_MSG_OBJ_TX_FIRST (C_CAN_MSG_OBJ_RX_LAST + 1)
145#define C_CAN_MSG_OBJ_TX_LAST (C_CAN_MSG_OBJ_TX_FIRST + \
146 C_CAN_MSG_OBJ_TX_NUM - 1)
147
148#define C_CAN_MSG_OBJ_RX_SPLIT 9
149#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1)
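/*
 * With the values above, the 32 message objects are laid out as:
 *   RX objects  1..16  (low group 1..8, fifo end at object 16 / EOB)
 *   TX objects 17..32
 */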
150
151#define C_CAN_NEXT_MSG_OBJ_MASK (C_CAN_MSG_OBJ_TX_NUM - 1)
152#define RECEIVE_OBJECT_BITS 0x0000ffff
153
154/* status interrupt */
155#define STATUS_INTERRUPT 0x8000
156
157/* global interrupt masks */
158#define ENABLE_ALL_INTERRUPTS 1
159#define DISABLE_ALL_INTERRUPTS 0
160
161/* minimum timeout for checking BUSY status */
162#define MIN_TIMEOUT_VALUE 6
163
164/* napi related */
165#define C_CAN_NAPI_WEIGHT C_CAN_MSG_OBJ_RX_NUM
166
167/* c_can lec values */
168enum c_can_lec_type {
169 LEC_NO_ERROR = 0,
170 LEC_STUFF_ERROR,
171 LEC_FORM_ERROR,
172 LEC_ACK_ERROR,
173 LEC_BIT1_ERROR,
174 LEC_BIT0_ERROR,
175 LEC_CRC_ERROR,
176 LEC_UNUSED,
177};
178
179/*
180 * c_can error types:
181 * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported
182 */
183enum c_can_bus_error_types {
184 C_CAN_NO_ERROR = 0,
185 C_CAN_BUS_OFF,
186 C_CAN_ERROR_WARNING,
187 C_CAN_ERROR_PASSIVE,
188};
189
190static struct can_bittiming_const c_can_bittiming_const = {
191 .name = KBUILD_MODNAME,
192 .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
193 .tseg1_max = 16,
194 .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
195 .tseg2_max = 8,
196 .sjw_max = 4,
197 .brp_min = 1,
198 .brp_max = 1024, /* 6-bit BRP field + 4-bit BRPE field*/
199 .brp_inc = 1,
200};
201
202static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
203{
204 return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
205 C_CAN_MSG_OBJ_TX_FIRST;
206}
207
208static inline int get_tx_echo_msg_obj(const struct c_can_priv *priv)
209{
210 return (priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) +
211 C_CAN_MSG_OBJ_TX_FIRST;
212}
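/*
 * Worked example for the two helpers above: with tx_next == 18,
 * (18 & C_CAN_NEXT_MSG_OBJ_MASK) + C_CAN_MSG_OBJ_TX_FIRST
 * == (18 & 15) + 17 == 19. The counters thus grow without bound
 * while the mask maps them onto message objects 17..32.
 */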
213
214static u32 c_can_read_reg32(struct c_can_priv *priv, void *reg)
215{
216 u32 val = priv->read_reg(priv, reg);
217 val |= ((u32) priv->read_reg(priv, reg + 2)) << 16;
218 return val;
219}
220
221static void c_can_enable_all_interrupts(struct c_can_priv *priv,
222 int enable)
223{
224 unsigned int cntrl_save = priv->read_reg(priv,
225 &priv->regs->control);
226
227 if (enable)
228 cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE);
229 else
230 cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE);
231
232 priv->write_reg(priv, &priv->regs->control, cntrl_save);
233}
234
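/*
 * The helper below polls the BUSY flag for at most MIN_TIMEOUT_VALUE
 * iterations of udelay(1), i.e. roughly 6 microseconds, a generous
 * upper bound for the 6 CAN-CLK periods a transfer is specified to
 * take (see the comments in c_can_object_get/put below).
 */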
235static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
236{
237 int count = MIN_TIMEOUT_VALUE;
238
239 while (count && priv->read_reg(priv,
240 &priv->regs->ifregs[iface].com_req) &
241 IF_COMR_BUSY) {
242 count--;
243 udelay(1);
244 }
245
246 if (!count)
247 return 1;
248
249 return 0;
250}
251
252static inline void c_can_object_get(struct net_device *dev,
253 int iface, int objno, int mask)
254{
255 struct c_can_priv *priv = netdev_priv(dev);
256
257 /*
258	 * As per the specs, after writing the message object number in the
259	 * IF command request register, the transfer between the interface
260	 * register and the message RAM must complete within 6 CAN-CLK
261	 * periods.
262 */
263 priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
264 IFX_WRITE_LOW_16BIT(mask));
265 priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
266 IFX_WRITE_LOW_16BIT(objno));
267
268 if (c_can_msg_obj_is_busy(priv, iface))
269 netdev_err(dev, "timed out in object get\n");
270}
271
272static inline void c_can_object_put(struct net_device *dev,
273 int iface, int objno, int mask)
274{
275 struct c_can_priv *priv = netdev_priv(dev);
276
277 /*
278	 * As per the specs, after writing the message object number in the
279	 * IF command request register, the transfer between the interface
280	 * register and the message RAM must complete within 6 CAN-CLK
281	 * periods.
282 */
283 priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
284 (IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
285 priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
286 IFX_WRITE_LOW_16BIT(objno));
287
288 if (c_can_msg_obj_is_busy(priv, iface))
289 netdev_err(dev, "timed out in object put\n");
290}
291
292static void c_can_write_msg_object(struct net_device *dev,
293 int iface, struct can_frame *frame, int objno)
294{
295 int i;
296 u16 flags = 0;
297 unsigned int id;
298 struct c_can_priv *priv = netdev_priv(dev);
299
300 if (!(frame->can_id & CAN_RTR_FLAG))
301 flags |= IF_ARB_TRANSMIT;
302
303 if (frame->can_id & CAN_EFF_FLAG) {
304 id = frame->can_id & CAN_EFF_MASK;
305 flags |= IF_ARB_MSGXTD;
306 } else
307 id = ((frame->can_id & CAN_SFF_MASK) << 18);
308
309 flags |= IF_ARB_MSGVAL;
310
311 priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
312 IFX_WRITE_LOW_16BIT(id));
313 priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, flags |
314 IFX_WRITE_HIGH_16BIT(id));
315
316 for (i = 0; i < frame->can_dlc; i += 2) {
317 priv->write_reg(priv, &priv->regs->ifregs[iface].data[i / 2],
318 frame->data[i] | (frame->data[i + 1] << 8));
319 }
320
321 /* enable interrupt for this message object */
322 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
323 IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
324 frame->can_dlc);
325 c_can_object_put(dev, iface, objno, IF_COMM_ALL);
326}
327
328static inline void c_can_mark_rx_msg_obj(struct net_device *dev,
329 int iface, int ctrl_mask,
330 int obj)
331{
332 struct c_can_priv *priv = netdev_priv(dev);
333
334 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
335 ctrl_mask & ~(IF_MCONT_MSGLST | IF_MCONT_INTPND));
336 c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
337
338}
339
340static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
341 int iface,
342 int ctrl_mask)
343{
344 int i;
345 struct c_can_priv *priv = netdev_priv(dev);
346
347 for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) {
348 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
349 ctrl_mask & ~(IF_MCONT_MSGLST |
350 IF_MCONT_INTPND | IF_MCONT_NEWDAT));
351 c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
352 }
353}
354
355static inline void c_can_activate_rx_msg_obj(struct net_device *dev,
356 int iface, int ctrl_mask,
357 int obj)
358{
359 struct c_can_priv *priv = netdev_priv(dev);
360
361 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
362 ctrl_mask & ~(IF_MCONT_MSGLST |
363 IF_MCONT_INTPND | IF_MCONT_NEWDAT));
364 c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
365}
366
367static void c_can_handle_lost_msg_obj(struct net_device *dev,
368 int iface, int objno)
369{
370 struct c_can_priv *priv = netdev_priv(dev);
371 struct net_device_stats *stats = &dev->stats;
372 struct sk_buff *skb;
373 struct can_frame *frame;
374
375 netdev_err(dev, "msg lost in buffer %d\n", objno);
376
377 c_can_object_get(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
378
379 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
380 IF_MCONT_CLR_MSGLST);
381
382	c_can_object_put(dev, iface, objno, IF_COMM_CONTROL);
383
384 /* create an error msg */
385 skb = alloc_can_err_skb(dev, &frame);
386 if (unlikely(!skb))
387 return;
388
389 frame->can_id |= CAN_ERR_CRTL;
390 frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
391 stats->rx_errors++;
392 stats->rx_over_errors++;
393
394 netif_receive_skb(skb);
395}
396
397static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
398{
399 u16 flags, data;
400 int i;
401 unsigned int val;
402 struct c_can_priv *priv = netdev_priv(dev);
403 struct net_device_stats *stats = &dev->stats;
404 struct sk_buff *skb;
405 struct can_frame *frame;
406
407 skb = alloc_can_skb(dev, &frame);
408 if (!skb) {
409 stats->rx_dropped++;
410 return -ENOMEM;
411 }
412
413 frame->can_dlc = get_can_dlc(ctrl & 0x0F);
414
415 flags = priv->read_reg(priv, &priv->regs->ifregs[iface].arb2);
416 val = priv->read_reg(priv, &priv->regs->ifregs[iface].arb1) |
417 (flags << 16);
418
419 if (flags & IF_ARB_MSGXTD)
420 frame->can_id = (val & CAN_EFF_MASK) | CAN_EFF_FLAG;
421 else
422 frame->can_id = (val >> 18) & CAN_SFF_MASK;
423
424 if (flags & IF_ARB_TRANSMIT)
425 frame->can_id |= CAN_RTR_FLAG;
426 else {
427 for (i = 0; i < frame->can_dlc; i += 2) {
428 data = priv->read_reg(priv,
429 &priv->regs->ifregs[iface].data[i / 2]);
430 frame->data[i] = data;
431 frame->data[i + 1] = data >> 8;
432 }
433 }
434
435 netif_receive_skb(skb);
436
437 stats->rx_packets++;
438 stats->rx_bytes += frame->can_dlc;
439
440 return 0;
441}
442
443static void c_can_setup_receive_object(struct net_device *dev, int iface,
444 int objno, unsigned int mask,
445 unsigned int id, unsigned int mcont)
446{
447 struct c_can_priv *priv = netdev_priv(dev);
448
449 priv->write_reg(priv, &priv->regs->ifregs[iface].mask1,
450 IFX_WRITE_LOW_16BIT(mask));
451 priv->write_reg(priv, &priv->regs->ifregs[iface].mask2,
452 IFX_WRITE_HIGH_16BIT(mask));
453
454 priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
455 IFX_WRITE_LOW_16BIT(id));
456 priv->write_reg(priv, &priv->regs->ifregs[iface].arb2,
457 (IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));
458
459 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, mcont);
460 c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
461
462 netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
463 c_can_read_reg32(priv, &priv->regs->msgval1));
464}
465
466static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
467{
468 struct c_can_priv *priv = netdev_priv(dev);
469
470 priv->write_reg(priv, &priv->regs->ifregs[iface].arb1, 0);
471 priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, 0);
472 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 0);
473
474 c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);
475
476 netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
477 c_can_read_reg32(priv, &priv->regs->msgval1));
478}
479
480static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
481{
482 int val = c_can_read_reg32(priv, &priv->regs->txrqst1);
483
484 /*
485	 * the transmission request register's bit n-1 corresponds to
486	 * message object n, so we test bit (objno - 1) here.
487 */
488 if (val & (1 << (objno - 1)))
489 return 1;
490
491 return 0;
492}
493
494static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
495 struct net_device *dev)
496{
497 u32 msg_obj_no;
498 struct c_can_priv *priv = netdev_priv(dev);
499 struct can_frame *frame = (struct can_frame *)skb->data;
500
501 if (can_dropped_invalid_skb(dev, skb))
502 return NETDEV_TX_OK;
503
504 msg_obj_no = get_tx_next_msg_obj(priv);
505
506 /* prepare message object for transmission */
507 c_can_write_msg_object(dev, 0, frame, msg_obj_no);
508 can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
509
510 /*
511 * we have to stop the queue in case of a wrap around or
512 * if the next TX message object is still in use
513 */
514 priv->tx_next++;
515 if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
516 (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
517 netif_stop_queue(dev);
518
519 return NETDEV_TX_OK;
520}
521
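/*
 * Worked example for the register values computed below (numbers are
 * illustrative only, not tied to a particular clock): for brp = 2,
 * sjw = 1, prop_seg + phase_seg1 = 7 and phase_seg2 = 2 we get
 *   brp = 1, brpe = 0, sjw = 0, tseg1 = 6, tseg2 = 1
 *   reg_btr  = 0x1 | (0 << 6) | (6 << 8) | (1 << 12) = 0x1601
 *   reg_brpe = 0x0
 */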
522static int c_can_set_bittiming(struct net_device *dev)
523{
524 unsigned int reg_btr, reg_brpe, ctrl_save;
525 u8 brp, brpe, sjw, tseg1, tseg2;
526 u32 ten_bit_brp;
527 struct c_can_priv *priv = netdev_priv(dev);
528 const struct can_bittiming *bt = &priv->can.bittiming;
529
530 /* c_can provides a 6-bit brp and 4-bit brpe fields */
531 ten_bit_brp = bt->brp - 1;
532 brp = ten_bit_brp & BTR_BRP_MASK;
533 brpe = ten_bit_brp >> 6;
534
535 sjw = bt->sjw - 1;
536 tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
537 tseg2 = bt->phase_seg2 - 1;
538 reg_btr = brp | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) |
539 (tseg2 << BTR_TSEG2_SHIFT);
540 reg_brpe = brpe & BRP_EXT_BRPE_MASK;
541
542 netdev_info(dev,
543 "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);
544
545 ctrl_save = priv->read_reg(priv, &priv->regs->control);
546 priv->write_reg(priv, &priv->regs->control,
547 ctrl_save | CONTROL_CCE | CONTROL_INIT);
548 priv->write_reg(priv, &priv->regs->btr, reg_btr);
549 priv->write_reg(priv, &priv->regs->brp_ext, reg_brpe);
550 priv->write_reg(priv, &priv->regs->control, ctrl_save);
551
552 return 0;
553}
554
555/*
556 * Configure C_CAN message objects for Tx and Rx purposes:
557 * C_CAN provides a total of 32 message objects that can be configured
558 * either for Tx or Rx purposes. Here the first 16 message objects are used as
559 * a reception FIFO. The end of reception FIFO is signified by the EoB bit
560 * being SET. The remaining 16 message objects are kept aside for Tx purposes.
561 * See user guide document for further details on configuring message
562 * objects.
563 */
564static void c_can_configure_msg_objects(struct net_device *dev)
565{
566 int i;
567
568 /* first invalidate all message objects */
569 for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++)
570 c_can_inval_msg_object(dev, 0, i);
571
572 /* setup receive message objects */
573 for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
574 c_can_setup_receive_object(dev, 0, i, 0, 0,
575 (IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB);
576
577 c_can_setup_receive_object(dev, 0, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
578 IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK);
579}
580
581/*
582 * Configure C_CAN chip:
583 * - enable/disable auto-retransmission
584 * - set operating mode
585 * - configure message objects
586 */
587static void c_can_chip_config(struct net_device *dev)
588{
589 struct c_can_priv *priv = netdev_priv(dev);
590
591 if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
592 /* disable automatic retransmission */
593 priv->write_reg(priv, &priv->regs->control,
594 CONTROL_DISABLE_AR);
595 else
596 /* enable automatic retransmission */
597 priv->write_reg(priv, &priv->regs->control,
598 CONTROL_ENABLE_AR);
599
600	if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
601			(priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
602 /* loopback + silent mode : useful for hot self-test */
603 priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
604 CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
605 priv->write_reg(priv, &priv->regs->test,
606 TEST_LBACK | TEST_SILENT);
607 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
608 /* loopback mode : useful for self-test function */
609 priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
610 CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
611 priv->write_reg(priv, &priv->regs->test, TEST_LBACK);
612 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
613 /* silent mode : bus-monitoring mode */
614 priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
615 CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
616 priv->write_reg(priv, &priv->regs->test, TEST_SILENT);
617 } else
618		/* normal mode */
619 priv->write_reg(priv, &priv->regs->control,
620 CONTROL_EIE | CONTROL_SIE | CONTROL_IE);
621
622 /* configure message objects */
623 c_can_configure_msg_objects(dev);
624
625 /* set a `lec` value so that we can check for updates later */
626 priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
627
628 /* set bittiming params */
629 c_can_set_bittiming(dev);
630}
631
632static void c_can_start(struct net_device *dev)
633{
634 struct c_can_priv *priv = netdev_priv(dev);
635
636 /* enable status change, error and module interrupts */
637 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
638
639 /* basic c_can configuration */
640 c_can_chip_config(dev);
641
642 priv->can.state = CAN_STATE_ERROR_ACTIVE;
643
644 /* reset tx helper pointers */
645 priv->tx_next = priv->tx_echo = 0;
646}
647
648static void c_can_stop(struct net_device *dev)
649{
650 struct c_can_priv *priv = netdev_priv(dev);
651
652 /* disable all interrupts */
653 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
654
655 /* set the state as STOPPED */
656 priv->can.state = CAN_STATE_STOPPED;
657}
658
659static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
660{
661 switch (mode) {
662 case CAN_MODE_START:
663 c_can_start(dev);
664 netif_wake_queue(dev);
665 break;
666 default:
667 return -EOPNOTSUPP;
668 }
669
670 return 0;
671}
672
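/*
 * Example decode of the error counter register read below:
 * reg_err_counter == 0x8a05 yields
 *   txerr = 0x8a05 & 0xff          = 0x05
 *   rxerr = (0x8a05 & 0x7f00) >> 8 = 0x0a
 *   RP    = bit 15                 = 1 (receiver is error passive)
 */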
673static int c_can_get_berr_counter(const struct net_device *dev,
674 struct can_berr_counter *bec)
675{
676 unsigned int reg_err_counter;
677 struct c_can_priv *priv = netdev_priv(dev);
678
679 reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
680 bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
681 ERR_CNT_REC_SHIFT;
682 bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
683
684 return 0;
685}
686
687/*
688 * theory of operation:
689 *
690 * priv->tx_echo holds the number of the oldest can_frame put for
691 * transmission into the hardware, but not yet ACKed by the CAN tx
692 * complete IRQ.
693 *
694 * We iterate from priv->tx_echo to priv->tx_next and, for each frame
695 * that has been transmitted, echo it back to the CAN framework.
696 * If we discover a not yet transmitted frame, we stop looking for more.
697 */
698static void c_can_do_tx(struct net_device *dev)
699{
700 u32 val;
701 u32 msg_obj_no;
702 struct c_can_priv *priv = netdev_priv(dev);
703 struct net_device_stats *stats = &dev->stats;
704
705	for (/* nothing */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
706 msg_obj_no = get_tx_echo_msg_obj(priv);
707 c_can_inval_msg_object(dev, 0, msg_obj_no);
708 val = c_can_read_reg32(priv, &priv->regs->txrqst1);
709		if (!(val & (1 << (msg_obj_no - 1)))) {
710 can_get_echo_skb(dev,
711 msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
712 stats->tx_bytes += priv->read_reg(priv,
713 &priv->regs->ifregs[0].msg_cntrl)
714 & IF_MCONT_DLC_MASK;
715 stats->tx_packets++;
716 }
717 }
718
719 /* restart queue if wrap-up or if queue stalled on last pkt */
720 if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
721 ((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
722 netif_wake_queue(dev);
723}
724
725/*
726 * theory of operation:
727 *
728 * the c_can core saves a received CAN message into the first message
729 * object it finds free (starting with the lowest one). Bits NEWDAT and
730 * INTPND are set for this message object, indicating that a new message
731 * has arrived. To work around the resulting ordering problem, we keep two
732 * groups of message objects, partitioned at C_CAN_MSG_OBJ_RX_SPLIT.
733 *
734 * To ensure in-order frame reception we use the following
735 * approach while re-activating a message object to receive further
736 * frames:
737 * - if the current message object number is lower than
738 * C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
739 * the INTPND bit.
740 * - if the current message object number is equal to
741 * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
742 * receive message objects.
743 * - if the current message object number is greater than
744 * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
745 * only this message object.
746 */
747static int c_can_do_rx_poll(struct net_device *dev, int quota)
748{
749 u32 num_rx_pkts = 0;
750 unsigned int msg_obj, msg_ctrl_save;
751 struct c_can_priv *priv = netdev_priv(dev);
752 u32 val = c_can_read_reg32(priv, &priv->regs->intpnd1);
753
754 for (msg_obj = C_CAN_MSG_OBJ_RX_FIRST;
755 msg_obj <= C_CAN_MSG_OBJ_RX_LAST && quota > 0;
756 val = c_can_read_reg32(priv, &priv->regs->intpnd1),
757 msg_obj++) {
758 /*
759		 * the interrupt pending register's bit n-1 corresponds to
760		 * message object n, so we test bit (msg_obj - 1) here.
761 */
762 if (val & (1 << (msg_obj - 1))) {
763 c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL &
764 ~IF_COMM_TXRQST);
765 msg_ctrl_save = priv->read_reg(priv,
766 &priv->regs->ifregs[0].msg_cntrl);
767
768 if (msg_ctrl_save & IF_MCONT_EOB)
769 return num_rx_pkts;
770
771 if (msg_ctrl_save & IF_MCONT_MSGLST) {
772 c_can_handle_lost_msg_obj(dev, 0, msg_obj);
773 num_rx_pkts++;
774 quota--;
775 continue;
776 }
777
778 if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
779 continue;
780
781 /* read the data from the message object */
782 c_can_read_msg_object(dev, 0, msg_ctrl_save);
783
784 if (msg_obj < C_CAN_MSG_RX_LOW_LAST)
785 c_can_mark_rx_msg_obj(dev, 0,
786 msg_ctrl_save, msg_obj);
787 else if (msg_obj > C_CAN_MSG_RX_LOW_LAST)
788 /* activate this msg obj */
789 c_can_activate_rx_msg_obj(dev, 0,
790 msg_ctrl_save, msg_obj);
791 else if (msg_obj == C_CAN_MSG_RX_LOW_LAST)
792 /* activate all lower message objects */
793 c_can_activate_all_lower_rx_msg_obj(dev,
794 0, msg_ctrl_save);
795
796 num_rx_pkts++;
797 quota--;
798 }
799 }
800
801 return num_rx_pkts;
802}
803
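/*
 * Note on the helper below: LEC_UNUSED (0x7) doubles as the mask for
 * the 3-bit LEC field of the status register, so the value returned
 * is the raw last-error-code. The caller feeds it unchanged into
 * c_can_handle_bus_err(), which ignores LEC_NO_ERROR and LEC_UNUSED.
 */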
804static inline int c_can_has_and_handle_berr(struct c_can_priv *priv)
805{
806	return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) ?
807		(priv->current_status & LEC_UNUSED) : 0;
808}
809
810static int c_can_handle_state_change(struct net_device *dev,
811 enum c_can_bus_error_types error_type)
812{
813 unsigned int reg_err_counter;
814 unsigned int rx_err_passive;
815 struct c_can_priv *priv = netdev_priv(dev);
816 struct net_device_stats *stats = &dev->stats;
817 struct can_frame *cf;
818 struct sk_buff *skb;
819 struct can_berr_counter bec;
820
821	/* propagate the error condition to the CAN stack */
822 skb = alloc_can_err_skb(dev, &cf);
823 if (unlikely(!skb))
824 return 0;
825
826 c_can_get_berr_counter(dev, &bec);
827 reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
828 rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
829 ERR_CNT_RP_SHIFT;
830
831 switch (error_type) {
832 case C_CAN_ERROR_WARNING:
833 /* error warning state */
834 priv->can.can_stats.error_warning++;
835 priv->can.state = CAN_STATE_ERROR_WARNING;
836 cf->can_id |= CAN_ERR_CRTL;
837 cf->data[1] = (bec.txerr > bec.rxerr) ?
838 CAN_ERR_CRTL_TX_WARNING :
839 CAN_ERR_CRTL_RX_WARNING;
840 cf->data[6] = bec.txerr;
841 cf->data[7] = bec.rxerr;
842
843 break;
844 case C_CAN_ERROR_PASSIVE:
845 /* error passive state */
846 priv->can.can_stats.error_passive++;
847 priv->can.state = CAN_STATE_ERROR_PASSIVE;
848 cf->can_id |= CAN_ERR_CRTL;
849 if (rx_err_passive)
850 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
851 if (bec.txerr > 127)
852 cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
853
854 cf->data[6] = bec.txerr;
855 cf->data[7] = bec.rxerr;
856 break;
857 case C_CAN_BUS_OFF:
858 /* bus-off state */
859 priv->can.state = CAN_STATE_BUS_OFF;
860 cf->can_id |= CAN_ERR_BUSOFF;
861 /*
862 * disable all interrupts in bus-off mode to ensure that
863 * the CPU is not hogged down
864 */
865 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
866 can_bus_off(dev);
867 break;
868 default:
869 break;
870 }
871
872 netif_receive_skb(skb);
873 stats->rx_packets++;
874 stats->rx_bytes += cf->can_dlc;
875
876 return 1;
877}
878
879static int c_can_handle_bus_err(struct net_device *dev,
880 enum c_can_lec_type lec_type)
881{
882 struct c_can_priv *priv = netdev_priv(dev);
883 struct net_device_stats *stats = &dev->stats;
884 struct can_frame *cf;
885 struct sk_buff *skb;
886
887 /*
888 * early exit if no lec update or no error.
889 * no lec update means that no CAN bus event has been detected
890	 * since the CPU last wrote the 0x7 (LEC_UNUSED) value to the status reg.
891 */
892 if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
893 return 0;
894
895	/* propagate the error condition to the CAN stack */
896 skb = alloc_can_err_skb(dev, &cf);
897 if (unlikely(!skb))
898 return 0;
899
900 /*
901 * check for 'last error code' which tells us the
902 * type of the last error to occur on the CAN bus
903 */
904
905	/* common for all types of bus errors */
906 priv->can.can_stats.bus_error++;
907 stats->rx_errors++;
908 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
909 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
910
911 switch (lec_type) {
912 case LEC_STUFF_ERROR:
913 netdev_dbg(dev, "stuff error\n");
914 cf->data[2] |= CAN_ERR_PROT_STUFF;
915 break;
916 case LEC_FORM_ERROR:
917 netdev_dbg(dev, "form error\n");
918 cf->data[2] |= CAN_ERR_PROT_FORM;
919 break;
920 case LEC_ACK_ERROR:
921 netdev_dbg(dev, "ack error\n");
922 cf->data[2] |= (CAN_ERR_PROT_LOC_ACK |
923 CAN_ERR_PROT_LOC_ACK_DEL);
924 break;
925 case LEC_BIT1_ERROR:
926 netdev_dbg(dev, "bit1 error\n");
927 cf->data[2] |= CAN_ERR_PROT_BIT1;
928 break;
929 case LEC_BIT0_ERROR:
930 netdev_dbg(dev, "bit0 error\n");
931 cf->data[2] |= CAN_ERR_PROT_BIT0;
932 break;
933 case LEC_CRC_ERROR:
934 netdev_dbg(dev, "CRC error\n");
935 cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
936 CAN_ERR_PROT_LOC_CRC_DEL);
937 break;
938 default:
939 break;
940 }
941
942 /* set a `lec` value so that we can check for updates later */
943 priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
944
945 netif_receive_skb(skb);
946 stats->rx_packets++;
947 stats->rx_bytes += cf->can_dlc;
948
949 return 1;
950}
951
952static int c_can_poll(struct napi_struct *napi, int quota)
953{
954 u16 irqstatus;
955 int lec_type = 0;
956 int work_done = 0;
957 struct net_device *dev = napi->dev;
958 struct c_can_priv *priv = netdev_priv(dev);
959
960 irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
961 if (!irqstatus)
962 goto end;
963
964 /* status events have the highest priority */
965 if (irqstatus == STATUS_INTERRUPT) {
966 priv->current_status = priv->read_reg(priv,
967 &priv->regs->status);
968
969 /* handle Tx/Rx events */
970 if (priv->current_status & STATUS_TXOK)
971 priv->write_reg(priv, &priv->regs->status,
972 priv->current_status & ~STATUS_TXOK);
973
974 if (priv->current_status & STATUS_RXOK)
975 priv->write_reg(priv, &priv->regs->status,
976 priv->current_status & ~STATUS_RXOK);
977
978 /* handle state changes */
979 if ((priv->current_status & STATUS_EWARN) &&
980 (!(priv->last_status & STATUS_EWARN))) {
981 netdev_dbg(dev, "entered error warning state\n");
982 work_done += c_can_handle_state_change(dev,
983 C_CAN_ERROR_WARNING);
984 }
985 if ((priv->current_status & STATUS_EPASS) &&
986 (!(priv->last_status & STATUS_EPASS))) {
987 netdev_dbg(dev, "entered error passive state\n");
988 work_done += c_can_handle_state_change(dev,
989 C_CAN_ERROR_PASSIVE);
990 }
991 if ((priv->current_status & STATUS_BOFF) &&
992 (!(priv->last_status & STATUS_BOFF))) {
993 netdev_dbg(dev, "entered bus off state\n");
994 work_done += c_can_handle_state_change(dev,
995 C_CAN_BUS_OFF);
996 }
997
998 /* handle bus recovery events */
999 if ((!(priv->current_status & STATUS_BOFF)) &&
1000 (priv->last_status & STATUS_BOFF)) {
1001 netdev_dbg(dev, "left bus off state\n");
1002 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1003 }
1004 if ((!(priv->current_status & STATUS_EPASS)) &&
1005 (priv->last_status & STATUS_EPASS)) {
1006 netdev_dbg(dev, "left error passive state\n");
1007 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1008 }
1009
1010 priv->last_status = priv->current_status;
1011
1012 /* handle lec errors on the bus */
1013 lec_type = c_can_has_and_handle_berr(priv);
1014 if (lec_type)
1015 work_done += c_can_handle_bus_err(dev, lec_type);
1016 } else if ((irqstatus >= C_CAN_MSG_OBJ_RX_FIRST) &&
1017 (irqstatus <= C_CAN_MSG_OBJ_RX_LAST)) {
1018 /* handle events corresponding to receive message objects */
1019 work_done += c_can_do_rx_poll(dev, (quota - work_done));
1020 } else if ((irqstatus >= C_CAN_MSG_OBJ_TX_FIRST) &&
1021 (irqstatus <= C_CAN_MSG_OBJ_TX_LAST)) {
1022 /* handle events corresponding to transmit message objects */
1023 c_can_do_tx(dev);
1024 }
1025
1026end:
1027 if (work_done < quota) {
1028 napi_complete(napi);
1029 /* enable all IRQs */
1030 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
1031 }
1032
1033 return work_done;
1034}
1035
1036static irqreturn_t c_can_isr(int irq, void *dev_id)
1037{
1038 u16 irqstatus;
1039 struct net_device *dev = (struct net_device *)dev_id;
1040 struct c_can_priv *priv = netdev_priv(dev);
1041
1042 irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
1043 if (!irqstatus)
1044 return IRQ_NONE;
1045
1046 /* disable all interrupts and schedule the NAPI */
1047 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
1048 napi_schedule(&priv->napi);
1049
1050 return IRQ_HANDLED;
1051}
1052
1053static int c_can_open(struct net_device *dev)
1054{
1055 int err;
1056 struct c_can_priv *priv = netdev_priv(dev);
1057
1058 /* open the can device */
1059 err = open_candev(dev);
1060 if (err) {
1061 netdev_err(dev, "failed to open can device\n");
1062 return err;
1063 }
1064
1065 /* register interrupt handler */
1066 err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name,
1067 dev);
1068 if (err < 0) {
1069 netdev_err(dev, "failed to request interrupt\n");
1070 goto exit_irq_fail;
1071 }
1072
1073 /* start the c_can controller */
1074 c_can_start(dev);
1075
1076 napi_enable(&priv->napi);
1077 netif_start_queue(dev);
1078
1079 return 0;
1080
1081exit_irq_fail:
1082 close_candev(dev);
1083 return err;
1084}
1085
1086static int c_can_close(struct net_device *dev)
1087{
1088 struct c_can_priv *priv = netdev_priv(dev);
1089
1090 netif_stop_queue(dev);
1091 napi_disable(&priv->napi);
1092 c_can_stop(dev);
1093 free_irq(dev->irq, dev);
1094 close_candev(dev);
1095
1096 return 0;
1097}
1098
1099struct net_device *alloc_c_can_dev(void)
1100{
1101 struct net_device *dev;
1102 struct c_can_priv *priv;
1103
1104 dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM);
1105 if (!dev)
1106 return NULL;
1107
1108 priv = netdev_priv(dev);
1109 netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
1110
1111 priv->dev = dev;
1112 priv->can.bittiming_const = &c_can_bittiming_const;
1113 priv->can.do_set_mode = c_can_set_mode;
1114 priv->can.do_get_berr_counter = c_can_get_berr_counter;
1115 priv->can.ctrlmode_supported = CAN_CTRLMODE_ONE_SHOT |
1116 CAN_CTRLMODE_LOOPBACK |
1117 CAN_CTRLMODE_LISTENONLY |
1118 CAN_CTRLMODE_BERR_REPORTING;
1119
1120 return dev;
1121}
1122EXPORT_SYMBOL_GPL(alloc_c_can_dev);
1123
1124void free_c_can_dev(struct net_device *dev)
1125{
1126 free_candev(dev);
1127}
1128EXPORT_SYMBOL_GPL(free_c_can_dev);
1129
1130static const struct net_device_ops c_can_netdev_ops = {
1131 .ndo_open = c_can_open,
1132 .ndo_stop = c_can_close,
1133 .ndo_start_xmit = c_can_start_xmit,
1134};
1135
1136int register_c_can_dev(struct net_device *dev)
1137{
1138 dev->flags |= IFF_ECHO; /* we support local echo */
1139 dev->netdev_ops = &c_can_netdev_ops;
1140
1141 return register_candev(dev);
1142}
1143EXPORT_SYMBOL_GPL(register_c_can_dev);
1144
1145void unregister_c_can_dev(struct net_device *dev)
1146{
1147 struct c_can_priv *priv = netdev_priv(dev);
1148
1149 /* disable all interrupts */
1150 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
1151
1152 unregister_candev(dev);
1153}
1154EXPORT_SYMBOL_GPL(unregister_c_can_dev);
1155
1156MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
1157MODULE_LICENSE("GPL v2");
1158MODULE_DESCRIPTION("CAN bus driver for Bosch C_CAN controller");
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
new file mode 100644
index 000000000000..9b7fbef3d09a
--- /dev/null
+++ b/drivers/net/can/c_can/c_can.h
@@ -0,0 +1,86 @@
1/*
2 * CAN bus driver for Bosch C_CAN controller
3 *
4 * Copyright (C) 2010 ST Microelectronics
5 * Bhupesh Sharma <bhupesh.sharma@st.com>
6 *
7 * Borrowed heavily from the C_CAN driver originally written by:
8 * Copyright (C) 2007
9 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
10 * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
11 *
12 * The Bosch C_CAN controller is compliant with CAN protocol version 2.0 part A and B.
13 * Bosch C_CAN user manual can be obtained from:
14 * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
15 * users_manual_c_can.pdf
16 *
17 * This file is licensed under the terms of the GNU General Public
18 * License version 2. This program is licensed "as is" without any
19 * warranty of any kind, whether express or implied.
20 */
21
22#ifndef C_CAN_H
23#define C_CAN_H
24
25/* c_can IF registers */
26struct c_can_if_regs {
27 u16 com_req;
28 u16 com_mask;
29 u16 mask1;
30 u16 mask2;
31 u16 arb1;
32 u16 arb2;
33 u16 msg_cntrl;
34 u16 data[4];
35 u16 _reserved[13];
36};
37
38/* c_can hardware registers */
39struct c_can_regs {
40 u16 control;
41 u16 status;
42 u16 err_cnt;
43 u16 btr;
44 u16 interrupt;
45 u16 test;
46 u16 brp_ext;
47 u16 _reserved1;
48 struct c_can_if_regs ifregs[2]; /* [0] = IF1 and [1] = IF2 */
49 u16 _reserved2[8];
50 u16 txrqst1;
51 u16 txrqst2;
52 u16 _reserved3[6];
53 u16 newdat1;
54 u16 newdat2;
55 u16 _reserved4[6];
56 u16 intpnd1;
57 u16 intpnd2;
58 u16 _reserved5[6];
59 u16 msgval1;
60 u16 msgval2;
61 u16 _reserved6[6];
62};
63
64/* c_can private data structure */
65struct c_can_priv {
66 struct can_priv can; /* must be the first member */
67 struct napi_struct napi;
68 struct net_device *dev;
69 int tx_object;
70 int current_status;
71 int last_status;
72 u16 (*read_reg) (struct c_can_priv *priv, void *reg);
73 void (*write_reg) (struct c_can_priv *priv, void *reg, u16 val);
74 struct c_can_regs __iomem *regs;
75 unsigned long irq_flags; /* for request_irq() */
76 unsigned int tx_next;
77 unsigned int tx_echo;
78 void *priv; /* for board-specific data */
79};
80
81struct net_device *alloc_c_can_dev(void);
82void free_c_can_dev(struct net_device *dev);
83int register_c_can_dev(struct net_device *dev);
84void unregister_c_can_dev(struct net_device *dev);
85
86#endif /* C_CAN_H */
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
new file mode 100644
index 000000000000..e629b961ae2d
--- /dev/null
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -0,0 +1,215 @@
1/*
2 * Platform CAN bus driver for Bosch C_CAN controller
3 *
4 * Copyright (C) 2010 ST Microelectronics
5 * Bhupesh Sharma <bhupesh.sharma@st.com>
6 *
7 * Borrowed heavily from the C_CAN driver originally written by:
8 * Copyright (C) 2007
9 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
10 * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
11 *
12 * The Bosch C_CAN controller is compliant with CAN protocol version 2.0 part A and B.
13 * Bosch C_CAN user manual can be obtained from:
14 * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
15 * users_manual_c_can.pdf
16 *
17 * This file is licensed under the terms of the GNU General Public
18 * License version 2. This program is licensed "as is" without any
19 * warranty of any kind, whether express or implied.
20 */
21
22#include <linux/kernel.h>
23#include <linux/version.h>
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/delay.h>
27#include <linux/netdevice.h>
28#include <linux/if_arp.h>
29#include <linux/if_ether.h>
30#include <linux/list.h>
32#include <linux/io.h>
33#include <linux/platform_device.h>
34#include <linux/clk.h>
35
36#include <linux/can/dev.h>
37
38#include "c_can.h"
39
40/*
41 * 16-bit c_can registers can be arranged differently in the memory
42 * architecture of different implementations. For example: 16-bit
43 * registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
44 * Handle this by providing a common read/write interface.
45 */
46static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv,
47 void *reg)
48{
49 return readw(reg);
50}
51
52static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv,
53 void *reg, u16 val)
54{
55 writew(val, reg);
56}
57
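/*
 * In the 32-bit-aligned layout, every 16-bit register lives at twice
 * its offset within struct c_can_regs:
 *   reg + ((long)reg - (long)priv->regs) == priv->regs + 2 * offset
 * e.g. the status register (struct offset 2) is read from byte
 * offset 4 relative to the register base.
 */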
58static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv,
59 void *reg)
60{
61 return readw(reg + (long)reg - (long)priv->regs);
62}
63
64static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
65 void *reg, u16 val)
66{
67 writew(val, reg + (long)reg - (long)priv->regs);
68}
69
70static int __devinit c_can_plat_probe(struct platform_device *pdev)
71{
72 int ret;
73 void __iomem *addr;
74 struct net_device *dev;
75 struct c_can_priv *priv;
76 struct resource *mem, *irq;
77#ifdef CONFIG_HAVE_CLK
78 struct clk *clk;
79
80 /* get the appropriate clk */
81 clk = clk_get(&pdev->dev, NULL);
82 if (IS_ERR(clk)) {
83 dev_err(&pdev->dev, "no clock defined\n");
84 ret = -ENODEV;
85 goto exit;
86 }
87#endif
88
89 /* get the platform data */
90 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
91 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
92	if (!mem || !irq) {
93 ret = -ENODEV;
94 goto exit_free_clk;
95 }
96
97 if (!request_mem_region(mem->start, resource_size(mem),
98 KBUILD_MODNAME)) {
99 dev_err(&pdev->dev, "resource unavailable\n");
100 ret = -ENODEV;
101 goto exit_free_clk;
102 }
103
104 addr = ioremap(mem->start, resource_size(mem));
105 if (!addr) {
106 dev_err(&pdev->dev, "failed to map can port\n");
107 ret = -ENOMEM;
108 goto exit_release_mem;
109 }
110
111 /* allocate the c_can device */
112 dev = alloc_c_can_dev();
113 if (!dev) {
114 ret = -ENOMEM;
115 goto exit_iounmap;
116 }
117
118 priv = netdev_priv(dev);
119
120 dev->irq = irq->start;
121 priv->regs = addr;
122#ifdef CONFIG_HAVE_CLK
123 priv->can.clock.freq = clk_get_rate(clk);
124 priv->priv = clk;
125#endif
126
127 switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
128 case IORESOURCE_MEM_32BIT:
129 priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
130 priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
131 break;
132 case IORESOURCE_MEM_16BIT:
133 default:
134 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
135 priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
136 break;
137 }
138
139 platform_set_drvdata(pdev, dev);
140 SET_NETDEV_DEV(dev, &pdev->dev);
141
142 ret = register_c_can_dev(dev);
143 if (ret) {
144 dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
145 KBUILD_MODNAME, ret);
146 goto exit_free_device;
147 }
148
149 dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
150 KBUILD_MODNAME, priv->regs, dev->irq);
151 return 0;
152
153exit_free_device:
154 platform_set_drvdata(pdev, NULL);
155 free_c_can_dev(dev);
156exit_iounmap:
157 iounmap(addr);
158exit_release_mem:
159 release_mem_region(mem->start, resource_size(mem));
160exit_free_clk:
161#ifdef CONFIG_HAVE_CLK
162 clk_put(clk);
163exit:
164#endif
165 dev_err(&pdev->dev, "probe failed\n");
166
167 return ret;
168}
169
170static int __devexit c_can_plat_remove(struct platform_device *pdev)
171{
172 struct net_device *dev = platform_get_drvdata(pdev);
173 struct c_can_priv *priv = netdev_priv(dev);
174 struct resource *mem;
175
176 unregister_c_can_dev(dev);
177 platform_set_drvdata(pdev, NULL);
178
179 free_c_can_dev(dev);
180 iounmap(priv->regs);
181
182 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
183 release_mem_region(mem->start, resource_size(mem));
184
185#ifdef CONFIG_HAVE_CLK
186 clk_put(priv->priv);
187#endif
188
189 return 0;
190}
191
192static struct platform_driver c_can_plat_driver = {
193 .driver = {
194 .name = KBUILD_MODNAME,
195 .owner = THIS_MODULE,
196 },
197 .probe = c_can_plat_probe,
198 .remove = __devexit_p(c_can_plat_remove),
199};
200
201static int __init c_can_plat_init(void)
202{
203 return platform_driver_register(&c_can_plat_driver);
204}
205module_init(c_can_plat_init);
206
207static void __exit c_can_plat_exit(void)
208{
209 platform_driver_unregister(&c_can_plat_driver);
210}
211module_exit(c_can_plat_exit);
212
213MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
214MODULE_LICENSE("GPL v2");
215MODULE_DESCRIPTION("Platform CAN bus driver for Bosch C_CAN controller");
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index b9a6d7a5a739..366f5cc050ae 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1618,7 +1618,7 @@ static ssize_t ican3_sysfs_set_term(struct device *dev,
1618 return count; 1618 return count;
1619} 1619}
1620 1620
1621static DEVICE_ATTR(termination, S_IWUGO | S_IRUGO, ican3_sysfs_show_term, 1621static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term,
1622 ican3_sysfs_set_term); 1622 ican3_sysfs_set_term);
1623 1623
1624static struct attribute *ican3_sysfs_attrs[] = { 1624static struct attribute *ican3_sysfs_attrs[] = {
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 7ab534aee452..7513c4523ac4 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -940,7 +940,7 @@ static int mcp251x_open(struct net_device *net)
940 goto open_unlock; 940 goto open_unlock;
941 } 941 }
942 942
943 priv->wq = create_freezeable_workqueue("mcp251x_wq"); 943 priv->wq = create_freezable_workqueue("mcp251x_wq");
944 INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); 944 INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
945 INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); 945 INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
946 946
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
index 27d1d398e25e..d38706958af6 100644
--- a/drivers/net/can/mscan/Kconfig
+++ b/drivers/net/can/mscan/Kconfig
@@ -1,5 +1,5 @@
1config CAN_MSCAN 1config CAN_MSCAN
2 depends on CAN_DEV && (PPC || M68K || M68KNOMMU) 2 depends on CAN_DEV && (PPC || M68K)
3 tristate "Support for Freescale MSCAN based chips" 3 tristate "Support for Freescale MSCAN based chips"
4 ---help--- 4 ---help---
5 The Motorola Scalable Controller Area Network (MSCAN) definition 5 The Motorola Scalable Controller Area Network (MSCAN) definition
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index c42e97268248..e54712b22c27 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -185,7 +185,7 @@ struct pch_can_priv {
185 185
186static struct can_bittiming_const pch_can_bittiming_const = { 186static struct can_bittiming_const pch_can_bittiming_const = {
187 .name = KBUILD_MODNAME, 187 .name = KBUILD_MODNAME,
188 .tseg1_min = 1, 188 .tseg1_min = 2,
189 .tseg1_max = 16, 189 .tseg1_max = 16,
190 .tseg2_min = 1, 190 .tseg2_min = 1,
191 .tseg2_max = 8, 191 .tseg2_max = 8,
@@ -959,13 +959,13 @@ static void __devexit pch_can_remove(struct pci_dev *pdev)
959 struct pch_can_priv *priv = netdev_priv(ndev); 959 struct pch_can_priv *priv = netdev_priv(ndev);
960 960
961 unregister_candev(priv->ndev); 961 unregister_candev(priv->ndev);
962 pci_iounmap(pdev, priv->regs);
963 if (priv->use_msi) 962 if (priv->use_msi)
964 pci_disable_msi(priv->dev); 963 pci_disable_msi(priv->dev);
965 pci_release_regions(pdev); 964 pci_release_regions(pdev);
966 pci_disable_device(pdev); 965 pci_disable_device(pdev);
967 pci_set_drvdata(pdev, NULL); 966 pci_set_drvdata(pdev, NULL);
968 pch_can_reset(priv); 967 pch_can_reset(priv);
968 pci_iounmap(pdev, priv->regs);
969 free_candev(priv->ndev); 969 free_candev(priv->ndev);
970} 970}
971 971
@@ -1238,6 +1238,7 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
1238 priv->use_msi = 0; 1238 priv->use_msi = 0;
1239 } else { 1239 } else {
1240 netdev_err(ndev, "PCH CAN opened with MSI\n"); 1240 netdev_err(ndev, "PCH CAN opened with MSI\n");
1241 pci_set_master(pdev);
1241 priv->use_msi = 1; 1242 priv->use_msi = 1;
1242 } 1243 }
1243 1244
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig
new file mode 100644
index 000000000000..5de46a9a77bb
--- /dev/null
+++ b/drivers/net/can/softing/Kconfig
@@ -0,0 +1,30 @@
1config CAN_SOFTING
2 tristate "Softing Gmbh CAN generic support"
3 depends on CAN_DEV && HAS_IOMEM
4 ---help---
5	 Support for CAN cards from Softing GmbH and some cards
6	 from Vector GmbH.
7	 Softing GmbH CAN cards come with 1 or 2 physical buses.
8	 Those cards typically use dual-port RAM to communicate
9	 with the host CPU. The interface is then identical for PCI
10	 and PCMCIA cards. This driver operates on a platform device,
11	 which has been created by the softing_cs or softing_pci driver.
12	 Warning:
13	 The API of the card does not allow fine control per bus, but
14	 controls the 2 buses on the card together.
15	 As such, some actions (start/stop/busoff recovery) on 1 bus
16	 must temporarily bring down the other bus too.
17
18config CAN_SOFTING_CS
19 tristate "Softing Gmbh CAN pcmcia cards"
20 depends on PCMCIA
21 depends on CAN_SOFTING
22 ---help---
23	 Support for PCMCIA cards from Softing GmbH and some cards
24	 from Vector GmbH.
25	 You need firmware for these, which you can get at
26	 http://developer.berlios.de/projects/socketcan/
27	 This version of the driver is written against
28	 firmware version 4.6 (softing-fw-4.6-binaries.tar.gz).
29	 In order to use the card as a CAN device, you need the
30	 Softing generic support too.
diff --git a/drivers/net/can/softing/Makefile b/drivers/net/can/softing/Makefile
new file mode 100644
index 000000000000..c5e5016c742e
--- /dev/null
+++ b/drivers/net/can/softing/Makefile
@@ -0,0 +1,6 @@
1
2softing-y := softing_main.o softing_fw.o
3obj-$(CONFIG_CAN_SOFTING) += softing.o
4obj-$(CONFIG_CAN_SOFTING_CS) += softing_cs.o
5
6ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/softing/softing.h b/drivers/net/can/softing/softing.h
new file mode 100644
index 000000000000..7ec9f4db3d52
--- /dev/null
+++ b/drivers/net/can/softing/softing.h
@@ -0,0 +1,167 @@
1/*
2 * softing common interfaces
3 *
4 * by Kurt Van Dijck, 2008-2010
5 */
6
7#include <linux/atomic.h>
8#include <linux/netdevice.h>
9#include <linux/ktime.h>
10#include <linux/mutex.h>
11#include <linux/spinlock.h>
12#include <linux/can.h>
13#include <linux/can/dev.h>
14
15#include "softing_platform.h"
16
17struct softing;
18
19struct softing_priv {
20 struct can_priv can; /* must be the first member! */
21 struct net_device *netdev;
22 struct softing *card;
23 struct {
24 int pending;
25		/* variables which hold the circular buffer */
26 int echo_put;
27 int echo_get;
28 } tx;
29 struct can_bittiming_const btr_const;
30 int index;
31 uint8_t output;
32 uint16_t chip;
33};
34#define netdev2softing(netdev) ((struct softing_priv *)netdev_priv(netdev))
35
36struct softing {
37 const struct softing_platform_data *pdat;
38 struct platform_device *pdev;
39 struct net_device *net[2];
40 spinlock_t spin; /* protect this structure & DPRAM access */
41 ktime_t ts_ref;
42 ktime_t ts_overflow; /* timestamp overflow value, in ktime */
43
44 struct {
45 /* indication of firmware status */
46 int up;
47 /* protection of the 'up' variable */
48 struct mutex lock;
49 } fw;
50 struct {
51 int nr;
52 int requested;
53 int svc_count;
54 unsigned int dpram_position;
55 } irq;
56 struct {
57 int pending;
58 int last_bus;
59 /*
60		 * remember the bus that last tx'd a message,
61		 * in order to give every netdev queue a chance to resume
62 */
63 } tx;
64 __iomem uint8_t *dpram;
65 unsigned long dpram_phys;
66 unsigned long dpram_size;
67 struct {
68 uint16_t fw_version, hw_version, license, serial;
69 uint16_t chip[2];
70 unsigned int freq; /* remote cpu's operating frequency */
71 } id;
72};
73
74extern int softing_default_output(struct net_device *netdev);
75
76extern ktime_t softing_raw2ktime(struct softing *card, u32 raw);
77
78extern int softing_chip_poweron(struct softing *card);
79
80extern int softing_bootloader_command(struct softing *card, int16_t cmd,
81 const char *msg);
82
83/* Load firmware after reset */
84extern int softing_load_fw(const char *file, struct softing *card,
85 __iomem uint8_t *virt, unsigned int size, int offset);
86
87/* Load final application firmware after bootloader */
88extern int softing_load_app_fw(const char *file, struct softing *card);
89
90/*
91 * enable or disable irq
92 * only called with fw.lock locked
93 */
94extern int softing_enable_irq(struct softing *card, int enable);
95
96/* start/stop 1 bus on card */
97extern int softing_startstop(struct net_device *netdev, int up);
98
99/* netif_rx() */
100extern int softing_netdev_rx(struct net_device *netdev,
101 const struct can_frame *msg, ktime_t ktime);
102
103/* SOFTING DPRAM mappings */
104#define DPRAM_RX 0x0000
105 #define DPRAM_RX_SIZE 32
106 #define DPRAM_RX_CNT 16
107#define DPRAM_RX_RD 0x0201 /* uint8_t */
108#define DPRAM_RX_WR 0x0205 /* uint8_t */
109#define DPRAM_RX_LOST 0x0207 /* uint8_t */
110
111#define DPRAM_FCT_PARAM 0x0300 /* int16_t [20] */
112#define DPRAM_FCT_RESULT 0x0328 /* int16_t */
113#define DPRAM_FCT_HOST 0x032b /* uint16_t */
114
115#define DPRAM_INFO_BUSSTATE 0x0331 /* uint16_t */
116#define DPRAM_INFO_BUSSTATE2 0x0335 /* uint16_t */
117#define DPRAM_INFO_ERRSTATE 0x0339 /* uint16_t */
118#define DPRAM_INFO_ERRSTATE2 0x033d /* uint16_t */
119#define DPRAM_RESET 0x0341 /* uint16_t */
120#define DPRAM_CLR_RECV_FIFO 0x0345 /* uint16_t */
121#define DPRAM_RESET_TIME 0x034d /* uint16_t */
122#define DPRAM_TIME 0x0350 /* uint64_t */
123#define DPRAM_WR_START 0x0358 /* uint8_t */
124#define DPRAM_WR_END 0x0359 /* uint8_t */
125#define DPRAM_RESET_RX_FIFO 0x0361 /* uint16_t */
126#define DPRAM_RESET_TX_FIFO 0x0364 /* uint8_t */
127#define DPRAM_READ_FIFO_LEVEL 0x0365 /* uint8_t */
128#define DPRAM_RX_FIFO_LEVEL 0x0366 /* uint16_t */
129#define DPRAM_TX_FIFO_LEVEL 0x0366 /* uint16_t */
130
131#define DPRAM_TX 0x0400 /* uint16_t */
132 #define DPRAM_TX_SIZE 16
133 #define DPRAM_TX_CNT 32
134#define DPRAM_TX_RD 0x0601 /* uint8_t */
135#define DPRAM_TX_WR 0x0605 /* uint8_t */
136
137#define DPRAM_COMMAND 0x07e0 /* uint16_t */
138#define DPRAM_RECEIPT 0x07f0 /* uint16_t */
139#define DPRAM_IRQ_TOHOST 0x07fe /* uint8_t */
140#define DPRAM_IRQ_TOCARD 0x07ff /* uint8_t */
141
142#define DPRAM_V2_RESET 0x0e00 /* uint8_t */
143#define DPRAM_V2_IRQ_TOHOST 0x0e02 /* uint8_t */
144
145#define TXMAX (DPRAM_TX_CNT - 1)
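/*
 * Note (an assumption based on the circular-buffer indices above):
 * TXMAX keeps one of the DPRAM_TX_CNT slots unused so that a full
 * TX fifo can be told apart from an empty one.
 */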
146
147/* DPRAM return codes */
148#define RES_NONE 0
149#define RES_OK 1
150#define RES_NOK 2
151#define RES_UNKNOWN 3
152/* DPRAM flags */
153#define CMD_TX 0x01
154#define CMD_ACK 0x02
155#define CMD_XTD 0x04
156#define CMD_RTR 0x08
157#define CMD_ERR 0x10
158#define CMD_BUS2 0x80
159
160/* returned fifo entry bus state masks */
161#define SF_MASK_BUSOFF 0x80
162#define SF_MASK_EPASSIVE 0x60
163
164/* bus states */
165#define STATE_BUSOFF 2
166#define STATE_EPASSIVE 1
167#define STATE_EACTIVE 0
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c
new file mode 100644
index 000000000000..c11bb4de8630
--- /dev/null
+++ b/drivers/net/can/softing/softing_cs.c
@@ -0,0 +1,360 @@
1/*
2 * Copyright (C) 2008-2010
3 *
4 * - Kurt Van Dijck, EIA Electronics
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the version 2 of the GNU General Public License
8 * as published by the Free Software Foundation
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/module.h>
21#include <linux/kernel.h>
22#include <linux/slab.h>
23
24#include <pcmcia/cistpl.h>
25#include <pcmcia/ds.h>
26
27#include "softing_platform.h"
28
29static int softingcs_index;
30static DEFINE_SPINLOCK(softingcs_index_lock);
31
32static int softingcs_reset(struct platform_device *pdev, int v);
33static int softingcs_enable_irq(struct platform_device *pdev, int v);
34
35/*
36 * platform_data descriptions
37 */
38#define MHZ (1000*1000)
39static const struct softing_platform_data softingcs_platform_data[] = {
40{
41 .name = "CANcard",
42 .manf = 0x0168, .prod = 0x001,
43 .generation = 1,
44 .nbus = 2,
45 .freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4,
46 .dpram_size = 0x0800,
47 .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
48 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
49 .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
50 .reset = softingcs_reset,
51 .enable_irq = softingcs_enable_irq,
52}, {
53 .name = "CANcard-NEC",
54 .manf = 0x0168, .prod = 0x002,
55 .generation = 1,
56 .nbus = 2,
57 .freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4,
58 .dpram_size = 0x0800,
59 .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
60 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
61 .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
62 .reset = softingcs_reset,
63 .enable_irq = softingcs_enable_irq,
64}, {
65 .name = "CANcard-SJA",
66 .manf = 0x0168, .prod = 0x004,
67 .generation = 1,
68 .nbus = 2,
69 .freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4,
70 .dpram_size = 0x0800,
71 .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
72 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
73 .app = {0x0010, 0x0d0000, fw_dir "cansja.bin",},
74 .reset = softingcs_reset,
75 .enable_irq = softingcs_enable_irq,
76}, {
77 .name = "CANcard-2",
78 .manf = 0x0168, .prod = 0x005,
79 .generation = 2,
80 .nbus = 2,
81 .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
82 .dpram_size = 0x1000,
83 .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
84 .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
85 .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
86 .reset = softingcs_reset,
87 .enable_irq = NULL,
88}, {
89 .name = "Vector-CANcard",
90 .manf = 0x0168, .prod = 0x081,
91 .generation = 1,
92 .nbus = 2,
93 .freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4,
94 .dpram_size = 0x0800,
95 .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
96 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
97 .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
98 .reset = softingcs_reset,
99 .enable_irq = softingcs_enable_irq,
100}, {
101 .name = "Vector-CANcard-SJA",
102 .manf = 0x0168, .prod = 0x084,
103 .generation = 1,
104 .nbus = 2,
105 .freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4,
106 .dpram_size = 0x0800,
107 .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
108 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
109 .app = {0x0010, 0x0d0000, fw_dir "cansja.bin",},
110 .reset = softingcs_reset,
111 .enable_irq = softingcs_enable_irq,
112}, {
113 .name = "Vector-CANcard-2",
114 .manf = 0x0168, .prod = 0x085,
115 .generation = 2,
116 .nbus = 2,
117 .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
118 .dpram_size = 0x1000,
119 .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
120 .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
121 .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
122 .reset = softingcs_reset,
123 .enable_irq = NULL,
124}, {
125 .name = "EDICcard-NEC",
126 .manf = 0x0168, .prod = 0x102,
127 .generation = 1,
128 .nbus = 2,
129 .freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4,
130 .dpram_size = 0x0800,
131 .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
132 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
133 .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
134 .reset = softingcs_reset,
135 .enable_irq = softingcs_enable_irq,
136}, {
137 .name = "EDICcard-2",
138 .manf = 0x0168, .prod = 0x105,
139 .generation = 2,
140 .nbus = 2,
141 .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
142 .dpram_size = 0x1000,
143 .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
144 .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
145 .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
146 .reset = softingcs_reset,
147 .enable_irq = NULL,
148}, {
149 0, 0,
150},
151};
152
153MODULE_FIRMWARE(fw_dir "bcard.bin");
154MODULE_FIRMWARE(fw_dir "ldcard.bin");
155MODULE_FIRMWARE(fw_dir "cancard.bin");
156MODULE_FIRMWARE(fw_dir "cansja.bin");
157
158MODULE_FIRMWARE(fw_dir "bcard2.bin");
159MODULE_FIRMWARE(fw_dir "ldcard2.bin");
160MODULE_FIRMWARE(fw_dir "cancrd2.bin");
161
162static __devinit const struct softing_platform_data
163*softingcs_find_platform_data(unsigned int manf, unsigned int prod)
164{
165 const struct softing_platform_data *lp;
166
167 for (lp = softingcs_platform_data; lp->manf; ++lp) {
168 if ((lp->manf == manf) && (lp->prod == prod))
169 return lp;
170 }
171 return NULL;
172}
173
174/*
175 * platformdata callbacks
176 */
177static int softingcs_reset(struct platform_device *pdev, int v)
178{
179 struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent);
180
181 dev_dbg(&pdev->dev, "pcmcia config [2] %02x\n", v ? 0 : 0x20);
182 return pcmcia_write_config_byte(pcmcia, 2, v ? 0 : 0x20);
183}
184
185static int softingcs_enable_irq(struct platform_device *pdev, int v)
186{
187 struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent);
188
189 dev_dbg(&pdev->dev, "pcmcia config [0] %02x\n", v ? 0x60 : 0);
190 return pcmcia_write_config_byte(pcmcia, 0, v ? 0x60 : 0);
191}
192
193/*
194 * pcmcia check
195 */
196static __devinit int softingcs_probe_config(struct pcmcia_device *pcmcia,
197 void *priv_data)
198{
199 struct softing_platform_data *pdat = priv_data;
200 struct resource *pres;
201 int memspeed = 0;
202
203 WARN_ON(!pdat);
204 pres = pcmcia->resource[PCMCIA_IOMEM_0];
205 if (resource_size(pres) < 0x1000)
206 return -ERANGE;
207
208 pres->flags |= WIN_MEMORY_TYPE_CM | WIN_ENABLE;
209 if (pdat->generation < 2) {
210 pres->flags |= WIN_USE_WAIT | WIN_DATA_WIDTH_8;
211 memspeed = 3;
212 } else {
213 pres->flags |= WIN_DATA_WIDTH_16;
214 }
215 return pcmcia_request_window(pcmcia, pres, memspeed);
216}
217
218static __devexit void softingcs_remove(struct pcmcia_device *pcmcia)
219{
220 struct platform_device *pdev = pcmcia->priv;
221
222	/* unregister; the release callback frees the wrapper */
223 platform_device_unregister(pdev);
224 /* release pcmcia stuff */
225 pcmcia_disable_device(pcmcia);
226}
227
228/*
229 * platform_device wrapper
230 * pdev->resource has 2 entries: io & irq
231 */
232static void softingcs_pdev_release(struct device *dev)
233{
234 struct platform_device *pdev = to_platform_device(dev);
235 kfree(pdev);
236}
237
238static __devinit int softingcs_probe(struct pcmcia_device *pcmcia)
239{
240 int ret;
241 struct platform_device *pdev;
242 const struct softing_platform_data *pdat;
243 struct resource *pres;
244 struct dev {
245 struct platform_device pdev;
246 struct resource res[2];
247 } *dev;
248
249 /* find matching platform_data */
250 pdat = softingcs_find_platform_data(pcmcia->manf_id, pcmcia->card_id);
251 if (!pdat)
252 return -ENOTTY;
253
254 /* setup pcmcia device */
255 pcmcia->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IOMEM |
256 CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC;
257 ret = pcmcia_loop_config(pcmcia, softingcs_probe_config, (void *)pdat);
258 if (ret)
259 goto pcmcia_failed;
260
261 ret = pcmcia_enable_device(pcmcia);
262 if (ret < 0)
263 goto pcmcia_failed;
264
265 pres = pcmcia->resource[PCMCIA_IOMEM_0];
266 if (!pres) {
267 ret = -EBADF;
268 goto pcmcia_bad;
269 }
270
271 /* create softing platform device */
272 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
273 if (!dev) {
274 ret = -ENOMEM;
275 goto mem_failed;
276 }
277 dev->pdev.resource = dev->res;
278 dev->pdev.num_resources = ARRAY_SIZE(dev->res);
279 dev->pdev.dev.release = softingcs_pdev_release;
280
281 pdev = &dev->pdev;
282 pdev->dev.platform_data = (void *)pdat;
283 pdev->dev.parent = &pcmcia->dev;
284 pcmcia->priv = pdev;
285
286 /* platform device resources */
287 pdev->resource[0].flags = IORESOURCE_MEM;
288 pdev->resource[0].start = pres->start;
289 pdev->resource[0].end = pres->end;
290
291 pdev->resource[1].flags = IORESOURCE_IRQ;
292 pdev->resource[1].start = pcmcia->irq;
293 pdev->resource[1].end = pdev->resource[1].start;
294
295 /* platform device setup */
296 spin_lock(&softingcs_index_lock);
297 pdev->id = softingcs_index++;
298 spin_unlock(&softingcs_index_lock);
299 pdev->name = "softing";
300 dev_set_name(&pdev->dev, "softingcs.%i", pdev->id);
301 ret = platform_device_register(pdev);
302 if (ret < 0)
303 goto platform_failed;
304
305 dev_info(&pcmcia->dev, "created %s\n", dev_name(&pdev->dev));
306 return 0;
307
308platform_failed:
309 kfree(dev);
310mem_failed:
311pcmcia_bad:
312pcmcia_failed:
313 pcmcia_disable_device(pcmcia);
314 pcmcia->priv = NULL;
315 return ret ?: -ENODEV;
316}
317
318static /*const*/ struct pcmcia_device_id softingcs_ids[] = {
319 /* softing */
320 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0001),
321 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0002),
322 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0004),
323 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0005),
324 /* vector, manufacturer? */
325 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0081),
326 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0084),
327 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0085),
328 /* EDIC */
329 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0102),
330 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0105),
331 PCMCIA_DEVICE_NULL,
332};
333
334MODULE_DEVICE_TABLE(pcmcia, softingcs_ids);
335
336static struct pcmcia_driver softingcs_driver = {
337 .owner = THIS_MODULE,
338 .name = "softingcs",
339 .id_table = softingcs_ids,
340 .probe = softingcs_probe,
341 .remove = __devexit_p(softingcs_remove),
342};
343
344static int __init softingcs_start(void)
345{
346 spin_lock_init(&softingcs_index_lock);
347 return pcmcia_register_driver(&softingcs_driver);
348}
349
350static void __exit softingcs_stop(void)
351{
352 pcmcia_unregister_driver(&softingcs_driver);
353}
354
355module_init(softingcs_start);
356module_exit(softingcs_stop);
357
358MODULE_DESCRIPTION("softing CANcard driver"
359 ", links PCMCIA card to softing driver");
360MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
new file mode 100644
index 000000000000..b520784fb197
--- /dev/null
+++ b/drivers/net/can/softing/softing_fw.c
@@ -0,0 +1,691 @@
1/*
2 * Copyright (C) 2008-2010
3 *
4 * - Kurt Van Dijck, EIA Electronics
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the version 2 of the GNU General Public License
8 * as published by the Free Software Foundation
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/firmware.h>
21#include <linux/sched.h>
22#include <asm/div64.h>
23
24#include "softing.h"
25
26/*
27 * low level DPRAM command.
28 * Make sure that card->dpram[DPRAM_FCT_HOST] is preset
29 */
30static int _softing_fct_cmd(struct softing *card, int16_t cmd, uint16_t vector,
31 const char *msg)
32{
33 int ret;
34 unsigned long stamp;
35
36 iowrite16(cmd, &card->dpram[DPRAM_FCT_PARAM]);
37 iowrite8(vector >> 8, &card->dpram[DPRAM_FCT_HOST + 1]);
38 iowrite8(vector, &card->dpram[DPRAM_FCT_HOST]);
39 /* be sure to flush this to the card */
40 wmb();
41 stamp = jiffies + 1 * HZ;
42 /* wait for card */
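	/*
	 * while busy, DPRAM_FCT_HOST keeps echoing the vector written
	 * above; the card replaces it with RES_OK on completion, and
	 * any other change (or a timeout) aborts the wait
	 */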
43 do {
44 /* DPRAM_FCT_HOST is _not_ aligned */
45 ret = ioread8(&card->dpram[DPRAM_FCT_HOST]) +
46 (ioread8(&card->dpram[DPRAM_FCT_HOST + 1]) << 8);
47 /* don't have any cached variables */
48 rmb();
49 if (ret == RES_OK)
50 /* read return-value now */
51 return ioread16(&card->dpram[DPRAM_FCT_RESULT]);
52
53 if ((ret != vector) || time_after(jiffies, stamp))
54 break;
55 /* process context => relax */
56 usleep_range(500, 10000);
57 } while (1);
58
59 ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED;
60 dev_alert(&card->pdev->dev, "firmware %s failed (%i)\n", msg, ret);
61 return ret;
62}
63
64static int softing_fct_cmd(struct softing *card, int16_t cmd, const char *msg)
65{
66 int ret;
67
68 ret = _softing_fct_cmd(card, cmd, 0, msg);
69 if (ret > 0) {
70 dev_alert(&card->pdev->dev, "%s returned %u\n", msg, ret);
71 ret = -EIO;
72 }
73 return ret;
74}
75
76int softing_bootloader_command(struct softing *card, int16_t cmd,
77 const char *msg)
78{
79 int ret;
80 unsigned long stamp;
81
82 iowrite16(RES_NONE, &card->dpram[DPRAM_RECEIPT]);
83 iowrite16(cmd, &card->dpram[DPRAM_COMMAND]);
84 /* be sure to flush this to the card */
85 wmb();
86 stamp = jiffies + 3 * HZ;
87 /* wait for card */
88 do {
89 ret = ioread16(&card->dpram[DPRAM_RECEIPT]);
90 /* don't have any cached variables */
91 rmb();
92 if (ret == RES_OK)
93 return 0;
94 if (time_after(jiffies, stamp))
95 break;
96 /* process context => relax */
97 usleep_range(500, 10000);
98 } while (!signal_pending(current));
99
100 ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED;
101 dev_alert(&card->pdev->dev, "bootloader %s failed (%i)\n", msg, ret);
102 return ret;
103}
104
105static int fw_parse(const uint8_t **pmem, uint16_t *ptype, uint32_t *paddr,
106 uint16_t *plen, const uint8_t **pdat)
107{
108 uint16_t checksum[2];
109 const uint8_t *mem;
110 const uint8_t *end;
111
112 /*
113 * firmware records are a binary, unaligned stream composed of:
114 * uint16_t type;
115 * uint32_t addr;
116 * uint16_t len;
117 * uint8_t dat[len];
118 * uint16_t checksum;
119 * all values in little endian.
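	 * e.g. a hypothetical data record of 4 bytes to address 0x1000:
	 *   00 00 | 00 10 00 00 | 04 00 | de ad be ef | 4c 03
	 * (0x034c is the 16-bit sum of all bytes before the checksum)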
120 * We could define a struct for this, with __attribute__((packed)),
121 * but would that solve the alignment in _all_ cases (cfr. the
122 * struct itself may be an odd address)?
123 *
124 * I chose to use leXX_to_cpup() since this solves both
125 * endianness & alignment.
126 */
127 mem = *pmem;
128 *ptype = le16_to_cpup((void *)&mem[0]);
129 *paddr = le32_to_cpup((void *)&mem[2]);
130 *plen = le16_to_cpup((void *)&mem[6]);
131 *pdat = &mem[8];
132 /* verify checksum */
133 end = &mem[8 + *plen];
134 checksum[0] = le16_to_cpup((void *)end);
135 for (checksum[1] = 0; mem < end; ++mem)
136 checksum[1] += *mem;
137 if (checksum[0] != checksum[1])
138 return -EINVAL;
139 /* increment */
140 *pmem += 10 + *plen;
141 return 0;
142}
143
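/*
 * load a firmware image through the DPRAM window.
 * @offset translates record addresses (card address space) into window
 * offsets; callers pass (boot|load).offs - (boot|load).addr
 */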
144int softing_load_fw(const char *file, struct softing *card,
145 __iomem uint8_t *dpram, unsigned int size, int offset)
146{
147 const struct firmware *fw;
148 int ret;
149 const uint8_t *mem, *end, *dat;
150 uint16_t type, len;
151 uint32_t addr;
152 uint8_t *buf = NULL;
153 int buflen = 0;
154 int8_t type_end = 0;
155
156 ret = request_firmware(&fw, file, &card->pdev->dev);
157 if (ret < 0)
158 return ret;
159 dev_dbg(&card->pdev->dev, "%s, firmware(%s) got %u bytes"
160 ", offset %c0x%04x\n",
161 card->pdat->name, file, (unsigned int)fw->size,
162 (offset >= 0) ? '+' : '-', (unsigned int)abs(offset));
163 /* parse the firmware */
164 mem = fw->data;
165 end = &mem[fw->size];
166 /* look for header record */
167 ret = fw_parse(&mem, &type, &addr, &len, &dat);
168 if (ret < 0)
169 goto failed;
170 if (type != 0xffff)
171 goto failed;
172	if (strncmp("Structured Binary Format, Softing GmbH", dat, len)) {
173 ret = -EINVAL;
174 goto failed;
175 }
176 /* ok, we had a header */
177 while (mem < end) {
178 ret = fw_parse(&mem, &type, &addr, &len, &dat);
179 if (ret < 0)
180 goto failed;
181 if (type == 3) {
182 /* start address, not used here */
183 continue;
184 } else if (type == 1) {
185 /* eof */
186 type_end = 1;
187 break;
188 } else if (type != 0) {
189 ret = -EINVAL;
190 goto failed;
191 }
192
193 if ((addr + len + offset) > size)
194 goto failed;
195 memcpy_toio(&dpram[addr + offset], dat, len);
196 /* be sure to flush caches from IO space */
197 mb();
198 if (len > buflen) {
199 /* align buflen */
200 buflen = (len + (1024-1)) & ~(1024-1);
201 buf = krealloc(buf, buflen, GFP_KERNEL);
202 if (!buf) {
203 ret = -ENOMEM;
204 goto failed;
205 }
206 }
207 /* verify record data */
208 memcpy_fromio(buf, &dpram[addr + offset], len);
209 if (memcmp(buf, dat, len)) {
210 /* is not ok */
211 dev_alert(&card->pdev->dev, "DPRAM readback failed\n");
212 ret = -EIO;
213 goto failed;
214 }
215 }
216 if (!type_end)
217 /* no end record seen */
218 goto failed;
219 ret = 0;
220failed:
221 kfree(buf);
222 release_firmware(fw);
223 if (ret < 0)
224 dev_info(&card->pdev->dev, "firmware %s failed\n", file);
225 return ret;
226}
227
228int softing_load_app_fw(const char *file, struct softing *card)
229{
230 const struct firmware *fw;
231 const uint8_t *mem, *end, *dat;
232 int ret, j;
233 uint16_t type, len;
234 uint32_t addr, start_addr = 0;
235 unsigned int sum, rx_sum;
236 int8_t type_end = 0, type_entrypoint = 0;
237
238 ret = request_firmware(&fw, file, &card->pdev->dev);
239 if (ret) {
240 dev_alert(&card->pdev->dev, "request_firmware(%s) got %i\n",
241 file, ret);
242 return ret;
243 }
244 dev_dbg(&card->pdev->dev, "firmware(%s) got %lu bytes\n",
245 file, (unsigned long)fw->size);
246 /* parse the firmware */
247 mem = fw->data;
248 end = &mem[fw->size];
249 /* look for header record */
250 ret = fw_parse(&mem, &type, &addr, &len, &dat);
251 if (ret)
252 goto failed;
253 ret = -EINVAL;
254 if (type != 0xffff) {
255 dev_alert(&card->pdev->dev, "firmware starts with type 0x%x\n",
256 type);
257 goto failed;
258 }
259 if (strncmp("Structured Binary Format, Softing GmbH", dat, len)) {
260 dev_alert(&card->pdev->dev, "firmware string '%.*s' fault\n",
261 len, dat);
262 goto failed;
263 }
264 /* ok, we had a header */
265 while (mem < end) {
266 ret = fw_parse(&mem, &type, &addr, &len, &dat);
267 if (ret)
268 goto failed;
269
270 if (type == 3) {
271 /* start address */
272 start_addr = addr;
273 type_entrypoint = 1;
274 continue;
275 } else if (type == 1) {
276 /* eof */
277 type_end = 1;
278 break;
279 } else if (type != 0) {
280 dev_alert(&card->pdev->dev,
281 "unknown record type 0x%04x\n", type);
282 ret = -EINVAL;
283 goto failed;
284 }
285
286		/* regular data */
287 for (sum = 0, j = 0; j < len; ++j)
288 sum += dat[j];
289 /* work in 16bit (target) */
290 sum &= 0xffff;
291
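		/*
		 * bootloader command 1 copies the chunk: +2 holds the
		 * card-side address of the DPRAM staging buffer, +6 the
		 * destination address, +10 the length; the flag at +12
		 * presumably requests the checksum that is verified via
		 * DPRAM_RECEIPT + 2 below
		 */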
292 memcpy_toio(&card->dpram[card->pdat->app.offs], dat, len);
293 iowrite32(card->pdat->app.offs + card->pdat->app.addr,
294 &card->dpram[DPRAM_COMMAND + 2]);
295 iowrite32(addr, &card->dpram[DPRAM_COMMAND + 6]);
296 iowrite16(len, &card->dpram[DPRAM_COMMAND + 10]);
297 iowrite8(1, &card->dpram[DPRAM_COMMAND + 12]);
298 ret = softing_bootloader_command(card, 1, "loading app.");
299 if (ret < 0)
300 goto failed;
301 /* verify checksum */
302 rx_sum = ioread16(&card->dpram[DPRAM_RECEIPT + 2]);
303 if (rx_sum != sum) {
304 dev_alert(&card->pdev->dev, "SRAM seems to be damaged"
305 ", wanted 0x%04x, got 0x%04x\n", sum, rx_sum);
306 ret = -EIO;
307 goto failed;
308 }
309 }
310 if (!type_end || !type_entrypoint)
311 goto failed;
312 /* start application in card */
313 iowrite32(start_addr, &card->dpram[DPRAM_COMMAND + 2]);
314 iowrite8(1, &card->dpram[DPRAM_COMMAND + 6]);
315 ret = softing_bootloader_command(card, 3, "start app.");
316 if (ret < 0)
317 goto failed;
318 ret = 0;
319failed:
320 release_firmware(fw);
321 if (ret < 0)
322 dev_info(&card->pdev->dev, "firmware %s failed\n", file);
323 return ret;
324}
325
326static int softing_reset_chip(struct softing *card)
327{
328 int ret;
329
330 do {
331 /* reset chip */
332 iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO]);
333 iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO+1]);
334 iowrite8(1, &card->dpram[DPRAM_RESET]);
335 iowrite8(0, &card->dpram[DPRAM_RESET+1]);
336
337 ret = softing_fct_cmd(card, 0, "reset_can");
338 if (!ret)
339 break;
340 if (signal_pending(current))
341 /* don't wait any longer */
342 break;
343 } while (1);
344 card->tx.pending = 0;
345 return ret;
346}
347
348int softing_chip_poweron(struct softing *card)
349{
350 int ret;
351 /* sync */
352 ret = _softing_fct_cmd(card, 99, 0x55, "sync-a");
353 if (ret < 0)
354 goto failed;
355
356 ret = _softing_fct_cmd(card, 99, 0xaa, "sync-b");
357 if (ret < 0)
358 goto failed;
359
360 ret = softing_reset_chip(card);
361 if (ret < 0)
362 goto failed;
363 /* get_serial */
364 ret = softing_fct_cmd(card, 43, "get_serial_number");
365 if (ret < 0)
366 goto failed;
367 card->id.serial = ioread32(&card->dpram[DPRAM_FCT_PARAM]);
368 /* get_version */
369 ret = softing_fct_cmd(card, 12, "get_version");
370 if (ret < 0)
371 goto failed;
372 card->id.fw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 2]);
373 card->id.hw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 4]);
374 card->id.license = ioread16(&card->dpram[DPRAM_FCT_PARAM + 6]);
375 card->id.chip[0] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 8]);
376 card->id.chip[1] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 10]);
377 return 0;
378failed:
379 return ret;
380}
381
382static void softing_initialize_timestamp(struct softing *card)
383{
384 uint64_t ovf;
385
386 card->ts_ref = ktime_get();
387
388 /* 16MHz is the reference */
389 ovf = 0x100000000ULL * 16;
390 do_div(ovf, card->pdat->freq ?: 16);
391
392 card->ts_overflow = ktime_add_us(ktime_set(0, 0), ovf);
393}
394
395ktime_t softing_raw2ktime(struct softing *card, u32 raw)
396{
397 uint64_t rawl;
398 ktime_t now, real_offset;
399 ktime_t target;
400 ktime_t tmp;
401
402 now = ktime_get();
403 real_offset = ktime_sub(ktime_get_real(), now);
404
405 /* find nsec from card */
406 rawl = raw * 16;
407 do_div(rawl, card->pdat->freq ?: 16);
408 target = ktime_add_us(card->ts_ref, rawl);
409	/*
	 * test for overflows: the card counter is only 32 bits wide, so
	 * when even target + one overflow period lies in the past, the
	 * counter has wrapped; advance ts_ref by whole periods
	 */
410	tmp = ktime_add(target, card->ts_overflow);
411	while (unlikely(ktime_to_ns(tmp) < ktime_to_ns(now))) {
412 card->ts_ref = ktime_add(card->ts_ref, card->ts_overflow);
413 target = tmp;
414 tmp = ktime_add(target, card->ts_overflow);
415 }
416 return ktime_add(target, real_offset);
417}
418
419static inline int softing_error_reporting(struct net_device *netdev)
420{
421 struct softing_priv *priv = netdev_priv(netdev);
422
423 return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
424 ? 1 : 0;
425}
426
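/*
 * bring the card down, then restart every bus that was running
 * (plus @dev itself when @up is set); requires loaded firmware and
 * takes card->fw.lock
 */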
427int softing_startstop(struct net_device *dev, int up)
428{
429 int ret;
430 struct softing *card;
431 struct softing_priv *priv;
432 struct net_device *netdev;
433 int bus_bitmask_start;
434 int j, error_reporting;
435 struct can_frame msg;
436 const struct can_bittiming *bt;
437
438 priv = netdev_priv(dev);
439 card = priv->card;
440
441 if (!card->fw.up)
442 return -EIO;
443
444 ret = mutex_lock_interruptible(&card->fw.lock);
445 if (ret)
446 return ret;
447
448 bus_bitmask_start = 0;
449 if (dev && up)
450 /* prepare to start this bus as well */
451 bus_bitmask_start |= (1 << priv->index);
452 /* bring netdevs down */
453 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
454 netdev = card->net[j];
455 if (!netdev)
456 continue;
457 priv = netdev_priv(netdev);
458
459 if (dev != netdev)
460 netif_stop_queue(netdev);
461
462 if (netif_running(netdev)) {
463 if (dev != netdev)
464 bus_bitmask_start |= (1 << j);
465 priv->tx.pending = 0;
466 priv->tx.echo_put = 0;
467 priv->tx.echo_get = 0;
468 /*
469			 * this bus may just have called open_candev(),
470			 * in which case calling close_candev() right away
471			 * seems odd;
472			 * but we may also get here from busoff recovery,
473			 * where the echo_skb _needs_ flushing too.
474			 * just be sure to call open_candev() again
475 */
476 close_candev(netdev);
477 }
478 priv->can.state = CAN_STATE_STOPPED;
479 }
480 card->tx.pending = 0;
481
482 softing_enable_irq(card, 0);
483 ret = softing_reset_chip(card);
484 if (ret)
485 goto failed;
486 if (!bus_bitmask_start)
487 /* no busses to be brought up */
488 goto card_done;
489
490 if ((bus_bitmask_start & 1) && (bus_bitmask_start & 2)
491 && (softing_error_reporting(card->net[0])
492 != softing_error_reporting(card->net[1]))) {
493 dev_alert(&card->pdev->dev,
494 "err_reporting flag differs for busses\n");
495 goto invalid;
496 }
497 error_reporting = 0;
498 if (bus_bitmask_start & 1) {
499 netdev = card->net[0];
500 priv = netdev_priv(netdev);
501 error_reporting += softing_error_reporting(netdev);
502 /* init chip 1 */
503 bt = &priv->can.bittiming;
504 iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]);
505 iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]);
506 iowrite16(bt->phase_seg1 + bt->prop_seg,
507 &card->dpram[DPRAM_FCT_PARAM + 6]);
508 iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]);
509 iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0,
510 &card->dpram[DPRAM_FCT_PARAM + 10]);
511 ret = softing_fct_cmd(card, 1, "initialize_chip[0]");
512 if (ret < 0)
513 goto failed;
514 /* set mode */
515 iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]);
516 iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]);
517 ret = softing_fct_cmd(card, 3, "set_mode[0]");
518 if (ret < 0)
519 goto failed;
520 /* set filter */
521 /* 11bit id & mask */
522 iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]);
523 iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]);
524 /* 29bit id.lo & mask.lo & id.hi & mask.hi */
525 iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]);
526 iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]);
527 iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]);
528 iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]);
529 ret = softing_fct_cmd(card, 7, "set_filter[0]");
530 if (ret < 0)
531 goto failed;
532 /* set output control */
533 iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]);
534 ret = softing_fct_cmd(card, 5, "set_output[0]");
535 if (ret < 0)
536 goto failed;
537 }
538 if (bus_bitmask_start & 2) {
539 netdev = card->net[1];
540 priv = netdev_priv(netdev);
541 error_reporting += softing_error_reporting(netdev);
542 /* init chip2 */
543 bt = &priv->can.bittiming;
544 iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]);
545 iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]);
546 iowrite16(bt->phase_seg1 + bt->prop_seg,
547 &card->dpram[DPRAM_FCT_PARAM + 6]);
548 iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]);
549 iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0,
550 &card->dpram[DPRAM_FCT_PARAM + 10]);
551 ret = softing_fct_cmd(card, 2, "initialize_chip[1]");
552 if (ret < 0)
553 goto failed;
554 /* set mode2 */
555 iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]);
556 iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]);
557 ret = softing_fct_cmd(card, 4, "set_mode[1]");
558 if (ret < 0)
559 goto failed;
560 /* set filter2 */
561 /* 11bit id & mask */
562 iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]);
563 iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]);
564 /* 29bit id.lo & mask.lo & id.hi & mask.hi */
565 iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]);
566 iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]);
567 iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]);
568 iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]);
569 ret = softing_fct_cmd(card, 8, "set_filter[1]");
570 if (ret < 0)
571 goto failed;
572 /* set output control2 */
573 iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]);
574 ret = softing_fct_cmd(card, 6, "set_output[1]");
575 if (ret < 0)
576 goto failed;
577 }
578 /* enable_error_frame */
579 /*
580	 * Error reporting is switched off at the moment since
581	 * receiving error frames is not yet 100% verified.
582	 * This should be enabled sooner or later.
583 *
584 if (error_reporting) {
585 ret = softing_fct_cmd(card, 51, "enable_error_frame");
586 if (ret < 0)
587 goto failed;
588 }
589 */
590 /* initialize interface */
591 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]);
592 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]);
593 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 6]);
594 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 8]);
595 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 10]);
596 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 12]);
597 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 14]);
598 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 16]);
599 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 18]);
600 iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 20]);
601 ret = softing_fct_cmd(card, 17, "initialize_interface");
602 if (ret < 0)
603 goto failed;
604 /* enable_fifo */
605 ret = softing_fct_cmd(card, 36, "enable_fifo");
606 if (ret < 0)
607 goto failed;
608 /* enable fifo tx ack */
609 ret = softing_fct_cmd(card, 13, "fifo_tx_ack[0]");
610 if (ret < 0)
611 goto failed;
612 /* enable fifo tx ack2 */
613 ret = softing_fct_cmd(card, 14, "fifo_tx_ack[1]");
614 if (ret < 0)
615 goto failed;
616 /* start_chip */
617 ret = softing_fct_cmd(card, 11, "start_chip");
618 if (ret < 0)
619 goto failed;
620 iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE]);
621 iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE2]);
622 if (card->pdat->generation < 2) {
623 iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]);
624 /* flush the DPRAM caches */
625 wmb();
626 }
627
628 softing_initialize_timestamp(card);
629
630 /*
631 * do socketcan notifications/status changes
632	 * from here on, no errors should occur, or the failed: path
633	 * must be reviewed
634 */
635 memset(&msg, 0, sizeof(msg));
636 msg.can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED;
637 msg.can_dlc = CAN_ERR_DLC;
638 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
639 if (!(bus_bitmask_start & (1 << j)))
640 continue;
641 netdev = card->net[j];
642 if (!netdev)
643 continue;
644 priv = netdev_priv(netdev);
645 priv->can.state = CAN_STATE_ERROR_ACTIVE;
646 open_candev(netdev);
647 if (dev != netdev) {
648 /* notify other busses on the restart */
649 softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
650 ++priv->can.can_stats.restarts;
651 }
652 netif_wake_queue(netdev);
653 }
654
655 /* enable interrupts */
656 ret = softing_enable_irq(card, 1);
657 if (ret)
658 goto failed;
659card_done:
660 mutex_unlock(&card->fw.lock);
661 return 0;
662invalid:
663 ret = -EINVAL;
664failed:
665 softing_enable_irq(card, 0);
666 softing_reset_chip(card);
667 mutex_unlock(&card->fw.lock);
668 /* bring all other interfaces down */
669 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
670 netdev = card->net[j];
671 if (!netdev)
672 continue;
673 dev_close(netdev);
674 }
675 return ret;
676}
677
678int softing_default_output(struct net_device *netdev)
679{
680 struct softing_priv *priv = netdev_priv(netdev);
681 struct softing *card = priv->card;
682
683 switch (priv->chip) {
684 case 1000:
685 return (card->pdat->generation < 2) ? 0xfb : 0xfa;
686 case 5:
687 return 0x60;
688 default:
689 return 0x40;
690 }
691}
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
new file mode 100644
index 000000000000..5157e15e96eb
--- /dev/null
+++ b/drivers/net/can/softing/softing_main.c
@@ -0,0 +1,893 @@
1/*
2 * Copyright (C) 2008-2010
3 *
4 * - Kurt Van Dijck, EIA Electronics
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the version 2 of the GNU General Public License
8 * as published by the Free Software Foundation
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/version.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24
25#include "softing.h"
26
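/* each bus may occupy at most half of the shared tx fifo, less one slot */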
27#define TX_ECHO_SKB_MAX (((TXMAX+1)/2)-1)
28
29/*
30 * test whether a specific CAN netdev
31 * is online (i.e. up and running, not sleeping, not busoff)
32 */
33static inline int canif_is_active(struct net_device *netdev)
34{
35 struct can_priv *can = netdev_priv(netdev);
36
37 if (!netif_running(netdev))
38 return 0;
39 return (can->state <= CAN_STATE_ERROR_PASSIVE);
40}
41
42/* card reset via DPRAM: generation 2 keeps the card running while bit 0 of DPRAM_V2_RESET is set */
43static inline void softing_set_reset_dpram(struct softing *card)
44{
45 if (card->pdat->generation >= 2) {
46 spin_lock_bh(&card->spin);
47 iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) & ~1,
48 &card->dpram[DPRAM_V2_RESET]);
49 spin_unlock_bh(&card->spin);
50 }
51}
52
53static inline void softing_clr_reset_dpram(struct softing *card)
54{
55 if (card->pdat->generation >= 2) {
56 spin_lock_bh(&card->spin);
57 iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) | 1,
58 &card->dpram[DPRAM_V2_RESET]);
59 spin_unlock_bh(&card->spin);
60 }
61}
62
63/* trigger the tx queue-ing */
64static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb,
65 struct net_device *dev)
66{
67 struct softing_priv *priv = netdev_priv(dev);
68 struct softing *card = priv->card;
69 int ret;
70 uint8_t *ptr;
71 uint8_t fifo_wr, fifo_rd;
72 struct can_frame *cf = (struct can_frame *)skb->data;
73 uint8_t buf[DPRAM_TX_SIZE];
74
75 if (can_dropped_invalid_skb(dev, skb))
76 return NETDEV_TX_OK;
77
78 spin_lock(&card->spin);
79
80 ret = NETDEV_TX_BUSY;
81 if (!card->fw.up ||
82 (card->tx.pending >= TXMAX) ||
83 (priv->tx.pending >= TX_ECHO_SKB_MAX))
84 goto xmit_done;
85 fifo_wr = ioread8(&card->dpram[DPRAM_TX_WR]);
86 fifo_rd = ioread8(&card->dpram[DPRAM_TX_RD]);
87 if (fifo_wr == fifo_rd)
88 /* fifo full */
89 goto xmit_done;
90 memset(buf, 0, sizeof(buf));
91 ptr = buf;
92 *ptr = CMD_TX;
93 if (cf->can_id & CAN_RTR_FLAG)
94 *ptr |= CMD_RTR;
95 if (cf->can_id & CAN_EFF_FLAG)
96 *ptr |= CMD_XTD;
97 if (priv->index)
98 *ptr |= CMD_BUS2;
99 ++ptr;
100 *ptr++ = cf->can_dlc;
101 *ptr++ = (cf->can_id >> 0);
102 *ptr++ = (cf->can_id >> 8);
103 if (cf->can_id & CAN_EFF_FLAG) {
104 *ptr++ = (cf->can_id >> 16);
105 *ptr++ = (cf->can_id >> 24);
106 } else {
107		/* increment by 1, not 2 as you might expect */
108 ptr += 1;
109 }
110 if (!(cf->can_id & CAN_RTR_FLAG))
111 memcpy(ptr, &cf->data[0], cf->can_dlc);
112 memcpy_toio(&card->dpram[DPRAM_TX + DPRAM_TX_SIZE * fifo_wr],
113 buf, DPRAM_TX_SIZE);
114 if (++fifo_wr >= DPRAM_TX_CNT)
115 fifo_wr = 0;
116 iowrite8(fifo_wr, &card->dpram[DPRAM_TX_WR]);
117 card->tx.last_bus = priv->index;
118 ++card->tx.pending;
119 ++priv->tx.pending;
120 can_put_echo_skb(skb, dev, priv->tx.echo_put);
121 ++priv->tx.echo_put;
122 if (priv->tx.echo_put >= TX_ECHO_SKB_MAX)
123 priv->tx.echo_put = 0;
124 /* can_put_echo_skb() saves the skb, safe to return TX_OK */
125 ret = NETDEV_TX_OK;
126xmit_done:
127 spin_unlock(&card->spin);
128 if (card->tx.pending >= TXMAX) {
129 int j;
130 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
131 if (card->net[j])
132 netif_stop_queue(card->net[j]);
133 }
134 }
135 if (ret != NETDEV_TX_OK)
136 netif_stop_queue(dev);
137
138 return ret;
139}
140
141/*
142 * shortcut for skb delivery
143 */
144int softing_netdev_rx(struct net_device *netdev, const struct can_frame *msg,
145 ktime_t ktime)
146{
147 struct sk_buff *skb;
148 struct can_frame *cf;
149
150 skb = alloc_can_skb(netdev, &cf);
151 if (!skb)
152 return -ENOMEM;
153 memcpy(cf, msg, sizeof(*msg));
154 skb->tstamp = ktime;
155 return netif_rx(skb);
156}
157
158/*
159 * softing_handle_1
160 * pop 1 entry from the DPRAM queue, and process
161 */
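/*
 * DPRAM rx entry layout, as decoded below (multi-byte values little
 * endian):
 * uint8_t cmd; CMD_* flags, 0xff marks an invalid entry
 * error entries: uint8_t bus_state, uint32_t timestamp
 * data entries: uint8_t dlc, uint16_t or uint32_t (CMD_XTD) id,
 * uint32_t timestamp, uint8_t data[8]
 */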
162static int softing_handle_1(struct softing *card)
163{
164 struct net_device *netdev;
165 struct softing_priv *priv;
166 ktime_t ktime;
167 struct can_frame msg;
168 int cnt = 0, lost_msg;
169 uint8_t fifo_rd, fifo_wr, cmd;
170 uint8_t *ptr;
171 uint32_t tmp_u32;
172 uint8_t buf[DPRAM_RX_SIZE];
173
174 memset(&msg, 0, sizeof(msg));
175 /* test for lost msgs */
176 lost_msg = ioread8(&card->dpram[DPRAM_RX_LOST]);
177 if (lost_msg) {
178 int j;
179 /* reset condition */
180 iowrite8(0, &card->dpram[DPRAM_RX_LOST]);
181 /* prepare msg */
182 msg.can_id = CAN_ERR_FLAG | CAN_ERR_CRTL;
183 msg.can_dlc = CAN_ERR_DLC;
184 msg.data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
185 /*
186		 * service all busses; we don't know which one this applied
187		 * to, but only service busses that are online
188 */
189 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
190 netdev = card->net[j];
191 if (!netdev)
192 continue;
193 if (!canif_is_active(netdev))
194 /* a dead bus has no overflows */
195 continue;
196 ++netdev->stats.rx_over_errors;
197 softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
198 }
199 /* prepare for other use */
200 memset(&msg, 0, sizeof(msg));
201 ++cnt;
202 }
203
204 fifo_rd = ioread8(&card->dpram[DPRAM_RX_RD]);
205 fifo_wr = ioread8(&card->dpram[DPRAM_RX_WR]);
206
207 if (++fifo_rd >= DPRAM_RX_CNT)
208 fifo_rd = 0;
209 if (fifo_wr == fifo_rd)
210 return cnt;
211
212 memcpy_fromio(buf, &card->dpram[DPRAM_RX + DPRAM_RX_SIZE*fifo_rd],
213 DPRAM_RX_SIZE);
214 mb();
215 /* trigger dual port RAM */
216 iowrite8(fifo_rd, &card->dpram[DPRAM_RX_RD]);
217
218 ptr = buf;
219 cmd = *ptr++;
220 if (cmd == 0xff)
221		/* not quite useful, probably the card bailed out */
222 return 0;
223 netdev = card->net[0];
224 if (cmd & CMD_BUS2)
225 netdev = card->net[1];
226 priv = netdev_priv(netdev);
227
228 if (cmd & CMD_ERR) {
229 uint8_t can_state, state;
230
231 state = *ptr++;
232
233 msg.can_id = CAN_ERR_FLAG;
234 msg.can_dlc = CAN_ERR_DLC;
235
236 if (state & SF_MASK_BUSOFF) {
237 can_state = CAN_STATE_BUS_OFF;
238 msg.can_id |= CAN_ERR_BUSOFF;
239 state = STATE_BUSOFF;
240 } else if (state & SF_MASK_EPASSIVE) {
241 can_state = CAN_STATE_ERROR_PASSIVE;
242 msg.can_id |= CAN_ERR_CRTL;
243 msg.data[1] = CAN_ERR_CRTL_TX_PASSIVE;
244 state = STATE_EPASSIVE;
245 } else {
246 can_state = CAN_STATE_ERROR_ACTIVE;
247 msg.can_id |= CAN_ERR_CRTL;
248 state = STATE_EACTIVE;
249 }
250 /* update DPRAM */
251 iowrite8(state, &card->dpram[priv->index ?
252 DPRAM_INFO_BUSSTATE2 : DPRAM_INFO_BUSSTATE]);
253 /* timestamp */
254 tmp_u32 = le32_to_cpup((void *)ptr);
255 ptr += 4;
256 ktime = softing_raw2ktime(card, tmp_u32);
257
258 ++netdev->stats.rx_errors;
259 /* update internal status */
260 if (can_state != priv->can.state) {
261 priv->can.state = can_state;
262 if (can_state == CAN_STATE_ERROR_PASSIVE)
263 ++priv->can.can_stats.error_passive;
264 else if (can_state == CAN_STATE_BUS_OFF) {
265 /* this calls can_close_cleanup() */
266 can_bus_off(netdev);
267 netif_stop_queue(netdev);
268 }
269 /* trigger socketcan */
270 softing_netdev_rx(netdev, &msg, ktime);
271 }
272
273 } else {
274 if (cmd & CMD_RTR)
275 msg.can_id |= CAN_RTR_FLAG;
276 msg.can_dlc = get_can_dlc(*ptr++);
277 if (cmd & CMD_XTD) {
278 msg.can_id |= CAN_EFF_FLAG;
279 msg.can_id |= le32_to_cpup((void *)ptr);
280 ptr += 4;
281 } else {
282 msg.can_id |= le16_to_cpup((void *)ptr);
283 ptr += 2;
284 }
285 /* timestamp */
286 tmp_u32 = le32_to_cpup((void *)ptr);
287 ptr += 4;
288 ktime = softing_raw2ktime(card, tmp_u32);
289 if (!(msg.can_id & CAN_RTR_FLAG))
290 memcpy(&msg.data[0], ptr, 8);
291 ptr += 8;
292 /* update socket */
293 if (cmd & CMD_ACK) {
294 /* acknowledge, was tx msg */
295 struct sk_buff *skb;
296 skb = priv->can.echo_skb[priv->tx.echo_get];
297 if (skb)
298 skb->tstamp = ktime;
299 can_get_echo_skb(netdev, priv->tx.echo_get);
300 ++priv->tx.echo_get;
301 if (priv->tx.echo_get >= TX_ECHO_SKB_MAX)
302 priv->tx.echo_get = 0;
303 if (priv->tx.pending)
304 --priv->tx.pending;
305 if (card->tx.pending)
306 --card->tx.pending;
307 ++netdev->stats.tx_packets;
308 if (!(msg.can_id & CAN_RTR_FLAG))
309 netdev->stats.tx_bytes += msg.can_dlc;
310 } else {
311 int ret;
312
313 ret = softing_netdev_rx(netdev, &msg, ktime);
314 if (ret == NET_RX_SUCCESS) {
315 ++netdev->stats.rx_packets;
316 if (!(msg.can_id & CAN_RTR_FLAG))
317 netdev->stats.rx_bytes += msg.can_dlc;
318 } else {
319 ++netdev->stats.rx_dropped;
320 }
321 }
322 }
323 ++cnt;
324 return cnt;
325}
326
327/*
328 * real interrupt handler
329 */
330static irqreturn_t softing_irq_thread(int irq, void *dev_id)
331{
332 struct softing *card = (struct softing *)dev_id;
333 struct net_device *netdev;
334 struct softing_priv *priv;
335 int j, offset, work_done;
336
337 work_done = 0;
338 spin_lock_bh(&card->spin);
339 while (softing_handle_1(card) > 0) {
340 ++card->irq.svc_count;
341 ++work_done;
342 }
343 spin_unlock_bh(&card->spin);
344	/* resume tx queues */
345 offset = card->tx.last_bus;
346 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
347 if (card->tx.pending >= TXMAX)
348 break;
349 netdev = card->net[(j + offset + 1) % card->pdat->nbus];
350 if (!netdev)
351 continue;
352 priv = netdev_priv(netdev);
353 if (!canif_is_active(netdev))
354 /* it makes no sense to wake dead busses */
355 continue;
356 if (priv->tx.pending >= TX_ECHO_SKB_MAX)
357 continue;
358 ++work_done;
359 netif_wake_queue(netdev);
360 }
361 return work_done ? IRQ_HANDLED : IRQ_NONE;
362}
363
364/*
365 * interrupt routines:
366 * schedule the 'real interrupt handler'
367 */
368static irqreturn_t softing_irq_v2(int irq, void *dev_id)
369{
370 struct softing *card = (struct softing *)dev_id;
371 uint8_t ir;
372
373 ir = ioread8(&card->dpram[DPRAM_V2_IRQ_TOHOST]);
374 iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]);
375 return (1 == ir) ? IRQ_WAKE_THREAD : IRQ_NONE;
376}
377
378static irqreturn_t softing_irq_v1(int irq, void *dev_id)
379{
380 struct softing *card = (struct softing *)dev_id;
381 uint8_t ir;
382
383 ir = ioread8(&card->dpram[DPRAM_IRQ_TOHOST]);
384 iowrite8(0, &card->dpram[DPRAM_IRQ_TOHOST]);
385 return ir ? IRQ_WAKE_THREAD : IRQ_NONE;
386}
387
388/*
389 * netdev/candev inter-operability
390 */
391static int softing_netdev_open(struct net_device *ndev)
392{
393 int ret;
394
395 /* check or determine and set bittime */
396 ret = open_candev(ndev);
397 if (!ret)
398 ret = softing_startstop(ndev, 1);
399 return ret;
400}
401
402static int softing_netdev_stop(struct net_device *ndev)
403{
404 int ret;
405
406 netif_stop_queue(ndev);
407
408 /* softing cycle does close_candev() */
409 ret = softing_startstop(ndev, 0);
410 return ret;
411}
412
413static int softing_candev_set_mode(struct net_device *ndev, enum can_mode mode)
414{
415 int ret;
416
417 switch (mode) {
418 case CAN_MODE_START:
419 /* softing_startstop does close_candev() */
420 ret = softing_startstop(ndev, 1);
421 return ret;
422 case CAN_MODE_STOP:
423 case CAN_MODE_SLEEP:
424 return -EOPNOTSUPP;
425 }
426 return 0;
427}
428
429/*
430 * Softing device management helpers
431 */
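/* request or free the (shared, threaded) interrupt, as tracked by card->irq.requested */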
432int softing_enable_irq(struct softing *card, int enable)
433{
434 int ret;
435
436 if (!card->irq.nr) {
437 return 0;
438 } else if (card->irq.requested && !enable) {
439 free_irq(card->irq.nr, card);
440 card->irq.requested = 0;
441 } else if (!card->irq.requested && enable) {
442 ret = request_threaded_irq(card->irq.nr,
443 (card->pdat->generation >= 2) ?
444 softing_irq_v2 : softing_irq_v1,
445 softing_irq_thread, IRQF_SHARED,
446 dev_name(&card->pdev->dev), card);
447 if (ret) {
448 dev_alert(&card->pdev->dev,
449 "request_threaded_irq(%u) failed\n",
450 card->irq.nr);
451 return ret;
452 }
453 card->irq.requested = 1;
454 }
455 return 0;
456}
457
458static void softing_card_shutdown(struct softing *card)
459{
460 int fw_up = 0;
461
462 if (mutex_lock_interruptible(&card->fw.lock))
463		/* shutdown must proceed even on a signal (-ERESTARTSYS):
		 * fall back to the uninterruptible lock */
		mutex_lock(&card->fw.lock);
464 fw_up = card->fw.up;
465 card->fw.up = 0;
466
467 if (card->irq.requested && card->irq.nr) {
468 free_irq(card->irq.nr, card);
469 card->irq.requested = 0;
470 }
471 if (fw_up) {
472 if (card->pdat->enable_irq)
473 card->pdat->enable_irq(card->pdev, 0);
474 softing_set_reset_dpram(card);
475 if (card->pdat->reset)
476 card->pdat->reset(card->pdev, 1);
477 }
478 mutex_unlock(&card->fw.lock);
479}
480
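/*
 * boot sequence: pattern-test the DPRAM, load the boot and loader
 * images directly through the DPRAM window, then push the application
 * image via the bootloader and power on the CAN chips
 */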
481static __devinit int softing_card_boot(struct softing *card)
482{
483 int ret, j;
484 static const uint8_t stream[] = {
485 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, };
486 unsigned char back[sizeof(stream)];
487
488 if (mutex_lock_interruptible(&card->fw.lock))
489 return -ERESTARTSYS;
490 if (card->fw.up) {
491 mutex_unlock(&card->fw.lock);
492 return 0;
493 }
494 /* reset board */
495 if (card->pdat->enable_irq)
496 card->pdat->enable_irq(card->pdev, 1);
497 /* boot card */
498 softing_set_reset_dpram(card);
499 if (card->pdat->reset)
500 card->pdat->reset(card->pdev, 1);
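	/* pattern-test the DPRAM while the card is held in reset */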
501 for (j = 0; (j + sizeof(stream)) < card->dpram_size;
502 j += sizeof(stream)) {
503
504 memcpy_toio(&card->dpram[j], stream, sizeof(stream));
505 /* flush IO cache */
506 mb();
507 memcpy_fromio(back, &card->dpram[j], sizeof(stream));
508
509 if (!memcmp(back, stream, sizeof(stream)))
510 continue;
511 /* memory is not equal */
512 dev_alert(&card->pdev->dev, "dpram failed at 0x%04x\n", j);
513 ret = -EIO;
514 goto failed;
515 }
516 wmb();
517 /* load boot firmware */
518 ret = softing_load_fw(card->pdat->boot.fw, card, card->dpram,
519 card->dpram_size,
520 card->pdat->boot.offs - card->pdat->boot.addr);
521 if (ret < 0)
522 goto failed;
523 /* load loader firmware */
524 ret = softing_load_fw(card->pdat->load.fw, card, card->dpram,
525 card->dpram_size,
526 card->pdat->load.offs - card->pdat->load.addr);
527 if (ret < 0)
528 goto failed;
529
530 if (card->pdat->reset)
531 card->pdat->reset(card->pdev, 0);
532 softing_clr_reset_dpram(card);
533 ret = softing_bootloader_command(card, 0, "card boot");
534 if (ret < 0)
535 goto failed;
536 ret = softing_load_app_fw(card->pdat->app.fw, card);
537 if (ret < 0)
538 goto failed;
539
540 ret = softing_chip_poweron(card);
541 if (ret < 0)
542 goto failed;
543
544 card->fw.up = 1;
545 mutex_unlock(&card->fw.lock);
546 return 0;
547failed:
548 card->fw.up = 0;
549 if (card->pdat->enable_irq)
550 card->pdat->enable_irq(card->pdev, 0);
551 softing_set_reset_dpram(card);
552 if (card->pdat->reset)
553 card->pdat->reset(card->pdev, 1);
554 mutex_unlock(&card->fw.lock);
555 return ret;
556}
557
558/*
559 * netdev sysfs
560 */
561static ssize_t show_channel(struct device *dev, struct device_attribute *attr,
562 char *buf)
563{
564 struct net_device *ndev = to_net_dev(dev);
565 struct softing_priv *priv = netdev2softing(ndev);
566
567 return sprintf(buf, "%i\n", priv->index);
568}
569
570static ssize_t show_chip(struct device *dev, struct device_attribute *attr,
571 char *buf)
572{
573 struct net_device *ndev = to_net_dev(dev);
574 struct softing_priv *priv = netdev2softing(ndev);
575
576 return sprintf(buf, "%i\n", priv->chip);
577}
578
579static ssize_t show_output(struct device *dev, struct device_attribute *attr,
580 char *buf)
581{
582 struct net_device *ndev = to_net_dev(dev);
583 struct softing_priv *priv = netdev2softing(ndev);
584
585 return sprintf(buf, "0x%02x\n", priv->output);
586}
587
588static ssize_t store_output(struct device *dev, struct device_attribute *attr,
589 const char *buf, size_t count)
590{
591 struct net_device *ndev = to_net_dev(dev);
592 struct softing_priv *priv = netdev2softing(ndev);
593 struct softing *card = priv->card;
594 unsigned long val;
595 int ret;
596
597 ret = strict_strtoul(buf, 0, &val);
598 if (ret < 0)
599 return ret;
600 val &= 0xFF;
601
602 ret = mutex_lock_interruptible(&card->fw.lock);
603 if (ret)
604 return -ERESTARTSYS;
605 if (netif_running(ndev)) {
606 mutex_unlock(&card->fw.lock);
607 return -EBUSY;
608 }
609 priv->output = val;
610 mutex_unlock(&card->fw.lock);
611 return count;
612}
613
614static const DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
615static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL);
616static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output);
617
618static const struct attribute *const netdev_sysfs_attrs[] = {
619 &dev_attr_channel.attr,
620 &dev_attr_chip.attr,
621 &dev_attr_output.attr,
622 NULL,
623};
624static const struct attribute_group netdev_sysfs_group = {
625 .name = NULL,
626 .attrs = (struct attribute **)netdev_sysfs_attrs,
627};
628
629static const struct net_device_ops softing_netdev_ops = {
630 .ndo_open = softing_netdev_open,
631 .ndo_stop = softing_netdev_stop,
632 .ndo_start_xmit = softing_netdev_start_xmit,
633};
634
635static const struct can_bittiming_const softing_btr_const = {
636 .tseg1_min = 1,
637 .tseg1_max = 16,
638 .tseg2_min = 1,
639 .tseg2_max = 8,
640 .sjw_max = 4, /* overruled */
641 .brp_min = 1,
642 .brp_max = 32, /* overruled */
643 .brp_inc = 1,
644};
645
646
647static __devinit struct net_device *softing_netdev_create(struct softing *card,
648 uint16_t chip_id)
649{
650 struct net_device *netdev;
651 struct softing_priv *priv;
652
653 netdev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX);
654 if (!netdev) {
655 dev_alert(&card->pdev->dev, "alloc_candev failed\n");
656 return NULL;
657 }
658 priv = netdev_priv(netdev);
659 priv->netdev = netdev;
660 priv->card = card;
661 memcpy(&priv->btr_const, &softing_btr_const, sizeof(priv->btr_const));
662 priv->btr_const.brp_max = card->pdat->max_brp;
663 priv->btr_const.sjw_max = card->pdat->max_sjw;
664 priv->can.bittiming_const = &priv->btr_const;
665 priv->can.clock.freq = 8000000;
666 priv->chip = chip_id;
667 priv->output = softing_default_output(netdev);
668 SET_NETDEV_DEV(netdev, &card->pdev->dev);
669
670 netdev->flags |= IFF_ECHO;
671 netdev->netdev_ops = &softing_netdev_ops;
672 priv->can.do_set_mode = softing_candev_set_mode;
673 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
674
675 return netdev;
676}
677
678static __devinit int softing_netdev_register(struct net_device *netdev)
679{
680 int ret;
681
682 netdev->sysfs_groups[0] = &netdev_sysfs_group;
683 ret = register_candev(netdev);
684 if (ret) {
685 dev_alert(&netdev->dev, "register failed\n");
686 return ret;
687 }
688 return 0;
689}
690
691static void softing_netdev_cleanup(struct net_device *netdev)
692{
693 unregister_candev(netdev);
694 free_candev(netdev);
695}
696
697/*
698 * sysfs for Platform device
699 */
700#define DEV_ATTR_RO(name, member) \
701static ssize_t show_##name(struct device *dev, \
702 struct device_attribute *attr, char *buf) \
703{ \
704 struct softing *card = platform_get_drvdata(to_platform_device(dev)); \
705 return sprintf(buf, "%u\n", card->member); \
706} \
707static DEVICE_ATTR(name, 0444, show_##name, NULL)
708
709#define DEV_ATTR_RO_STR(name, member) \
710static ssize_t show_##name(struct device *dev, \
711 struct device_attribute *attr, char *buf) \
712{ \
713 struct softing *card = platform_get_drvdata(to_platform_device(dev)); \
714 return sprintf(buf, "%s\n", card->member); \
715} \
716static DEVICE_ATTR(name, 0444, show_##name, NULL)
717
718DEV_ATTR_RO(serial, id.serial);
719DEV_ATTR_RO_STR(firmware, pdat->app.fw);
720DEV_ATTR_RO(firmware_version, id.fw_version);
721DEV_ATTR_RO_STR(hardware, pdat->name);
722DEV_ATTR_RO(hardware_version, id.hw_version);
723DEV_ATTR_RO(license, id.license);
724DEV_ATTR_RO(frequency, id.freq);
725DEV_ATTR_RO(txpending, tx.pending);
726
727static struct attribute *softing_pdev_attrs[] = {
728 &dev_attr_serial.attr,
729 &dev_attr_firmware.attr,
730 &dev_attr_firmware_version.attr,
731 &dev_attr_hardware.attr,
732 &dev_attr_hardware_version.attr,
733 &dev_attr_license.attr,
734 &dev_attr_frequency.attr,
735 &dev_attr_txpending.attr,
736 NULL,
737};
738
739static const struct attribute_group softing_pdev_group = {
740 .name = NULL,
741 .attrs = softing_pdev_attrs,
742};
743
744/*
745 * platform driver
746 */
747static __devexit int softing_pdev_remove(struct platform_device *pdev)
748{
749 struct softing *card = platform_get_drvdata(pdev);
750 int j;
751
752 /* first, disable card*/
753 softing_card_shutdown(card);
754
755 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
756 if (!card->net[j])
757 continue;
758 softing_netdev_cleanup(card->net[j]);
759 card->net[j] = NULL;
760 }
761 sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group);
762
763 iounmap(card->dpram);
764 kfree(card);
765 return 0;
766}
767
768static __devinit int softing_pdev_probe(struct platform_device *pdev)
769{
770 const struct softing_platform_data *pdat = pdev->dev.platform_data;
771 struct softing *card;
772 struct net_device *netdev;
773 struct softing_priv *priv;
774 struct resource *pres;
775 int ret;
776 int j;
777
778 if (!pdat) {
779 dev_warn(&pdev->dev, "no platform data\n");
780 return -EINVAL;
781 }
782 if (pdat->nbus > ARRAY_SIZE(card->net)) {
783 dev_warn(&pdev->dev, "%u nets??\n", pdat->nbus);
784 return -EINVAL;
785 }
786
787 card = kzalloc(sizeof(*card), GFP_KERNEL);
788 if (!card)
789 return -ENOMEM;
790 card->pdat = pdat;
791 card->pdev = pdev;
792 platform_set_drvdata(pdev, card);
793 mutex_init(&card->fw.lock);
794 spin_lock_init(&card->spin);
795
796 ret = -EINVAL;
797 pres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
798 if (!pres)
799		goto platform_resource_failed;
800 card->dpram_phys = pres->start;
801 card->dpram_size = pres->end - pres->start + 1;
802 card->dpram = ioremap_nocache(card->dpram_phys, card->dpram_size);
803 if (!card->dpram) {
804 dev_alert(&card->pdev->dev, "dpram ioremap failed\n");
805 goto ioremap_failed;
806 }
807
808 pres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
809 if (pres)
810 card->irq.nr = pres->start;
811
812 /* reset card */
813 ret = softing_card_boot(card);
814 if (ret < 0) {
815 dev_alert(&pdev->dev, "failed to boot\n");
816 goto boot_failed;
817 }
818
819	/* only now, the chips are known */
820 card->id.freq = card->pdat->freq;
821
822 ret = sysfs_create_group(&pdev->dev.kobj, &softing_pdev_group);
823 if (ret < 0) {
824 dev_alert(&card->pdev->dev, "sysfs failed\n");
825 goto sysfs_failed;
826 }
827
828 ret = -ENOMEM;
829 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
830 card->net[j] = netdev =
831 softing_netdev_create(card, card->id.chip[j]);
832 if (!netdev) {
833 dev_alert(&pdev->dev, "failed to make can[%i]", j);
834 goto netdev_failed;
835 }
836 priv = netdev_priv(card->net[j]);
837 priv->index = j;
838 ret = softing_netdev_register(netdev);
839 if (ret) {
840 free_candev(netdev);
841 card->net[j] = NULL;
842 dev_alert(&card->pdev->dev,
843 "failed to register can[%i]\n", j);
844 goto netdev_failed;
845 }
846 }
847 dev_info(&card->pdev->dev, "%s ready.\n", card->pdat->name);
848 return 0;
849
850netdev_failed:
851 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
852 if (!card->net[j])
853 continue;
854 softing_netdev_cleanup(card->net[j]);
855 }
856 sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group);
857sysfs_failed:
858 softing_card_shutdown(card);
859boot_failed:
860 iounmap(card->dpram);
861ioremap_failed:
862platform_resource_failed:
863 kfree(card);
864 return ret;
865}
866
867static struct platform_driver softing_driver = {
868 .driver = {
869 .name = "softing",
870 .owner = THIS_MODULE,
871 },
872 .probe = softing_pdev_probe,
873 .remove = __devexit_p(softing_pdev_remove),
874};
875
876MODULE_ALIAS("platform:softing");
877
878static int __init softing_start(void)
879{
880 return platform_driver_register(&softing_driver);
881}
882
883static void __exit softing_stop(void)
884{
885 platform_driver_unregister(&softing_driver);
886}
887
888module_init(softing_start);
889module_exit(softing_stop);
890
891MODULE_DESCRIPTION("Softing DPRAM CAN driver");
892MODULE_AUTHOR("Kurt Van Dijck <kurt.van.dijck@eia.be>");
893MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/softing/softing_platform.h b/drivers/net/can/softing/softing_platform.h
new file mode 100644
index 000000000000..ebbf69815623
--- /dev/null
+++ b/drivers/net/can/softing/softing_platform.h
@@ -0,0 +1,40 @@
1
2#include <linux/platform_device.h>
3
4#ifndef _SOFTING_DEVICE_H_
5#define _SOFTING_DEVICE_H_
6
7/* softing firmware directory prefix */
8#define fw_dir "softing-4.6/"
9
10struct softing_platform_data {
11 unsigned int manf;
12 unsigned int prod;
13 /*
14 * generation
15 * 1st with NEC or SJA1000
16 * 8bit, exclusive interrupt, ...
17 * 2nd only SJA1000
18 * 16bit, shared interrupt
19 */
20 int generation;
21 int nbus; /* # busses on device */
22 unsigned int freq; /* operating frequency in Hz */
23 unsigned int max_brp;
24 unsigned int max_sjw;
25 unsigned long dpram_size;
26 const char *name;
27 struct {
28 unsigned long offs;
29 unsigned long addr;
30 const char *fw;
31 } boot, load, app;
32 /*
33 * reset() function
34 * bring pdev in or out of reset, depending on value
35 */
36 int (*reset)(struct platform_device *pdev, int value);
37 int (*enable_irq)(struct platform_device *pdev, int value);
38};
39
40#endif
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 263a2944566f..2d2d28f58e91 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -65,7 +65,14 @@ static LIST_HEAD(cnic_udev_list);
65static DEFINE_RWLOCK(cnic_dev_lock); 65static DEFINE_RWLOCK(cnic_dev_lock);
66static DEFINE_MUTEX(cnic_lock); 66static DEFINE_MUTEX(cnic_lock);
67 67
68static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE]; 68static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
69
70/* helper function, assuming cnic_lock is held */
71static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
72{
73 return rcu_dereference_protected(cnic_ulp_tbl[type],
74 lockdep_is_held(&cnic_lock));
75}
69 76
70static int cnic_service_bnx2(void *, void *); 77static int cnic_service_bnx2(void *, void *);
71static int cnic_service_bnx2x(void *, void *); 78static int cnic_service_bnx2x(void *, void *);
@@ -435,7 +442,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
435 return -EINVAL; 442 return -EINVAL;
436 } 443 }
437 mutex_lock(&cnic_lock); 444 mutex_lock(&cnic_lock);
438 if (cnic_ulp_tbl[ulp_type]) { 445 if (cnic_ulp_tbl_prot(ulp_type)) {
439 pr_err("%s: Type %d has already been registered\n", 446 pr_err("%s: Type %d has already been registered\n",
440 __func__, ulp_type); 447 __func__, ulp_type);
441 mutex_unlock(&cnic_lock); 448 mutex_unlock(&cnic_lock);
@@ -478,7 +485,7 @@ int cnic_unregister_driver(int ulp_type)
478 return -EINVAL; 485 return -EINVAL;
479 } 486 }
480 mutex_lock(&cnic_lock); 487 mutex_lock(&cnic_lock);
481 ulp_ops = cnic_ulp_tbl[ulp_type]; 488 ulp_ops = cnic_ulp_tbl_prot(ulp_type);
482 if (!ulp_ops) { 489 if (!ulp_ops) {
483 pr_err("%s: Type %d has not been registered\n", 490 pr_err("%s: Type %d has not been registered\n",
484 __func__, ulp_type); 491 __func__, ulp_type);
@@ -529,7 +536,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
529 return -EINVAL; 536 return -EINVAL;
530 } 537 }
531 mutex_lock(&cnic_lock); 538 mutex_lock(&cnic_lock);
532 if (cnic_ulp_tbl[ulp_type] == NULL) { 539 if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
533 pr_err("%s: Driver with type %d has not been registered\n", 540 pr_err("%s: Driver with type %d has not been registered\n",
534 __func__, ulp_type); 541 __func__, ulp_type);
535 mutex_unlock(&cnic_lock); 542 mutex_unlock(&cnic_lock);
@@ -544,7 +551,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
544 551
545 clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]); 552 clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
546 cp->ulp_handle[ulp_type] = ulp_ctx; 553 cp->ulp_handle[ulp_type] = ulp_ctx;
547 ulp_ops = cnic_ulp_tbl[ulp_type]; 554 ulp_ops = cnic_ulp_tbl_prot(ulp_type);
548 rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops); 555 rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
549 cnic_hold(dev); 556 cnic_hold(dev);
550 557
@@ -699,13 +706,13 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
699static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma) 706static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
700{ 707{
701 int i; 708 int i;
702 u32 *page_table = dma->pgtbl; 709 __le32 *page_table = (__le32 *) dma->pgtbl;
703 710
704 for (i = 0; i < dma->num_pages; i++) { 711 for (i = 0; i < dma->num_pages; i++) {
705 /* Each entry needs to be in big endian format. */ 712 /* Each entry needs to be in big endian format. */
706 *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32); 713 *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
707 page_table++; 714 page_table++;
708 *page_table = (u32) dma->pg_map_arr[i]; 715 *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
709 page_table++; 716 page_table++;
710 } 717 }
711} 718}
@@ -713,13 +720,13 @@ static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
713static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma) 720static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
714{ 721{
715 int i; 722 int i;
716 u32 *page_table = dma->pgtbl; 723 __le32 *page_table = (__le32 *) dma->pgtbl;
717 724
718 for (i = 0; i < dma->num_pages; i++) { 725 for (i = 0; i < dma->num_pages; i++) {
719 /* Each entry needs to be in little endian format. */ 726 /* Each entry needs to be in little endian format. */
720 *page_table = dma->pg_map_arr[i] & 0xffffffff; 727 *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
721 page_table++; 728 page_table++;
722 *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32); 729 *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
723 page_table++; 730 page_table++;
724 } 731 }
725} 732}
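
Both conversions above fix the same sparse-visible problem: the page table holds each 64-bit DMA address as two 32-bit words, and storing through a plain u32 pointer left the byte order implicit. Typing the table as __le32 and converting with cpu_to_le32() makes the stored format explicit and checkable; the two functions then differ only in which half of the address is stored first ("big endian format" in the comment refers to that word ordering). A sketch of the low-word-first variant:

#include <linux/types.h>
#include <asm/byteorder.h>

/* store one 64-bit DMA address as two little-endian 32-bit words */
static void put_le_addr(__le32 *slot, u64 addr)
{
	slot[0] = cpu_to_le32(addr & 0xffffffff);	/* low half first */
	slot[1] = cpu_to_le32(addr >> 32);		/* then high half */
}
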
@@ -2953,7 +2960,8 @@ static void cnic_ulp_stop(struct cnic_dev *dev)
2953 struct cnic_ulp_ops *ulp_ops; 2960 struct cnic_ulp_ops *ulp_ops;
2954 2961
2955 mutex_lock(&cnic_lock); 2962 mutex_lock(&cnic_lock);
2956 ulp_ops = cp->ulp_ops[if_type]; 2963 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
2964 lockdep_is_held(&cnic_lock));
2957 if (!ulp_ops) { 2965 if (!ulp_ops) {
2958 mutex_unlock(&cnic_lock); 2966 mutex_unlock(&cnic_lock);
2959 continue; 2967 continue;
@@ -2977,7 +2985,8 @@ static void cnic_ulp_start(struct cnic_dev *dev)
2977 struct cnic_ulp_ops *ulp_ops; 2985 struct cnic_ulp_ops *ulp_ops;
2978 2986
2979 mutex_lock(&cnic_lock); 2987 mutex_lock(&cnic_lock);
2980 ulp_ops = cp->ulp_ops[if_type]; 2988 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
2989 lockdep_is_held(&cnic_lock));
2981 if (!ulp_ops || !ulp_ops->cnic_start) { 2990 if (!ulp_ops || !ulp_ops->cnic_start) {
2982 mutex_unlock(&cnic_lock); 2991 mutex_unlock(&cnic_lock);
2983 continue; 2992 continue;
@@ -3041,7 +3050,7 @@ static void cnic_ulp_init(struct cnic_dev *dev)
3041 struct cnic_ulp_ops *ulp_ops; 3050 struct cnic_ulp_ops *ulp_ops;
3042 3051
3043 mutex_lock(&cnic_lock); 3052 mutex_lock(&cnic_lock);
3044 ulp_ops = cnic_ulp_tbl[i]; 3053 ulp_ops = cnic_ulp_tbl_prot(i);
3045 if (!ulp_ops || !ulp_ops->cnic_init) { 3054 if (!ulp_ops || !ulp_ops->cnic_init) {
3046 mutex_unlock(&cnic_lock); 3055 mutex_unlock(&cnic_lock);
3047 continue; 3056 continue;
@@ -3065,7 +3074,7 @@ static void cnic_ulp_exit(struct cnic_dev *dev)
3065 struct cnic_ulp_ops *ulp_ops; 3074 struct cnic_ulp_ops *ulp_ops;
3066 3075
3067 mutex_lock(&cnic_lock); 3076 mutex_lock(&cnic_lock);
3068 ulp_ops = cnic_ulp_tbl[i]; 3077 ulp_ops = cnic_ulp_tbl_prot(i);
3069 if (!ulp_ops || !ulp_ops->cnic_exit) { 3078 if (!ulp_ops || !ulp_ops->cnic_exit) {
3070 mutex_unlock(&cnic_lock); 3079 mutex_unlock(&cnic_lock);
3071 continue; 3080 continue;
@@ -4170,6 +4179,14 @@ static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4170 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); 4179 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
4171} 4180}
4172 4181
4182static void cnic_get_bnx2_iscsi_info(struct cnic_dev *dev)
4183{
4184 u32 max_conn;
4185
4186 max_conn = cnic_reg_rd_ind(dev, BNX2_FW_MAX_ISCSI_CONN);
4187 dev->max_iscsi_conn = max_conn;
4188}
4189
4173static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev) 4190static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4174{ 4191{
4175 struct cnic_local *cp = dev->cnic_priv; 4192 struct cnic_local *cp = dev->cnic_priv;
@@ -4494,6 +4511,8 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4494 return err; 4511 return err;
4495 } 4512 }
4496 4513
4514 cnic_get_bnx2_iscsi_info(dev);
4515
4497 return 0; 4516 return 0;
4498} 4517}
4499 4518
@@ -4705,129 +4724,6 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4705 cp->rx_cons = *cp->rx_cons_ptr; 4724 cp->rx_cons = *cp->rx_cons_ptr;
4706} 4725}
4707 4726
4708static int cnic_read_bnx2x_iscsi_mac(struct cnic_dev *dev, u32 upper_addr,
4709 u32 lower_addr)
4710{
4711 u32 val;
4712 u8 mac[6];
4713
4714 val = CNIC_RD(dev, upper_addr);
4715
4716 mac[0] = (u8) (val >> 8);
4717 mac[1] = (u8) val;
4718
4719 val = CNIC_RD(dev, lower_addr);
4720
4721 mac[2] = (u8) (val >> 24);
4722 mac[3] = (u8) (val >> 16);
4723 mac[4] = (u8) (val >> 8);
4724 mac[5] = (u8) val;
4725
4726 if (is_valid_ether_addr(mac)) {
4727 memcpy(dev->mac_addr, mac, 6);
4728 return 0;
4729 } else {
4730 return -EINVAL;
4731 }
4732}
4733
4734static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
4735{
4736 struct cnic_local *cp = dev->cnic_priv;
4737 u32 base, base2, addr, addr1, val;
4738 int port = CNIC_PORT(cp);
4739
4740 dev->max_iscsi_conn = 0;
4741 base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
4742 if (base == 0)
4743 return;
4744
4745 base2 = CNIC_RD(dev, (CNIC_PATH(cp) ? MISC_REG_GENERIC_CR_1 :
4746 MISC_REG_GENERIC_CR_0));
4747 addr = BNX2X_SHMEM_ADDR(base,
4748 dev_info.port_hw_config[port].iscsi_mac_upper);
4749
4750 addr1 = BNX2X_SHMEM_ADDR(base,
4751 dev_info.port_hw_config[port].iscsi_mac_lower);
4752
4753 cnic_read_bnx2x_iscsi_mac(dev, addr, addr1);
4754
4755 addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
4756 val = CNIC_RD(dev, addr);
4757
4758 if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
4759 u16 val16;
4760
4761 addr = BNX2X_SHMEM_ADDR(base,
4762 drv_lic_key[port].max_iscsi_init_conn);
4763 val16 = CNIC_RD16(dev, addr);
4764
4765 if (val16)
4766 val16 ^= 0x1e1e;
4767 dev->max_iscsi_conn = val16;
4768 }
4769
4770 if (BNX2X_CHIP_IS_E2(cp->chip_id))
4771 dev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
4772
4773 if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) {
4774 int func = CNIC_FUNC(cp);
4775 u32 mf_cfg_addr;
4776
4777 if (BNX2X_SHMEM2_HAS(base2, mf_cfg_addr))
4778 mf_cfg_addr = CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base2,
4779 mf_cfg_addr));
4780 else
4781 mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;
4782
4783 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
4784 /* Must determine if the MF is SD vs SI mode */
4785 addr = BNX2X_SHMEM_ADDR(base,
4786 dev_info.shared_feature_config.config);
4787 val = CNIC_RD(dev, addr);
4788 if ((val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) ==
4789 SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT) {
4790 int rc;
4791
4792 /* MULTI_FUNCTION_SI mode */
4793 addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
4794 func_ext_config[func].func_cfg);
4795 val = CNIC_RD(dev, addr);
4796 if (!(val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD))
4797 dev->max_iscsi_conn = 0;
4798
4799 if (!(val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
4800 dev->max_fcoe_conn = 0;
4801
4802 addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
4803 func_ext_config[func].
4804 iscsi_mac_addr_upper);
4805 addr1 = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
4806 func_ext_config[func].
4807 iscsi_mac_addr_lower);
4808 rc = cnic_read_bnx2x_iscsi_mac(dev, addr,
4809 addr1);
4810 if (rc && func > 1)
4811 dev->max_iscsi_conn = 0;
4812
4813 return;
4814 }
4815 }
4816
4817 addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
4818 func_mf_config[func].e1hov_tag);
4819
4820 val = CNIC_RD(dev, addr);
4821 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
4822 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
4823 dev->max_fcoe_conn = 0;
4824 dev->max_iscsi_conn = 0;
4825 }
4826 }
4827 if (!is_valid_ether_addr(dev->mac_addr))
4828 dev->max_iscsi_conn = 0;
4829}
4830
4831static void cnic_init_bnx2x_kcq(struct cnic_dev *dev) 4727static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
4832{ 4728{
4833 struct cnic_local *cp = dev->cnic_priv; 4729 struct cnic_local *cp = dev->cnic_priv;
@@ -4909,8 +4805,6 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4909 4805
4910 cnic_init_bnx2x_kcq(dev); 4806 cnic_init_bnx2x_kcq(dev);
4911 4807
4912 cnic_get_bnx2x_iscsi_info(dev);
4913
4914 /* Only 1 EQ */ 4808 /* Only 1 EQ */
4915 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX); 4809 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
4916 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4810 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
@@ -5343,6 +5237,14 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5343 cdev->pcidev = pdev; 5237 cdev->pcidev = pdev;
5344 cp->chip_id = ethdev->chip_id; 5238 cp->chip_id = ethdev->chip_id;
5345 5239
5240 if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
5241 cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5242 if (BNX2X_CHIP_IS_E2(cp->chip_id) &&
5243 !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
5244 cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
5245
5246 memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
5247
5346 cp->cnic_ops = &cnic_bnx2x_ops; 5248 cp->cnic_ops = &cnic_bnx2x_ops;
5347 cp->start_hw = cnic_start_bnx2x_hw; 5249 cp->start_hw = cnic_start_bnx2x_hw;
5348 cp->stop_hw = cnic_stop_bnx2x_hw; 5250 cp->stop_hw = cnic_stop_bnx2x_hw;
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index b328f6c924c3..4456260c653c 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -220,7 +220,7 @@ struct cnic_local {
220#define ULP_F_INIT 0 220#define ULP_F_INIT 0
221#define ULP_F_START 1 221#define ULP_F_START 1
222#define ULP_F_CALL_PENDING 2 222#define ULP_F_CALL_PENDING 2
223 struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE]; 223 struct cnic_ulp_ops __rcu *ulp_ops[MAX_CNIC_ULP_TYPE];
224 224
225 unsigned long cnic_local_flags; 225 unsigned long cnic_local_flags;
226#define CNIC_LCL_FL_KWQ_INIT 0x0 226#define CNIC_LCL_FL_KWQ_INIT 0x0
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 9f44e0ffe003..e01b49ee3591 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
12#ifndef CNIC_IF_H 12#ifndef CNIC_IF_H
13#define CNIC_IF_H 13#define CNIC_IF_H
14 14
15#define CNIC_MODULE_VERSION "2.2.12" 15#define CNIC_MODULE_VERSION "2.2.13"
16#define CNIC_MODULE_RELDATE "Jan 03, 2011" 16#define CNIC_MODULE_RELDATE "Jan 31, 2011"
17 17
18#define CNIC_ULP_RDMA 0 18#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1 19#define CNIC_ULP_ISCSI 1
@@ -159,6 +159,9 @@ struct cnic_eth_dev {
159 u32 drv_state; 159 u32 drv_state;
160#define CNIC_DRV_STATE_REGD 0x00000001 160#define CNIC_DRV_STATE_REGD 0x00000001
161#define CNIC_DRV_STATE_USING_MSIX 0x00000002 161#define CNIC_DRV_STATE_USING_MSIX 0x00000002
162#define CNIC_DRV_STATE_NO_ISCSI_OOO 0x00000004
163#define CNIC_DRV_STATE_NO_ISCSI 0x00000008
164#define CNIC_DRV_STATE_NO_FCOE 0x00000010
162 u32 chip_id; 165 u32 chip_id;
163 u32 max_kwqe_pending; 166 u32 max_kwqe_pending;
164 struct pci_dev *pdev; 167 struct pci_dev *pdev;
@@ -176,6 +179,7 @@ struct cnic_eth_dev {
176 u32 fcoe_init_cid; 179 u32 fcoe_init_cid;
177 u16 iscsi_l2_client_id; 180 u16 iscsi_l2_client_id;
178 u16 iscsi_l2_cid; 181 u16 iscsi_l2_cid;
182 u8 iscsi_mac[ETH_ALEN];
179 183
180 int num_irq; 184 int num_irq;
181 struct cnic_irq irq_arr[MAX_CNIC_VEC]; 185 struct cnic_irq irq_arr[MAX_CNIC_VEC];
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index ef02aa68c926..862804f32b6e 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -186,9 +186,10 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
186 dev = NULL; 186 dev = NULL;
187 if (grp) 187 if (grp)
188 dev = vlan_group_get_device(grp, vlan); 188 dev = vlan_group_get_device(grp, vlan);
189 } else 189 } else if (netif_is_bond_slave(dev)) {
190 while (dev->master) 190 while (dev->master)
191 dev = dev->master; 191 dev = dev->master;
192 }
192 return dev; 193 return dev;
193 } 194 }
194 } 195 }
@@ -967,8 +968,6 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
967 cxgb_neigh_update((struct neighbour *)ctx); 968 cxgb_neigh_update((struct neighbour *)ctx);
968 break; 969 break;
969 } 970 }
970 case (NETEVENT_PMTU_UPDATE):
971 break;
972 case (NETEVENT_REDIRECT):{ 971 case (NETEVENT_REDIRECT):{
973 struct netevent_redirect *nr = ctx; 972 struct netevent_redirect *nr = ctx;
974 cxgb_redirect(nr->old, nr->new); 973 cxgb_redirect(nr->old, nr->new);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 059c1eec8c3f..5352c8a23f4d 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -2471,7 +2471,6 @@ static int netevent_cb(struct notifier_block *nb, unsigned long event,
2471 case NETEVENT_NEIGH_UPDATE: 2471 case NETEVENT_NEIGH_UPDATE:
2472 check_neigh_update(data); 2472 check_neigh_update(data);
2473 break; 2473 break;
2474 case NETEVENT_PMTU_UPDATE:
2475 case NETEVENT_REDIRECT: 2474 case NETEVENT_REDIRECT:
2476 default: 2475 default:
2477 break; 2476 break;
@@ -2710,6 +2709,8 @@ static int cxgb_open(struct net_device *dev)
2710 struct port_info *pi = netdev_priv(dev); 2709 struct port_info *pi = netdev_priv(dev);
2711 struct adapter *adapter = pi->adapter; 2710 struct adapter *adapter = pi->adapter;
2712 2711
2712 netif_carrier_off(dev);
2713
2713 if (!(adapter->flags & FULL_INIT_DONE)) { 2714 if (!(adapter->flags & FULL_INIT_DONE)) {
2714 err = cxgb_up(adapter); 2715 err = cxgb_up(adapter);
2715 if (err < 0) 2716 if (err < 0)
@@ -3661,7 +3662,6 @@ static int __devinit init_one(struct pci_dev *pdev,
3661 pi->xact_addr_filt = -1; 3662 pi->xact_addr_filt = -1;
3662 pi->rx_offload = RX_CSO; 3663 pi->rx_offload = RX_CSO;
3663 pi->port_id = i; 3664 pi->port_id = i;
3664 netif_carrier_off(netdev);
3665 netdev->irq = pdev->irq; 3665 netdev->irq = pdev->irq;
3666 3666
3667 netdev->features |= NETIF_F_SG | TSO_FLAGS; 3667 netdev->features |= NETIF_F_SG | TSO_FLAGS;
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 56166ae2059f..6aad64df4dcb 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -2040,7 +2040,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
2040{ 2040{
2041 int i; 2041 int i;
2042 2042
2043 BUG_ON(adapter->debugfs_root == NULL); 2043 BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2044 2044
2045 /* 2045 /*
2046 * Debugfs support is best effort. 2046 * Debugfs support is best effort.
@@ -2061,7 +2061,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
2061 */ 2061 */
2062static void cleanup_debugfs(struct adapter *adapter) 2062static void cleanup_debugfs(struct adapter *adapter)
2063{ 2063{
2064 BUG_ON(adapter->debugfs_root == NULL); 2064 BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2065 2065
2066 /* 2066 /*
2067 * Unlike our sister routine cleanup_proc(), we don't need to remove 2067 * Unlike our sister routine cleanup_proc(), we don't need to remove
@@ -2489,17 +2489,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2489 struct net_device *netdev; 2489 struct net_device *netdev;
2490 2490
2491 /* 2491 /*
2492 * Vet our module parameters.
2493 */
2494 if (msi != MSI_MSIX && msi != MSI_MSI) {
2495 dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d"
2496 " (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX,
2497 MSI_MSI);
2498 err = -EINVAL;
2499 goto err_out;
2500 }
2501
2502 /*
2503 * Print our driver banner the first time we're called to initialize a 2492 * Print our driver banner the first time we're called to initialize a
2504 * device. 2493 * device.
2505 */ 2494 */
@@ -2711,11 +2700,11 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2711 /* 2700 /*
2712 * Set up our debugfs entries. 2701 * Set up our debugfs entries.
2713 */ 2702 */
2714 if (cxgb4vf_debugfs_root) { 2703 if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
2715 adapter->debugfs_root = 2704 adapter->debugfs_root =
2716 debugfs_create_dir(pci_name(pdev), 2705 debugfs_create_dir(pci_name(pdev),
2717 cxgb4vf_debugfs_root); 2706 cxgb4vf_debugfs_root);
2718 if (adapter->debugfs_root == NULL) 2707 if (IS_ERR_OR_NULL(adapter->debugfs_root))
2719 dev_warn(&pdev->dev, "could not create debugfs" 2708 dev_warn(&pdev->dev, "could not create debugfs"
2720 " directory"); 2709 " directory");
2721 else 2710 else
@@ -2770,7 +2759,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2770 */ 2759 */
2771 2760
2772err_free_debugfs: 2761err_free_debugfs:
2773 if (adapter->debugfs_root) { 2762 if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
2774 cleanup_debugfs(adapter); 2763 cleanup_debugfs(adapter);
2775 debugfs_remove_recursive(adapter->debugfs_root); 2764 debugfs_remove_recursive(adapter->debugfs_root);
2776 } 2765 }
@@ -2802,7 +2791,6 @@ err_release_regions:
2802err_disable_device: 2791err_disable_device:
2803 pci_disable_device(pdev); 2792 pci_disable_device(pdev);
2804 2793
2805err_out:
2806 return err; 2794 return err;
2807} 2795}
2808 2796
@@ -2840,7 +2828,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
2840 /* 2828 /*
2841 * Tear down our debugfs entries. 2829 * Tear down our debugfs entries.
2842 */ 2830 */
2843 if (adapter->debugfs_root) { 2831 if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
2844 cleanup_debugfs(adapter); 2832 cleanup_debugfs(adapter);
2845 debugfs_remove_recursive(adapter->debugfs_root); 2833 debugfs_remove_recursive(adapter->debugfs_root);
2846 } 2834 }
@@ -2874,6 +2862,46 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
2874} 2862}
2875 2863
2876/* 2864/*
2865 * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
2866 * delivery.
2867 */
2868static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev)
2869{
2870 struct adapter *adapter;
2871 int pidx;
2872
2873 adapter = pci_get_drvdata(pdev);
2874 if (!adapter)
2875 return;
2876
2877 /*
2878 * Disable all Virtual Interfaces. This will shut down the
2879 * delivery of all ingress packets into the chip for these
2880 * Virtual Interfaces.
2881 */
2882 for_each_port(adapter, pidx) {
2883 struct net_device *netdev;
2884 struct port_info *pi;
2885
2886 if (!test_bit(pidx, &adapter->registered_device_map))
2887 continue;
2888
2889 netdev = adapter->port[pidx];
2890 if (!netdev)
2891 continue;
2892
2893 pi = netdev_priv(netdev);
2894 t4vf_enable_vi(adapter, pi->viid, false, false);
2895 }
2896
2897 /*
2898 * Free up all Queues, which will prevent further DMA and
2899 * Interrupts, allowing various internal pathways to drain.
2900 */
2901 t4vf_free_sge_resources(adapter);
2902}
2903
2904/*
2877 * PCI Device registration data structures. 2905 * PCI Device registration data structures.
2878 */ 2906 */
2879#define CH_DEVICE(devid, idx) \ 2907#define CH_DEVICE(devid, idx) \
@@ -2906,6 +2934,7 @@ static struct pci_driver cxgb4vf_driver = {
2906 .id_table = cxgb4vf_pci_tbl, 2934 .id_table = cxgb4vf_pci_tbl,
2907 .probe = cxgb4vf_pci_probe, 2935 .probe = cxgb4vf_pci_probe,
2908 .remove = __devexit_p(cxgb4vf_pci_remove), 2936 .remove = __devexit_p(cxgb4vf_pci_remove),
2937 .shutdown = __devexit_p(cxgb4vf_pci_shutdown),
2909}; 2938};
2910 2939
2911/* 2940/*
@@ -2915,14 +2944,25 @@ static int __init cxgb4vf_module_init(void)
2915{ 2944{
2916 int ret; 2945 int ret;
2917 2946
2947 /*
2948 * Vet our module parameters.
2949 */
2950 if (msi != MSI_MSIX && msi != MSI_MSI) {
2951 printk(KERN_WARNING KBUILD_MODNAME
2952 ": bad module parameter msi=%d; must be %d"
2953 " (MSI-X or MSI) or %d (MSI)\n",
2954 msi, MSI_MSIX, MSI_MSI);
2955 return -EINVAL;
2956 }
2957
2918 /* Debugfs support is optional, just warn if this fails */ 2958 /* Debugfs support is optional, just warn if this fails */
2919 cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); 2959 cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
2920 if (!cxgb4vf_debugfs_root) 2960 if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
2921 printk(KERN_WARNING KBUILD_MODNAME ": could not create" 2961 printk(KERN_WARNING KBUILD_MODNAME ": could not create"
2922 " debugfs entry, continuing\n"); 2962 " debugfs entry, continuing\n");
2923 2963
2924 ret = pci_register_driver(&cxgb4vf_driver); 2964 ret = pci_register_driver(&cxgb4vf_driver);
2925 if (ret < 0) 2965 if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
2926 debugfs_remove(cxgb4vf_debugfs_root); 2966 debugfs_remove(cxgb4vf_debugfs_root);
2927 return ret; 2967 return ret;
2928} 2968}
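
The IS_ERR_OR_NULL() conversions throughout this file cover a real hole: when debugfs is not compiled in, debugfs_create_dir() returns ERR_PTR(-ENODEV) rather than NULL, so a bare NULL test accepts the error pointer and later code may dereference it. A condensed sketch of the idiom (names hypothetical):

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/printk.h>

static struct dentry *droot;

static void example_debugfs_init(void)
{
	droot = debugfs_create_dir("example", NULL);
	if (IS_ERR_OR_NULL(droot)) {
		/* debugfs is best effort: warn and carry on */
		pr_warn("example: debugfs unavailable, continuing\n");
		droot = NULL;	/* normalize so later checks stay simple */
	}
}
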
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index 0f51c80475ce..192db226ec7f 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -171,7 +171,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
171 delay_idx = 0; 171 delay_idx = 0;
172 ms = delay[0]; 172 ms = delay[0];
173 173
174 for (i = 0; i < 500; i += ms) { 174 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
175 if (sleep_ok) { 175 if (sleep_ok) {
176 ms = delay[delay_idx]; 176 ms = delay[delay_idx];
177 if (delay_idx < ARRAY_SIZE(delay) - 1) 177 if (delay_idx < ARRAY_SIZE(delay) - 1)
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 1b48b68ad4fd..8b0084d17c8c 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -1094,7 +1094,7 @@ static int depca_rx(struct net_device *dev)
1094 } 1094 }
1095 } 1095 }
1096 /* Change buffer ownership for this last frame, back to the adapter */ 1096 /* Change buffer ownership for this last frame, back to the adapter */
1097 for (; lp->rx_old != entry; lp->rx_old = (++lp->rx_old) & lp->rxRingMask) { 1097 for (; lp->rx_old != entry; lp->rx_old = (lp->rx_old + 1) & lp->rxRingMask) {
1098 writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base); 1098 writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base);
1099 } 1099 }
1100 writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base); 1100 writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base);
@@ -1103,7 +1103,7 @@ static int depca_rx(struct net_device *dev)
1103 /* 1103 /*
1104 ** Update entry information 1104 ** Update entry information
1105 */ 1105 */
1106 lp->rx_new = (++lp->rx_new) & lp->rxRingMask; 1106 lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask;
1107 } 1107 }
1108 1108
1109 return 0; 1109 return 0;
@@ -1148,7 +1148,7 @@ static int depca_tx(struct net_device *dev)
1148 } 1148 }
1149 1149
1150 /* Update all the pointers */ 1150 /* Update all the pointers */
1151 lp->tx_old = (++lp->tx_old) & lp->txRingMask; 1151 lp->tx_old = (lp->tx_old + 1) & lp->txRingMask;
1152 } 1152 }
1153 1153
1154 return 0; 1154 return 0;
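
These three depca changes fix genuine undefined behavior, not just style: an expression like lp->rx_old = (++lp->rx_old) & lp->rxRingMask modifies lp->rx_old twice without any sequencing between the writes, so the compiler may produce either result (and newer GCCs warn about it). The replacement reads once and writes once. A standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned int idx = 3, mask = 3;

	/* undefined: idx = (++idx) & mask;  two unsequenced writes to idx */

	idx = (idx + 1) & mask;	/* well defined: single read, single write */
	printf("next ring index: %u\n", idx);	/* wraps 3 -> 0 */
	return 0;
}
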
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index e1a8216ff692..c05db6046050 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -1753,8 +1753,6 @@ rio_close (struct net_device *dev)
1753 1753
1754 /* Free all the skbuffs in the queue. */ 1754 /* Free all the skbuffs in the queue. */
1755 for (i = 0; i < RX_RING_SIZE; i++) { 1755 for (i = 0; i < RX_RING_SIZE; i++) {
1756 np->rx_ring[i].status = 0;
1757 np->rx_ring[i].fraginfo = 0;
1758 skb = np->rx_skbuff[i]; 1756 skb = np->rx_skbuff[i];
1759 if (skb) { 1757 if (skb) {
1760 pci_unmap_single(np->pdev, 1758 pci_unmap_single(np->pdev,
@@ -1763,6 +1761,8 @@ rio_close (struct net_device *dev)
1763 dev_kfree_skb (skb); 1761 dev_kfree_skb (skb);
1764 np->rx_skbuff[i] = NULL; 1762 np->rx_skbuff[i] = NULL;
1765 } 1763 }
1764 np->rx_ring[i].status = 0;
1765 np->rx_ring[i].fraginfo = 0;
1766 } 1766 }
1767 for (i = 0; i < TX_RING_SIZE; i++) { 1767 for (i = 0; i < TX_RING_SIZE; i++) {
1768 skb = np->tx_skbuff[i]; 1768 skb = np->tx_skbuff[i];
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index aed223b1b897..7501d977d992 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -124,6 +124,7 @@ static s32 e1000_set_phy_type(struct e1000_hw *hw)
124 case M88E1000_I_PHY_ID: 124 case M88E1000_I_PHY_ID:
125 case M88E1011_I_PHY_ID: 125 case M88E1011_I_PHY_ID:
126 case M88E1111_I_PHY_ID: 126 case M88E1111_I_PHY_ID:
127 case M88E1118_E_PHY_ID:
127 hw->phy_type = e1000_phy_m88; 128 hw->phy_type = e1000_phy_m88;
128 break; 129 break;
129 case IGP01E1000_I_PHY_ID: 130 case IGP01E1000_I_PHY_ID:
@@ -3222,7 +3223,8 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
3222 break; 3223 break;
3223 case e1000_ce4100: 3224 case e1000_ce4100:
3224 if ((hw->phy_id == RTL8211B_PHY_ID) || 3225 if ((hw->phy_id == RTL8211B_PHY_ID) ||
3225 (hw->phy_id == RTL8201N_PHY_ID)) 3226 (hw->phy_id == RTL8201N_PHY_ID) ||
3227 (hw->phy_id == M88E1118_E_PHY_ID))
3226 match = true; 3228 match = true;
3227 break; 3229 break;
3228 case e1000_82541: 3230 case e1000_82541:
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 196eeda2dd6c..c70b23d52284 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -2917,6 +2917,7 @@ struct e1000_host_command_info {
2917#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID 2917#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID
2918#define M88E1011_I_REV_4 0x04 2918#define M88E1011_I_REV_4 0x04
2919#define M88E1111_I_PHY_ID 0x01410CC0 2919#define M88E1111_I_PHY_ID 0x01410CC0
2920#define M88E1118_E_PHY_ID 0x01410E40
2920#define L1LXT971A_PHY_ID 0x001378E0 2921#define L1LXT971A_PHY_ID 0x001378E0
2921 2922
2922#define RTL8211B_PHY_ID 0x001CC910 2923#define RTL8211B_PHY_ID 0x001CC910
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index e610e1369053..00bf595ebd67 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -364,6 +364,7 @@ struct e1000_adapter {
364 /* structs defined in e1000_hw.h */ 364 /* structs defined in e1000_hw.h */
365 struct e1000_hw hw; 365 struct e1000_hw hw;
366 366
367 spinlock_t stats64_lock;
367 struct e1000_hw_stats stats; 368 struct e1000_hw_stats stats;
368 struct e1000_phy_info phy_info; 369 struct e1000_phy_info phy_info;
369 struct e1000_phy_stats phy_stats; 370 struct e1000_phy_stats phy_stats;
@@ -494,7 +495,9 @@ extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter);
494extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter); 495extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
495extern void e1000e_free_rx_resources(struct e1000_adapter *adapter); 496extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
496extern void e1000e_free_tx_resources(struct e1000_adapter *adapter); 497extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
497extern void e1000e_update_stats(struct e1000_adapter *adapter); 498extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
499 struct rtnl_link_stats64
500 *stats);
498extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); 501extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
499extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); 502extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
500extern void e1000e_get_hw_control(struct e1000_adapter *adapter); 503extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index fa08b6336cfb..65ef9b5548d8 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -46,15 +46,15 @@ struct e1000_stats {
46}; 46};
47 47
48#define E1000_STAT(str, m) { \ 48#define E1000_STAT(str, m) { \
49 .stat_string = str, \ 49 .stat_string = str, \
50 .type = E1000_STATS, \ 50 .type = E1000_STATS, \
51 .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \ 51 .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
52 .stat_offset = offsetof(struct e1000_adapter, m) } 52 .stat_offset = offsetof(struct e1000_adapter, m) }
53#define E1000_NETDEV_STAT(str, m) { \ 53#define E1000_NETDEV_STAT(str, m) { \
54 .stat_string = str, \ 54 .stat_string = str, \
55 .type = NETDEV_STATS, \ 55 .type = NETDEV_STATS, \
56 .sizeof_stat = sizeof(((struct net_device *)0)->m), \ 56 .sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \
57 .stat_offset = offsetof(struct net_device, m) } 57 .stat_offset = offsetof(struct rtnl_link_stats64, m) }
58 58
59static const struct e1000_stats e1000_gstrings_stats[] = { 59static const struct e1000_stats e1000_gstrings_stats[] = {
60 E1000_STAT("rx_packets", stats.gprc), 60 E1000_STAT("rx_packets", stats.gprc),
@@ -65,21 +65,21 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
65 E1000_STAT("tx_broadcast", stats.bptc), 65 E1000_STAT("tx_broadcast", stats.bptc),
66 E1000_STAT("rx_multicast", stats.mprc), 66 E1000_STAT("rx_multicast", stats.mprc),
67 E1000_STAT("tx_multicast", stats.mptc), 67 E1000_STAT("tx_multicast", stats.mptc),
68 E1000_NETDEV_STAT("rx_errors", stats.rx_errors), 68 E1000_NETDEV_STAT("rx_errors", rx_errors),
69 E1000_NETDEV_STAT("tx_errors", stats.tx_errors), 69 E1000_NETDEV_STAT("tx_errors", tx_errors),
70 E1000_NETDEV_STAT("tx_dropped", stats.tx_dropped), 70 E1000_NETDEV_STAT("tx_dropped", tx_dropped),
71 E1000_STAT("multicast", stats.mprc), 71 E1000_STAT("multicast", stats.mprc),
72 E1000_STAT("collisions", stats.colc), 72 E1000_STAT("collisions", stats.colc),
73 E1000_NETDEV_STAT("rx_length_errors", stats.rx_length_errors), 73 E1000_NETDEV_STAT("rx_length_errors", rx_length_errors),
74 E1000_NETDEV_STAT("rx_over_errors", stats.rx_over_errors), 74 E1000_NETDEV_STAT("rx_over_errors", rx_over_errors),
75 E1000_STAT("rx_crc_errors", stats.crcerrs), 75 E1000_STAT("rx_crc_errors", stats.crcerrs),
76 E1000_NETDEV_STAT("rx_frame_errors", stats.rx_frame_errors), 76 E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors),
77 E1000_STAT("rx_no_buffer_count", stats.rnbc), 77 E1000_STAT("rx_no_buffer_count", stats.rnbc),
78 E1000_STAT("rx_missed_errors", stats.mpc), 78 E1000_STAT("rx_missed_errors", stats.mpc),
79 E1000_STAT("tx_aborted_errors", stats.ecol), 79 E1000_STAT("tx_aborted_errors", stats.ecol),
80 E1000_STAT("tx_carrier_errors", stats.tncrs), 80 E1000_STAT("tx_carrier_errors", stats.tncrs),
81 E1000_NETDEV_STAT("tx_fifo_errors", stats.tx_fifo_errors), 81 E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors),
82 E1000_NETDEV_STAT("tx_heartbeat_errors", stats.tx_heartbeat_errors), 82 E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors),
83 E1000_STAT("tx_window_errors", stats.latecol), 83 E1000_STAT("tx_window_errors", stats.latecol),
84 E1000_STAT("tx_abort_late_coll", stats.latecol), 84 E1000_STAT("tx_abort_late_coll", stats.latecol),
85 E1000_STAT("tx_deferred_ok", stats.dc), 85 E1000_STAT("tx_deferred_ok", stats.dc),
@@ -684,20 +684,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
684 rx_old = adapter->rx_ring; 684 rx_old = adapter->rx_ring;
685 685
686 err = -ENOMEM; 686 err = -ENOMEM;
687 tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); 687 tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL);
688 if (!tx_ring) 688 if (!tx_ring)
689 goto err_alloc_tx; 689 goto err_alloc_tx;
690 /*
691 * use a memcpy to save any previously configured
692 * items like napi structs from having to be
693 * reinitialized
694 */
695 memcpy(tx_ring, tx_old, sizeof(struct e1000_ring));
696 690
697 rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); 691 rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL);
698 if (!rx_ring) 692 if (!rx_ring)
699 goto err_alloc_rx; 693 goto err_alloc_rx;
700 memcpy(rx_ring, rx_old, sizeof(struct e1000_ring));
701 694
702 adapter->tx_ring = tx_ring; 695 adapter->tx_ring = tx_ring;
703 adapter->rx_ring = rx_ring; 696 adapter->rx_ring = rx_ring;
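
kmemdup() collapses the kzalloc()+memcpy() pair into a single call with the same effect, preserving the previously configured fields (such as napi structs) that the deleted comment described, while dropping the pointless zeroing of memory that is immediately overwritten. A minimal sketch with a hypothetical ring type:

#include <linux/slab.h>
#include <linux/string.h>

struct ring { int count; void *desc; };

/* before: allocate zeroed memory, then copy the old contents over it */
static struct ring *dup_ring_old(const struct ring *old)
{
	struct ring *r = kzalloc(sizeof(*r), GFP_KERNEL);

	if (r)
		memcpy(r, old, sizeof(*r));
	return r;
}

/* after: one call, same semantics, no redundant zeroing */
static struct ring *dup_ring_new(const struct ring *old)
{
	return kmemdup(old, sizeof(*old), GFP_KERNEL);
}
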
@@ -1255,7 +1248,6 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1255{ 1248{
1256 struct e1000_hw *hw = &adapter->hw; 1249 struct e1000_hw *hw = &adapter->hw;
1257 u32 ctrl_reg = 0; 1250 u32 ctrl_reg = 0;
1258 u32 stat_reg = 0;
1259 u16 phy_reg = 0; 1251 u16 phy_reg = 0;
1260 s32 ret_val = 0; 1252 s32 ret_val = 0;
1261 1253
@@ -1363,8 +1355,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1363 * Set the ILOS bit on the fiber Nic if half duplex link is 1355 * Set the ILOS bit on the fiber Nic if half duplex link is
1364 * detected. 1356 * detected.
1365 */ 1357 */
1366 stat_reg = er32(STATUS); 1358 if ((er32(STATUS) & E1000_STATUS_FD) == 0)
1367 if ((stat_reg & E1000_STATUS_FD) == 0)
1368 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); 1359 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
1369 } 1360 }
1370 1361
@@ -1972,8 +1963,15 @@ static int e1000_set_coalesce(struct net_device *netdev,
1972static int e1000_nway_reset(struct net_device *netdev) 1963static int e1000_nway_reset(struct net_device *netdev)
1973{ 1964{
1974 struct e1000_adapter *adapter = netdev_priv(netdev); 1965 struct e1000_adapter *adapter = netdev_priv(netdev);
1975 if (netif_running(netdev)) 1966
1976 e1000e_reinit_locked(adapter); 1967 if (!netif_running(netdev))
1968 return -EAGAIN;
1969
1970 if (!adapter->hw.mac.autoneg)
1971 return -EINVAL;
1972
1973 e1000e_reinit_locked(adapter);
1974
1977 return 0; 1975 return 0;
1978} 1976}
1979 1977
@@ -1982,14 +1980,15 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
1982 u64 *data) 1980 u64 *data)
1983{ 1981{
1984 struct e1000_adapter *adapter = netdev_priv(netdev); 1982 struct e1000_adapter *adapter = netdev_priv(netdev);
1983 struct rtnl_link_stats64 net_stats;
1985 int i; 1984 int i;
1986 char *p = NULL; 1985 char *p = NULL;
1987 1986
1988 e1000e_update_stats(adapter); 1987 e1000e_get_stats64(netdev, &net_stats);
1989 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { 1988 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
1990 switch (e1000_gstrings_stats[i].type) { 1989 switch (e1000_gstrings_stats[i].type) {
1991 case NETDEV_STATS: 1990 case NETDEV_STATS:
1992 p = (char *) netdev + 1991 p = (char *) &net_stats +
1993 e1000_gstrings_stats[i].stat_offset; 1992 e1000_gstrings_stats[i].stat_offset;
1994 break; 1993 break;
1995 case E1000_STATS: 1994 case E1000_STATS:
@@ -2014,7 +2013,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
2014 2013
2015 switch (stringset) { 2014 switch (stringset) {
2016 case ETH_SS_TEST: 2015 case ETH_SS_TEST:
2017 memcpy(data, *e1000_gstrings_test, sizeof(e1000_gstrings_test)); 2016 memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test));
2018 break; 2017 break;
2019 case ETH_SS_STATS: 2018 case ETH_SS_STATS:
2020 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { 2019 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
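
With the E1000_NETDEV_STAT change, each netdev-sourced entry now records an offset into struct rtnl_link_stats64 instead of struct net_device, which is why e1000_get_ethtool_stats() above bases its pointer arithmetic on a local snapshot filled by e1000e_get_stats64() rather than on netdev itself. A sketch of that offset-based lookup (the helper is illustrative, not driver code):

#include <linux/if_link.h>
#include <linux/stddef.h>
#include <linux/types.h>

/* e.g. the "rx_errors" entry would carry this offset */
static const size_t rx_errors_off =
	offsetof(struct rtnl_link_stats64, rx_errors);

static u64 read_stat(const struct rtnl_link_stats64 *snap, size_t off)
{
	/* every rtnl_link_stats64 member is a u64, so one cast suffices */
	return *(const u64 *)((const char *)snap + off);
}
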
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index fb46974cfec1..232b42b7f7ce 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -2104,7 +2104,6 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2104{ 2104{
2105 union ich8_hws_flash_status hsfsts; 2105 union ich8_hws_flash_status hsfsts;
2106 s32 ret_val = -E1000_ERR_NVM; 2106 s32 ret_val = -E1000_ERR_NVM;
2107 s32 i = 0;
2108 2107
2109 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 2108 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2110 2109
@@ -2140,6 +2139,8 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2140 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); 2139 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
2141 ret_val = 0; 2140 ret_val = 0;
2142 } else { 2141 } else {
2142 s32 i = 0;
2143
2143 /* 2144 /*
2144 * Otherwise poll for sometime so the current 2145 * Otherwise poll for sometime so the current
2145 * cycle has a chance to end before giving up. 2146 * cycle has a chance to end before giving up.
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 68aa1749bf66..96921de5df2e 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1978,15 +1978,15 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
1978{ 1978{
1979 struct e1000_nvm_info *nvm = &hw->nvm; 1979 struct e1000_nvm_info *nvm = &hw->nvm;
1980 u32 eecd = er32(EECD); 1980 u32 eecd = er32(EECD);
1981 u16 timeout = 0;
1982 u8 spi_stat_reg; 1981 u8 spi_stat_reg;
1983 1982
1984 if (nvm->type == e1000_nvm_eeprom_spi) { 1983 if (nvm->type == e1000_nvm_eeprom_spi) {
1984 u16 timeout = NVM_MAX_RETRY_SPI;
1985
1985 /* Clear SK and CS */ 1986 /* Clear SK and CS */
1986 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); 1987 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
1987 ew32(EECD, eecd); 1988 ew32(EECD, eecd);
1988 udelay(1); 1989 udelay(1);
1989 timeout = NVM_MAX_RETRY_SPI;
1990 1990
1991 /* 1991 /*
1992 * Read "Status Register" repeatedly until the LSB is cleared. 1992 * Read "Status Register" repeatedly until the LSB is cleared.
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 1c18f26b0812..ec0b803c501e 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -900,8 +900,6 @@ next_desc:
900 900
901 adapter->total_rx_bytes += total_rx_bytes; 901 adapter->total_rx_bytes += total_rx_bytes;
902 adapter->total_rx_packets += total_rx_packets; 902 adapter->total_rx_packets += total_rx_packets;
903 netdev->stats.rx_bytes += total_rx_bytes;
904 netdev->stats.rx_packets += total_rx_packets;
905 return cleaned; 903 return cleaned;
906} 904}
907 905
@@ -937,6 +935,9 @@ static void e1000_print_hw_hang(struct work_struct *work)
937 u16 phy_status, phy_1000t_status, phy_ext_status; 935 u16 phy_status, phy_1000t_status, phy_ext_status;
938 u16 pci_status; 936 u16 pci_status;
939 937
938 if (test_bit(__E1000_DOWN, &adapter->state))
939 return;
940
940 e1e_rphy(hw, PHY_STATUS, &phy_status); 941 e1e_rphy(hw, PHY_STATUS, &phy_status);
941 e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); 942 e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
942 e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); 943 e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
@@ -1057,8 +1058,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
1057 } 1058 }
1058 adapter->total_tx_bytes += total_tx_bytes; 1059 adapter->total_tx_bytes += total_tx_bytes;
1059 adapter->total_tx_packets += total_tx_packets; 1060 adapter->total_tx_packets += total_tx_packets;
1060 netdev->stats.tx_bytes += total_tx_bytes;
1061 netdev->stats.tx_packets += total_tx_packets;
1062 return count < tx_ring->count; 1061 return count < tx_ring->count;
1063} 1062}
1064 1063
@@ -1245,8 +1244,6 @@ next_desc:
1245 1244
1246 adapter->total_rx_bytes += total_rx_bytes; 1245 adapter->total_rx_bytes += total_rx_bytes;
1247 adapter->total_rx_packets += total_rx_packets; 1246 adapter->total_rx_packets += total_rx_packets;
1248 netdev->stats.rx_bytes += total_rx_bytes;
1249 netdev->stats.rx_packets += total_rx_packets;
1250 return cleaned; 1247 return cleaned;
1251} 1248}
1252 1249
@@ -1426,8 +1423,6 @@ next_desc:
1426 1423
1427 adapter->total_rx_bytes += total_rx_bytes; 1424 adapter->total_rx_bytes += total_rx_bytes;
1428 adapter->total_rx_packets += total_rx_packets; 1425 adapter->total_rx_packets += total_rx_packets;
1429 netdev->stats.rx_bytes += total_rx_bytes;
1430 netdev->stats.rx_packets += total_rx_packets;
1431 return cleaned; 1426 return cleaned;
1432} 1427}
1433 1428
@@ -1506,6 +1501,9 @@ static void e1000e_downshift_workaround(struct work_struct *work)
1506 struct e1000_adapter *adapter = container_of(work, 1501 struct e1000_adapter *adapter = container_of(work,
1507 struct e1000_adapter, downshift_task); 1502 struct e1000_adapter, downshift_task);
1508 1503
1504 if (test_bit(__E1000_DOWN, &adapter->state))
1505 return;
1506
1509 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); 1507 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
1510} 1508}
1511 1509
@@ -1851,7 +1849,9 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
1851 int err = 0, vector = 0; 1849 int err = 0, vector = 0;
1852 1850
1853 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 1851 if (strlen(netdev->name) < (IFNAMSIZ - 5))
1854 sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name); 1852 snprintf(adapter->rx_ring->name,
1853 sizeof(adapter->rx_ring->name) - 1,
1854 "%s-rx-0", netdev->name);
1855 else 1855 else
1856 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); 1856 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
1857 err = request_irq(adapter->msix_entries[vector].vector, 1857 err = request_irq(adapter->msix_entries[vector].vector,
@@ -1864,7 +1864,9 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
1864 vector++; 1864 vector++;
1865 1865
1866 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 1866 if (strlen(netdev->name) < (IFNAMSIZ - 5))
1867 sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name); 1867 snprintf(adapter->tx_ring->name,
1868 sizeof(adapter->tx_ring->name) - 1,
1869 "%s-tx-0", netdev->name);
1868 else 1870 else
1869 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); 1871 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
1870 err = request_irq(adapter->msix_entries[vector].vector, 1872 err = request_irq(adapter->msix_entries[vector].vector,
@@ -2728,7 +2730,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2728{ 2730{
2729 struct e1000_hw *hw = &adapter->hw; 2731 struct e1000_hw *hw = &adapter->hw;
2730 u32 rctl, rfctl; 2732 u32 rctl, rfctl;
2731 u32 psrctl = 0;
2732 u32 pages = 0; 2733 u32 pages = 0;
2733 2734
2734 /* Workaround Si errata on 82579 - configure jumbo frame flow */ 2735 /* Workaround Si errata on 82579 - configure jumbo frame flow */
@@ -2827,6 +2828,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2827 adapter->rx_ps_pages = 0; 2828 adapter->rx_ps_pages = 0;
2828 2829
2829 if (adapter->rx_ps_pages) { 2830 if (adapter->rx_ps_pages) {
2831 u32 psrctl = 0;
2832
2830 /* Configure extra packet-split registers */ 2833 /* Configure extra packet-split registers */
2831 rfctl = er32(RFCTL); 2834 rfctl = er32(RFCTL);
2832 rfctl |= E1000_RFCTL_EXTEN; 2835 rfctl |= E1000_RFCTL_EXTEN;
@@ -3028,7 +3031,6 @@ static void e1000_set_multi(struct net_device *netdev)
3028 struct netdev_hw_addr *ha; 3031 struct netdev_hw_addr *ha;
3029 u8 *mta_list; 3032 u8 *mta_list;
3030 u32 rctl; 3033 u32 rctl;
3031 int i;
3032 3034
3033 /* Check for Promiscuous and All Multicast modes */ 3035 /* Check for Promiscuous and All Multicast modes */
3034 3036
@@ -3051,12 +3053,13 @@ static void e1000_set_multi(struct net_device *netdev)
3051 ew32(RCTL, rctl); 3053 ew32(RCTL, rctl);
3052 3054
3053 if (!netdev_mc_empty(netdev)) { 3055 if (!netdev_mc_empty(netdev)) {
3056 int i = 0;
3057
3054 mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); 3058 mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
3055 if (!mta_list) 3059 if (!mta_list)
3056 return; 3060 return;
3057 3061
3058 /* prepare a packed array of only addresses. */ 3062 /* prepare a packed array of only addresses. */
3059 i = 0;
3060 netdev_for_each_mc_addr(ha, netdev) 3063 netdev_for_each_mc_addr(ha, netdev)
3061 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); 3064 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
3062 3065
@@ -3338,6 +3341,23 @@ int e1000e_up(struct e1000_adapter *adapter)
3338 return 0; 3341 return 0;
3339} 3342}
3340 3343
3344static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
3345{
3346 struct e1000_hw *hw = &adapter->hw;
3347
3348 if (!(adapter->flags2 & FLAG2_DMA_BURST))
3349 return;
3350
3351 /* flush pending descriptor writebacks to memory */
3352 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3353 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
3354
3355 /* execute the writes immediately */
3356 e1e_flush();
3357}
3358
3359static void e1000e_update_stats(struct e1000_adapter *adapter);
3360
3341void e1000e_down(struct e1000_adapter *adapter) 3361void e1000e_down(struct e1000_adapter *adapter)
3342{ 3362{
3343 struct net_device *netdev = adapter->netdev; 3363 struct net_device *netdev = adapter->netdev;
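
e1000e_flush_descriptors() factors the FLAG2_DMA_BURST "flush partial descriptors" sequence out of the watchdog so that e1000e_down() below can reuse it. The explicit e1e_flush() matters because MMIO writes are posted; in this driver e1e_flush() is a read of the STATUS register, which forces the preceding writes out to the device. A generic sketch of that posted-write flush idiom (register offsets illustrative):

#include <linux/io.h>
#include <linux/types.h>

#define REG_TIDV	0x03820	/* illustrative offsets */
#define REG_STATUS	0x00008

static void flush_posted_writes(void __iomem *hw_addr, u32 tidv)
{
	writel(tidv, hw_addr + REG_TIDV);	/* posted MMIO write */
	(void)readl(hw_addr + REG_STATUS);	/* read back forces it out */
}
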
@@ -3372,11 +3392,19 @@ void e1000e_down(struct e1000_adapter *adapter)
3372 del_timer_sync(&adapter->phy_info_timer); 3392 del_timer_sync(&adapter->phy_info_timer);
3373 3393
3374 netif_carrier_off(netdev); 3394 netif_carrier_off(netdev);
3395
3396 spin_lock(&adapter->stats64_lock);
3397 e1000e_update_stats(adapter);
3398 spin_unlock(&adapter->stats64_lock);
3399
3375 adapter->link_speed = 0; 3400 adapter->link_speed = 0;
3376 adapter->link_duplex = 0; 3401 adapter->link_duplex = 0;
3377 3402
3378 if (!pci_channel_offline(adapter->pdev)) 3403 if (!pci_channel_offline(adapter->pdev))
3379 e1000e_reset(adapter); 3404 e1000e_reset(adapter);
3405
3406 e1000e_flush_descriptors(adapter);
3407
3380 e1000_clean_tx_ring(adapter); 3408 e1000_clean_tx_ring(adapter);
3381 e1000_clean_rx_ring(adapter); 3409 e1000_clean_rx_ring(adapter);
3382 3410
@@ -3413,6 +3441,8 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
3413 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 3441 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3414 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 3442 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3415 3443
3444 spin_lock_init(&adapter->stats64_lock);
3445
3416 e1000e_set_interrupt_capability(adapter); 3446 e1000e_set_interrupt_capability(adapter);
3417 3447
3418 if (e1000_alloc_queues(adapter)) 3448 if (e1000_alloc_queues(adapter))
@@ -3765,6 +3795,10 @@ static void e1000e_update_phy_task(struct work_struct *work)
3765{ 3795{
3766 struct e1000_adapter *adapter = container_of(work, 3796 struct e1000_adapter *adapter = container_of(work,
3767 struct e1000_adapter, update_phy_task); 3797 struct e1000_adapter, update_phy_task);
3798
3799 if (test_bit(__E1000_DOWN, &adapter->state))
3800 return;
3801
3768 e1000_get_phy_info(&adapter->hw); 3802 e1000_get_phy_info(&adapter->hw);
3769} 3803}
3770 3804
@@ -3775,6 +3809,10 @@ static void e1000e_update_phy_task(struct work_struct *work)
3775static void e1000_update_phy_info(unsigned long data) 3809static void e1000_update_phy_info(unsigned long data)
3776{ 3810{
3777 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 3811 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
3812
3813 if (test_bit(__E1000_DOWN, &adapter->state))
3814 return;
3815
3778 schedule_work(&adapter->update_phy_task); 3816 schedule_work(&adapter->update_phy_task);
3779} 3817}
3780 3818
@@ -3886,7 +3924,7 @@ release:
3886 * e1000e_update_stats - Update the board statistics counters 3924 * e1000e_update_stats - Update the board statistics counters
3887 * @adapter: board private structure 3925 * @adapter: board private structure
3888 **/ 3926 **/
3889void e1000e_update_stats(struct e1000_adapter *adapter) 3927static void e1000e_update_stats(struct e1000_adapter *adapter)
3890{ 3928{
3891 struct net_device *netdev = adapter->netdev; 3929 struct net_device *netdev = adapter->netdev;
3892 struct e1000_hw *hw = &adapter->hw; 3930 struct e1000_hw *hw = &adapter->hw;
@@ -3998,10 +4036,11 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
3998{ 4036{
3999 struct e1000_hw *hw = &adapter->hw; 4037 struct e1000_hw *hw = &adapter->hw;
4000 struct e1000_phy_regs *phy = &adapter->phy_regs; 4038 struct e1000_phy_regs *phy = &adapter->phy_regs;
4001 int ret_val;
4002 4039
4003 if ((er32(STATUS) & E1000_STATUS_LU) && 4040 if ((er32(STATUS) & E1000_STATUS_LU) &&
4004 (adapter->hw.phy.media_type == e1000_media_type_copper)) { 4041 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
4042 int ret_val;
4043
4005 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr); 4044 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
4006 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr); 4045 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
4007 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise); 4046 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
@@ -4147,7 +4186,9 @@ static void e1000_watchdog_task(struct work_struct *work)
4147 struct e1000_ring *tx_ring = adapter->tx_ring; 4186 struct e1000_ring *tx_ring = adapter->tx_ring;
4148 struct e1000_hw *hw = &adapter->hw; 4187 struct e1000_hw *hw = &adapter->hw;
4149 u32 link, tctl; 4188 u32 link, tctl;
4150 int tx_pending = 0; 4189
4190 if (test_bit(__E1000_DOWN, &adapter->state))
4191 return;
4151 4192
4152 link = e1000e_has_link(adapter); 4193 link = e1000e_has_link(adapter);
4153 if ((netif_carrier_ok(netdev)) && link) { 4194 if ((netif_carrier_ok(netdev)) && link) {
@@ -4285,7 +4326,9 @@ static void e1000_watchdog_task(struct work_struct *work)
4285 } 4326 }
4286 4327
4287link_up: 4328link_up:
4329 spin_lock(&adapter->stats64_lock);
4288 e1000e_update_stats(adapter); 4330 e1000e_update_stats(adapter);
4331 spin_unlock(&adapter->stats64_lock);
4289 4332
4290 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 4333 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
4291 adapter->tpt_old = adapter->stats.tpt; 4334 adapter->tpt_old = adapter->stats.tpt;
@@ -4299,21 +4342,17 @@ link_up:
4299 4342
4300 e1000e_update_adaptive(&adapter->hw); 4343 e1000e_update_adaptive(&adapter->hw);
4301 4344
4302 if (!netif_carrier_ok(netdev)) { 4345 if (!netif_carrier_ok(netdev) &&
4303 tx_pending = (e1000_desc_unused(tx_ring) + 1 < 4346 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
4304 tx_ring->count); 4347 /*
4305 if (tx_pending) { 4348 * We've lost link, so the controller stops DMA,
4306 /* 4349 * but we've got queued Tx work that's never going
4307 * We've lost link, so the controller stops DMA, 4350 * to get done, so reset controller to flush Tx.
4308 * but we've got queued Tx work that's never going 4351 * (Do the reset outside of interrupt context).
4309 * to get done, so reset controller to flush Tx. 4352 */
4310 * (Do the reset outside of interrupt context). 4353 schedule_work(&adapter->reset_task);
4311 */ 4354 /* return immediately since reset is imminent */
4312 adapter->tx_timeout_count++; 4355 return;
4313 schedule_work(&adapter->reset_task);
4314 /* return immediately since reset is imminent */
4315 return;
4316 }
4317 } 4356 }
4318 4357
4319 /* Simple mode for Interrupt Throttle Rate (ITR) */ 4358 /* Simple mode for Interrupt Throttle Rate (ITR) */
@@ -4338,19 +4377,12 @@ link_up:
4338 else 4377 else
4339 ew32(ICS, E1000_ICS_RXDMT0); 4378 ew32(ICS, E1000_ICS_RXDMT0);
4340 4379
4380 /* flush pending descriptors to memory before detecting Tx hang */
4381 e1000e_flush_descriptors(adapter);
4382
4341 /* Force detection of hung controller every watchdog period */ 4383 /* Force detection of hung controller every watchdog period */
4342 adapter->detect_tx_hung = 1; 4384 adapter->detect_tx_hung = 1;
4343 4385
4344 /* flush partial descriptors to memory before detecting Tx hang */
4345 if (adapter->flags2 & FLAG2_DMA_BURST) {
4346 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
4347 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
4348 /*
4349 * no need to flush the writes because the timeout code does
4350 * an er32 first thing
4351 */
4352 }
4353
4354 /* 4386 /*
4355 * With 82571 controllers, LAA may be overwritten due to controller 4387 * With 82571 controllers, LAA may be overwritten due to controller
4356 * reset from the other port. Set the appropriate LAA in RAR[0] 4388 * reset from the other port. Set the appropriate LAA in RAR[0]
@@ -4384,13 +4416,13 @@ static int e1000_tso(struct e1000_adapter *adapter,
4384 u32 cmd_length = 0; 4416 u32 cmd_length = 0;
4385 u16 ipcse = 0, tucse, mss; 4417 u16 ipcse = 0, tucse, mss;
4386 u8 ipcss, ipcso, tucss, tucso, hdr_len; 4418 u8 ipcss, ipcso, tucss, tucso, hdr_len;
4387 int err;
4388 4419
4389 if (!skb_is_gso(skb)) 4420 if (!skb_is_gso(skb))
4390 return 0; 4421 return 0;
4391 4422
4392 if (skb_header_cloned(skb)) { 4423 if (skb_header_cloned(skb)) {
4393 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 4424 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
4425
4394 if (err) 4426 if (err)
4395 return err; 4427 return err;
4396 } 4428 }
@@ -4888,6 +4920,10 @@ static void e1000_reset_task(struct work_struct *work)
4888 struct e1000_adapter *adapter; 4920 struct e1000_adapter *adapter;
4889 adapter = container_of(work, struct e1000_adapter, reset_task); 4921 adapter = container_of(work, struct e1000_adapter, reset_task);
4890 4922
4923 /* don't run the task if already down */
4924 if (test_bit(__E1000_DOWN, &adapter->state))
4925 return;
4926
4891 if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && 4927 if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
4892 (adapter->flags & FLAG_RX_RESTART_NOW))) { 4928 (adapter->flags & FLAG_RX_RESTART_NOW))) {
4893 e1000e_dump(adapter); 4929 e1000e_dump(adapter);
@@ -4897,16 +4933,55 @@ static void e1000_reset_task(struct work_struct *work)
4897} 4933}
4898 4934
4899/** 4935/**
4900 * e1000_get_stats - Get System Network Statistics 4936 * e1000_get_stats64 - Get System Network Statistics
4901 * @netdev: network interface device structure 4937 * @netdev: network interface device structure
4938 * @stats: rtnl_link_stats64 pointer
4902 * 4939 *
4903 * Returns the address of the device statistics structure. 4940 * Returns the address of the device statistics structure.
4904 * The statistics are actually updated from the timer callback.
4905 **/ 4941 **/
4906static struct net_device_stats *e1000_get_stats(struct net_device *netdev) 4942struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
4943 struct rtnl_link_stats64 *stats)
4907{ 4944{
4908 /* only return the current stats */ 4945 struct e1000_adapter *adapter = netdev_priv(netdev);
4909 return &netdev->stats; 4946
4947 memset(stats, 0, sizeof(struct rtnl_link_stats64));
4948 spin_lock(&adapter->stats64_lock);
4949 e1000e_update_stats(adapter);
4950 /* Fill out the OS statistics structure */
4951 stats->rx_bytes = adapter->stats.gorc;
4952 stats->rx_packets = adapter->stats.gprc;
4953 stats->tx_bytes = adapter->stats.gotc;
4954 stats->tx_packets = adapter->stats.gptc;
4955 stats->multicast = adapter->stats.mprc;
4956 stats->collisions = adapter->stats.colc;
4957
4958 /* Rx Errors */
4959
4960 /*
4961 * RLEC on some newer hardware can be incorrect so build
4962 * our own version based on RUC and ROC
4963 */
4964 stats->rx_errors = adapter->stats.rxerrc +
4965 adapter->stats.crcerrs + adapter->stats.algnerrc +
4966 adapter->stats.ruc + adapter->stats.roc +
4967 adapter->stats.cexterr;
4968 stats->rx_length_errors = adapter->stats.ruc +
4969 adapter->stats.roc;
4970 stats->rx_crc_errors = adapter->stats.crcerrs;
4971 stats->rx_frame_errors = adapter->stats.algnerrc;
4972 stats->rx_missed_errors = adapter->stats.mpc;
4973
4974 /* Tx Errors */
4975 stats->tx_errors = adapter->stats.ecol +
4976 adapter->stats.latecol;
4977 stats->tx_aborted_errors = adapter->stats.ecol;
4978 stats->tx_window_errors = adapter->stats.latecol;
4979 stats->tx_carrier_errors = adapter->stats.tncrs;
4980
4981 /* Tx Dropped needs to be maintained elsewhere */
4982
4983 spin_unlock(&adapter->stats64_lock);
4984 return stats;
4910} 4985}
4911 4986
4912/** 4987/**
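
Switching from ndo_get_stats to ndo_get_stats64 means the counters are no longer a cached struct in the netdev refreshed by the watchdog: the core hands the driver a caller-supplied buffer and the driver computes a fresh 64-bit snapshot on demand, with stats64_lock keeping the watchdog's and readers' e1000e_update_stats() calls from interleaving. A simplified sketch of how the core side consumes it:

#include <linux/netdevice.h>

static void log_counters(struct net_device *netdev)
{
	struct rtnl_link_stats64 tmp;
	const struct rtnl_link_stats64 *s = dev_get_stats(netdev, &tmp);

	/* for a driver with ndo_get_stats64, this reflects the snapshot */
	pr_info("%s: rx %llu tx %llu\n", netdev->name,
		(unsigned long long)s->rx_packets,
		(unsigned long long)s->tx_packets);
}
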
@@ -5476,9 +5551,10 @@ static irqreturn_t e1000_intr_msix(int irq, void *data)
5476{ 5551{
5477 struct net_device *netdev = data; 5552 struct net_device *netdev = data;
5478 struct e1000_adapter *adapter = netdev_priv(netdev); 5553 struct e1000_adapter *adapter = netdev_priv(netdev);
5479 int vector, msix_irq;
5480 5554
5481 if (adapter->msix_entries) { 5555 if (adapter->msix_entries) {
5556 int vector, msix_irq;
5557
5482 vector = 0; 5558 vector = 0;
5483 msix_irq = adapter->msix_entries[vector].vector; 5559 msix_irq = adapter->msix_entries[vector].vector;
5484 disable_irq(msix_irq); 5560 disable_irq(msix_irq);
@@ -5675,7 +5751,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
5675 .ndo_open = e1000_open, 5751 .ndo_open = e1000_open,
5676 .ndo_stop = e1000_close, 5752 .ndo_stop = e1000_close,
5677 .ndo_start_xmit = e1000_xmit_frame, 5753 .ndo_start_xmit = e1000_xmit_frame,
5678 .ndo_get_stats = e1000_get_stats, 5754 .ndo_get_stats64 = e1000e_get_stats64,
5679 .ndo_set_multicast_list = e1000_set_multi, 5755 .ndo_set_multicast_list = e1000_set_multi,
5680 .ndo_set_mac_address = e1000_set_mac, 5756 .ndo_set_mac_address = e1000_set_mac,
5681 .ndo_change_mtu = e1000_change_mtu, 5757 .ndo_change_mtu = e1000_change_mtu,
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 6bea051b134b..6ae31fcfb629 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -2409,9 +2409,7 @@ static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg)
2409s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) 2409s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
2410{ 2410{
2411 s32 ret_val; 2411 s32 ret_val;
2412 u32 page_select = 0;
2413 u32 page = offset >> IGP_PAGE_SHIFT; 2412 u32 page = offset >> IGP_PAGE_SHIFT;
2414 u32 page_shift = 0;
2415 2413
2416 ret_val = hw->phy.ops.acquire(hw); 2414 ret_val = hw->phy.ops.acquire(hw);
2417 if (ret_val) 2415 if (ret_val)
@@ -2427,6 +2425,8 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
2427 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); 2425 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
2428 2426
2429 if (offset > MAX_PHY_MULTI_PAGE_REG) { 2427 if (offset > MAX_PHY_MULTI_PAGE_REG) {
2428 u32 page_shift, page_select;
2429
2430 /* 2430 /*
2431 * Page select is register 31 for phy address 1 and 22 for 2431 * Page select is register 31 for phy address 1 and 22 for
2432 * phy address 2 and 3. Page select is shifted only for 2432 * phy address 2 and 3. Page select is shifted only for
@@ -2468,9 +2468,7 @@ out:
2468s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) 2468s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
2469{ 2469{
2470 s32 ret_val; 2470 s32 ret_val;
2471 u32 page_select = 0;
2472 u32 page = offset >> IGP_PAGE_SHIFT; 2471 u32 page = offset >> IGP_PAGE_SHIFT;
2473 u32 page_shift = 0;
2474 2472
2475 ret_val = hw->phy.ops.acquire(hw); 2473 ret_val = hw->phy.ops.acquire(hw);
2476 if (ret_val) 2474 if (ret_val)
@@ -2486,6 +2484,8 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
2486 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); 2484 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
2487 2485
2488 if (offset > MAX_PHY_MULTI_PAGE_REG) { 2486 if (offset > MAX_PHY_MULTI_PAGE_REG) {
2487 u32 page_shift, page_select;
2488
2489 /* 2489 /*
2490 * Page select is register 31 for phy address 1 and 22 for 2490 * Page select is register 31 for phy address 1 and 22 for
2491 * phy address 2 and 3. Page select is shifted only for 2491 * phy address 2 and 3. Page select is shifted only for
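
The two phy.c hunks are pure scope narrowing: page_shift and page_select are
only used inside the multi-page branch, so declaring them there removes the
dead "= 0" initializers at function scope. A generic sketch of the idiom (not
e1000e code):

/* Declare a variable in the innermost block that uses it. */
static int scope_demo(u32 offset, u32 max_direct)
{
	int ret = 0;

	if (offset > max_direct) {
		u32 page_select = offset / max_direct;	/* lives only here */

		ret = (int)page_select;
	}

	return ret;
}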
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index 112c5aa9af7f..907b05a1c659 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -812,7 +812,7 @@ static void enc28j60_read_tsv(struct enc28j60_net *priv, u8 tsv[TSV_SIZE])
812 if (netif_msg_hw(priv)) 812 if (netif_msg_hw(priv))
813 printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n", 813 printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n",
814 endptr + 1); 814 endptr + 1);
815	enc28j60_mem_read(priv, endptr + 1, sizeof(tsv), tsv);
815	enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv);
816} 816}
817 817
818static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg, 818static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg,
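
The one-liner above fixes a classic C pitfall: an array parameter decays to a
pointer, so sizeof(tsv) inside the callee yields the pointer size (4 or 8),
not TSV_SIZE, and the transmit status vector was only partially read. A
standalone userspace demonstration of the decay:

#include <stdio.h>

#define TSV_SIZE 100

static void callee(unsigned char tsv[TSV_SIZE])
{
	/* tsv has decayed to unsigned char *, so this prints 4 or 8, not 100 */
	printf("inside callee:  sizeof(tsv) = %zu\n", sizeof(tsv));
}

int main(void)
{
	unsigned char tsv[TSV_SIZE];

	printf("at call site:   sizeof(tsv) = %zu\n", sizeof(tsv));	/* 100 */
	callee(tsv);
	return 0;
}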
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index e7b6c31880ba..2e573be16c13 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -1,5 +1,5 @@
1obj-$(CONFIG_ENIC) := enic.o 1obj-$(CONFIG_ENIC) := enic.o
2 2
3enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \ 3enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
4	enic_res.o vnic_dev.o vnic_rq.o vnic_vic.o
4	enic_res.o enic_dev.o vnic_dev.o vnic_rq.o vnic_vic.o
5 5
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index a937f49d9db7..aee5256e522b 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,13 +32,13 @@
32 32
33#define DRV_NAME "enic" 33#define DRV_NAME "enic"
34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" 34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
35#define DRV_VERSION		"1.4.1.10"
35#define DRV_VERSION		"2.1.1.9"
36#define DRV_COPYRIGHT		"Copyright 2008-2010 Cisco Systems, Inc"
36#define DRV_COPYRIGHT		"Copyright 2008-2011 Cisco Systems, Inc"
37 37
38#define ENIC_BARS_MAX 6 38#define ENIC_BARS_MAX 6
39 39
40#define ENIC_WQ_MAX		8
40#define ENIC_WQ_MAX		1
41#define ENIC_RQ_MAX		8
41#define ENIC_RQ_MAX		1
42#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX) 42#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
43#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2) 43#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
44 44
@@ -49,7 +49,7 @@ struct enic_msix_entry {
49 void *devid; 49 void *devid;
50}; 50};
51 51
52#define ENIC_SET_APPLIED		(1 << 0)
52#define ENIC_PORT_REQUEST_APPLIED	(1 << 0)
53#define ENIC_SET_REQUEST (1 << 1) 53#define ENIC_SET_REQUEST (1 << 1)
54#define ENIC_SET_NAME (1 << 2) 54#define ENIC_SET_NAME (1 << 2)
55#define ENIC_SET_INSTANCE (1 << 3) 55#define ENIC_SET_INSTANCE (1 << 3)
@@ -101,7 +101,6 @@ struct enic {
101 /* receive queue cache line section */ 101 /* receive queue cache line section */
102 ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX]; 102 ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
103 unsigned int rq_count; 103 unsigned int rq_count;
104 int (*rq_alloc_buf)(struct vnic_rq *rq);
105 u64 rq_truncated_pkts; 104 u64 rq_truncated_pkts;
106 u64 rq_bad_fcs; 105 u64 rq_bad_fcs;
107 struct napi_struct napi[ENIC_RQ_MAX]; 106 struct napi_struct napi[ENIC_RQ_MAX];
diff --git a/drivers/net/enic/enic_dev.c b/drivers/net/enic/enic_dev.c
new file mode 100644
index 000000000000..37ad3a1c82ee
--- /dev/null
+++ b/drivers/net/enic/enic_dev.c
@@ -0,0 +1,221 @@
1/*
2 * Copyright 2011 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#include <linux/pci.h>
20#include <linux/etherdevice.h>
21
22#include "vnic_dev.h"
23#include "vnic_vic.h"
24#include "enic_res.h"
25#include "enic.h"
26#include "enic_dev.h"
27
28int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info)
29{
30 int err;
31
32 spin_lock(&enic->devcmd_lock);
33 err = vnic_dev_fw_info(enic->vdev, fw_info);
34 spin_unlock(&enic->devcmd_lock);
35
36 return err;
37}
38
39int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
40{
41 int err;
42
43 spin_lock(&enic->devcmd_lock);
44 err = vnic_dev_stats_dump(enic->vdev, vstats);
45 spin_unlock(&enic->devcmd_lock);
46
47 return err;
48}
49
50int enic_dev_add_station_addr(struct enic *enic)
51{
52 int err;
53
54 if (!is_valid_ether_addr(enic->netdev->dev_addr))
55 return -EADDRNOTAVAIL;
56
57 spin_lock(&enic->devcmd_lock);
58 err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
59 spin_unlock(&enic->devcmd_lock);
60
61 return err;
62}
63
64int enic_dev_del_station_addr(struct enic *enic)
65{
66 int err;
67
68 if (!is_valid_ether_addr(enic->netdev->dev_addr))
69 return -EADDRNOTAVAIL;
70
71 spin_lock(&enic->devcmd_lock);
72 err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
73 spin_unlock(&enic->devcmd_lock);
74
75 return err;
76}
77
78int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
79 int broadcast, int promisc, int allmulti)
80{
81 int err;
82
83 spin_lock(&enic->devcmd_lock);
84 err = vnic_dev_packet_filter(enic->vdev, directed,
85 multicast, broadcast, promisc, allmulti);
86 spin_unlock(&enic->devcmd_lock);
87
88 return err;
89}
90
91int enic_dev_add_addr(struct enic *enic, u8 *addr)
92{
93 int err;
94
95 spin_lock(&enic->devcmd_lock);
96 err = vnic_dev_add_addr(enic->vdev, addr);
97 spin_unlock(&enic->devcmd_lock);
98
99 return err;
100}
101
102int enic_dev_del_addr(struct enic *enic, u8 *addr)
103{
104 int err;
105
106 spin_lock(&enic->devcmd_lock);
107 err = vnic_dev_del_addr(enic->vdev, addr);
108 spin_unlock(&enic->devcmd_lock);
109
110 return err;
111}
112
113int enic_dev_notify_unset(struct enic *enic)
114{
115 int err;
116
117 spin_lock(&enic->devcmd_lock);
118 err = vnic_dev_notify_unset(enic->vdev);
119 spin_unlock(&enic->devcmd_lock);
120
121 return err;
122}
123
124int enic_dev_hang_notify(struct enic *enic)
125{
126 int err;
127
128 spin_lock(&enic->devcmd_lock);
129 err = vnic_dev_hang_notify(enic->vdev);
130 spin_unlock(&enic->devcmd_lock);
131
132 return err;
133}
134
135int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
136{
137 int err;
138
139 spin_lock(&enic->devcmd_lock);
140 err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
141 IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
142 spin_unlock(&enic->devcmd_lock);
143
144 return err;
145}
146
147int enic_dev_enable(struct enic *enic)
148{
149 int err;
150
151 spin_lock(&enic->devcmd_lock);
152 err = vnic_dev_enable_wait(enic->vdev);
153 spin_unlock(&enic->devcmd_lock);
154
155 return err;
156}
157
158int enic_dev_disable(struct enic *enic)
159{
160 int err;
161
162 spin_lock(&enic->devcmd_lock);
163 err = vnic_dev_disable(enic->vdev);
164 spin_unlock(&enic->devcmd_lock);
165
166 return err;
167}
168
169int enic_vnic_dev_deinit(struct enic *enic)
170{
171 int err;
172
173 spin_lock(&enic->devcmd_lock);
174 err = vnic_dev_deinit(enic->vdev);
175 spin_unlock(&enic->devcmd_lock);
176
177 return err;
178}
179
180int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
181{
182 int err;
183
184 spin_lock(&enic->devcmd_lock);
185 err = vnic_dev_init_prov(enic->vdev,
186 (u8 *)vp, vic_provinfo_size(vp));
187 spin_unlock(&enic->devcmd_lock);
188
189 return err;
190}
191
192int enic_dev_init_done(struct enic *enic, int *done, int *error)
193{
194 int err;
195
196 spin_lock(&enic->devcmd_lock);
197 err = vnic_dev_init_done(enic->vdev, done, error);
198 spin_unlock(&enic->devcmd_lock);
199
200 return err;
201}
202
203/* rtnl lock is held */
204void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
205{
206 struct enic *enic = netdev_priv(netdev);
207
208 spin_lock(&enic->devcmd_lock);
209 enic_add_vlan(enic, vid);
210 spin_unlock(&enic->devcmd_lock);
211}
212
213/* rtnl lock is held */
214void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
215{
216 struct enic *enic = netdev_priv(netdev);
217
218 spin_lock(&enic->devcmd_lock);
219 enic_del_vlan(enic, vid);
220 spin_unlock(&enic->devcmd_lock);
221}
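
Every helper in the new enic_dev.c has the same shape: take the adapter-wide
devcmd_lock, issue exactly one vnic_dev_* firmware command, drop the lock.
Purely as illustration (the driver deliberately spells each wrapper out), that
shape could be captured in a macro:

/* Illustrative only -- not part of the driver. */
#define ENIC_DEVCMD_WRAPPER(wrapper, devcmd)			\
static int wrapper(struct enic *enic)				\
{								\
	int err;						\
								\
	spin_lock(&enic->devcmd_lock);				\
	err = devcmd(enic->vdev);				\
	spin_unlock(&enic->devcmd_lock);			\
								\
	return err;						\
}

/* ENIC_DEVCMD_WRAPPER(enic_dev_disable, vnic_dev_disable) would expand
 * to the enic_dev_disable() defined above.
 */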
diff --git a/drivers/net/enic/enic_dev.h b/drivers/net/enic/enic_dev.h
new file mode 100644
index 000000000000..495f57fcb887
--- /dev/null
+++ b/drivers/net/enic/enic_dev.h
@@ -0,0 +1,41 @@
1/*
2 * Copyright 2011 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#ifndef _ENIC_DEV_H_
20#define _ENIC_DEV_H_
21
22int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info);
23int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats);
24int enic_dev_add_station_addr(struct enic *enic);
25int enic_dev_del_station_addr(struct enic *enic);
26int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
27 int broadcast, int promisc, int allmulti);
28int enic_dev_add_addr(struct enic *enic, u8 *addr);
29int enic_dev_del_addr(struct enic *enic, u8 *addr);
30void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
31void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
32int enic_dev_notify_unset(struct enic *enic);
33int enic_dev_hang_notify(struct enic *enic);
34int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
35int enic_dev_enable(struct enic *enic);
36int enic_dev_disable(struct enic *enic);
37int enic_vnic_dev_deinit(struct enic *enic);
38int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp);
39int enic_dev_init_done(struct enic *enic, int *done, int *error);
40
41#endif /* _ENIC_DEV_H_ */
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index a0af48c51fb3..4f1710e31eb4 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -44,6 +44,7 @@
44#include "vnic_vic.h" 44#include "vnic_vic.h"
45#include "enic_res.h" 45#include "enic_res.h"
46#include "enic.h" 46#include "enic.h"
47#include "enic_dev.h"
47 48
48#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ) 49#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ)
49#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS) 50#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS)
@@ -190,18 +191,6 @@ static int enic_get_settings(struct net_device *netdev,
190 return 0; 191 return 0;
191} 192}
192 193
193static int enic_dev_fw_info(struct enic *enic,
194 struct vnic_devcmd_fw_info **fw_info)
195{
196 int err;
197
198 spin_lock(&enic->devcmd_lock);
199 err = vnic_dev_fw_info(enic->vdev, fw_info);
200 spin_unlock(&enic->devcmd_lock);
201
202 return err;
203}
204
205static void enic_get_drvinfo(struct net_device *netdev, 194static void enic_get_drvinfo(struct net_device *netdev,
206 struct ethtool_drvinfo *drvinfo) 195 struct ethtool_drvinfo *drvinfo)
207{ 196{
@@ -246,17 +235,6 @@ static int enic_get_sset_count(struct net_device *netdev, int sset)
246 } 235 }
247} 236}
248 237
249static int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
250{
251 int err;
252
253 spin_lock(&enic->devcmd_lock);
254 err = vnic_dev_stats_dump(enic->vdev, vstats);
255 spin_unlock(&enic->devcmd_lock);
256
257 return err;
258}
259
260static void enic_get_ethtool_stats(struct net_device *netdev, 238static void enic_get_ethtool_stats(struct net_device *netdev,
261 struct ethtool_stats *stats, u64 *data) 239 struct ethtool_stats *stats, u64 *data)
262{ 240{
@@ -896,9 +874,10 @@ static struct net_device_stats *enic_get_stats(struct net_device *netdev)
896 return net_stats; 874 return net_stats;
897} 875}
898 876
899static void enic_reset_multicast_list(struct enic *enic)
877static void enic_reset_addr_lists(struct enic *enic)
900{ 878{
901 enic->mc_count = 0; 879 enic->mc_count = 0;
880 enic->uc_count = 0;
902 enic->flags = 0; 881 enic->flags = 0;
903} 882}
904 883
@@ -919,32 +898,6 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
919 return 0; 898 return 0;
920} 899}
921 900
922static int enic_dev_add_station_addr(struct enic *enic)
923{
924 int err = 0;
925
926 if (is_valid_ether_addr(enic->netdev->dev_addr)) {
927 spin_lock(&enic->devcmd_lock);
928 err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
929 spin_unlock(&enic->devcmd_lock);
930 }
931
932 return err;
933}
934
935static int enic_dev_del_station_addr(struct enic *enic)
936{
937 int err = 0;
938
939 if (is_valid_ether_addr(enic->netdev->dev_addr)) {
940 spin_lock(&enic->devcmd_lock);
941 err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
942 spin_unlock(&enic->devcmd_lock);
943 }
944
945 return err;
946}
947
948static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p) 901static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
949{ 902{
950 struct enic *enic = netdev_priv(netdev); 903 struct enic *enic = netdev_priv(netdev);
@@ -989,42 +942,7 @@ static int enic_set_mac_address(struct net_device *netdev, void *p)
989 return enic_dev_add_station_addr(enic); 942 return enic_dev_add_station_addr(enic);
990} 943}
991 944
992static int enic_dev_packet_filter(struct enic *enic, int directed,
945static void enic_update_multicast_addr_list(struct enic *enic)
993 int multicast, int broadcast, int promisc, int allmulti)
994{
995 int err;
996
997 spin_lock(&enic->devcmd_lock);
998 err = vnic_dev_packet_filter(enic->vdev, directed,
999 multicast, broadcast, promisc, allmulti);
1000 spin_unlock(&enic->devcmd_lock);
1001
1002 return err;
1003}
1004
1005static int enic_dev_add_addr(struct enic *enic, u8 *addr)
1006{
1007 int err;
1008
1009 spin_lock(&enic->devcmd_lock);
1010 err = vnic_dev_add_addr(enic->vdev, addr);
1011 spin_unlock(&enic->devcmd_lock);
1012
1013 return err;
1014}
1015
1016static int enic_dev_del_addr(struct enic *enic, u8 *addr)
1017{
1018 int err;
1019
1020 spin_lock(&enic->devcmd_lock);
1021 err = vnic_dev_del_addr(enic->vdev, addr);
1022 spin_unlock(&enic->devcmd_lock);
1023
1024 return err;
1025}
1026
1027static void enic_add_multicast_addr_list(struct enic *enic)
1028{ 946{
1029 struct net_device *netdev = enic->netdev; 947 struct net_device *netdev = enic->netdev;
1030 struct netdev_hw_addr *ha; 948 struct netdev_hw_addr *ha;
@@ -1079,7 +997,7 @@ static void enic_add_multicast_addr_list(struct enic *enic)
1079 enic->mc_count = mc_count; 997 enic->mc_count = mc_count;
1080} 998}
1081 999
1082static void enic_add_unicast_addr_list(struct enic *enic)
1000static void enic_update_unicast_addr_list(struct enic *enic)
1083{ 1001{
1084 struct net_device *netdev = enic->netdev; 1002 struct net_device *netdev = enic->netdev;
1085 struct netdev_hw_addr *ha; 1003 struct netdev_hw_addr *ha;
@@ -1156,9 +1074,9 @@ static void enic_set_rx_mode(struct net_device *netdev)
1156 } 1074 }
1157 1075
1158 if (!promisc) { 1076 if (!promisc) {
1159		enic_add_unicast_addr_list(enic);
1077		enic_update_unicast_addr_list(enic);
1160		if (!allmulti) 1078		if (!allmulti)
1161			enic_add_multicast_addr_list(enic);
1079			enic_update_multicast_addr_list(enic);
1162 } 1080 }
1163} 1081}
1164 1082
@@ -1170,26 +1088,6 @@ static void enic_vlan_rx_register(struct net_device *netdev,
1170 enic->vlan_group = vlan_group; 1088 enic->vlan_group = vlan_group;
1171} 1089}
1172 1090
1173/* rtnl lock is held */
1174static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1175{
1176 struct enic *enic = netdev_priv(netdev);
1177
1178 spin_lock(&enic->devcmd_lock);
1179 enic_add_vlan(enic, vid);
1180 spin_unlock(&enic->devcmd_lock);
1181}
1182
1183/* rtnl lock is held */
1184static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1185{
1186 struct enic *enic = netdev_priv(netdev);
1187
1188 spin_lock(&enic->devcmd_lock);
1189 enic_del_vlan(enic, vid);
1190 spin_unlock(&enic->devcmd_lock);
1191}
1192
1193/* netif_tx_lock held, BHs disabled */ 1091/* netif_tx_lock held, BHs disabled */
1194static void enic_tx_timeout(struct net_device *netdev) 1092static void enic_tx_timeout(struct net_device *netdev)
1195{ 1093{
@@ -1197,40 +1095,6 @@ static void enic_tx_timeout(struct net_device *netdev)
1197 schedule_work(&enic->reset); 1095 schedule_work(&enic->reset);
1198} 1096}
1199 1097
1200static int enic_vnic_dev_deinit(struct enic *enic)
1201{
1202 int err;
1203
1204 spin_lock(&enic->devcmd_lock);
1205 err = vnic_dev_deinit(enic->vdev);
1206 spin_unlock(&enic->devcmd_lock);
1207
1208 return err;
1209}
1210
1211static int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
1212{
1213 int err;
1214
1215 spin_lock(&enic->devcmd_lock);
1216 err = vnic_dev_init_prov(enic->vdev,
1217 (u8 *)vp, vic_provinfo_size(vp));
1218 spin_unlock(&enic->devcmd_lock);
1219
1220 return err;
1221}
1222
1223static int enic_dev_init_done(struct enic *enic, int *done, int *error)
1224{
1225 int err;
1226
1227 spin_lock(&enic->devcmd_lock);
1228 err = vnic_dev_init_done(enic->vdev, done, error);
1229 spin_unlock(&enic->devcmd_lock);
1230
1231 return err;
1232}
1233
1234static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) 1098static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1235{ 1099{
1236 struct enic *enic = netdev_priv(netdev); 1100 struct enic *enic = netdev_priv(netdev);
@@ -1318,18 +1182,20 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac)
1318 vic_provinfo_free(vp); 1182 vic_provinfo_free(vp);
1319 if (err) 1183 if (err)
1320 return err; 1184 return err;
1321
1322 enic->pp.set |= ENIC_SET_APPLIED;
1323 break; 1185 break;
1324 1186
1325 case PORT_REQUEST_DISASSOCIATE: 1187 case PORT_REQUEST_DISASSOCIATE:
1326 enic->pp.set &= ~ENIC_SET_APPLIED;
1327 break; 1188 break;
1328 1189
1329 default: 1190 default:
1330 return -EINVAL; 1191 return -EINVAL;
1331 } 1192 }
1332 1193
1194 /* Set flag to indicate that the port assoc/disassoc
1195 * request has been sent out to fw
1196 */
1197 enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;
1198
1333 return 0; 1199 return 0;
1334} 1200}
1335 1201
@@ -1379,9 +1245,6 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
1379 1245
1380 if (is_zero_ether_addr(netdev->dev_addr)) 1246 if (is_zero_ether_addr(netdev->dev_addr))
1381 random_ether_addr(netdev->dev_addr); 1247 random_ether_addr(netdev->dev_addr);
1382 } else if (new_pp.request == PORT_REQUEST_DISASSOCIATE) {
1383 if (!is_zero_ether_addr(enic->pp.mac_addr))
1384 enic_dev_del_addr(enic, enic->pp.mac_addr);
1385 } 1248 }
1386 1249
1387 memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile)); 1250 memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile));
@@ -1390,9 +1253,6 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
1390 if (err) 1253 if (err)
1391 goto set_port_profile_cleanup; 1254 goto set_port_profile_cleanup;
1392 1255
1393 if (!is_zero_ether_addr(enic->pp.mac_addr))
1394 enic_dev_add_addr(enic, enic->pp.mac_addr);
1395
1396set_port_profile_cleanup: 1256set_port_profile_cleanup:
1397 memset(enic->pp.vf_mac, 0, ETH_ALEN); 1257 memset(enic->pp.vf_mac, 0, ETH_ALEN);
1398 1258
@@ -1411,7 +1271,7 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
1411 int err, error, done; 1271 int err, error, done;
1412 u16 response = PORT_PROFILE_RESPONSE_SUCCESS; 1272 u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
1413 1273
1414	if (!(enic->pp.set & ENIC_SET_APPLIED))
1274	if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED))
1415 return -ENODATA; 1275 return -ENODATA;
1416 1276
1417 err = enic_dev_init_done(enic, &done, &error); 1277 err = enic_dev_init_done(enic, &done, &error);
@@ -1489,62 +1349,6 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
1489 return 0; 1349 return 0;
1490} 1350}
1491 1351
1492static int enic_rq_alloc_buf_a1(struct vnic_rq *rq)
1493{
1494 struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
1495
1496 if (vnic_rq_posting_soon(rq)) {
1497
1498 /* SW workaround for A0 HW erratum: if we're just about
1499 * to write posted_index, insert a dummy desc
1500 * of type resvd
1501 */
1502
1503 rq_enet_desc_enc(desc, 0, RQ_ENET_TYPE_RESV2, 0);
1504 vnic_rq_post(rq, 0, 0, 0, 0);
1505 } else {
1506 return enic_rq_alloc_buf(rq);
1507 }
1508
1509 return 0;
1510}
1511
1512static int enic_dev_hw_version(struct enic *enic,
1513 enum vnic_dev_hw_version *hw_ver)
1514{
1515 int err;
1516
1517 spin_lock(&enic->devcmd_lock);
1518 err = vnic_dev_hw_version(enic->vdev, hw_ver);
1519 spin_unlock(&enic->devcmd_lock);
1520
1521 return err;
1522}
1523
1524static int enic_set_rq_alloc_buf(struct enic *enic)
1525{
1526 enum vnic_dev_hw_version hw_ver;
1527 int err;
1528
1529 err = enic_dev_hw_version(enic, &hw_ver);
1530 if (err)
1531 return err;
1532
1533 switch (hw_ver) {
1534 case VNIC_DEV_HW_VER_A1:
1535 enic->rq_alloc_buf = enic_rq_alloc_buf_a1;
1536 break;
1537 case VNIC_DEV_HW_VER_A2:
1538 case VNIC_DEV_HW_VER_UNKNOWN:
1539 enic->rq_alloc_buf = enic_rq_alloc_buf;
1540 break;
1541 default:
1542 return -ENODEV;
1543 }
1544
1545 return 0;
1546}
1547
1548static void enic_rq_indicate_buf(struct vnic_rq *rq, 1352static void enic_rq_indicate_buf(struct vnic_rq *rq,
1549 struct cq_desc *cq_desc, struct vnic_rq_buf *buf, 1353 struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
1550 int skipped, void *opaque) 1354 int skipped, void *opaque)
@@ -1681,7 +1485,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
1681 0 /* don't unmask intr */, 1485 0 /* don't unmask intr */,
1682 0 /* don't reset intr timer */); 1486 0 /* don't reset intr timer */);
1683 1487
1684		err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
1488		err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
1685 1489
1686 /* Buffer allocation failed. Stay in polling 1490 /* Buffer allocation failed. Stay in polling
1687 * mode so we can try to fill the ring again. 1491 * mode so we can try to fill the ring again.
@@ -1731,7 +1535,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1731 0 /* don't unmask intr */, 1535 0 /* don't unmask intr */,
1732 0 /* don't reset intr timer */); 1536 0 /* don't reset intr timer */);
1733 1537
1734		err = vnic_rq_fill(&enic->rq[rq], enic->rq_alloc_buf);
1538		err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
1735 1539
1736 /* Buffer allocation failed. Stay in polling mode 1540 /* Buffer allocation failed. Stay in polling mode
1737 * so we can try to fill the ring again. 1541 * so we can try to fill the ring again.
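
Both polling paths above lean on the same NAPI convention: if the receive ring
cannot be replenished, report the full budget so the core keeps polling and the
refill can be retried on the next pass. A hedged sketch of that convention,
where struct foo_priv and the foo_* helpers are placeholders for driver
specifics:

struct foo_priv {
	struct napi_struct napi;
	/* ... rings, registers ... */
};

/* Assumed driver helpers, declared only for this sketch. */
static int foo_service_rq(struct foo_priv *priv, int budget);
static int foo_rq_fill(struct foo_priv *priv);
static void foo_unmask_intr(struct foo_priv *priv);

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int work_done = foo_service_rq(priv, budget);	/* process completions */
	int err = foo_rq_fill(priv);			/* replenish buffers */

	/* On allocation failure, claim the whole budget so NAPI polls
	 * again and the refill gets another chance.
	 */
	if (err)
		work_done = budget;

	if (work_done < budget) {
		napi_complete(napi);
		foo_unmask_intr(priv);	/* re-enable the queue interrupt */
	}

	return work_done;
}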
@@ -1901,39 +1705,6 @@ static int enic_dev_notify_set(struct enic *enic)
1901 return err; 1705 return err;
1902} 1706}
1903 1707
1904static int enic_dev_notify_unset(struct enic *enic)
1905{
1906 int err;
1907
1908 spin_lock(&enic->devcmd_lock);
1909 err = vnic_dev_notify_unset(enic->vdev);
1910 spin_unlock(&enic->devcmd_lock);
1911
1912 return err;
1913}
1914
1915static int enic_dev_enable(struct enic *enic)
1916{
1917 int err;
1918
1919 spin_lock(&enic->devcmd_lock);
1920 err = vnic_dev_enable_wait(enic->vdev);
1921 spin_unlock(&enic->devcmd_lock);
1922
1923 return err;
1924}
1925
1926static int enic_dev_disable(struct enic *enic)
1927{
1928 int err;
1929
1930 spin_lock(&enic->devcmd_lock);
1931 err = vnic_dev_disable(enic->vdev);
1932 spin_unlock(&enic->devcmd_lock);
1933
1934 return err;
1935}
1936
1937static void enic_notify_timer_start(struct enic *enic) 1708static void enic_notify_timer_start(struct enic *enic)
1938{ 1709{
1939 switch (vnic_dev_get_intr_mode(enic->vdev)) { 1710 switch (vnic_dev_get_intr_mode(enic->vdev)) {
@@ -1967,7 +1738,7 @@ static int enic_open(struct net_device *netdev)
1967 } 1738 }
1968 1739
1969 for (i = 0; i < enic->rq_count; i++) { 1740 for (i = 0; i < enic->rq_count; i++) {
1970		vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
1741		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
1971 /* Need at least one buffer on ring to get going */ 1742 /* Need at least one buffer on ring to get going */
1972 if (vnic_rq_desc_used(&enic->rq[i]) == 0) { 1743 if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
1973 netdev_err(netdev, "Unable to alloc receive buffers\n"); 1744 netdev_err(netdev, "Unable to alloc receive buffers\n");
@@ -2285,29 +2056,6 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
2285 rss_hash_bits, rss_base_cpu, rss_enable); 2056 rss_hash_bits, rss_base_cpu, rss_enable);
2286} 2057}
2287 2058
2288static int enic_dev_hang_notify(struct enic *enic)
2289{
2290 int err;
2291
2292 spin_lock(&enic->devcmd_lock);
2293 err = vnic_dev_hang_notify(enic->vdev);
2294 spin_unlock(&enic->devcmd_lock);
2295
2296 return err;
2297}
2298
2299static int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
2300{
2301 int err;
2302
2303 spin_lock(&enic->devcmd_lock);
2304 err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
2305 IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
2306 spin_unlock(&enic->devcmd_lock);
2307
2308 return err;
2309}
2310
2311static void enic_reset(struct work_struct *work) 2059static void enic_reset(struct work_struct *work)
2312{ 2060{
2313 struct enic *enic = container_of(work, struct enic, reset); 2061 struct enic *enic = container_of(work, struct enic, reset);
@@ -2320,7 +2068,7 @@ static void enic_reset(struct work_struct *work)
2320 enic_dev_hang_notify(enic); 2068 enic_dev_hang_notify(enic);
2321 enic_stop(enic->netdev); 2069 enic_stop(enic->netdev);
2322 enic_dev_hang_reset(enic); 2070 enic_dev_hang_reset(enic);
2323	enic_reset_multicast_list(enic);
2071	enic_reset_addr_lists(enic);
2324 enic_init_vnic_resources(enic); 2072 enic_init_vnic_resources(enic);
2325 enic_set_rss_nic_cfg(enic); 2073 enic_set_rss_nic_cfg(enic);
2326 enic_dev_set_ig_vlan_rewrite_mode(enic); 2074 enic_dev_set_ig_vlan_rewrite_mode(enic);
@@ -2332,7 +2080,7 @@ static void enic_reset(struct work_struct *work)
2332static int enic_set_intr_mode(struct enic *enic) 2080static int enic_set_intr_mode(struct enic *enic)
2333{ 2081{
2334 unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX); 2082 unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
2335	unsigned int m = 1;
2083	unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
2336 unsigned int i; 2084 unsigned int i;
2337 2085
2338 /* Set interrupt mode (INTx, MSI, MSI-X) depending 2086 /* Set interrupt mode (INTx, MSI, MSI-X) depending
@@ -2475,9 +2223,7 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
2475 .ndo_tx_timeout = enic_tx_timeout, 2223 .ndo_tx_timeout = enic_tx_timeout,
2476 .ndo_set_vf_port = enic_set_vf_port, 2224 .ndo_set_vf_port = enic_set_vf_port,
2477 .ndo_get_vf_port = enic_get_vf_port, 2225 .ndo_get_vf_port = enic_get_vf_port,
2478#ifdef IFLA_VF_MAX
2479 .ndo_set_vf_mac = enic_set_vf_mac, 2226 .ndo_set_vf_mac = enic_set_vf_mac,
2480#endif
2481#ifdef CONFIG_NET_POLL_CONTROLLER 2227#ifdef CONFIG_NET_POLL_CONTROLLER
2482 .ndo_poll_controller = enic_poll_controller, 2228 .ndo_poll_controller = enic_poll_controller,
2483#endif 2229#endif
@@ -2556,25 +2302,12 @@ static int enic_dev_init(struct enic *enic)
2556 2302
2557 enic_init_vnic_resources(enic); 2303 enic_init_vnic_resources(enic);
2558 2304
2559 err = enic_set_rq_alloc_buf(enic);
2560 if (err) {
2561 dev_err(dev, "Failed to set RQ buffer allocator, aborting\n");
2562 goto err_out_free_vnic_resources;
2563 }
2564
2565 err = enic_set_rss_nic_cfg(enic); 2305 err = enic_set_rss_nic_cfg(enic);
2566 if (err) { 2306 if (err) {
2567 dev_err(dev, "Failed to config nic, aborting\n"); 2307 dev_err(dev, "Failed to config nic, aborting\n");
2568 goto err_out_free_vnic_resources; 2308 goto err_out_free_vnic_resources;
2569 } 2309 }
2570 2310
2571 err = enic_dev_set_ig_vlan_rewrite_mode(enic);
2572 if (err) {
2573 dev_err(dev,
2574 "Failed to set ingress vlan rewrite mode, aborting.\n");
2575 goto err_out_free_vnic_resources;
2576 }
2577
2578 switch (vnic_dev_get_intr_mode(enic->vdev)) { 2311 switch (vnic_dev_get_intr_mode(enic->vdev)) {
2579 default: 2312 default:
2580 netif_napi_add(netdev, &enic->napi[0], enic_poll, 64); 2313 netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
@@ -2713,6 +2446,22 @@ static int __devinit enic_probe(struct pci_dev *pdev,
2713 goto err_out_vnic_unregister; 2446 goto err_out_vnic_unregister;
2714 } 2447 }
2715 2448
2449 /* Setup devcmd lock
2450 */
2451
2452 spin_lock_init(&enic->devcmd_lock);
2453
2454 /*
2455 * Set ingress vlan rewrite mode before vnic initialization
2456 */
2457
2458 err = enic_dev_set_ig_vlan_rewrite_mode(enic);
2459 if (err) {
2460 dev_err(dev,
2461 "Failed to set ingress vlan rewrite mode, aborting.\n");
2462 goto err_out_dev_close;
2463 }
2464
2716 /* Issue device init to initialize the vnic-to-switch link. 2465 /* Issue device init to initialize the vnic-to-switch link.
2717 * We'll start with carrier off and wait for link UP 2466 * We'll start with carrier off and wait for link UP
2718 * notification later to turn on carrier. We don't need 2467 * notification later to turn on carrier. We don't need
@@ -2736,11 +2485,6 @@ static int __devinit enic_probe(struct pci_dev *pdev,
2736 } 2485 }
2737 } 2486 }
2738 2487
2739 /* Setup devcmd lock
2740 */
2741
2742 spin_lock_init(&enic->devcmd_lock);
2743
2744 err = enic_dev_init(enic); 2488 err = enic_dev_init(enic);
2745 if (err) { 2489 if (err) {
2746 dev_err(dev, "Device initialization failed, aborting\n"); 2490 dev_err(dev, "Device initialization failed, aborting\n");
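
The two probe hunks above are a single move: spin_lock_init(&enic->devcmd_lock)
now runs before the first wrapped firmware command, since
enic_dev_set_ig_vlan_rewrite_mode() is issued ahead of vnic initialization. A
condensed sketch of the resulting order (hypothetical helper, error paths
trimmed; the real calls are those in the hunk):

static int foo_probe_order(struct enic *enic)
{
	int err;

	spin_lock_init(&enic->devcmd_lock);	/* 1: before any devcmd */

	err = enic_dev_set_ig_vlan_rewrite_mode(enic);	/* 2: first devcmd */
	if (err)
		return err;

	return enic_dev_init(enic);		/* 3: remaining vnic init */
}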
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index fb35d8b17668..c489e72107de 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -419,25 +419,6 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
419 return err; 419 return err;
420} 420}
421 421
422int vnic_dev_hw_version(struct vnic_dev *vdev, enum vnic_dev_hw_version *hw_ver)
423{
424 struct vnic_devcmd_fw_info *fw_info;
425 int err;
426
427 err = vnic_dev_fw_info(vdev, &fw_info);
428 if (err)
429 return err;
430
431 if (strncmp(fw_info->hw_version, "A1", sizeof("A1")) == 0)
432 *hw_ver = VNIC_DEV_HW_VER_A1;
433 else if (strncmp(fw_info->hw_version, "A2", sizeof("A2")) == 0)
434 *hw_ver = VNIC_DEV_HW_VER_A2;
435 else
436 *hw_ver = VNIC_DEV_HW_VER_UNKNOWN;
437
438 return 0;
439}
440
441int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size, 422int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
442 void *value) 423 void *value)
443{ 424{
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
index 05f9a24cd459..e837546213a8 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/enic/vnic_dev.h
@@ -44,12 +44,6 @@ static inline void writeq(u64 val, void __iomem *reg)
44#undef pr_fmt 44#undef pr_fmt
45#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 45#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
46 46
47enum vnic_dev_hw_version {
48 VNIC_DEV_HW_VER_UNKNOWN,
49 VNIC_DEV_HW_VER_A1,
50 VNIC_DEV_HW_VER_A2,
51};
52
53enum vnic_dev_intr_mode { 47enum vnic_dev_intr_mode {
54 VNIC_DEV_INTR_MODE_UNKNOWN, 48 VNIC_DEV_INTR_MODE_UNKNOWN,
55 VNIC_DEV_INTR_MODE_INTX, 49 VNIC_DEV_INTR_MODE_INTX,
@@ -93,8 +87,6 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
93 u64 *a0, u64 *a1, int wait); 87 u64 *a0, u64 *a1, int wait);
94int vnic_dev_fw_info(struct vnic_dev *vdev, 88int vnic_dev_fw_info(struct vnic_dev *vdev,
95 struct vnic_devcmd_fw_info **fw_info); 89 struct vnic_devcmd_fw_info **fw_info);
96int vnic_dev_hw_version(struct vnic_dev *vdev,
97 enum vnic_dev_hw_version *hw_ver);
98int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size, 90int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
99 void *value); 91 void *value);
100int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats); 92int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/enic/vnic_rq.h
index 37f08de2454a..2056586f4d4b 100644
--- a/drivers/net/enic/vnic_rq.h
+++ b/drivers/net/enic/vnic_rq.h
@@ -141,11 +141,6 @@ static inline void vnic_rq_post(struct vnic_rq *rq,
141 } 141 }
142} 142}
143 143
144static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
145{
146 return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
147}
148
149static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count) 144static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
150{ 145{
151 rq->ring.desc_avail += count; 146 rq->ring.desc_avail += count;
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 2a71373719ae..634c0daeecec 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -54,7 +54,7 @@
54 54
55#include "fec.h" 55#include "fec.h"
56 56
57#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
57#if defined(CONFIG_ARM)
58#define FEC_ALIGNMENT 0xf 58#define FEC_ALIGNMENT 0xf
59#else 59#else
60#define FEC_ALIGNMENT 0x3 60#define FEC_ALIGNMENT 0x3
@@ -147,8 +147,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
147 * account when setting it. 147 * account when setting it.
148 */ 148 */
149#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 149#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
150    defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
150    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
151 defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
152#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) 151#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
153#else 152#else
154#define OPT_FRAME_SIZE 0 153#define OPT_FRAME_SIZE 0
@@ -183,7 +182,7 @@ struct fec_enet_private {
183 struct bufdesc *rx_bd_base; 182 struct bufdesc *rx_bd_base;
184 struct bufdesc *tx_bd_base; 183 struct bufdesc *tx_bd_base;
185 /* The next free ring entry */ 184 /* The next free ring entry */
186 struct bufdesc *cur_rx, *cur_tx; 185 struct bufdesc *cur_rx, *cur_tx;
187 /* The ring entries to be free()ed */ 186 /* The ring entries to be free()ed */
188 struct bufdesc *dirty_tx; 187 struct bufdesc *dirty_tx;
189 188
@@ -191,28 +190,21 @@ struct fec_enet_private {
191 /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */ 190 /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
192 spinlock_t hw_lock; 191 spinlock_t hw_lock;
193 192
194 struct platform_device *pdev; 193 struct platform_device *pdev;
195 194
196 int opened; 195 int opened;
197 196
198 /* Phylib and MDIO interface */ 197 /* Phylib and MDIO interface */
199 struct mii_bus *mii_bus; 198 struct mii_bus *mii_bus;
200 struct phy_device *phy_dev; 199 struct phy_device *phy_dev;
201 int mii_timeout; 200 int mii_timeout;
202 uint phy_speed; 201 uint phy_speed;
203 phy_interface_t phy_interface; 202 phy_interface_t phy_interface;
204 int link; 203 int link;
205 int full_duplex; 204 int full_duplex;
206 struct completion mdio_done; 205 struct completion mdio_done;
207}; 206};
208 207
209static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
210static void fec_enet_tx(struct net_device *dev);
211static void fec_enet_rx(struct net_device *dev);
212static int fec_enet_close(struct net_device *dev);
213static void fec_restart(struct net_device *dev, int duplex);
214static void fec_stop(struct net_device *dev);
215
216/* FEC MII MMFR bits definition */ 208/* FEC MII MMFR bits definition */
217#define FEC_MMFR_ST (1 << 30) 209#define FEC_MMFR_ST (1 << 30)
218#define FEC_MMFR_OP_READ (2 << 28) 210#define FEC_MMFR_OP_READ (2 << 28)
@@ -239,9 +231,9 @@ static void *swap_buffer(void *bufaddr, int len)
239} 231}
240 232
241static netdev_tx_t 233static netdev_tx_t
242fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
234fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
243{ 235{
244 struct fec_enet_private *fep = netdev_priv(dev); 236 struct fec_enet_private *fep = netdev_priv(ndev);
245 const struct platform_device_id *id_entry = 237 const struct platform_device_id *id_entry =
246 platform_get_device_id(fep->pdev); 238 platform_get_device_id(fep->pdev);
247 struct bufdesc *bdp; 239 struct bufdesc *bdp;
@@ -262,9 +254,9 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
262 254
263 if (status & BD_ENET_TX_READY) { 255 if (status & BD_ENET_TX_READY) {
264 /* Ooops. All transmit buffers are full. Bail out. 256 /* Ooops. All transmit buffers are full. Bail out.
265 * This should not happen, since dev->tbusy should be set. 257 * This should not happen, since ndev->tbusy should be set.
266 */ 258 */
267 printk("%s: tx queue full!.\n", dev->name); 259 printk("%s: tx queue full!.\n", ndev->name);
268 spin_unlock_irqrestore(&fep->hw_lock, flags); 260 spin_unlock_irqrestore(&fep->hw_lock, flags);
269 return NETDEV_TX_BUSY; 261 return NETDEV_TX_BUSY;
270 } 262 }
@@ -284,7 +276,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
284 if (((unsigned long) bufaddr) & FEC_ALIGNMENT) { 276 if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
285 unsigned int index; 277 unsigned int index;
286 index = bdp - fep->tx_bd_base; 278 index = bdp - fep->tx_bd_base;
287 memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len); 279 memcpy(fep->tx_bounce[index], skb->data, skb->len);
288 bufaddr = fep->tx_bounce[index]; 280 bufaddr = fep->tx_bounce[index];
289 } 281 }
290 282
@@ -299,13 +291,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
299 /* Save skb pointer */ 291 /* Save skb pointer */
300 fep->tx_skbuff[fep->skb_cur] = skb; 292 fep->tx_skbuff[fep->skb_cur] = skb;
301 293
302 dev->stats.tx_bytes += skb->len; 294 ndev->stats.tx_bytes += skb->len;
303 fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK; 295 fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
304 296
305 /* Push the data cache so the CPM does not get stale memory 297 /* Push the data cache so the CPM does not get stale memory
306 * data. 298 * data.
307 */ 299 */
308	bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr,
300	bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
309 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); 301 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
310 302
311 /* Send it on its way. Tell FEC it's ready, interrupt when done, 303 /* Send it on its way. Tell FEC it's ready, interrupt when done,
@@ -326,7 +318,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
326 318
327 if (bdp == fep->dirty_tx) { 319 if (bdp == fep->dirty_tx) {
328 fep->tx_full = 1; 320 fep->tx_full = 1;
329 netif_stop_queue(dev); 321 netif_stop_queue(ndev);
330 } 322 }
331 323
332 fep->cur_tx = bdp; 324 fep->cur_tx = bdp;
@@ -336,62 +328,170 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
336 return NETDEV_TX_OK; 328 return NETDEV_TX_OK;
337} 329}
338 330
331/* This function is called to start or restart the FEC during a link
332 * change. This only happens when switching between half and full
333 * duplex.
334 */
339static void
340fec_timeout(struct net_device *dev)
341{
342	struct fec_enet_private *fep = netdev_priv(dev);
335static void
336fec_restart(struct net_device *ndev, int duplex)
337{
338	struct fec_enet_private *fep = netdev_priv(ndev);
339 const struct platform_device_id *id_entry =
340 platform_get_device_id(fep->pdev);
341 int i;
342 u32 temp_mac[2];
343 u32 rcntl = OPT_FRAME_SIZE | 0x04;
343 344
344	dev->stats.tx_errors++;
345	/* Whack a reset. We should wait for this. */
346 writel(1, fep->hwp + FEC_ECNTRL);
347 udelay(10);
345 348
346	fec_restart(dev, fep->full_duplex);
347	netif_wake_queue(dev);
348}
349	/*
350	 * enet-mac reset will reset mac address registers too,
351	 * so need to reconfigure it.
352 */
353 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
354 memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
355 writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
356 writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
357 }
349 358
350static irqreturn_t
351fec_enet_interrupt(int irq, void * dev_id)
359	/* Clear any outstanding interrupt. */
360	writel(0xffc00000, fep->hwp + FEC_IEVENT);
352{
353 struct net_device *dev = dev_id;
354 struct fec_enet_private *fep = netdev_priv(dev);
355 uint int_events;
356 irqreturn_t ret = IRQ_NONE;
357 361
358 do { 362 /* Reset all multicast. */
359 int_events = readl(fep->hwp + FEC_IEVENT); 363 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
360 writel(int_events, fep->hwp + FEC_IEVENT); 364 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
365#ifndef CONFIG_M5272
366 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
367 writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
368#endif
361 369
362 if (int_events & FEC_ENET_RXF) { 370 /* Set maximum receive buffer size. */
363 ret = IRQ_HANDLED; 371 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
364 fec_enet_rx(dev);
365 }
366 372
367 /* Transmit OK, or non-fatal error. Update the buffer 373 /* Set receive and transmit descriptor base. */
368 * descriptors. FEC handles all errors, we just discover 374 writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
369 * them as part of the transmit process. 375 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
370 */ 376 fep->hwp + FEC_X_DES_START);
371 if (int_events & FEC_ENET_TXF) { 377
372 ret = IRQ_HANDLED; 378 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
373 fec_enet_tx(dev); 379 fep->cur_rx = fep->rx_bd_base;
380
381 /* Reset SKB transmit buffers. */
382 fep->skb_cur = fep->skb_dirty = 0;
383 for (i = 0; i <= TX_RING_MOD_MASK; i++) {
384 if (fep->tx_skbuff[i]) {
385 dev_kfree_skb_any(fep->tx_skbuff[i]);
386 fep->tx_skbuff[i] = NULL;
374 } 387 }
388 }
375 389
376 if (int_events & FEC_ENET_MII) { 390 /* Enable MII mode */
377 ret = IRQ_HANDLED; 391 if (duplex) {
378 complete(&fep->mdio_done); 392 /* FD enable */
393 writel(0x04, fep->hwp + FEC_X_CNTRL);
394 } else {
395 /* No Rcv on Xmit */
396 rcntl |= 0x02;
397 writel(0x0, fep->hwp + FEC_X_CNTRL);
398 }
399
400 fep->full_duplex = duplex;
401
402 /* Set MII speed */
403 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
404
405 /*
406 * The phy interface and speed need to get configured
407 * differently on enet-mac.
408 */
409 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
410 /* Enable flow control and length check */
411 rcntl |= 0x40000000 | 0x00000020;
412
413 /* MII or RMII */
414 if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
415 rcntl |= (1 << 8);
416 else
417 rcntl &= ~(1 << 8);
418
419 /* 10M or 100M */
420 if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
421 rcntl &= ~(1 << 9);
422 else
423 rcntl |= (1 << 9);
424
425 } else {
426#ifdef FEC_MIIGSK_ENR
427 if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
428 /* disable the gasket and wait */
429 writel(0, fep->hwp + FEC_MIIGSK_ENR);
430 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
431 udelay(1);
432
433 /*
434 * configure the gasket:
435 * RMII, 50 MHz, no loopback, no echo
436 */
437 writel(1, fep->hwp + FEC_MIIGSK_CFGR);
438
439 /* re-enable the gasket */
440 writel(2, fep->hwp + FEC_MIIGSK_ENR);
379 } 441 }
380 } while (int_events); 442#endif
443 }
444 writel(rcntl, fep->hwp + FEC_R_CNTRL);
381 445
382 return ret; 446 /* And last, enable the transmit and receive processing */
447 writel(2, fep->hwp + FEC_ECNTRL);
448 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
449
450 /* Enable interrupts we wish to service */
451 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
452}
453
454static void
455fec_stop(struct net_device *ndev)
456{
457 struct fec_enet_private *fep = netdev_priv(ndev);
458
459 /* We cannot expect a graceful transmit stop without link !!! */
460 if (fep->link) {
461 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
462 udelay(10);
463 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
464 printk("fec_stop : Graceful transmit stop did not complete !\n");
465 }
466
467 /* Whack a reset. We should wait for this. */
468 writel(1, fep->hwp + FEC_ECNTRL);
469 udelay(10);
470 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
471 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
383} 472}
384 473
385 474
386static void 475static void
387fec_enet_tx(struct net_device *dev)
476fec_timeout(struct net_device *ndev)
477{
478 struct fec_enet_private *fep = netdev_priv(ndev);
479
480 ndev->stats.tx_errors++;
481
482 fec_restart(ndev, fep->full_duplex);
483 netif_wake_queue(ndev);
484}
485
486static void
487fec_enet_tx(struct net_device *ndev)
388{ 488{
389 struct fec_enet_private *fep; 489 struct fec_enet_private *fep;
390 struct bufdesc *bdp; 490 struct bufdesc *bdp;
391 unsigned short status; 491 unsigned short status;
392 struct sk_buff *skb; 492 struct sk_buff *skb;
393 493
394 fep = netdev_priv(dev); 494 fep = netdev_priv(ndev);
395 spin_lock(&fep->hw_lock); 495 spin_lock(&fep->hw_lock);
396 bdp = fep->dirty_tx; 496 bdp = fep->dirty_tx;
397 497
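
The rewritten fec_restart() above composes the receive control word from a
handful of documented bits. Restating the hunk's RCNTL usage as one helper
(sketch only; the driver builds the value inline):

static u32 fec_rcntl_sketch(bool full_duplex, bool enet_mac,
			    bool rmii, bool speed_100)
{
	u32 rcntl = OPT_FRAME_SIZE | 0x04;	/* max frame size + MII mode */

	if (!full_duplex)
		rcntl |= 0x02;			/* no receive on transmit */

	if (enet_mac) {
		rcntl |= 0x40000000 | 0x00000020; /* flow control + length check */
		if (rmii)
			rcntl |= 1 << 8;	/* RMII instead of MII */
		if (!speed_100)
			rcntl |= 1 << 9;	/* 10 Mbit/s instead of 100 */
	}

	return rcntl;
}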
@@ -399,7 +499,8 @@ fec_enet_tx(struct net_device *dev)
399 if (bdp == fep->cur_tx && fep->tx_full == 0) 499 if (bdp == fep->cur_tx && fep->tx_full == 0)
400 break; 500 break;
401 501
402		dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
502		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
503 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
403 bdp->cbd_bufaddr = 0; 504 bdp->cbd_bufaddr = 0;
404 505
405 skb = fep->tx_skbuff[fep->skb_dirty]; 506 skb = fep->tx_skbuff[fep->skb_dirty];
@@ -407,19 +508,19 @@ fec_enet_tx(struct net_device *dev)
407 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 508 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
408 BD_ENET_TX_RL | BD_ENET_TX_UN | 509 BD_ENET_TX_RL | BD_ENET_TX_UN |
409 BD_ENET_TX_CSL)) { 510 BD_ENET_TX_CSL)) {
410 dev->stats.tx_errors++; 511 ndev->stats.tx_errors++;
411 if (status & BD_ENET_TX_HB) /* No heartbeat */ 512 if (status & BD_ENET_TX_HB) /* No heartbeat */
412 dev->stats.tx_heartbeat_errors++; 513 ndev->stats.tx_heartbeat_errors++;
413 if (status & BD_ENET_TX_LC) /* Late collision */ 514 if (status & BD_ENET_TX_LC) /* Late collision */
414 dev->stats.tx_window_errors++; 515 ndev->stats.tx_window_errors++;
415 if (status & BD_ENET_TX_RL) /* Retrans limit */ 516 if (status & BD_ENET_TX_RL) /* Retrans limit */
416 dev->stats.tx_aborted_errors++; 517 ndev->stats.tx_aborted_errors++;
417 if (status & BD_ENET_TX_UN) /* Underrun */ 518 if (status & BD_ENET_TX_UN) /* Underrun */
418 dev->stats.tx_fifo_errors++; 519 ndev->stats.tx_fifo_errors++;
419 if (status & BD_ENET_TX_CSL) /* Carrier lost */ 520 if (status & BD_ENET_TX_CSL) /* Carrier lost */
420 dev->stats.tx_carrier_errors++; 521 ndev->stats.tx_carrier_errors++;
421 } else { 522 } else {
422 dev->stats.tx_packets++; 523 ndev->stats.tx_packets++;
423 } 524 }
424 525
425 if (status & BD_ENET_TX_READY) 526 if (status & BD_ENET_TX_READY)
@@ -429,7 +530,7 @@ fec_enet_tx(struct net_device *dev)
429 * but we eventually sent the packet OK. 530 * but we eventually sent the packet OK.
430 */ 531 */
431 if (status & BD_ENET_TX_DEF) 532 if (status & BD_ENET_TX_DEF)
432 dev->stats.collisions++; 533 ndev->stats.collisions++;
433 534
434 /* Free the sk buffer associated with this last transmit */ 535 /* Free the sk buffer associated with this last transmit */
435 dev_kfree_skb_any(skb); 536 dev_kfree_skb_any(skb);
@@ -446,8 +547,8 @@ fec_enet_tx(struct net_device *dev)
446 */ 547 */
447 if (fep->tx_full) { 548 if (fep->tx_full) {
448 fep->tx_full = 0; 549 fep->tx_full = 0;
449 if (netif_queue_stopped(dev)) 550 if (netif_queue_stopped(ndev))
450 netif_wake_queue(dev); 551 netif_wake_queue(ndev);
451 } 552 }
452 } 553 }
453 fep->dirty_tx = bdp; 554 fep->dirty_tx = bdp;
@@ -461,9 +562,9 @@ fec_enet_tx(struct net_device *dev)
461 * effectively tossing the packet. 562 * effectively tossing the packet.
462 */ 563 */
463static void 564static void
464fec_enet_rx(struct net_device *dev) 565fec_enet_rx(struct net_device *ndev)
465{ 566{
466 struct fec_enet_private *fep = netdev_priv(dev); 567 struct fec_enet_private *fep = netdev_priv(ndev);
467 const struct platform_device_id *id_entry = 568 const struct platform_device_id *id_entry =
468 platform_get_device_id(fep->pdev); 569 platform_get_device_id(fep->pdev);
469 struct bufdesc *bdp; 570 struct bufdesc *bdp;
@@ -497,17 +598,17 @@ fec_enet_rx(struct net_device *dev)
497 /* Check for errors. */ 598 /* Check for errors. */
498 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | 599 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
499 BD_ENET_RX_CR | BD_ENET_RX_OV)) { 600 BD_ENET_RX_CR | BD_ENET_RX_OV)) {
500 dev->stats.rx_errors++; 601 ndev->stats.rx_errors++;
501 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { 602 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
502 /* Frame too long or too short. */ 603 /* Frame too long or too short. */
503 dev->stats.rx_length_errors++; 604 ndev->stats.rx_length_errors++;
504 } 605 }
505 if (status & BD_ENET_RX_NO) /* Frame alignment */ 606 if (status & BD_ENET_RX_NO) /* Frame alignment */
506 dev->stats.rx_frame_errors++; 607 ndev->stats.rx_frame_errors++;
507 if (status & BD_ENET_RX_CR) /* CRC Error */ 608 if (status & BD_ENET_RX_CR) /* CRC Error */
508 dev->stats.rx_crc_errors++; 609 ndev->stats.rx_crc_errors++;
509 if (status & BD_ENET_RX_OV) /* FIFO overrun */ 610 if (status & BD_ENET_RX_OV) /* FIFO overrun */
510 dev->stats.rx_fifo_errors++; 611 ndev->stats.rx_fifo_errors++;
511 } 612 }
512 613
513 /* Report late collisions as a frame error. 614 /* Report late collisions as a frame error.
@@ -515,19 +616,19 @@ fec_enet_rx(struct net_device *dev)
515 * have in the buffer. So, just drop this frame on the floor. 616 * have in the buffer. So, just drop this frame on the floor.
516 */ 617 */
517 if (status & BD_ENET_RX_CL) { 618 if (status & BD_ENET_RX_CL) {
518 dev->stats.rx_errors++; 619 ndev->stats.rx_errors++;
519 dev->stats.rx_frame_errors++; 620 ndev->stats.rx_frame_errors++;
520 goto rx_processing_done; 621 goto rx_processing_done;
521 } 622 }
522 623
523 /* Process the incoming frame. */ 624 /* Process the incoming frame. */
524 dev->stats.rx_packets++; 625 ndev->stats.rx_packets++;
525 pkt_len = bdp->cbd_datlen; 626 pkt_len = bdp->cbd_datlen;
526 dev->stats.rx_bytes += pkt_len; 627 ndev->stats.rx_bytes += pkt_len;
527 data = (__u8*)__va(bdp->cbd_bufaddr); 628 data = (__u8*)__va(bdp->cbd_bufaddr);
528 629
529		dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
530				DMA_FROM_DEVICE);
630		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
631				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
531 632
532 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) 633 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
533 swap_buffer(data, pkt_len); 634 swap_buffer(data, pkt_len);
@@ -541,18 +642,18 @@ fec_enet_rx(struct net_device *dev)
541 642
542 if (unlikely(!skb)) { 643 if (unlikely(!skb)) {
543 printk("%s: Memory squeeze, dropping packet.\n", 644 printk("%s: Memory squeeze, dropping packet.\n",
544 dev->name); 645 ndev->name);
545 dev->stats.rx_dropped++; 646 ndev->stats.rx_dropped++;
546 } else { 647 } else {
547 skb_reserve(skb, NET_IP_ALIGN); 648 skb_reserve(skb, NET_IP_ALIGN);
548 skb_put(skb, pkt_len - 4); /* Make room */ 649 skb_put(skb, pkt_len - 4); /* Make room */
549 skb_copy_to_linear_data(skb, data, pkt_len - 4); 650 skb_copy_to_linear_data(skb, data, pkt_len - 4);
550 skb->protocol = eth_type_trans(skb, dev); 651 skb->protocol = eth_type_trans(skb, ndev);
551 netif_rx(skb); 652 netif_rx(skb);
552 } 653 }
553 654
554		bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen,
555			DMA_FROM_DEVICE);
655		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
656			FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
556rx_processing_done: 657rx_processing_done:
557 /* Clear the status flags for this buffer */ 658 /* Clear the status flags for this buffer */
558 status &= ~BD_ENET_RX_STATS; 659 status &= ~BD_ENET_RX_STATS;
@@ -577,10 +678,47 @@ rx_processing_done:
577 spin_unlock(&fep->hw_lock); 678 spin_unlock(&fep->hw_lock);
578} 679}
579 680
681static irqreturn_t
682fec_enet_interrupt(int irq, void *dev_id)
683{
684 struct net_device *ndev = dev_id;
685 struct fec_enet_private *fep = netdev_priv(ndev);
686 uint int_events;
687 irqreturn_t ret = IRQ_NONE;
688
689 do {
690 int_events = readl(fep->hwp + FEC_IEVENT);
691 writel(int_events, fep->hwp + FEC_IEVENT);
692
693 if (int_events & FEC_ENET_RXF) {
694 ret = IRQ_HANDLED;
695 fec_enet_rx(ndev);
696 }
697
698 /* Transmit OK, or non-fatal error. Update the buffer
699 * descriptors. FEC handles all errors, we just discover
700 * them as part of the transmit process.
701 */
702 if (int_events & FEC_ENET_TXF) {
703 ret = IRQ_HANDLED;
704 fec_enet_tx(ndev);
705 }
706
707 if (int_events & FEC_ENET_MII) {
708 ret = IRQ_HANDLED;
709 complete(&fep->mdio_done);
710 }
711 } while (int_events);
712
713 return ret;
714}
715
716
717
580/* ------------------------------------------------------------------------- */ 718/* ------------------------------------------------------------------------- */
581static void __inline__ fec_get_mac(struct net_device *dev) 719static void __inline__ fec_get_mac(struct net_device *ndev)
582{ 720{
583 struct fec_enet_private *fep = netdev_priv(dev); 721 struct fec_enet_private *fep = netdev_priv(ndev);
584 struct fec_platform_data *pdata = fep->pdev->dev.platform_data; 722 struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
585 unsigned char *iap, tmpaddr[ETH_ALEN]; 723 unsigned char *iap, tmpaddr[ETH_ALEN];
586 724
@@ -616,11 +754,11 @@ static void __inline__ fec_get_mac(struct net_device *dev)
 		iap = &tmpaddr[0];
 	}
 
-	memcpy(dev->dev_addr, iap, ETH_ALEN);
+	memcpy(ndev->dev_addr, iap, ETH_ALEN);
 
 	/* Adjust MAC if using macaddr */
 	if (iap == macaddr)
-		dev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
+		ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
 }
 
 /* ------------------------------------------------------------------------- */
@@ -628,9 +766,9 @@ static void __inline__ fec_get_mac(struct net_device *dev)
 /*
  * Phy section
  */
-static void fec_enet_adjust_link(struct net_device *dev)
+static void fec_enet_adjust_link(struct net_device *ndev)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct phy_device *phy_dev = fep->phy_dev;
 	unsigned long flags;
 
@@ -647,7 +785,7 @@ static void fec_enet_adjust_link(struct net_device *dev)
 	/* Duplex link change */
 	if (phy_dev->link) {
 		if (fep->full_duplex != phy_dev->duplex) {
-			fec_restart(dev, phy_dev->duplex);
+			fec_restart(ndev, phy_dev->duplex);
 			status_change = 1;
 		}
 	}
@@ -656,9 +794,9 @@ static void fec_enet_adjust_link(struct net_device *dev)
 	if (phy_dev->link != fep->link) {
 		fep->link = phy_dev->link;
 		if (phy_dev->link)
-			fec_restart(dev, phy_dev->duplex);
+			fec_restart(ndev, phy_dev->duplex);
 		else
-			fec_stop(dev);
+			fec_stop(ndev);
 		status_change = 1;
 	}
 
@@ -727,9 +865,9 @@ static int fec_enet_mdio_reset(struct mii_bus *bus)
 	return 0;
 }
 
-static int fec_enet_mii_probe(struct net_device *dev)
+static int fec_enet_mii_probe(struct net_device *ndev)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct phy_device *phy_dev = NULL;
 	char mdio_bus_id[MII_BUS_ID_SIZE];
 	char phy_name[MII_BUS_ID_SIZE + 3];
@@ -754,16 +892,16 @@ static int fec_enet_mii_probe(struct net_device *dev)
 
 	if (phy_id >= PHY_MAX_ADDR) {
 		printk(KERN_INFO "%s: no PHY, assuming direct connection "
-			"to switch\n", dev->name);
+			"to switch\n", ndev->name);
 		strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
 		phy_id = 0;
 	}
 
 	snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
-	phy_dev = phy_connect(dev, phy_name, &fec_enet_adjust_link, 0,
+	phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0,
 		PHY_INTERFACE_MODE_MII);
 	if (IS_ERR(phy_dev)) {
-		printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
+		printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name);
 		return PTR_ERR(phy_dev);
 	}
 
@@ -776,7 +914,7 @@ static int fec_enet_mii_probe(struct net_device *dev)
 	fep->full_duplex = 0;
 
 	printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
-		"(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
+		"(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
 		fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
 		fep->phy_dev->irq);
 
@@ -786,8 +924,8 @@ static int fec_enet_mii_probe(struct net_device *dev)
 static int fec_enet_mii_init(struct platform_device *pdev)
 {
 	static struct mii_bus *fec0_mii_bus;
-	struct net_device *dev = platform_get_drvdata(pdev);
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	const struct platform_device_id *id_entry =
 				platform_get_device_id(fep->pdev);
 	int err = -ENXIO, i;
@@ -845,8 +983,6 @@ static int fec_enet_mii_init(struct platform_device *pdev)
 	for (i = 0; i < PHY_MAX_ADDR; i++)
 		fep->mii_bus->irq[i] = PHY_POLL;
 
-	platform_set_drvdata(dev, fep->mii_bus);
-
 	if (mdiobus_register(fep->mii_bus))
 		goto err_out_free_mdio_irq;
 
@@ -873,10 +1009,10 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep)
 	mdiobus_free(fep->mii_bus);
 }
 
-static int fec_enet_get_settings(struct net_device *dev,
+static int fec_enet_get_settings(struct net_device *ndev,
 				 struct ethtool_cmd *cmd)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct phy_device *phydev = fep->phy_dev;
 
 	if (!phydev)
@@ -885,10 +1021,10 @@ static int fec_enet_get_settings(struct net_device *dev,
 	return phy_ethtool_gset(phydev, cmd);
 }
 
-static int fec_enet_set_settings(struct net_device *dev,
+static int fec_enet_set_settings(struct net_device *ndev,
 				 struct ethtool_cmd *cmd)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct phy_device *phydev = fep->phy_dev;
 
 	if (!phydev)
@@ -897,14 +1033,14 @@ static int fec_enet_set_settings(struct net_device *dev,
 	return phy_ethtool_sset(phydev, cmd);
 }
 
-static void fec_enet_get_drvinfo(struct net_device *dev,
+static void fec_enet_get_drvinfo(struct net_device *ndev,
 				 struct ethtool_drvinfo *info)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 
 	strcpy(info->driver, fep->pdev->dev.driver->name);
 	strcpy(info->version, "Revision: 1.0");
-	strcpy(info->bus_info, dev_name(&dev->dev));
+	strcpy(info->bus_info, dev_name(&ndev->dev));
 }
 
 static struct ethtool_ops fec_enet_ethtool_ops = {
@@ -914,12 +1050,12 @@ static struct ethtool_ops fec_enet_ethtool_ops = {
 	.get_link		= ethtool_op_get_link,
 };
 
-static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct phy_device *phydev = fep->phy_dev;
 
-	if (!netif_running(dev))
+	if (!netif_running(ndev))
 		return -EINVAL;
 
 	if (!phydev)
@@ -928,9 +1064,9 @@ static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 	return phy_mii_ioctl(phydev, rq, cmd);
 }
 
-static void fec_enet_free_buffers(struct net_device *dev)
+static void fec_enet_free_buffers(struct net_device *ndev)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	int i;
 	struct sk_buff *skb;
 	struct bufdesc	*bdp;
@@ -940,7 +1076,7 @@ static void fec_enet_free_buffers(struct net_device *dev)
 		skb = fep->rx_skbuff[i];
 
 		if (bdp->cbd_bufaddr)
-			dma_unmap_single(&dev->dev, bdp->cbd_bufaddr,
+			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
 					FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
 		if (skb)
 			dev_kfree_skb(skb);
@@ -952,9 +1088,9 @@ static void fec_enet_free_buffers(struct net_device *dev)
 		kfree(fep->tx_bounce[i]);
 }
 
-static int fec_enet_alloc_buffers(struct net_device *dev)
+static int fec_enet_alloc_buffers(struct net_device *ndev)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	int i;
 	struct sk_buff *skb;
 	struct bufdesc	*bdp;
@@ -963,12 +1099,12 @@ static int fec_enet_alloc_buffers(struct net_device *dev)
 	for (i = 0; i < RX_RING_SIZE; i++) {
 		skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
 		if (!skb) {
-			fec_enet_free_buffers(dev);
+			fec_enet_free_buffers(ndev);
 			return -ENOMEM;
 		}
 		fep->rx_skbuff[i] = skb;
 
-		bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
+		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
 				FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
 		bdp->cbd_sc = BD_ENET_RX_EMPTY;
 		bdp++;
@@ -995,45 +1131,47 @@ static int fec_enet_alloc_buffers(struct net_device *dev)
 }
 
 static int
-fec_enet_open(struct net_device *dev)
+fec_enet_open(struct net_device *ndev)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	int ret;
 
 	/* I should reset the ring buffers here, but I don't yet know
 	 * a simple way to do that.
 	 */
 
-	ret = fec_enet_alloc_buffers(dev);
+	ret = fec_enet_alloc_buffers(ndev);
 	if (ret)
 		return ret;
 
 	/* Probe and connect to PHY when open the interface */
-	ret = fec_enet_mii_probe(dev);
+	ret = fec_enet_mii_probe(ndev);
 	if (ret) {
-		fec_enet_free_buffers(dev);
+		fec_enet_free_buffers(ndev);
 		return ret;
 	}
 	phy_start(fep->phy_dev);
-	netif_start_queue(dev);
+	netif_start_queue(ndev);
 	fep->opened = 1;
 	return 0;
 }
 
 static int
-fec_enet_close(struct net_device *dev)
+fec_enet_close(struct net_device *ndev)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 
 	/* Don't know what to do yet. */
 	fep->opened = 0;
-	netif_stop_queue(dev);
-	fec_stop(dev);
+	netif_stop_queue(ndev);
+	fec_stop(ndev);
 
-	if (fep->phy_dev)
+	if (fep->phy_dev) {
+		phy_stop(fep->phy_dev);
 		phy_disconnect(fep->phy_dev);
+	}
 
-	fec_enet_free_buffers(dev);
+	fec_enet_free_buffers(ndev);
 
 	return 0;
 }
@@ -1051,14 +1189,14 @@ fec_enet_close(struct net_device *dev)
 #define HASH_BITS	6		/* #bits in hash */
 #define CRC32_POLY	0xEDB88320
 
-static void set_multicast_list(struct net_device *dev)
+static void set_multicast_list(struct net_device *ndev)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct netdev_hw_addr *ha;
 	unsigned int i, bit, data, crc, tmp;
 	unsigned char hash;
 
-	if (dev->flags & IFF_PROMISC) {
+	if (ndev->flags & IFF_PROMISC) {
 		tmp = readl(fep->hwp + FEC_R_CNTRL);
 		tmp |= 0x8;
 		writel(tmp, fep->hwp + FEC_R_CNTRL);
@@ -1069,7 +1207,7 @@ static void set_multicast_list(struct net_device *dev)
 	tmp &= ~0x8;
 	writel(tmp, fep->hwp + FEC_R_CNTRL);
 
-	if (dev->flags & IFF_ALLMULTI) {
+	if (ndev->flags & IFF_ALLMULTI) {
 		/* Catch all multicast addresses, so set the
 		 * filter to all 1's
 		 */
@@ -1084,7 +1222,7 @@ static void set_multicast_list(struct net_device *dev)
 	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
 	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
 
-	netdev_for_each_mc_addr(ha, dev) {
+	netdev_for_each_mc_addr(ha, ndev) {
 		/* Only support group multicast for now */
 		if (!(ha->addr[0] & 1))
 			continue;
@@ -1092,7 +1230,7 @@ static void set_multicast_list(struct net_device *dev)
 		/* calculate crc32 value of mac address */
 		crc = 0xffffffff;
 
-		for (i = 0; i < dev->addr_len; i++) {
+		for (i = 0; i < ndev->addr_len; i++) {
 			data = ha->addr[i];
 			for (bit = 0; bit < 8; bit++, data >>= 1) {
 				crc = (crc >> 1) ^
@@ -1119,20 +1257,20 @@ static void set_multicast_list(struct net_device *dev)
 
 /* Set a MAC change in hardware. */
 static int
-fec_set_mac_address(struct net_device *dev, void *p)
+fec_set_mac_address(struct net_device *ndev, void *p)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct sockaddr *addr = p;
 
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
-	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
 
-	writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
-		(dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24),
+	writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
+		(ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
 		fep->hwp + FEC_ADDR_LOW);
-	writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24),
+	writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
 		fep->hwp + FEC_ADDR_HIGH);
 	return 0;
 }
@@ -1146,16 +1284,16 @@ static const struct net_device_ops fec_netdev_ops = {
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_tx_timeout		= fec_timeout,
 	.ndo_set_mac_address	= fec_set_mac_address,
 	.ndo_do_ioctl		= fec_enet_ioctl,
 };
 
  /*
   * XXX:  We need to clean up on failure exits here.
   *
   */
-static int fec_enet_init(struct net_device *dev)
+static int fec_enet_init(struct net_device *ndev)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct bufdesc *cbd_base;
 	struct bufdesc *bdp;
 	int i;
@@ -1170,20 +1308,19 @@ static int fec_enet_init(struct net_device *dev)
 
 	spin_lock_init(&fep->hw_lock);
 
-	fep->hwp = (void __iomem *)dev->base_addr;
-	fep->netdev = dev;
+	fep->netdev = ndev;
 
 	/* Get the Ethernet address */
-	fec_get_mac(dev);
+	fec_get_mac(ndev);
 
 	/* Set receive and transmit descriptor base. */
 	fep->rx_bd_base = cbd_base;
 	fep->tx_bd_base = cbd_base + RX_RING_SIZE;
 
 	/* The FEC Ethernet specific entries in the device structure */
-	dev->watchdog_timeo = TX_TIMEOUT;
-	dev->netdev_ops = &fec_netdev_ops;
-	dev->ethtool_ops = &fec_enet_ethtool_ops;
+	ndev->watchdog_timeo = TX_TIMEOUT;
+	ndev->netdev_ops = &fec_netdev_ops;
+	ndev->ethtool_ops = &fec_enet_ethtool_ops;
 
 	/* Initialize the receive buffer descriptors. */
 	bdp = fep->rx_bd_base;
@@ -1212,152 +1349,11 @@ static int fec_enet_init(struct net_device *dev)
 	bdp--;
 	bdp->cbd_sc |= BD_SC_WRAP;
 
-	fec_restart(dev, 0);
+	fec_restart(ndev, 0);
 
 	return 0;
 }
 
-/* This function is called to start or restart the FEC during a link
- * change.  This only happens when switching between half and full
- * duplex.
- */
-static void
-fec_restart(struct net_device *dev, int duplex)
-{
-	struct fec_enet_private *fep = netdev_priv(dev);
-	const struct platform_device_id *id_entry =
-				platform_get_device_id(fep->pdev);
-	int i;
-	u32 val, temp_mac[2];
-
-	/* Whack a reset.  We should wait for this. */
-	writel(1, fep->hwp + FEC_ECNTRL);
-	udelay(10);
-
-	/*
-	 * enet-mac reset will reset mac address registers too,
-	 * so need to reconfigure it.
-	 */
-	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
-		memcpy(&temp_mac, dev->dev_addr, ETH_ALEN);
-		writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
-		writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
-	}
-
-	/* Clear any outstanding interrupt. */
-	writel(0xffc00000, fep->hwp + FEC_IEVENT);
-
-	/* Reset all multicast. */
-	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
-	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
-#ifndef CONFIG_M5272
-	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
-	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
-#endif
-
-	/* Set maximum receive buffer size. */
-	writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
-
-	/* Set receive and transmit descriptor base. */
-	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
-	writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
-			fep->hwp + FEC_X_DES_START);
-
-	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
-	fep->cur_rx = fep->rx_bd_base;
-
-	/* Reset SKB transmit buffers. */
-	fep->skb_cur = fep->skb_dirty = 0;
-	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
-		if (fep->tx_skbuff[i]) {
-			dev_kfree_skb_any(fep->tx_skbuff[i]);
-			fep->tx_skbuff[i] = NULL;
-		}
-	}
-
-	/* Enable MII mode */
-	if (duplex) {
-		/* MII enable / FD enable */
-		writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
-		writel(0x04, fep->hwp + FEC_X_CNTRL);
-	} else {
-		/* MII enable / No Rcv on Xmit */
-		writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
-		writel(0x0, fep->hwp + FEC_X_CNTRL);
-	}
-	fep->full_duplex = duplex;
-
-	/* Set MII speed */
-	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
-
-	/*
-	 * The phy interface and speed need to get configured
-	 * differently on enet-mac.
-	 */
-	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
-		val = readl(fep->hwp + FEC_R_CNTRL);
-
-		/* MII or RMII */
-		if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
-			val |= (1 << 8);
-		else
-			val &= ~(1 << 8);
-
-		/* 10M or 100M */
-		if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
-			val &= ~(1 << 9);
-		else
-			val |= (1 << 9);
-
-		writel(val, fep->hwp + FEC_R_CNTRL);
-	} else {
-#ifdef FEC_MIIGSK_ENR
-		if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
-			/* disable the gasket and wait */
-			writel(0, fep->hwp + FEC_MIIGSK_ENR);
-			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
-				udelay(1);
-
-			/*
-			 * configure the gasket:
-			 *   RMII, 50 MHz, no loopback, no echo
-			 */
-			writel(1, fep->hwp + FEC_MIIGSK_CFGR);
-
-			/* re-enable the gasket */
-			writel(2, fep->hwp + FEC_MIIGSK_ENR);
-		}
-#endif
-	}
-
-	/* And last, enable the transmit and receive processing */
-	writel(2, fep->hwp + FEC_ECNTRL);
-	writel(0, fep->hwp + FEC_R_DES_ACTIVE);
-
-	/* Enable interrupts we wish to service */
-	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
-}
-
-static void
-fec_stop(struct net_device *dev)
-{
-	struct fec_enet_private *fep = netdev_priv(dev);
-
-	/* We cannot expect a graceful transmit stop without link !!! */
-	if (fep->link) {
-		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
-		udelay(10);
-		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
-			printk("fec_stop : Graceful transmit stop did not complete !\n");
-	}
-
-	/* Whack a reset.  We should wait for this. */
-	writel(1, fep->hwp + FEC_ECNTRL);
-	udelay(10);
-	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
-	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
-}
-
 static int __devinit
 fec_probe(struct platform_device *pdev)
 {
@@ -1377,19 +1373,20 @@ fec_probe(struct platform_device *pdev)
 
 	/* Init network device */
 	ndev = alloc_etherdev(sizeof(struct fec_enet_private));
-	if (!ndev)
-		return -ENOMEM;
+	if (!ndev) {
+		ret = -ENOMEM;
+		goto failed_alloc_etherdev;
+	}
 
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 
 	/* setup board info structure */
 	fep = netdev_priv(ndev);
-	memset(fep, 0, sizeof(*fep));
 
-	ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));
+	fep->hwp = ioremap(r->start, resource_size(r));
 	fep->pdev = pdev;
 
-	if (!ndev->base_addr) {
+	if (!fep->hwp) {
 		ret = -ENOMEM;
 		goto failed_ioremap;
 	}
@@ -1407,10 +1404,9 @@ fec_probe(struct platform_device *pdev)
 			break;
 		ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
 		if (ret) {
-			while (i >= 0) {
+			while (--i >= 0) {
 				irq = platform_get_irq(pdev, i);
 				free_irq(irq, ndev);
-				i--;
 			}
 			goto failed_irq;
 		}
@@ -1453,9 +1449,11 @@ failed_clk:
 		free_irq(irq, ndev);
 	}
 failed_irq:
-	iounmap((void __iomem *)ndev->base_addr);
+	iounmap(fep->hwp);
 failed_ioremap:
 	free_netdev(ndev);
+failed_alloc_etherdev:
+	release_mem_region(r->start, resource_size(r));
 
 	return ret;
 }
@@ -1465,16 +1463,22 @@ fec_drv_remove(struct platform_device *pdev)
 {
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct fec_enet_private *fep = netdev_priv(ndev);
-
-	platform_set_drvdata(pdev, NULL);
+	struct resource *r;
 
 	fec_stop(ndev);
 	fec_enet_mii_remove(fep);
 	clk_disable(fep->clk);
 	clk_put(fep->clk);
-	iounmap((void __iomem *)ndev->base_addr);
+	iounmap(fep->hwp);
 	unregister_netdev(ndev);
 	free_netdev(ndev);
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	BUG_ON(!r);
+	release_mem_region(r->start, resource_size(r));
+
+	platform_set_drvdata(pdev, NULL);
+
 	return 0;
 }
 
@@ -1483,16 +1487,14 @@ static int
 fec_suspend(struct device *dev)
 {
 	struct net_device *ndev = dev_get_drvdata(dev);
-	struct fec_enet_private *fep;
+	struct fec_enet_private *fep = netdev_priv(ndev);
 
-	if (ndev) {
-		fep = netdev_priv(ndev);
-		if (netif_running(ndev)) {
-			fec_stop(ndev);
-			netif_device_detach(ndev);
-		}
-		clk_disable(fep->clk);
+	if (netif_running(ndev)) {
+		fec_stop(ndev);
+		netif_device_detach(ndev);
 	}
+	clk_disable(fep->clk);
+
 	return 0;
 }
 
@@ -1500,16 +1502,14 @@ static int
 fec_resume(struct device *dev)
 {
 	struct net_device *ndev = dev_get_drvdata(dev);
-	struct fec_enet_private *fep;
+	struct fec_enet_private *fep = netdev_priv(ndev);
 
-	if (ndev) {
-		fep = netdev_priv(ndev);
-		clk_enable(fep->clk);
-		if (netif_running(ndev)) {
-			fec_restart(ndev, fep->full_duplex);
-			netif_device_attach(ndev);
-		}
+	clk_enable(fep->clk);
+	if (netif_running(ndev)) {
+		fec_restart(ndev, fep->full_duplex);
+		netif_device_attach(ndev);
 	}
+
 	return 0;
 }
 
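For reference, the fec.c hunks above fold together two independent cleanups: every net_device parameter is renamed from dev to ndev so it cannot be confused with a struct device, and the streaming DMA calls stop passing a NULL device. Mappings now go through &fep->pdev->dev, the platform device that actually owns the DMA. A minimal sketch, assuming the 2.6.38-era DMA API, of the map/unmap pairing for one RX buffer (device, length and direction must match on both sides):

	/* map: hand the buffer to the FEC for DMA */
	bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
					  FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);

	/* unmap: reclaim it with the same device, length and direction */
	dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
			 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
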
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index af09296ef0dd..9c0b1bac6af6 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5645,6 +5645,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 		goto out_error;
 	}
 
+	netif_carrier_off(dev);
+
 	dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
 		 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
 
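The forcedeth one-liner fixes a confusing default: a freshly registered net_device reports carrier on until something turns it off, so the interface claimed link before the PHY was ever polled. Calling netif_carrier_off() once register_netdev() has succeeded is the usual cure; a sketch of the assumed probe-time ordering:

	ret = register_netdev(dev);
	if (ret)
		goto out_error;

	/* start with carrier down until the first real link event */
	netif_carrier_off(dev);
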
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 119aa2000c24..5ed8f9f9419f 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1920,7 +1920,7 @@ int startup_gfar(struct net_device *ndev)
 		if (err) {
 			for (j = 0; j < i; j++)
 				free_grp_irqs(&priv->gfargrp[j]);
-				goto irq_fail;
+			goto irq_fail;
 		}
 	}
 
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index ac1d323c5eb5..8931168d3e74 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -400,13 +400,14 @@ static void *bpq_seq_start(struct seq_file *seq, loff_t *pos)
 static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	struct list_head *p;
+	struct bpqdev *bpqdev = v;
 
 	++*pos;
 
 	if (v == SEQ_START_TOKEN)
-		p = rcu_dereference(bpq_devices.next);
+		p = rcu_dereference(list_next_rcu(&bpq_devices));
 	else
-		p = rcu_dereference(((struct bpqdev *)v)->bpq_list.next);
+		p = rcu_dereference(list_next_rcu(&bpqdev->bpq_list));
 
 	return (p == &bpq_devices) ? NULL
 		: list_entry(p, struct bpqdev, bpq_list);
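The bpqether change is an RCU annotation fix: the ->next pointer of an RCU-protected list carries the __rcu address space, and feeding a plain list_head.next to rcu_dereference() makes sparse complain. list_next_rcu() returns the same pointer with the proper annotation. A sketch of the idiom, assuming the caller is inside rcu_read_lock():

	struct list_head *p;
	struct bpqdev *next = NULL;

	/* step to the following element of the RCU-protected list */
	p = rcu_dereference(list_next_rcu(&bpqdev->bpq_list));
	if (p != &bpq_devices)
		next = list_entry(p, struct bpqdev, bpq_list);
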
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 0a2368fa6bc6..65c1833244f7 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -129,6 +129,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 		break;
 	case E1000_DEV_ID_82580_COPPER:
 	case E1000_DEV_ID_82580_FIBER:
+	case E1000_DEV_ID_82580_QUAD_FIBER:
 	case E1000_DEV_ID_82580_SERDES:
 	case E1000_DEV_ID_82580_SGMII:
 	case E1000_DEV_ID_82580_COPPER_DUAL:
@@ -237,9 +238,15 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 		size = 14;
 	nvm->word_size = 1 << size;
 
-	/* if 82576 then initialize mailbox parameters */
-	if (mac->type == e1000_82576)
+	/* if part supports SR-IOV then initialize mailbox parameters */
+	switch (mac->type) {
+	case e1000_82576:
+	case e1000_i350:
 		igb_init_mbx_params_pf(hw);
+		break;
+	default:
+		break;
+	}
 
 	/* setup PHY parameters */
 	if (phy->media_type != e1000_media_type_copper) {
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index e2638afb8cdc..281324e85980 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -54,6 +54,7 @@ struct e1000_hw;
 #define E1000_DEV_ID_82580_SERDES             0x1510
 #define E1000_DEV_ID_82580_SGMII              0x1511
 #define E1000_DEV_ID_82580_COPPER_DUAL        0x1516
+#define E1000_DEV_ID_82580_QUAD_FIBER         0x1527
 #define E1000_DEV_ID_DH89XXCC_SGMII           0x0438
 #define E1000_DEV_ID_DH89XXCC_SERDES          0x043A
 #define E1000_DEV_ID_DH89XXCC_BACKPLANE       0x043C
diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c
index c474cdb70047..78d48c7fa859 100644
--- a/drivers/net/igb/e1000_mbx.c
+++ b/drivers/net/igb/e1000_mbx.c
@@ -422,26 +422,24 @@ s32 igb_init_mbx_params_pf(struct e1000_hw *hw)
 {
 	struct e1000_mbx_info *mbx = &hw->mbx;
 
-	if (hw->mac.type == e1000_82576) {
-		mbx->timeout = 0;
-		mbx->usec_delay = 0;
-
-		mbx->size = E1000_VFMAILBOX_SIZE;
-
-		mbx->ops.read = igb_read_mbx_pf;
-		mbx->ops.write = igb_write_mbx_pf;
-		mbx->ops.read_posted = igb_read_posted_mbx;
-		mbx->ops.write_posted = igb_write_posted_mbx;
-		mbx->ops.check_for_msg = igb_check_for_msg_pf;
-		mbx->ops.check_for_ack = igb_check_for_ack_pf;
-		mbx->ops.check_for_rst = igb_check_for_rst_pf;
-
-		mbx->stats.msgs_tx = 0;
-		mbx->stats.msgs_rx = 0;
-		mbx->stats.reqs = 0;
-		mbx->stats.acks = 0;
-		mbx->stats.rsts = 0;
-	}
+	mbx->timeout = 0;
+	mbx->usec_delay = 0;
+
+	mbx->size = E1000_VFMAILBOX_SIZE;
+
+	mbx->ops.read = igb_read_mbx_pf;
+	mbx->ops.write = igb_write_mbx_pf;
+	mbx->ops.read_posted = igb_read_posted_mbx;
+	mbx->ops.write_posted = igb_write_posted_mbx;
+	mbx->ops.check_for_msg = igb_check_for_msg_pf;
+	mbx->ops.check_for_ack = igb_check_for_ack_pf;
+	mbx->ops.check_for_rst = igb_check_for_rst_pf;
+
+	mbx->stats.msgs_tx = 0;
+	mbx->stats.msgs_rx = 0;
+	mbx->stats.reqs = 0;
+	mbx->stats.acks = 0;
+	mbx->stats.rsts = 0;
 
 	return 0;
 }
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 58c665b7513d..cb6bf7b815ae 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -68,6 +68,7 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
@@ -2286,9 +2287,14 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 
 	spin_lock_init(&adapter->stats64_lock);
 #ifdef CONFIG_PCI_IOV
-	if (hw->mac.type == e1000_82576)
+	switch (hw->mac.type) {
+	case e1000_82576:
+	case e1000_i350:
 		adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs;
-
+		break;
+	default:
+		break;
+	}
 #endif /* CONFIG_PCI_IOV */
 	adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
 
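Taken together, the igb hunks show the usual recipe for extending a feature to a new MAC: the i350 gains SR-IOV mailbox support, so each single-type check widens into a switch listing every family that has the feature, and the type check inside igb_init_mbx_params_pf() is dropped because its callers now do the gating. The pattern, sketched:

	/* gate the feature on the set of MACs that implement it */
	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		igb_init_mbx_params_pf(hw);
		break;
	default:
		break;
	}
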
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 9e3f4f54281d..4488bd581eca 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -635,7 +635,7 @@ static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 	ret = sh_irda_set_baudrate(self, speed);
 	if (ret < 0)
-		return ret;
+		goto sh_irda_hard_xmit_end;
 
 	self->tx_buff.len = 0;
 	if (skb->len) {
@@ -652,11 +652,21 @@ static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 		sh_irda_write(self, IRTFLR, self->tx_buff.len);
 		sh_irda_write(self, IRTCTR, ARMOD | TE);
-	}
+	} else
+		goto sh_irda_hard_xmit_end;
 
 	dev_kfree_skb(skb);
 
 	return 0;
+
+sh_irda_hard_xmit_end:
+	sh_irda_set_baudrate(self, 9600);
+	netif_wake_queue(self->ndev);
+	sh_irda_rcv_ctrl(self, 1);
+	dev_kfree_skb(skb);
+
+	return ret;
+
 }
 
 static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
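The sh_irda rework funnels every transmit failure through the single sh_irda_hard_xmit_end label, so the device is always put back into a usable state (baud rate reset to 9600, queue woken, receiver re-enabled) and the skb is consumed exactly once on success and failure alike. The shape of the pattern, sketched with hypothetical helper names:

	ret = setup_step(self, speed);	/* any failing step... */
	if (ret < 0)
		goto xmit_end;		/* ...jumps to the one cleanup site */
	/* normal transmit path */
	return 0;

xmit_end:
	restore_device_state(self);	/* undo partial hardware setup */
	dev_kfree_skb(skb);		/* the skb is freed on every path */
	return ret;
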
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 3b8c92463617..12769b58c2e7 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -334,6 +334,10 @@ struct ixgbe_adapter {
334 u16 bd_number; 334 u16 bd_number;
335 struct work_struct reset_task; 335 struct work_struct reset_task;
336 struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; 336 struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
337
338 /* DCB parameters */
339 struct ieee_pfc *ixgbe_ieee_pfc;
340 struct ieee_ets *ixgbe_ieee_ets;
337 struct ixgbe_dcb_config dcb_cfg; 341 struct ixgbe_dcb_config dcb_cfg;
338 struct ixgbe_dcb_config temp_dcb_cfg; 342 struct ixgbe_dcb_config temp_dcb_cfg;
339 u8 dcb_set_bitmap; 343 u8 dcb_set_bitmap;
@@ -521,7 +525,6 @@ extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
 extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
 extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
 extern int ethtool_ioctl(struct ifreq *ifr);
-extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index);
 extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
 extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
 extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index d5ede2df3e42..ebbda7d15254 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1370,6 +1370,9 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1370 hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); 1370 hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
1371 1371
1372 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 1372 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1373
1374 /* clear VMDq pool/queue selection for RAR 0 */
1375 hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
1373 } 1376 }
1374 hw->addr_ctrl.overflow_promisc = 0; 1377 hw->addr_ctrl.overflow_promisc = 0;
1375 1378
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 66ed045a8cf0..90cceb4a6317 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -29,6 +29,7 @@
 #define _IXGBE_COMMON_H_
 
 #include "ixgbe_type.h"
+#include "ixgbe.h"
 
 u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
@@ -110,9 +111,8 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
 
 #define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
 
-extern struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw);
 #define hw_dbg(hw, format, arg...) \
-	netdev_dbg(ixgbe_get_hw_dev(hw), format, ##arg)
+	netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev, format, ##arg)
 #define e_dev_info(format, arg...) \
 	dev_info(&adapter->pdev->dev, format, ## arg)
 #define e_dev_warn(format, arg...) \
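The hw_dbg() rewrite drops the exported ixgbe_get_hw_dev() helper and instead reaches the net_device through hw->back, the back-pointer each ixgbe adapter installs at probe time (hence the new include of ixgbe.h). A sketch of the assumed wiring; the cast is safe because the driver only ever stores its own ixgbe_adapter there:

	/* at probe: let bare ixgbe_hw code find its adapter again */
	adapter->hw.back = adapter;

	/* later, from hw-only code: */
	netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev,
		   "example message\n");
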
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index d16c260c1f50..13c962efbfc9 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -34,6 +34,42 @@
 #include "ixgbe_dcb_82599.h"
 
 /**
+ * ixgbe_ieee_credits - This calculates the ieee traffic class
+ * credits from the configured bandwidth percentages. Credits
+ * are the smallest unit programable into the underlying
+ * hardware. The IEEE 802.1Qaz specification do not use bandwidth
+ * groups so this is much simplified from the CEE case.
+ */
+s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame)
+{
+	int min_percent = 100;
+	int min_credit, multiplier;
+	int i;
+
+	min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
+			DCB_CREDIT_QUANTUM;
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		if (bw[i] < min_percent && bw[i])
+			min_percent = bw[i];
+	}
+
+	multiplier = (min_credit / min_percent) + 1;
+
+	/* Find out the hw credits for each TC */
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL);
+
+		if (val < min_credit)
+			val = min_credit;
+		refill[i] = val;
+
+		max[i] = (bw[i] * MAX_CREDIT)/100;
+	}
+	return 0;
+}
+
+/**
  * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
  * @ixgbe_dcb_config: Struct containing DCB settings.
  * @direction: Configuring either Tx or Rx.
@@ -141,6 +177,59 @@ out:
 	return ret_val;
 }
 
+void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
+{
+	int i;
+
+	*pfc_en = 0;
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+		*pfc_en |= (cfg->tc_config[i].dcb_pfc & 0xF) << i;
+}
+
+void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction,
+			     u16 *refill)
+{
+	struct tc_bw_alloc *p;
+	int i;
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		p = &cfg->tc_config[i].path[direction];
+		refill[i] = p->data_credits_refill;
+	}
+}
+
+void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max)
+{
+	int i;
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+		max[i] = cfg->tc_config[i].desc_credits_max;
+}
+
+void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction,
+			    u8 *bwgid)
+{
+	struct tc_bw_alloc *p;
+	int i;
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		p = &cfg->tc_config[i].path[direction];
+		bwgid[i] = p->bwg_id;
+	}
+}
+
+void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
+			   u8 *ptype)
+{
+	struct tc_bw_alloc *p;
+	int i;
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		p = &cfg->tc_config[i].path[direction];
+		ptype[i] = p->prio_type;
+	}
+}
+
 /**
  * ixgbe_dcb_hw_config - Config and enable DCB
  * @hw: pointer to hardware structure
@@ -152,13 +241,30 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
 			struct ixgbe_dcb_config *dcb_config)
 {
 	s32 ret = 0;
+	u8 pfc_en;
+	u8 ptype[MAX_TRAFFIC_CLASS];
+	u8 bwgid[MAX_TRAFFIC_CLASS];
+	u16 refill[MAX_TRAFFIC_CLASS];
+	u16 max[MAX_TRAFFIC_CLASS];
+
+	/* Unpack CEE standard containers */
+	ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en);
+	ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill);
+	ixgbe_dcb_unpack_max(dcb_config, max);
+	ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid);
+	ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype);
+
 	switch (hw->mac.type) {
 	case ixgbe_mac_82598EB:
-		ret = ixgbe_dcb_hw_config_82598(hw, dcb_config);
+		ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->rx_pba_cfg,
+						pfc_en, refill, max, bwgid,
+						ptype);
 		break;
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
-		ret = ixgbe_dcb_hw_config_82599(hw, dcb_config);
+		ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->rx_pba_cfg,
+						pfc_en, refill, max, bwgid,
+						ptype);
 		break;
 	default:
 		break;
@@ -166,3 +272,70 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
 	return ret;
 }
 
+/* Helper routines to abstract HW specifics from DCB netlink ops */
+s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en)
+{
+	int ret = -EINVAL;
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
+			    u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa)
+{
+	int i;
+	u8 prio_type[IEEE_8021QAZ_MAX_TCS];
+
+	/* Map TSA onto CEE prio type */
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		switch (tsa[i]) {
+		case IEEE_8021QAZ_TSA_STRICT:
+			prio_type[i] = 2;
+			break;
+		case IEEE_8021QAZ_TSA_ETS:
+			prio_type[i] = 0;
+			break;
+		default:
+			/* Hardware only supports priority strict or
+			 * ETS transmission selection algorithms if
+			 * we receive some other value from dcbnl
+			 * throw an error
+			 */
+			return -EINVAL;
+		}
+	}
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max,
+						  prio_type);
+		ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
+						       bwg_id, prio_type);
+		ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
+						       bwg_id, prio_type);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
+						  bwg_id, prio_type);
+		ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
+						       bwg_id, prio_type);
+		ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
+						       bwg_id, prio_type);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
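To make the new ixgbe_ieee_credits() arithmetic concrete, take an assumed ETS configuration that splits bandwidth 50/50 between two traffic classes at max_frame = 1518, with DCB_CREDIT_QUANTUM = 64 and MAX_CREDIT = 4095 (the values this era's ixgbe_dcb.h is assumed to define):

	min_credit  = (1518/2 + 64 - 1) / 64 = 12     /* credits for half a frame */
	min_percent = 50
	multiplier  = (12 / 50) + 1 = 1
	refill[i]   = max(min(50 * 1, MAX_CREDIT_REFILL), 12) = 50
	max[i]      = 50 * 4095 / 100 = 2047

Each active class therefore refills 50 credit quanta per cycle and may bank up to 2047, proportional to its percentage but never below what half a maximum-size frame requires.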
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 1cfe38ee1644..e5935114815e 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -139,7 +139,6 @@ struct ixgbe_dcb_config {
139 struct tc_configuration tc_config[MAX_TRAFFIC_CLASS]; 139 struct tc_configuration tc_config[MAX_TRAFFIC_CLASS];
140 u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */ 140 u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */
141 bool pfc_mode_enable; 141 bool pfc_mode_enable;
142 bool round_robin_enable;
143 142
144 enum dcb_rx_pba_cfg rx_pba_cfg; 143 enum dcb_rx_pba_cfg rx_pba_cfg;
145 144
@@ -148,12 +147,21 @@ struct ixgbe_dcb_config {
 };
 
 /* DCB driver APIs */
+void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en);
+void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *, int, u16 *);
+void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *);
+void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *);
+void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *);
 
 /* DCB credits calculation */
+s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame);
 s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
 				   struct ixgbe_dcb_config *, int, u8);
 
 /* DCB hw initialization */
+s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
+			    u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type);
+s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en);
 s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
 
 /* DCB definitions for credit calculation */
158 166
159/* DCB definitions for credit calculation */ 167/* DCB definitions for credit calculation */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index 9a5e89c12e05..2965edcdac7b 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -38,15 +38,14 @@
  *
  * Configure packet buffers for DCB mode.
  */
-static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
-					struct ixgbe_dcb_config *dcb_config)
+static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw, u8 rx_pba)
 {
 	s32 ret_val = 0;
 	u32 value = IXGBE_RXPBSIZE_64KB;
 	u8  i = 0;
 
 	/* Setup Rx packet buffer sizes */
-	switch (dcb_config->rx_pba_cfg) {
+	switch (rx_pba) {
 	case pba_80_48:
 		/* Setup the first four at 80KB */
 		value = IXGBE_RXPBSIZE_80KB;
@@ -78,10 +77,11 @@ static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
  *
  * Configure Rx Data Arbiter and credits for each traffic class.
  */
-static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
-					struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
+					u16 *refill,
+					u16 *max,
+					u8 *prio_type)
 {
-	struct tc_bw_alloc    *p;
 	u32    reg           = 0;
 	u32    credit_refill = 0;
 	u32    credit_max    = 0;
@@ -102,13 +102,12 @@ static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
 
 	/* Configure traffic class credits and priority */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-		p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];
-		credit_refill = p->data_credits_refill;
-		credit_max    = p->data_credits_max;
+		credit_refill = refill[i];
+		credit_max    = max[i];
 
 		reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
 
-		if (p->prio_type == prio_link)
+		if (prio_type[i] == prio_link)
 			reg |= IXGBE_RT2CR_LSP;
 
 		IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
@@ -135,10 +134,12 @@ static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
  *
  * Configure Tx Descriptor Arbiter and credits for each traffic class.
  */
-static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
-					struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
+						u16 *refill,
+						u16 *max,
+						u8 *bwg_id,
+						u8 *prio_type)
 {
-	struct tc_bw_alloc *p;
 	u32    reg, max_credits;
 	u8     i;
 
@@ -146,10 +147,8 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
 
 	/* Enable arbiter */
 	reg &= ~IXGBE_DPMCS_ARBDIS;
-	if (!(dcb_config->round_robin_enable)) {
-		/* Enable DFP and Recycle mode */
-		reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
-	}
+	/* Enable DFP and Recycle mode */
+	reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
 	reg |= IXGBE_DPMCS_TSOEF;
 	/* Configure Max TSO packet size 34KB including payload and headers */
 	reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
@@ -158,16 +157,15 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
 
 	/* Configure traffic class credits and priority */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-		p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
-		max_credits = dcb_config->tc_config[i].desc_credits_max;
+		max_credits = max[i];
 		reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
-		reg |= p->data_credits_refill;
-		reg |= (u32)(p->bwg_id) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
+		reg |= refill[i];
+		reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
 
-		if (p->prio_type == prio_group)
+		if (prio_type[i] == prio_group)
 			reg |= IXGBE_TDTQ2TCCR_GSP;
 
-		if (p->prio_type == prio_link)
+		if (prio_type[i] == prio_link)
 			reg |= IXGBE_TDTQ2TCCR_LSP;
 
 		IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
@@ -183,10 +181,12 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
  *
  * Configure Tx Data Arbiter and credits for each traffic class.
  */
-static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
-					struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
+						u16 *refill,
+						u16 *max,
+						u8 *bwg_id,
+						u8 *prio_type)
 {
-	struct tc_bw_alloc *p;
 	u32 reg;
 	u8	i;
 
@@ -200,15 +200,14 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
 
 	/* Configure traffic class credits and priority */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-		p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
-		reg = p->data_credits_refill;
-		reg |= (u32)(p->data_credits_max) << IXGBE_TDPT2TCCR_MCL_SHIFT;
-		reg |= (u32)(p->bwg_id) << IXGBE_TDPT2TCCR_BWG_SHIFT;
+		reg = refill[i];
+		reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
+		reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;
 
-		if (p->prio_type == prio_group)
+		if (prio_type[i] == prio_group)
 			reg |= IXGBE_TDPT2TCCR_GSP;
 
-		if (p->prio_type == prio_link)
+		if (prio_type[i] == prio_link)
 			reg |= IXGBE_TDPT2TCCR_LSP;
 
 		IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
@@ -229,13 +228,12 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
  *
  * Configure Priority Flow Control for each traffic class.
  */
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
-			       struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
 {
 	u32 reg, rx_pba_size;
 	u8  i;
 
-	if (!dcb_config->pfc_mode_enable)
+	if (!pfc_en)
 		goto out;
 
 	/* Enable Transmit Priority Flow Control */
@@ -256,19 +254,20 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
 	 * for each traffic class.
 	 */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		int enabled = pfc_en & (1 << i);
 		rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
 		rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
 		reg = (rx_pba_size - hw->fc.low_water) << 10;
 
-		if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
-		    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
+		if (enabled == pfc_enabled_tx ||
+		    enabled == pfc_enabled_full)
 			reg |= IXGBE_FCRTL_XONE;
 
 		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
 
 		reg = (rx_pba_size - hw->fc.high_water) << 10;
-		if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
-		    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
+		if (enabled == pfc_enabled_tx ||
+		    enabled == pfc_enabled_full)
 			reg |= IXGBE_FCRTH_FCEN;
 
 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
@@ -292,7 +291,7 @@ out:
292 * Configure queue statistics registers, all queues belonging to the same traffic 291 * Configure queue statistics registers, all queues belonging to the same traffic
293 * class use a single set of queue statistics counters. 292 * class use a single set of queue statistics counters.
294 */ 293 */
295static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) 294s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
296{ 295{
297 u32 reg = 0; 296 u32 reg = 0;
298 u8 i = 0; 297 u8 i = 0;
@@ -325,13 +324,16 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
325 * Configure dcb settings and enable dcb mode. 324 * Configure dcb settings and enable dcb mode.
326 */ 325 */
327s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, 326s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
328 struct ixgbe_dcb_config *dcb_config) 327 u8 rx_pba, u8 pfc_en, u16 *refill,
328 u16 *max, u8 *bwg_id, u8 *prio_type)
329{ 329{
330 ixgbe_dcb_config_packet_buffers_82598(hw, dcb_config); 330 ixgbe_dcb_config_packet_buffers_82598(hw, rx_pba);
331 ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config); 331 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
332 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config); 332 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
333 ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config); 333 bwg_id, prio_type);
334 ixgbe_dcb_config_pfc_82598(hw, dcb_config); 334 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
335 bwg_id, prio_type);
336 ixgbe_dcb_config_pfc_82598(hw, pfc_en);
335 ixgbe_dcb_config_tc_stats_82598(hw); 337 ixgbe_dcb_config_tc_stats_82598(hw);
336 338
337 return 0; 339 return 0;
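
For orientation, here is a hedged sketch of what a caller of the reworked 82598 entry point looks like once the dcb_config pointer is gone; every array value below is an illustrative placeholder, not a driver default.

/* Sketch only: program the 82598 DCB blocks from flat per-TC arrays. */
static s32 example_dcb_setup_82598(struct ixgbe_hw *hw)
{
	u16 refill[MAX_TRAFFIC_CLASS] = { 64, 64, 64, 64, 64, 64, 64, 64 };
	u16 max[MAX_TRAFFIC_CLASS] = { 255, 255, 255, 255, 255, 255, 255, 255 };
	u8 bwg_id[MAX_TRAFFIC_CLASS] = { 0, 1, 2, 3, 4, 5, 6, 7 };
	u8 prio_type[MAX_TRAFFIC_CLASS] = { prio_group, prio_group, prio_group,
					    prio_group, prio_group, prio_group,
					    prio_group, prio_link };
	u8 pfc_en = 0xff;	/* enable PFC on all eight priorities */

	return ixgbe_dcb_hw_config_82598(hw, pba_equal, pfc_en,
					 refill, max, bwg_id, prio_type);
}
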
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h
index abc03ccfa088..0d2a758effce 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h
@@ -71,9 +71,28 @@
71/* DCB hardware-specific driver APIs */ 71/* DCB hardware-specific driver APIs */
72 72
73/* DCB PFC functions */ 73/* DCB PFC functions */
74s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *); 74s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en);
75 75
76/* DCB hw initialization */ 76/* DCB hw initialization */
77s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *); 77s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
78 u16 *refill,
79 u16 *max,
80 u8 *prio_type);
81
82s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
83 u16 *refill,
84 u16 *max,
85 u8 *bwg_id,
86 u8 *prio_type);
87
88s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
89 u16 *refill,
90 u16 *max,
91 u8 *bwg_id,
92 u8 *prio_type);
93
94s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
95 u8 rx_pba, u8 pfc_en, u16 *refill,
96 u16 *max, u8 *bwg_id, u8 *prio_type);
78 97
79#endif /* _DCB_82598_CONFIG_H */ 98#endif /* _DCB_82598_CONFIG_H */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 374e1f74d0f5..b0d97a98c84d 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -33,19 +33,18 @@
33/** 33/**
34 * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers 34 * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers
35 * @hw: pointer to hardware structure 35 * @hw: pointer to hardware structure
36 * @dcb_config: pointer to ixgbe_dcb_config structure 36 * @rx_pba: method to distribute packet buffer
37 * 37 *
38 * Configure packet buffers for DCB mode. 38 * Configure packet buffers for DCB mode.
39 */ 39 */
40static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, 40static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, u8 rx_pba)
41 struct ixgbe_dcb_config *dcb_config)
42{ 41{
43 s32 ret_val = 0; 42 s32 ret_val = 0;
44 u32 value = IXGBE_RXPBSIZE_64KB; 43 u32 value = IXGBE_RXPBSIZE_64KB;
45 u8 i = 0; 44 u8 i = 0;
46 45
47 /* Setup Rx packet buffer sizes */ 46 /* Setup Rx packet buffer sizes */
48 switch (dcb_config->rx_pba_cfg) { 47 switch (rx_pba) {
49 case pba_80_48: 48 case pba_80_48:
50 /* Setup the first four at 80KB */ 49 /* Setup the first four at 80KB */
51 value = IXGBE_RXPBSIZE_80KB; 50 value = IXGBE_RXPBSIZE_80KB;
@@ -75,14 +74,19 @@ static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
75/** 74/**
76 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter 75 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
77 * @hw: pointer to hardware structure 76 * @hw: pointer to hardware structure
78 * @dcb_config: pointer to ixgbe_dcb_config structure 77 * @refill: refill credits indexed by traffic class
78 * @max: max credits indexed by traffic class
79 * @bwg_id: bandwidth grouping indexed by traffic class
80 * @prio_type: priority type indexed by traffic class
79 * 81 *
80 * Configure Rx Packet Arbiter and credits for each traffic class. 82 * Configure Rx Packet Arbiter and credits for each traffic class.
81 */ 83 */
82static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, 84s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
83 struct ixgbe_dcb_config *dcb_config) 85 u16 *refill,
86 u16 *max,
87 u8 *bwg_id,
88 u8 *prio_type)
84{ 89{
85 struct tc_bw_alloc *p;
86 u32 reg = 0; 90 u32 reg = 0;
87 u32 credit_refill = 0; 91 u32 credit_refill = 0;
88 u32 credit_max = 0; 92 u32 credit_max = 0;
@@ -103,15 +107,13 @@ static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
103 107
104 /* Configure traffic class credits and priority */ 108 /* Configure traffic class credits and priority */
105 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 109 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
106 p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG]; 110 credit_refill = refill[i];
107 111 credit_max = max[i];
108 credit_refill = p->data_credits_refill;
109 credit_max = p->data_credits_max;
110 reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT); 112 reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);
111 113
112 reg |= (u32)(p->bwg_id) << IXGBE_RTRPT4C_BWG_SHIFT; 114 reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;
113 115
114 if (p->prio_type == prio_link) 116 if (prio_type[i] == prio_link)
115 reg |= IXGBE_RTRPT4C_LSP; 117 reg |= IXGBE_RTRPT4C_LSP;
116 118
117 IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg); 119 IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
@@ -130,14 +132,19 @@ static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
130/** 132/**
131 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter 133 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
132 * @hw: pointer to hardware structure 134 * @hw: pointer to hardware structure
133 * @dcb_config: pointer to ixgbe_dcb_config structure 135 * @refill: refill credits indexed by traffic class
136 * @max: max credits indexed by traffic class
137 * @bwg_id: bandwidth grouping indexed by traffic class
138 * @prio_type: priority type indexed by traffic class
134 * 139 *
135 * Configure Tx Descriptor Arbiter and credits for each traffic class. 140 * Configure Tx Descriptor Arbiter and credits for each traffic class.
136 */ 141 */
137static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, 142s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
138 struct ixgbe_dcb_config *dcb_config) 143 u16 *refill,
144 u16 *max,
145 u8 *bwg_id,
146 u8 *prio_type)
139{ 147{
140 struct tc_bw_alloc *p;
141 u32 reg, max_credits; 148 u32 reg, max_credits;
142 u8 i; 149 u8 i;
143 150
@@ -149,16 +156,15 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
149 156
150 /* Configure traffic class credits and priority */ 157 /* Configure traffic class credits and priority */
151 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 158 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
152 p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; 159 max_credits = max[i];
153 max_credits = dcb_config->tc_config[i].desc_credits_max;
154 reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT; 160 reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
155 reg |= p->data_credits_refill; 161 reg |= refill[i];
156 reg |= (u32)(p->bwg_id) << IXGBE_RTTDT2C_BWG_SHIFT; 162 reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;
157 163
158 if (p->prio_type == prio_group) 164 if (prio_type[i] == prio_group)
159 reg |= IXGBE_RTTDT2C_GSP; 165 reg |= IXGBE_RTTDT2C_GSP;
160 166
161 if (p->prio_type == prio_link) 167 if (prio_type[i] == prio_link)
162 reg |= IXGBE_RTTDT2C_LSP; 168 reg |= IXGBE_RTTDT2C_LSP;
163 169
164 IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg); 170 IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
@@ -177,14 +183,19 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
177/** 183/**
178 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter 184 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
179 * @hw: pointer to hardware structure 185 * @hw: pointer to hardware structure
180 * @dcb_config: pointer to ixgbe_dcb_config structure 186 * @refill: refill credits indexed by traffic class
187 * @max: max credits indexed by traffic class
188 * @bwg_id: bandwidth grouping indexed by traffic class
189 * @prio_type: priority type indexed by traffic class
181 * 190 *
182 * Configure Tx Packet Arbiter and credits for each traffic class. 191 * Configure Tx Packet Arbiter and credits for each traffic class.
183 */ 192 */
184static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, 193s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
185 struct ixgbe_dcb_config *dcb_config) 194 u16 *refill,
195 u16 *max,
196 u8 *bwg_id,
197 u8 *prio_type)
186{ 198{
187 struct tc_bw_alloc *p;
188 u32 reg; 199 u32 reg;
189 u8 i; 200 u8 i;
190 201
@@ -205,15 +216,14 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
205 216
206 /* Configure traffic class credits and priority */ 217 /* Configure traffic class credits and priority */
207 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 218 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
208 p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; 219 reg = refill[i];
209 reg = p->data_credits_refill; 220 reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
210 reg |= (u32)(p->data_credits_max) << IXGBE_RTTPT2C_MCL_SHIFT; 221 reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;
211 reg |= (u32)(p->bwg_id) << IXGBE_RTTPT2C_BWG_SHIFT;
212 222
213 if (p->prio_type == prio_group) 223 if (prio_type[i] == prio_group)
214 reg |= IXGBE_RTTPT2C_GSP; 224 reg |= IXGBE_RTTPT2C_GSP;
215 225
216 if (p->prio_type == prio_link) 226 if (prio_type[i] == prio_link)
217 reg |= IXGBE_RTTPT2C_LSP; 227 reg |= IXGBE_RTTPT2C_LSP;
218 228
219 IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg); 229 IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
@@ -233,17 +243,16 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
233/** 243/**
234 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control 244 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
235 * @hw: pointer to hardware structure 245 * @hw: pointer to hardware structure
236 * @dcb_config: pointer to ixgbe_dcb_config structure 246 * @pfc_en: enabled pfc bitmask
237 * 247 *
238 * Configure Priority Flow Control (PFC) for each traffic class. 248 * Configure Priority Flow Control (PFC) for each traffic class.
239 */ 249 */
240s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, 250s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en)
241 struct ixgbe_dcb_config *dcb_config)
242{ 251{
243 u32 i, reg, rx_pba_size; 252 u32 i, reg, rx_pba_size;
244 253
245 /* If PFC is disabled globally then fall back to LFC. */ 254 /* If PFC is disabled globally then fall back to LFC. */
246 if (!dcb_config->pfc_mode_enable) { 255 if (!pfc_en) {
247 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) 256 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
248 hw->mac.ops.fc_enable(hw, i); 257 hw->mac.ops.fc_enable(hw, i);
249 goto out; 258 goto out;
@@ -251,19 +260,18 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
251 260
252 /* Configure PFC Tx thresholds per TC */ 261 /* Configure PFC Tx thresholds per TC */
253 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 262 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
263 int enabled = pfc_en & (1 << i);
254 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); 264 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
255 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; 265 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
256 266
257 reg = (rx_pba_size - hw->fc.low_water) << 10; 267 reg = (rx_pba_size - hw->fc.low_water) << 10;
258 268
259 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || 269 if (enabled)
260 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
261 reg |= IXGBE_FCRTL_XONE; 270 reg |= IXGBE_FCRTL_XONE;
262 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg); 271 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
263 272
264 reg = (rx_pba_size - hw->fc.high_water) << 10; 273 reg = (rx_pba_size - hw->fc.high_water) << 10;
265 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || 274 if (enabled)
266 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
267 reg |= IXGBE_FCRTH_FCEN; 275 reg |= IXGBE_FCRTH_FCEN;
268 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); 276 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
269 } 277 }
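
pfc_en is now a plain bitmask with one bit per traffic class. Below is a sketch of the kind of helper that derives it from the legacy CEE per-TC settings; the driver's actual ixgbe_dcb_unpack_pfc(), referenced further down in this patch, may differ in detail. It assumes the pfc_disabled enumerator from the CEE dcb_pfc_type enum.

/* Sketch only: collapse per-TC CEE PFC settings into the u8 bitmask
 * that ixgbe_dcb_config_pfc_82599() now consumes. */
static void dcb_unpack_pfc_sketch(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
{
	int tc;

	*pfc_en = 0;
	for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
		if (cfg->tc_config[tc].dcb_pfc != pfc_disabled)
			*pfc_en |= 1 << tc;
}
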
@@ -349,7 +357,6 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
349/** 357/**
350 * ixgbe_dcb_config_82599 - Configure general DCB parameters 358 * ixgbe_dcb_config_82599 - Configure general DCB parameters
351 * @hw: pointer to hardware structure 359 * @hw: pointer to hardware structure
352 * @dcb_config: pointer to ixgbe_dcb_config structure
353 * 360 *
354 * Configure general DCB parameters. 361 * Configure general DCB parameters.
355 */ 362 */
@@ -406,19 +413,27 @@ static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
406/** 413/**
407 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB 414 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
408 * @hw: pointer to hardware structure 415 * @hw: pointer to hardware structure
409 * @dcb_config: pointer to ixgbe_dcb_config structure 416 * @rx_pba: method to distribute packet buffer
417 * @refill: refill credits indexed by traffic class
418 * @max: max credits indexed by traffic class
419 * @bwg_id: bandwidth grouping indexed by traffic class
420 * @prio_type: priority type indexed by traffic class
421 * @pfc_en: enabled pfc bitmask
410 * 422 *
411 * Configure dcb settings and enable dcb mode. 423 * Configure dcb settings and enable dcb mode.
412 */ 424 */
413s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, 425s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
414 struct ixgbe_dcb_config *dcb_config) 426 u8 rx_pba, u8 pfc_en, u16 *refill,
427 u16 *max, u8 *bwg_id, u8 *prio_type)
415{ 428{
416 ixgbe_dcb_config_packet_buffers_82599(hw, dcb_config); 429 ixgbe_dcb_config_packet_buffers_82599(hw, rx_pba);
417 ixgbe_dcb_config_82599(hw); 430 ixgbe_dcb_config_82599(hw);
418 ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config); 431 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, prio_type);
419 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config); 432 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
420 ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config); 433 bwg_id, prio_type);
421 ixgbe_dcb_config_pfc_82599(hw, dcb_config); 434 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
435 bwg_id, prio_type);
436 ixgbe_dcb_config_pfc_82599(hw, pfc_en);
422 ixgbe_dcb_config_tc_stats_82599(hw); 437 ixgbe_dcb_config_tc_stats_82599(hw);
423 438
424 return 0; 439 return 0;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 3841649fb954..5b0ca85614d1 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -102,11 +102,29 @@
102/* DCB hardware-specific driver APIs */ 102/* DCB hardware-specific driver APIs */
103 103
104/* DCB PFC functions */ 104/* DCB PFC functions */
105s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, 105s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en);
106 struct ixgbe_dcb_config *dcb_config);
107 106
108/* DCB hw initialization */ 107/* DCB hw initialization */
108s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
109 u16 *refill,
110 u16 *max,
111 u8 *bwg_id,
112 u8 *prio_type);
113
114s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
115 u16 *refill,
116 u16 *max,
117 u8 *bwg_id,
118 u8 *prio_type);
119
120s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
121 u16 *refill,
122 u16 *max,
123 u8 *bwg_id,
124 u8 *prio_type);
125
109s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, 126s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
110 struct ixgbe_dcb_config *config); 127 u8 rx_pba, u8 pfc_en, u16 *refill,
128 u16 *max, u8 *bwg_id, u8 *prio_type);
111 129
112#endif /* _DCB_82599_CONFIG_H */ 130#endif /* _DCB_82599_CONFIG_H */
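
Exporting the per-block arbiter setters lets a MAC-agnostic wrapper program ETS without carrying a struct ixgbe_dcb_config around. The real ixgbe_dcb_hw_ets_config(), used later in this patch, is not shown here; a plausible sketch of its dispatch, built only from the prototypes declared above:

/* Sketch only -- the in-tree ixgbe_dcb_hw_ets_config() may differ. */
static s32 dcb_hw_ets_config_sketch(struct ixgbe_hw *hw, u16 *refill,
				    u16 *max, u8 *bwg_id, u8 *prio_type)
{
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
		ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
						       bwg_id, prio_type);
		ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
						       bwg_id, prio_type);
		break;
	default:	/* 82599 and newer */
		ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
						  bwg_id, prio_type);
		ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
						       bwg_id, prio_type);
		ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
						       bwg_id, prio_type);
		break;
	}
	return 0;
}
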
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index bf566e8a455e..a977df3fe81b 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -37,7 +37,6 @@
37#define BIT_PG_RX 0x04 37#define BIT_PG_RX 0x04
38#define BIT_PG_TX 0x08 38#define BIT_PG_TX 0x08
39#define BIT_APP_UPCHG 0x10 39#define BIT_APP_UPCHG 0x10
40#define BIT_RESETLINK 0x40
41#define BIT_LINKSPEED 0x80 40#define BIT_LINKSPEED 0x80
42 41
43/* Responses for the DCB_C_SET_ALL command */ 42/* Responses for the DCB_C_SET_ALL command */
@@ -225,10 +224,8 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
225 (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent != 224 (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
226 adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) || 225 adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
227 (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap != 226 (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
228 adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) { 227 adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
229 adapter->dcb_set_bitmap |= BIT_PG_TX; 228 adapter->dcb_set_bitmap |= BIT_PG_TX;
230 adapter->dcb_set_bitmap |= BIT_RESETLINK;
231 }
232} 229}
233 230
234static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, 231static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -239,10 +236,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
239 adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; 236 adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
240 237
241 if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] != 238 if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
242 adapter->dcb_cfg.bw_percentage[0][bwg_id]) { 239 adapter->dcb_cfg.bw_percentage[0][bwg_id])
243 adapter->dcb_set_bitmap |= BIT_PG_TX; 240 adapter->dcb_set_bitmap |= BIT_PG_TX;
244 adapter->dcb_set_bitmap |= BIT_RESETLINK;
245 }
246} 241}
247 242
248static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, 243static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
@@ -269,10 +264,8 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
269 (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent != 264 (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
270 adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) || 265 adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
271 (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap != 266 (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
272 adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) { 267 adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
273 adapter->dcb_set_bitmap |= BIT_PG_RX; 268 adapter->dcb_set_bitmap |= BIT_PG_RX;
274 adapter->dcb_set_bitmap |= BIT_RESETLINK;
275 }
276} 269}
277 270
278static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, 271static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
@@ -283,10 +276,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
283 adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; 276 adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
284 277
285 if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] != 278 if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
286 adapter->dcb_cfg.bw_percentage[1][bwg_id]) { 279 adapter->dcb_cfg.bw_percentage[1][bwg_id])
287 adapter->dcb_set_bitmap |= BIT_PG_RX; 280 adapter->dcb_set_bitmap |= BIT_PG_RX;
288 adapter->dcb_set_bitmap |= BIT_RESETLINK;
289 }
290} 281}
291 282
292static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, 283static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
@@ -365,21 +356,17 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
365 return DCB_NO_HW_CHG; 356 return DCB_NO_HW_CHG;
366 357
367 /* 358 /*
368 * Only take down the adapter if the configuration change 359 * Only take down the adapter if an app change occurred. FCoE
369 * requires a reset. 360 * may shuffle tx rings in this case and this cannot be done
361 * without a reset currently.
370 */ 362 */
371 if (adapter->dcb_set_bitmap & BIT_RESETLINK) { 363 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
372 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 364 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
373 msleep(1); 365 msleep(1);
374 366
375 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { 367 if (netif_running(netdev))
376 if (netif_running(netdev)) 368 netdev->netdev_ops->ndo_stop(netdev);
377 netdev->netdev_ops->ndo_stop(netdev); 369 ixgbe_clear_interrupt_scheme(adapter);
378 ixgbe_clear_interrupt_scheme(adapter);
379 } else {
380 if (netif_running(netdev))
381 ixgbe_down(adapter);
382 }
383 } 370 }
384 371
385 if (adapter->dcb_cfg.pfc_mode_enable) { 372 if (adapter->dcb_cfg.pfc_mode_enable) {
@@ -408,29 +395,51 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
408 } 395 }
409 } 396 }
410 397
411 if (adapter->dcb_set_bitmap & BIT_RESETLINK) { 398 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
412 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { 399 ixgbe_init_interrupt_scheme(adapter);
413 ixgbe_init_interrupt_scheme(adapter); 400 if (netif_running(netdev))
414 if (netif_running(netdev)) 401 netdev->netdev_ops->ndo_open(netdev);
415 netdev->netdev_ops->ndo_open(netdev);
416 } else {
417 if (netif_running(netdev))
418 ixgbe_up(adapter);
419 }
420 ret = DCB_HW_CHG_RST; 402 ret = DCB_HW_CHG_RST;
421 } else if (adapter->dcb_set_bitmap & BIT_PFC) { 403 }
422 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 404
423 ixgbe_dcb_config_pfc_82598(&adapter->hw, 405 if (adapter->dcb_set_bitmap & BIT_PFC) {
424 &adapter->dcb_cfg); 406 u8 pfc_en;
425 else if (adapter->hw.mac.type == ixgbe_mac_82599EB) 407 ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
426 ixgbe_dcb_config_pfc_82599(&adapter->hw, 408 ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en);
427 &adapter->dcb_cfg);
428 ret = DCB_HW_CHG; 409 ret = DCB_HW_CHG;
429 } 410 }
411
412 if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
413 u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
414 u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
415 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
416
417#ifdef CONFIG_FCOE
418 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
419 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
420#endif
421
422 ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
423 max_frame, DCB_TX_CONFIG);
424 ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
425 max_frame, DCB_RX_CONFIG);
426
427 ixgbe_dcb_unpack_refill(&adapter->dcb_cfg,
428 DCB_TX_CONFIG, refill);
429 ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max);
430 ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg,
431 DCB_TX_CONFIG, bwg_id);
432 ixgbe_dcb_unpack_prio(&adapter->dcb_cfg,
433 DCB_TX_CONFIG, prio_type);
434
435 ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
436 bwg_id, prio_type);
437 }
438
430 if (adapter->dcb_cfg.pfc_mode_enable) 439 if (adapter->dcb_cfg.pfc_mode_enable)
431 adapter->hw.fc.current_mode = ixgbe_fc_pfc; 440 adapter->hw.fc.current_mode = ixgbe_fc_pfc;
432 441
433 if (adapter->dcb_set_bitmap & BIT_RESETLINK) 442 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG)
434 clear_bit(__IXGBE_RESETTING, &adapter->state); 443 clear_bit(__IXGBE_RESETTING, &adapter->state);
435 adapter->dcb_set_bitmap = 0x00; 444 adapter->dcb_set_bitmap = 0x00;
436 return ret; 445 return ret;
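
For a sense of scale in the credit calculation above: a standard 1500-byte MTU gives max_frame = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518 bytes, and a netdev with NETIF_F_FCOE_MTU set is bumped to IXGBE_FCOE_JUMBO_FRAME_SIZE instead.

/* Illustrative numbers only; mirrors the computation in the hunk. */
int max_frame = 1500 + ETH_HLEN + ETH_FCS_LEN;	/* == 1518 */
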
@@ -568,18 +577,29 @@ static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
568 case DCB_APP_IDTYPE_ETHTYPE: 577 case DCB_APP_IDTYPE_ETHTYPE:
569#ifdef IXGBE_FCOE 578#ifdef IXGBE_FCOE
570 if (id == ETH_P_FCOE) { 579 if (id == ETH_P_FCOE) {
571 u8 tc; 580 u8 old_tc;
572 struct ixgbe_adapter *adapter; 581 struct ixgbe_adapter *adapter = netdev_priv(netdev);
573 582
574 adapter = netdev_priv(netdev); 583 /* Get current programmed tc */
575 tc = adapter->fcoe.tc; 584 old_tc = adapter->fcoe.tc;
576 rval = ixgbe_fcoe_setapp(adapter, up); 585 rval = ixgbe_fcoe_setapp(adapter, up);
577 if ((!rval) && (tc != adapter->fcoe.tc) && 586
578 (adapter->flags & IXGBE_FLAG_DCB_ENABLED) && 587 if (rval ||
579 (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { 588 !(adapter->flags & IXGBE_FLAG_DCB_ENABLED) ||
589 !(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
590 break;
591
592 /* The FCoE application priority may be changed multiple
593 * times in quick succession with switches that build up
594 * TLVs. To avoid creating unneeded device resets this
595 * checks the actual HW configuration and clears
596 * BIT_APP_UPCHG if a HW configuration change is not
597 * needed.
598 */
599 if (old_tc == adapter->fcoe.tc)
600 adapter->dcb_set_bitmap &= ~BIT_APP_UPCHG;
601 else
580 adapter->dcb_set_bitmap |= BIT_APP_UPCHG; 602 adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
581 adapter->dcb_set_bitmap |= BIT_RESETLINK;
582 }
583 } 603 }
584#endif 604#endif
585 break; 605 break;
@@ -591,7 +611,98 @@ static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
591 return rval; 611 return rval;
592} 612}
593 613
614static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
615 struct ieee_ets *ets)
616{
617 struct ixgbe_adapter *adapter = netdev_priv(dev);
618 struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets;
619
620 /* No IEEE PFC settings available */
621 if (!my_ets)
622 return -EINVAL;
623
624 ets->ets_cap = MAX_TRAFFIC_CLASS;
625 ets->cbs = my_ets->cbs;
626 memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
627 memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
628 memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
629 memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
630 return 0;
631}
632
633static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
634 struct ieee_ets *ets)
635{
636 struct ixgbe_adapter *adapter = netdev_priv(dev);
637 __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
638 int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
639 int err;
640 /* naively give each TC a bwg to map onto CEE hardware */
641 __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7};
642
643 if (!adapter->ixgbe_ieee_ets) {
644 adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets),
645 GFP_KERNEL);
646 if (!adapter->ixgbe_ieee_ets)
647 return -ENOMEM;
648 }
649
650
651 memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets));
652
653 ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame);
654 err = ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
655 bwg_id, ets->tc_tsa);
656 return err;
657}
658
659static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
660 struct ieee_pfc *pfc)
661{
662 struct ixgbe_adapter *adapter = netdev_priv(dev);
663 struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc;
664 int i;
665
666 /* No IEEE PFC settings available */
667 if (!my_pfc)
668 return -EINVAL;
669
670 pfc->pfc_cap = MAX_TRAFFIC_CLASS;
671 pfc->pfc_en = my_pfc->pfc_en;
672 pfc->mbc = my_pfc->mbc;
673 pfc->delay = my_pfc->delay;
674
675 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
676 pfc->requests[i] = adapter->stats.pxoffrxc[i];
677 pfc->indications[i] = adapter->stats.pxofftxc[i];
678 }
679
680 return 0;
681}
682
683static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
684 struct ieee_pfc *pfc)
685{
686 struct ixgbe_adapter *adapter = netdev_priv(dev);
687 int err;
688
689 if (!adapter->ixgbe_ieee_pfc) {
690 adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc),
691 GFP_KERNEL);
692 if (!adapter->ixgbe_ieee_pfc)
693 return -ENOMEM;
694 }
695
696 memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc));
697 err = ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en);
698 return err;
699}
700
594const struct dcbnl_rtnl_ops dcbnl_ops = { 701const struct dcbnl_rtnl_ops dcbnl_ops = {
702 .ieee_getets = ixgbe_dcbnl_ieee_getets,
703 .ieee_setets = ixgbe_dcbnl_ieee_setets,
704 .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc,
705 .ieee_setpfc = ixgbe_dcbnl_ieee_setpfc,
595 .getstate = ixgbe_dcbnl_get_state, 706 .getstate = ixgbe_dcbnl_get_state,
596 .setstate = ixgbe_dcbnl_set_state, 707 .setstate = ixgbe_dcbnl_set_state,
597 .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr, 708 .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr,
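
ixgbe_ieee_credits(), called from ixgbe_dcbnl_ieee_setets() above, lies outside this hunk. As a rough sketch of the shape of that conversion — the quantum and clamping below are assumptions, not the driver's actual constants — each TC receives refill/max credits proportional to its tc_tx_bw percentage, floored so even a 1% class can still refill half of a max-sized frame:

/* Hedged sketch of a bandwidth%-to-credit conversion. */
static void ieee_credits_sketch(const __u8 *bw, __u16 *refill, __u16 *max,
				int max_frame)
{
	int min_credit = DIV_ROUND_UP(max_frame / 2, 64); /* assumed 64B quantum */
	int i;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		int val = bw[i] * min_credit;

		if (val < min_credit)
			val = min_credit;
		refill[i] = val;
		max[i] = val;
	}
}
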
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 2002ea88ca2a..309272f8f103 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -152,7 +152,17 @@ static int ixgbe_get_settings(struct net_device *netdev,
152 ecmd->supported |= (SUPPORTED_1000baseT_Full | 152 ecmd->supported |= (SUPPORTED_1000baseT_Full |
153 SUPPORTED_Autoneg); 153 SUPPORTED_Autoneg);
154 154
155 switch (hw->mac.type) {
156 case ixgbe_mac_X540:
157 ecmd->supported |= SUPPORTED_100baseT_Full;
158 break;
159 default:
160 break;
161 }
162
155 ecmd->advertising = ADVERTISED_Autoneg; 163 ecmd->advertising = ADVERTISED_Autoneg;
164 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
165 ecmd->advertising |= ADVERTISED_100baseT_Full;
156 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) 166 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
157 ecmd->advertising |= ADVERTISED_10000baseT_Full; 167 ecmd->advertising |= ADVERTISED_10000baseT_Full;
158 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) 168 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
@@ -167,6 +177,15 @@ static int ixgbe_get_settings(struct net_device *netdev,
167 ecmd->advertising |= (ADVERTISED_10000baseT_Full | 177 ecmd->advertising |= (ADVERTISED_10000baseT_Full |
168 ADVERTISED_1000baseT_Full); 178 ADVERTISED_1000baseT_Full);
169 179
180 switch (hw->mac.type) {
181 case ixgbe_mac_X540:
182 if (!(ecmd->advertising & ADVERTISED_100baseT_Full))
183 ecmd->advertising |= (ADVERTISED_100baseT_Full);
184 break;
185 default:
186 break;
187 }
188
170 if (hw->phy.media_type == ixgbe_media_type_copper) { 189 if (hw->phy.media_type == ixgbe_media_type_copper) {
171 ecmd->supported |= SUPPORTED_TP; 190 ecmd->supported |= SUPPORTED_TP;
172 ecmd->advertising |= ADVERTISED_TP; 191 ecmd->advertising |= ADVERTISED_TP;
@@ -271,8 +290,19 @@ static int ixgbe_get_settings(struct net_device *netdev,
271 290
272 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 291 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
273 if (link_up) { 292 if (link_up) {
274 ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 293 switch (link_speed) {
275 SPEED_10000 : SPEED_1000; 294 case IXGBE_LINK_SPEED_10GB_FULL:
295 ecmd->speed = SPEED_10000;
296 break;
297 case IXGBE_LINK_SPEED_1GB_FULL:
298 ecmd->speed = SPEED_1000;
299 break;
300 case IXGBE_LINK_SPEED_100_FULL:
301 ecmd->speed = SPEED_100;
302 break;
303 default:
304 break;
305 }
276 ecmd->duplex = DUPLEX_FULL; 306 ecmd->duplex = DUPLEX_FULL;
277 } else { 307 } else {
278 ecmd->speed = -1; 308 ecmd->speed = -1;
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 6342d4859790..c54a88274d51 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -159,13 +159,13 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
159 struct scatterlist *sg; 159 struct scatterlist *sg;
160 unsigned int i, j, dmacount; 160 unsigned int i, j, dmacount;
161 unsigned int len; 161 unsigned int len;
162 static const unsigned int bufflen = 4096; 162 static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
163 unsigned int firstoff = 0; 163 unsigned int firstoff = 0;
164 unsigned int lastsize; 164 unsigned int lastsize;
165 unsigned int thisoff = 0; 165 unsigned int thisoff = 0;
166 unsigned int thislen = 0; 166 unsigned int thislen = 0;
167 u32 fcbuff, fcdmarw, fcfltrw; 167 u32 fcbuff, fcdmarw, fcfltrw;
168 dma_addr_t addr; 168 dma_addr_t addr = 0;
169 169
170 if (!netdev || !sgl) 170 if (!netdev || !sgl)
171 return 0; 171 return 0;
@@ -254,6 +254,24 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
254 /* only the last buffer may have non-full bufflen */ 254 /* only the last buffer may have non-full bufflen */
255 lastsize = thisoff + thislen; 255 lastsize = thisoff + thislen;
256 256
257 /*
258 * lastsize cannot be bufflen.
259 * If it is, append the shared extra buffer so that lastsize becomes 1.
260 */
261 if (lastsize == bufflen) {
262 if (j >= IXGBE_BUFFCNT_MAX) {
263 e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
264 "not enough user buffers. We need an extra "
265 "buffer because lastsize is bufflen.\n",
266 xid, i, j, dmacount, (u64)addr);
267 goto out_noddp_free;
268 }
269
270 ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
271 j++;
272 lastsize = 1;
273 }
274
257 fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); 275 fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
258 fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); 276 fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
259 fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); 277 fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
@@ -532,6 +550,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
532 e_err(drv, "failed to allocate FCoE DDP pool\n"); 550 e_err(drv, "failed to allocate FCoE DDP pool\n");
533 551
534 spin_lock_init(&fcoe->lock); 552 spin_lock_init(&fcoe->lock);
553
554 /* Extra buffer to be shared by all DDPs for HW work around */
555 fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
556 if (fcoe->extra_ddp_buffer == NULL) {
557 e_err(drv, "failed to allocate extra DDP buffer\n");
558 goto out_extra_ddp_buffer_alloc;
559 }
560
561 fcoe->extra_ddp_buffer_dma =
562 dma_map_single(&adapter->pdev->dev,
563 fcoe->extra_ddp_buffer,
564 IXGBE_FCBUFF_MIN,
565 DMA_FROM_DEVICE);
566 if (dma_mapping_error(&adapter->pdev->dev,
567 fcoe->extra_ddp_buffer_dma)) {
568 e_err(drv, "failed to map extra DDP buffer\n");
569 goto out_extra_ddp_buffer_dma;
570 }
535 } 571 }
536 572
537 /* Enable L2 eth type filter for FCoE */ 573 /* Enable L2 eth type filter for FCoE */
@@ -581,6 +617,14 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
581 } 617 }
582 } 618 }
583#endif 619#endif
620
621 return;
622
623out_extra_ddp_buffer_dma:
624 kfree(fcoe->extra_ddp_buffer);
625out_extra_ddp_buffer_alloc:
626 pci_pool_destroy(fcoe->pool);
627 fcoe->pool = NULL;
584} 628}
585 629
586/** 630/**
@@ -600,6 +644,11 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
600 if (fcoe->pool) { 644 if (fcoe->pool) {
601 for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) 645 for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
602 ixgbe_fcoe_ddp_put(adapter->netdev, i); 646 ixgbe_fcoe_ddp_put(adapter->netdev, i);
647 dma_unmap_single(&adapter->pdev->dev,
648 fcoe->extra_ddp_buffer_dma,
649 IXGBE_FCBUFF_MIN,
650 DMA_FROM_DEVICE);
651 kfree(fcoe->extra_ddp_buffer);
603 pci_pool_destroy(fcoe->pool); 652 pci_pool_destroy(fcoe->pool);
604 fcoe->pool = NULL; 653 fcoe->pool = NULL;
605 } 654 }
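
Stripped of FCoE specifics, the allocate/map/unmap choreography added above follows the standard streaming-DMA contract: check every mapping with dma_mapping_error(), and unmap with the same size and direction before freeing the backing memory. Generic shape (function name illustrative):

static void *dma_buffer_setup_sketch(struct device *dev, size_t len,
				     dma_addr_t *handle)
{
	void *buf = kmalloc(len, GFP_ATOMIC);

	if (!buf)
		return NULL;
	*handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *handle)) {
		kfree(buf);
		return NULL;
	}
	return buf;	/* teardown: dma_unmap_single(), then kfree() */
}
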
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index 4bc2c551c8db..65cc8fb14fe7 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -70,6 +70,8 @@ struct ixgbe_fcoe {
70 spinlock_t lock; 70 spinlock_t lock;
71 struct pci_pool *pool; 71 struct pci_pool *pool;
72 struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; 72 struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
73 unsigned char *extra_ddp_buffer;
74 dma_addr_t extra_ddp_buffer_dma;
73}; 75};
74 76
75#endif /* _IXGBE_FCOE_H */ 77#endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 602078b84892..eca762d954c6 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -52,7 +52,7 @@ char ixgbe_driver_name[] = "ixgbe";
52static const char ixgbe_driver_string[] = 52static const char ixgbe_driver_string[] =
53 "Intel(R) 10 Gigabit PCI Express Network Driver"; 53 "Intel(R) 10 Gigabit PCI Express Network Driver";
54 54
55#define DRV_VERSION "3.0.12-k2" 55#define DRV_VERSION "3.2.9-k2"
56const char ixgbe_driver_version[] = DRV_VERSION; 56const char ixgbe_driver_version[] = DRV_VERSION;
57static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; 57static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
58 58
@@ -648,7 +648,7 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
648 * 648 *
649 * Returns : a tc index for use in range 0-7, or 0-3 649 * Returns : a tc index for use in range 0-7, or 0-3
650 */ 650 */
651u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx) 651static u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
652{ 652{
653 int tc = -1; 653 int tc = -1;
654 int dcb_i = adapter->ring_feature[RING_F_DCB].indices; 654 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
@@ -3176,9 +3176,16 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
3176 u32 mhadd, hlreg0; 3176 u32 mhadd, hlreg0;
3177 3177
3178 /* Decide whether to use packet split mode or not */ 3178 /* Decide whether to use packet split mode or not */
3179 /* On by default */
3180 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
3181
3179 /* Do not use packet split if we're in SR-IOV Mode */ 3182 /* Do not use packet split if we're in SR-IOV Mode */
3180 if (!adapter->num_vfs) 3183 if (adapter->num_vfs)
3181 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; 3184 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
3185
3186 /* Disable packet split due to 82599 erratum #45 */
3187 if (hw->mac.type == ixgbe_mac_82599EB)
3188 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
3182 3189
3183 /* Set the RX buffer length according to the mode */ 3190 /* Set the RX buffer length according to the mode */
3184 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 3191 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -3721,7 +3728,8 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
3721 * We need to try and force an autonegotiation 3728 * We need to try and force an autonegotiation
3722 * session, then bring up link. 3729 * session, then bring up link.
3723 */ 3730 */
3724 hw->mac.ops.setup_sfp(hw); 3731 if (hw->mac.ops.setup_sfp)
3732 hw->mac.ops.setup_sfp(hw);
3725 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) 3733 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
3726 schedule_work(&adapter->multispeed_fiber_task); 3734 schedule_work(&adapter->multispeed_fiber_task);
3727 } else { 3735 } else {
@@ -4863,16 +4871,13 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
4863{ 4871{
4864 int q_idx, num_q_vectors; 4872 int q_idx, num_q_vectors;
4865 struct ixgbe_q_vector *q_vector; 4873 struct ixgbe_q_vector *q_vector;
4866 int napi_vectors;
4867 int (*poll)(struct napi_struct *, int); 4874 int (*poll)(struct napi_struct *, int);
4868 4875
4869 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 4876 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4870 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 4877 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
4871 napi_vectors = adapter->num_rx_queues;
4872 poll = &ixgbe_clean_rxtx_many; 4878 poll = &ixgbe_clean_rxtx_many;
4873 } else { 4879 } else {
4874 num_q_vectors = 1; 4880 num_q_vectors = 1;
4875 napi_vectors = 1;
4876 poll = &ixgbe_poll; 4881 poll = &ixgbe_poll;
4877 } 4882 }
4878 4883
@@ -5169,7 +5174,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
5169 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; 5174 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
5170 adapter->dcb_cfg.rx_pba_cfg = pba_equal; 5175 adapter->dcb_cfg.rx_pba_cfg = pba_equal;
5171 adapter->dcb_cfg.pfc_mode_enable = false; 5176 adapter->dcb_cfg.pfc_mode_enable = false;
5172 adapter->dcb_cfg.round_robin_enable = false;
5173 adapter->dcb_set_bitmap = 0x00; 5177 adapter->dcb_set_bitmap = 0x00;
5174 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, 5178 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
5175 adapter->ring_feature[RING_F_DCB].indices); 5179 adapter->ring_feature[RING_F_DCB].indices);
@@ -5606,6 +5610,10 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5606 } 5610 }
5607 5611
5608 ixgbe_clear_interrupt_scheme(adapter); 5612 ixgbe_clear_interrupt_scheme(adapter);
5613#ifdef CONFIG_DCB
5614 kfree(adapter->ixgbe_ieee_pfc);
5615 kfree(adapter->ixgbe_ieee_ets);
5616#endif
5609 5617
5610#ifdef CONFIG_PM 5618#ifdef CONFIG_PM
5611 retval = pci_save_state(pdev); 5619 retval = pci_save_state(pdev);
@@ -5964,7 +5972,8 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
5964 unregister_netdev(adapter->netdev); 5972 unregister_netdev(adapter->netdev);
5965 return; 5973 return;
5966 } 5974 }
5967 hw->mac.ops.setup_sfp(hw); 5975 if (hw->mac.ops.setup_sfp)
5976 hw->mac.ops.setup_sfp(hw);
5968 5977
5969 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) 5978 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
5970 /* This will also work for DA Twinax connections */ 5979 /* This will also work for DA Twinax connections */
@@ -6095,7 +6104,10 @@ static void ixgbe_watchdog_task(struct work_struct *work)
6095 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 6104 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
6096 "10 Gbps" : 6105 "10 Gbps" :
6097 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? 6106 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
6098 "1 Gbps" : "unknown speed")), 6107 "1 Gbps" :
6108 (link_speed == IXGBE_LINK_SPEED_100_FULL ?
6109 "100 Mbps" :
6110 "unknown speed"))),
6099 ((flow_rx && flow_tx) ? "RX/TX" : 6111 ((flow_rx && flow_tx) ? "RX/TX" :
6100 (flow_rx ? "RX" : 6112 (flow_rx ? "RX" :
6101 (flow_tx ? "TX" : "None")))); 6113 (flow_tx ? "TX" : "None"))));
@@ -7700,16 +7712,6 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
7700 7712
7701#endif /* CONFIG_IXGBE_DCA */ 7713#endif /* CONFIG_IXGBE_DCA */
7702 7714
7703/**
7704 * ixgbe_get_hw_dev return device
7705 * used by hardware layer to print debugging information
7706 **/
7707struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw)
7708{
7709 struct ixgbe_adapter *adapter = hw->back;
7710 return adapter->netdev;
7711}
7712
7713module_exit(ixgbe_exit_module); 7715module_exit(ixgbe_exit_module);
7714 7716
7715/* ixgbe_main.c */ 7717/* ixgbe_main.c */
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
index ea82c5a1cd3e..f215c4c296c4 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -437,6 +437,7 @@ out_no_read:
437 return ret_val; 437 return ret_val;
438} 438}
439 439
440#ifdef CONFIG_PCI_IOV
440/** 441/**
441 * ixgbe_init_mbx_params_pf - set initial values for pf mailbox 442 * ixgbe_init_mbx_params_pf - set initial values for pf mailbox
442 * @hw: pointer to the HW structure 443 * @hw: pointer to the HW structure
@@ -465,6 +466,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
465 break; 466 break;
466 } 467 }
467} 468}
469#endif /* CONFIG_PCI_IOV */
468 470
469struct ixgbe_mbx_operations mbx_ops_generic = { 471struct ixgbe_mbx_operations mbx_ops_generic = {
470 .read = ixgbe_read_mbx_pf, 472 .read = ixgbe_read_mbx_pf,
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
index 3df9b1590218..ada0ce32a7a6 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -86,7 +86,9 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
86s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16); 86s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
87s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16); 87s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
88s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); 88s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
89#ifdef CONFIG_PCI_IOV
89void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); 90void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
91#endif /* CONFIG_PCI_IOV */
90 92
91extern struct ixgbe_mbx_operations mbx_ops_generic; 93extern struct ixgbe_mbx_operations mbx_ops_generic;
92 94
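
With the prototype fenced by CONFIG_PCI_IOV, any call site has to sit under the same guard; the expected shape is presumably along these lines (assumed, not quoted from this patch):

#ifdef CONFIG_PCI_IOV
	ixgbe_init_mbx_params_pf(hw);	/* PF mailbox is only used with VFs */
#endif
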
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 47b15738b009..187b3a16ec1f 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -110,12 +110,10 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
110 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); 110 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
111} 111}
112 112
113
114static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) 113static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
115{ 114{
116 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); 115 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
117 vmolr |= (IXGBE_VMOLR_ROMPE | 116 vmolr |= (IXGBE_VMOLR_ROMPE |
118 IXGBE_VMOLR_ROPE |
119 IXGBE_VMOLR_BAM); 117 IXGBE_VMOLR_BAM);
120 if (aupe) 118 if (aupe)
121 vmolr |= IXGBE_VMOLR_AUPE; 119 vmolr |= IXGBE_VMOLR_AUPE;
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
index 3a8923993ce3..f2518b01067d 100644
--- a/drivers/net/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -133,17 +133,17 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
133 } 133 }
134 134
135 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 135 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
136 IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST)); 136 IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit));
137 IXGBE_WRITE_FLUSH(hw); 137 IXGBE_WRITE_FLUSH(hw);
138 138
139 /* Poll for reset bit to self-clear indicating reset is complete */ 139 /* Poll for reset bit to self-clear indicating reset is complete */
140 for (i = 0; i < 10; i++) { 140 for (i = 0; i < 10; i++) {
141 udelay(1); 141 udelay(1);
142 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 142 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
143 if (!(ctrl & IXGBE_CTRL_RST)) 143 if (!(ctrl & reset_bit))
144 break; 144 break;
145 } 145 }
146 if (ctrl & IXGBE_CTRL_RST) { 146 if (ctrl & reset_bit) {
147 status = IXGBE_ERR_RESET_FAILED; 147 status = IXGBE_ERR_RESET_FAILED;
148 hw_dbg(hw, "Reset polling failed to complete.\n"); 148 hw_dbg(hw, "Reset polling failed to complete.\n");
149 } 149 }
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index e97ebef3cf47..5b441b75e138 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -161,6 +161,67 @@ jme_setup_wakeup_frame(struct jme_adapter *jme,
161} 161}
162 162
163static inline void 163static inline void
164jme_mac_rxclk_off(struct jme_adapter *jme)
165{
166 jme->reg_gpreg1 |= GPREG1_RXCLKOFF;
167 jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
168}
169
170static inline void
171jme_mac_rxclk_on(struct jme_adapter *jme)
172{
173 jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF;
174 jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
175}
176
177static inline void
178jme_mac_txclk_off(struct jme_adapter *jme)
179{
180 jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC);
181 jwrite32f(jme, JME_GHC, jme->reg_ghc);
182}
183
184static inline void
185jme_mac_txclk_on(struct jme_adapter *jme)
186{
187 u32 speed = jme->reg_ghc & GHC_SPEED;
188 if (speed == GHC_SPEED_1000M)
189 jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
190 else
191 jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
192 jwrite32f(jme, JME_GHC, jme->reg_ghc);
193}
194
195static inline void
196jme_reset_ghc_speed(struct jme_adapter *jme)
197{
198 jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX);
199 jwrite32f(jme, JME_GHC, jme->reg_ghc);
200}
201
202static inline void
203jme_reset_250A2_workaround(struct jme_adapter *jme)
204{
205 jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
206 GPREG1_RSSPATCH);
207 jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
208}
209
210static inline void
211jme_assert_ghc_reset(struct jme_adapter *jme)
212{
213 jme->reg_ghc |= GHC_SWRST;
214 jwrite32f(jme, JME_GHC, jme->reg_ghc);
215}
216
217static inline void
218jme_clear_ghc_reset(struct jme_adapter *jme)
219{
220 jme->reg_ghc &= ~GHC_SWRST;
221 jwrite32f(jme, JME_GHC, jme->reg_ghc);
222}
223
224static inline void
164jme_reset_mac_processor(struct jme_adapter *jme) 225jme_reset_mac_processor(struct jme_adapter *jme)
165{ 226{
166 static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0}; 227 static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
@@ -168,9 +229,24 @@ jme_reset_mac_processor(struct jme_adapter *jme)
168 u32 gpreg0; 229 u32 gpreg0;
169 int i; 230 int i;
170 231
171 jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST); 232 jme_reset_ghc_speed(jme);
172 udelay(2); 233 jme_reset_250A2_workaround(jme);
173 jwrite32(jme, JME_GHC, jme->reg_ghc); 234
235 jme_mac_rxclk_on(jme);
236 jme_mac_txclk_on(jme);
237 udelay(1);
238 jme_assert_ghc_reset(jme);
239 udelay(1);
240 jme_mac_rxclk_off(jme);
241 jme_mac_txclk_off(jme);
242 udelay(1);
243 jme_clear_ghc_reset(jme);
244 udelay(1);
245 jme_mac_rxclk_on(jme);
246 jme_mac_txclk_on(jme);
247 udelay(1);
248 jme_mac_rxclk_off(jme);
249 jme_mac_txclk_off(jme);
174 250
175 jwrite32(jme, JME_RXDBA_LO, 0x00000000); 251 jwrite32(jme, JME_RXDBA_LO, 0x00000000);
176 jwrite32(jme, JME_RXDBA_HI, 0x00000000); 252 jwrite32(jme, JME_RXDBA_HI, 0x00000000);
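
Read in order, the new reset choreography above is: clear the speed/duplex and 250A2-workaround bits, gate both MAC clocks on, assert GHC_SWRST, gate the clocks off, release the reset, then pulse the clocks one more time, with a 1 us settle between steps. Condensed restatement of the hunk:

jme_mac_rxclk_on(jme);	jme_mac_txclk_on(jme);	udelay(1);
jme_assert_ghc_reset(jme);			udelay(1);
jme_mac_rxclk_off(jme);	jme_mac_txclk_off(jme);	udelay(1);
jme_clear_ghc_reset(jme);			udelay(1);
jme_mac_rxclk_on(jme);	jme_mac_txclk_on(jme);	udelay(1);
jme_mac_rxclk_off(jme);	jme_mac_txclk_off(jme);
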
@@ -190,14 +266,6 @@ jme_reset_mac_processor(struct jme_adapter *jme)
190 else 266 else
191 gpreg0 = GPREG0_DEFAULT; 267 gpreg0 = GPREG0_DEFAULT;
192 jwrite32(jme, JME_GPREG0, gpreg0); 268 jwrite32(jme, JME_GPREG0, gpreg0);
193 jwrite32(jme, JME_GPREG1, GPREG1_DEFAULT);
194}
195
196static inline void
197jme_reset_ghc_speed(struct jme_adapter *jme)
198{
199 jme->reg_ghc &= ~(GHC_SPEED_1000M | GHC_DPX);
200 jwrite32(jme, JME_GHC, jme->reg_ghc);
201} 269}
202 270
203static inline void 271static inline void
@@ -336,13 +404,13 @@ jme_linkstat_from_phy(struct jme_adapter *jme)
336} 404}
337 405
338static inline void 406static inline void
339jme_set_phyfifoa(struct jme_adapter *jme) 407jme_set_phyfifo_5level(struct jme_adapter *jme)
340{ 408{
341 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004); 409 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
342} 410}
343 411
344static inline void 412static inline void
345jme_set_phyfifob(struct jme_adapter *jme) 413jme_set_phyfifo_8level(struct jme_adapter *jme)
346{ 414{
347 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000); 415 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
348} 416}
@@ -351,7 +419,7 @@ static int
351jme_check_link(struct net_device *netdev, int testonly) 419jme_check_link(struct net_device *netdev, int testonly)
352{ 420{
353 struct jme_adapter *jme = netdev_priv(netdev); 421 struct jme_adapter *jme = netdev_priv(netdev);
354 u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr, gpreg1; 422 u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr;
355 char linkmsg[64]; 423 char linkmsg[64];
356 int rc = 0; 424 int rc = 0;
357 425
@@ -414,23 +482,21 @@ jme_check_link(struct net_device *netdev, int testonly)
414 482
415 jme->phylink = phylink; 483 jme->phylink = phylink;
416 484
417 ghc = jme->reg_ghc & ~(GHC_SPEED | GHC_DPX | 485 /*
418 GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE | 486 * The speed/duplex setting of jme->reg_ghc was already cleared
419 GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY); 487 * by jme_reset_mac_processor()
488 */
420 switch (phylink & PHY_LINK_SPEED_MASK) { 489 switch (phylink & PHY_LINK_SPEED_MASK) {
421 case PHY_LINK_SPEED_10M: 490 case PHY_LINK_SPEED_10M:
422 ghc |= GHC_SPEED_10M | 491 jme->reg_ghc |= GHC_SPEED_10M;
423 GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
424 strcat(linkmsg, "10 Mbps, "); 492 strcat(linkmsg, "10 Mbps, ");
425 break; 493 break;
426 case PHY_LINK_SPEED_100M: 494 case PHY_LINK_SPEED_100M:
427 ghc |= GHC_SPEED_100M | 495 jme->reg_ghc |= GHC_SPEED_100M;
428 GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
429 strcat(linkmsg, "100 Mbps, "); 496 strcat(linkmsg, "100 Mbps, ");
430 break; 497 break;
431 case PHY_LINK_SPEED_1000M: 498 case PHY_LINK_SPEED_1000M:
432 ghc |= GHC_SPEED_1000M | 499 jme->reg_ghc |= GHC_SPEED_1000M;
433 GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
434 strcat(linkmsg, "1000 Mbps, "); 500 strcat(linkmsg, "1000 Mbps, ");
435 break; 501 break;
436 default: 502 default:
@@ -439,42 +505,40 @@ jme_check_link(struct net_device *netdev, int testonly)
439 505
440 if (phylink & PHY_LINK_DUPLEX) { 506 if (phylink & PHY_LINK_DUPLEX) {
441 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT); 507 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
442 ghc |= GHC_DPX; 508 jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX);
509 jme->reg_ghc |= GHC_DPX;
443 } else { 510 } else {
444 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT | 511 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
445 TXMCS_BACKOFF | 512 TXMCS_BACKOFF |
446 TXMCS_CARRIERSENSE | 513 TXMCS_CARRIERSENSE |
447 TXMCS_COLLISION); 514 TXMCS_COLLISION);
448 jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN | 515 jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX);
449 ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
450 TXTRHD_TXREN |
451 ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
452 } 516 }
453 517
454 gpreg1 = GPREG1_DEFAULT; 518 jwrite32(jme, JME_GHC, jme->reg_ghc);
519
455 if (is_buggy250(jme->pdev->device, jme->chiprev)) { 520 if (is_buggy250(jme->pdev->device, jme->chiprev)) {
521 jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
522 GPREG1_RSSPATCH);
456 if (!(phylink & PHY_LINK_DUPLEX)) 523 if (!(phylink & PHY_LINK_DUPLEX))
457 gpreg1 |= GPREG1_HALFMODEPATCH; 524 jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH;
458 switch (phylink & PHY_LINK_SPEED_MASK) { 525 switch (phylink & PHY_LINK_SPEED_MASK) {
459 case PHY_LINK_SPEED_10M: 526 case PHY_LINK_SPEED_10M:
460 jme_set_phyfifoa(jme); 527 jme_set_phyfifo_8level(jme);
461 gpreg1 |= GPREG1_RSSPATCH; 528 jme->reg_gpreg1 |= GPREG1_RSSPATCH;
462 break; 529 break;
463 case PHY_LINK_SPEED_100M: 530 case PHY_LINK_SPEED_100M:
464 jme_set_phyfifob(jme); 531 jme_set_phyfifo_5level(jme);
465 gpreg1 |= GPREG1_RSSPATCH; 532 jme->reg_gpreg1 |= GPREG1_RSSPATCH;
466 break; 533 break;
467 case PHY_LINK_SPEED_1000M: 534 case PHY_LINK_SPEED_1000M:
468 jme_set_phyfifoa(jme); 535 jme_set_phyfifo_8level(jme);
469 break; 536 break;
470 default: 537 default:
471 break; 538 break;
472 } 539 }
473 } 540 }
474 541 jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
475 jwrite32(jme, JME_GPREG1, gpreg1);
476 jwrite32(jme, JME_GHC, ghc);
477 jme->reg_ghc = ghc;
478 542
479 strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ? 543 strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
480 "Full-Duplex, " : 544 "Full-Duplex, " :
@@ -613,10 +677,14 @@ jme_enable_tx_engine(struct jme_adapter *jme)
613 * Enable TX Engine 677 * Enable TX Engine
614 */ 678 */
615 wmb(); 679 wmb();
616 jwrite32(jme, JME_TXCS, jme->reg_txcs | 680 jwrite32f(jme, JME_TXCS, jme->reg_txcs |
617 TXCS_SELECT_QUEUE0 | 681 TXCS_SELECT_QUEUE0 |
618 TXCS_ENABLE); 682 TXCS_ENABLE);
619 683
684 /*
685 * Start clock for TX MAC Processor
686 */
687 jme_mac_txclk_on(jme);
620} 688}
621 689
622static inline void 690static inline void
@@ -651,6 +719,11 @@ jme_disable_tx_engine(struct jme_adapter *jme)
651 719
652 if (!i) 720 if (!i)
653 pr_err("Disable TX engine timeout\n"); 721 pr_err("Disable TX engine timeout\n");
722
723 /*
724 * Stop clock for TX MAC Processor
725 */
726 jme_mac_txclk_off(jme);
654} 727}
655 728
656static void 729static void
@@ -825,16 +898,22 @@ jme_enable_rx_engine(struct jme_adapter *jme)
825 /* 898 /*
826 * Setup Unicast Filter 899 * Setup Unicast Filter
827 */ 900 */
901 jme_set_unicastaddr(jme->dev);
828 jme_set_multi(jme->dev); 902 jme_set_multi(jme->dev);
829 903
830 /* 904 /*
831 * Enable RX Engine 905 * Enable RX Engine
832 */ 906 */
833 wmb(); 907 wmb();
834 jwrite32(jme, JME_RXCS, jme->reg_rxcs | 908 jwrite32f(jme, JME_RXCS, jme->reg_rxcs |
835 RXCS_QUEUESEL_Q0 | 909 RXCS_QUEUESEL_Q0 |
836 RXCS_ENABLE | 910 RXCS_ENABLE |
837 RXCS_QST); 911 RXCS_QST);
912
913 /*
914 * Start clock for RX MAC Processor
915 */
916 jme_mac_rxclk_on(jme);
838} 917}
839 918
840static inline void 919static inline void
@@ -871,10 +950,40 @@ jme_disable_rx_engine(struct jme_adapter *jme)
871 if (!i) 950 if (!i)
872 pr_err("Disable RX engine timeout\n"); 951 pr_err("Disable RX engine timeout\n");
873 952
953 /*
954 * Stop clock for RX MAC Processor
955 */
956 jme_mac_rxclk_off(jme);
957}
958
959static u16
960jme_udpsum(struct sk_buff *skb)
961{
962 u16 csum = 0xFFFFu;
963
964 if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
965 return csum;
966 if (skb->protocol != htons(ETH_P_IP))
967 return csum;
968 skb_set_network_header(skb, ETH_HLEN);
969 if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
970 (skb->len < (ETH_HLEN +
971 (ip_hdr(skb)->ihl << 2) +
972 sizeof(struct udphdr)))) {
973 skb_reset_network_header(skb);
974 return csum;
975 }
976 skb_set_transport_header(skb,
977 ETH_HLEN + (ip_hdr(skb)->ihl << 2));
978 csum = udp_hdr(skb)->check;
979 skb_reset_transport_header(skb);
980 skb_reset_network_header(skb);
981
982 return csum;
874} 983}
875 984
876static int 985static int
877jme_rxsum_ok(struct jme_adapter *jme, u16 flags) 986jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb)
878{ 987{
879 if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4))) 988 if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
880 return false; 989 return false;
@@ -887,7 +996,7 @@ jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
887 } 996 }
888 997
889 if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS)) 998 if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
890 == RXWBFLAG_UDPON)) { 999 == RXWBFLAG_UDPON) && jme_udpsum(skb)) {
891 if (flags & RXWBFLAG_IPV4) 1000 if (flags & RXWBFLAG_IPV4)
892 netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n"); 1001 netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
893 return false; 1002 return false;
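jme_udpsum() above exists because an IPv4 UDP checksum field of zero means the sender computed none, yet the hardware still raises its UDP-checksum-error flag for such frames; the driver therefore re-parses the headers and honours the flag only when the datagram really carried a checksum. The same check, modeled as a standalone program over a raw Ethernet II + IPv4 frame (no VLAN tag assumed; return conventions simplified relative to the driver):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define ETH_HLEN 14

/* returns the UDP checksum field, or 0 if the frame is not IPv4/UDP */
static uint16_t frame_udp_csum(const uint8_t *frame, size_t len)
{
	size_t ihl;

	if (len < ETH_HLEN + 20)
		return 0;
	if (frame[12] != 0x08 || frame[13] != 0x00)	/* ETH_P_IP */
		return 0;
	ihl = (frame[ETH_HLEN] & 0x0f) * 4;		/* IPv4 header len */
	if (frame[ETH_HLEN + 9] != 17)			/* IPPROTO_UDP */
		return 0;
	if (len < ETH_HLEN + ihl + 8)
		return 0;
	/* UDP checksum is bytes 6-7 of the UDP header */
	return (uint16_t)(frame[ETH_HLEN + ihl + 6] << 8 |
			  frame[ETH_HLEN + ihl + 7]);
}

int main(void)
{
	uint8_t frame[ETH_HLEN + 20 + 8] = { 0 };

	frame[12] = 0x08; frame[13] = 0x00;	/* IPv4 ethertype */
	frame[ETH_HLEN] = 0x45;			/* version 4, IHL 5 */
	frame[ETH_HLEN + 9] = 17;		/* UDP */
	/* checksum field left at zero: "no checksum" */
	printf("csum=%u -> %s\n", frame_udp_csum(frame, sizeof(frame)),
	       frame_udp_csum(frame, sizeof(frame))
	       ? "trust hardware error flag" : "ignore hardware error flag");
	return 0;
}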
@@ -935,7 +1044,7 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
935 skb_put(skb, framesize); 1044 skb_put(skb, framesize);
936 skb->protocol = eth_type_trans(skb, jme->dev); 1045 skb->protocol = eth_type_trans(skb, jme->dev);
937 1046
938 if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags))) 1047 if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb))
939 skb->ip_summed = CHECKSUM_UNNECESSARY; 1048 skb->ip_summed = CHECKSUM_UNNECESSARY;
940 else 1049 else
941 skb_checksum_none_assert(skb); 1050 skb_checksum_none_assert(skb);
@@ -1207,7 +1316,6 @@ jme_link_change_tasklet(unsigned long arg)
1207 tasklet_disable(&jme->rxempty_task); 1316 tasklet_disable(&jme->rxempty_task);
1208 1317
1209 if (netif_carrier_ok(netdev)) { 1318 if (netif_carrier_ok(netdev)) {
1210 jme_reset_ghc_speed(jme);
1211 jme_disable_rx_engine(jme); 1319 jme_disable_rx_engine(jme);
1212 jme_disable_tx_engine(jme); 1320 jme_disable_tx_engine(jme);
1213 jme_reset_mac_processor(jme); 1321 jme_reset_mac_processor(jme);
@@ -1577,6 +1685,38 @@ jme_free_irq(struct jme_adapter *jme)
1577} 1685}
1578 1686
1579static inline void 1687static inline void
1688jme_new_phy_on(struct jme_adapter *jme)
1689{
1690 u32 reg;
1691
1692 reg = jread32(jme, JME_PHY_PWR);
1693 reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
1694 PHY_PWR_DWN2 | PHY_PWR_CLKSEL);
1695 jwrite32(jme, JME_PHY_PWR, reg);
1696
1697 pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
1698 reg &= ~PE1_GPREG0_PBG;
1699 reg |= PE1_GPREG0_ENBG;
1700 pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
1701}
1702
1703static inline void
1704jme_new_phy_off(struct jme_adapter *jme)
1705{
1706 u32 reg;
1707
1708 reg = jread32(jme, JME_PHY_PWR);
1709 reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
1710 PHY_PWR_DWN2 | PHY_PWR_CLKSEL;
1711 jwrite32(jme, JME_PHY_PWR, reg);
1712
1713 pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
1714 reg &= ~PE1_GPREG0_PBG;
1715 reg |= PE1_GPREG0_PDD3COLD;
1716 pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
1717}
1718
1719static inline void
1580jme_phy_on(struct jme_adapter *jme) 1720jme_phy_on(struct jme_adapter *jme)
1581{ 1721{
1582 u32 bmcr; 1722 u32 bmcr;
@@ -1584,6 +1724,22 @@ jme_phy_on(struct jme_adapter *jme)
1584 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); 1724 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1585 bmcr &= ~BMCR_PDOWN; 1725 bmcr &= ~BMCR_PDOWN;
1586 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); 1726 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1727
1728 if (new_phy_power_ctrl(jme->chip_main_rev))
1729 jme_new_phy_on(jme);
1730}
1731
1732static inline void
1733jme_phy_off(struct jme_adapter *jme)
1734{
1735 u32 bmcr;
1736
1737 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1738 bmcr |= BMCR_PDOWN;
1739 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1740
1741 if (new_phy_power_ctrl(jme->chip_main_rev))
1742 jme_new_phy_off(jme);
1587} 1743}
1588 1744
1589static int 1745static int
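The new jme_phy_off() is a read-modify-write, unlike the one-line version removed further down that stored BMCR_PDOWN alone and so wiped the auto-negotiation, speed, and duplex selection sharing the register. The difference in miniature, with the MDIO access stubbed by a plain variable:

#include <stdint.h>
#include <stdio.h>

#define BMCR_PDOWN    0x0800	/* from <linux/mii.h> */
#define BMCR_ANENABLE 0x1000

static uint16_t bmcr = BMCR_ANENABLE;		/* pretend PHY register */
static uint16_t mdio_read(void)    { return bmcr; }
static void mdio_write(uint16_t v) { bmcr = v; }

int main(void)
{
	uint16_t v = mdio_read();

	mdio_write(v | BMCR_PDOWN);		/* power down, keep ANENABLE */
	printf("bmcr=0x%04x\n", mdio_read());	/* 0x1800: both bits intact */
	return 0;
}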
@@ -1606,12 +1762,11 @@ jme_open(struct net_device *netdev)
1606 1762
1607 jme_start_irq(jme); 1763 jme_start_irq(jme);
1608 1764
1609 if (test_bit(JME_FLAG_SSET, &jme->flags)) { 1765 jme_phy_on(jme);
1610 jme_phy_on(jme); 1766 if (test_bit(JME_FLAG_SSET, &jme->flags))
1611 jme_set_settings(netdev, &jme->old_ecmd); 1767 jme_set_settings(netdev, &jme->old_ecmd);
1612 } else { 1768 else
1613 jme_reset_phy_processor(jme); 1769 jme_reset_phy_processor(jme);
1614 }
1615 1770
1616 jme_reset_link(jme); 1771 jme_reset_link(jme);
1617 1772
@@ -1657,12 +1812,6 @@ jme_wait_link(struct jme_adapter *jme)
1657 } 1812 }
1658} 1813}
1659 1814
1660static inline void
1661jme_phy_off(struct jme_adapter *jme)
1662{
1663 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
1664}
1665
1666static void 1815static void
1667jme_powersave_phy(struct jme_adapter *jme) 1816jme_powersave_phy(struct jme_adapter *jme)
1668{ 1817{
@@ -1696,7 +1845,6 @@ jme_close(struct net_device *netdev)
1696 tasklet_disable(&jme->rxclean_task); 1845 tasklet_disable(&jme->rxclean_task);
1697 tasklet_disable(&jme->rxempty_task); 1846 tasklet_disable(&jme->rxempty_task);
1698 1847
1699 jme_reset_ghc_speed(jme);
1700 jme_disable_rx_engine(jme); 1848 jme_disable_rx_engine(jme);
1701 jme_disable_tx_engine(jme); 1849 jme_disable_tx_engine(jme);
1702 jme_reset_mac_processor(jme); 1850 jme_reset_mac_processor(jme);
@@ -1993,27 +2141,34 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1993 return NETDEV_TX_OK; 2141 return NETDEV_TX_OK;
1994} 2142}
1995 2143
2144static void
2145jme_set_unicastaddr(struct net_device *netdev)
2146{
2147 struct jme_adapter *jme = netdev_priv(netdev);
2148 u32 val;
2149
2150 val = (netdev->dev_addr[3] & 0xff) << 24 |
2151 (netdev->dev_addr[2] & 0xff) << 16 |
2152 (netdev->dev_addr[1] & 0xff) << 8 |
2153 (netdev->dev_addr[0] & 0xff);
2154 jwrite32(jme, JME_RXUMA_LO, val);
2155 val = (netdev->dev_addr[5] & 0xff) << 8 |
2156 (netdev->dev_addr[4] & 0xff);
2157 jwrite32(jme, JME_RXUMA_HI, val);
2158}
2159
1996static int 2160static int
1997jme_set_macaddr(struct net_device *netdev, void *p) 2161jme_set_macaddr(struct net_device *netdev, void *p)
1998{ 2162{
1999 struct jme_adapter *jme = netdev_priv(netdev); 2163 struct jme_adapter *jme = netdev_priv(netdev);
2000 struct sockaddr *addr = p; 2164 struct sockaddr *addr = p;
2001 u32 val;
2002 2165
2003 if (netif_running(netdev)) 2166 if (netif_running(netdev))
2004 return -EBUSY; 2167 return -EBUSY;
2005 2168
2006 spin_lock_bh(&jme->macaddr_lock); 2169 spin_lock_bh(&jme->macaddr_lock);
2007 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2170 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2008 2171 jme_set_unicastaddr(netdev);
2009 val = (addr->sa_data[3] & 0xff) << 24 |
2010 (addr->sa_data[2] & 0xff) << 16 |
2011 (addr->sa_data[1] & 0xff) << 8 |
2012 (addr->sa_data[0] & 0xff);
2013 jwrite32(jme, JME_RXUMA_LO, val);
2014 val = (addr->sa_data[5] & 0xff) << 8 |
2015 (addr->sa_data[4] & 0xff);
2016 jwrite32(jme, JME_RXUMA_HI, val);
2017 spin_unlock_bh(&jme->macaddr_lock); 2172 spin_unlock_bh(&jme->macaddr_lock);
2018 2173
2019 return 0; 2174 return 0;
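Factoring the register programming out into jme_set_unicastaddr() lets jme_enable_rx_engine(), earlier in this diff, restore the unicast filter after a MAC reset as well. The packing itself, byte 0 of the address in the least-significant byte of the low word, shown standalone with an example address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1f, 0xc6, 0x12, 0x34, 0x56 };
	uint32_t lo, hi;

	lo = (uint32_t)mac[3] << 24 | (uint32_t)mac[2] << 16 |
	     (uint32_t)mac[1] << 8  | mac[0];
	hi = (uint32_t)mac[5] << 8 | mac[4];

	printf("RXUMA_LO=0x%08x RXUMA_HI=0x%08x\n", lo, hi);
	/* prints RXUMA_LO=0x12c61f00 RXUMA_HI=0x00005634 */
	return 0;
}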
@@ -2731,6 +2886,8 @@ jme_check_hw_ver(struct jme_adapter *jme)
2731 2886
2732 jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT; 2887 jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
2733 jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT; 2888 jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
2889 jme->chip_main_rev = jme->chiprev & 0xF;
2890 jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF;
2734} 2891}
2735 2892
2736static const struct net_device_ops jme_netdev_ops = { 2893static const struct net_device_ops jme_netdev_ops = {
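Worth noting in jme_check_hw_ver() above: the main revision lives in the low nibble of chiprev and the sub revision in the high nibble, and the main revision feeds the new_phy_power_ctrl() (main rev >= 5) test added to jme.h below. On a concrete value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t chiprev  = 0x23;
	uint8_t main_rev = chiprev & 0xF;		/* 0x3 */
	uint8_t sub_rev  = (chiprev >> 4) & 0xF;	/* 0x2 */

	printf("main=0x%x sub=0x%x new-phy-pwr=%s\n",
	       main_rev, sub_rev, main_rev >= 5 ? "yes" : "no");
	return 0;
}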
@@ -2880,6 +3037,7 @@ jme_init_one(struct pci_dev *pdev,
2880 jme->reg_rxmcs = RXMCS_DEFAULT; 3037 jme->reg_rxmcs = RXMCS_DEFAULT;
2881 jme->reg_txpfc = 0; 3038 jme->reg_txpfc = 0;
2882 jme->reg_pmcs = PMCS_MFEN; 3039 jme->reg_pmcs = PMCS_MFEN;
3040 jme->reg_gpreg1 = GPREG1_DEFAULT;
2883 set_bit(JME_FLAG_TXCSUM, &jme->flags); 3041 set_bit(JME_FLAG_TXCSUM, &jme->flags);
2884 set_bit(JME_FLAG_TSO, &jme->flags); 3042 set_bit(JME_FLAG_TSO, &jme->flags);
2885 3043
@@ -2936,8 +3094,8 @@ jme_init_one(struct pci_dev *pdev,
2936 jme->mii_if.mdio_write = jme_mdio_write; 3094 jme->mii_if.mdio_write = jme_mdio_write;
2937 3095
2938 jme_clear_pm(jme); 3096 jme_clear_pm(jme);
2939 jme_set_phyfifoa(jme); 3097 jme_set_phyfifo_5level(jme);
2940 pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->rev); 3098 pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->pcirev);
2941 if (!jme->fpgaver) 3099 if (!jme->fpgaver)
2942 jme_phy_init(jme); 3100 jme_phy_init(jme);
2943 jme_phy_off(jme); 3101 jme_phy_off(jme);
@@ -2964,14 +3122,14 @@ jme_init_one(struct pci_dev *pdev,
2964 goto err_out_unmap; 3122 goto err_out_unmap;
2965 } 3123 }
2966 3124
2967 netif_info(jme, probe, jme->dev, "%s%s ver:%x rev:%x macaddr:%pM\n", 3125 netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n",
2968 (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ? 3126 (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
2969 "JMC250 Gigabit Ethernet" : 3127 "JMC250 Gigabit Ethernet" :
2970 (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ? 3128 (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
2971 "JMC260 Fast Ethernet" : "Unknown", 3129 "JMC260 Fast Ethernet" : "Unknown",
2972 (jme->fpgaver != 0) ? " (FPGA)" : "", 3130 (jme->fpgaver != 0) ? " (FPGA)" : "",
2973 (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev, 3131 (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
2974 jme->rev, netdev->dev_addr); 3132 jme->pcirev, netdev->dev_addr);
2975 3133
2976 return 0; 3134 return 0;
2977 3135
@@ -3035,7 +3193,6 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state)
3035 jme_polling_mode(jme); 3193 jme_polling_mode(jme);
3036 3194
3037 jme_stop_pcc_timer(jme); 3195 jme_stop_pcc_timer(jme);
3038 jme_reset_ghc_speed(jme);
3039 jme_disable_rx_engine(jme); 3196 jme_disable_rx_engine(jme);
3040 jme_disable_tx_engine(jme); 3197 jme_disable_tx_engine(jme);
3041 jme_reset_mac_processor(jme); 3198 jme_reset_mac_processor(jme);
@@ -3066,12 +3223,11 @@ jme_resume(struct pci_dev *pdev)
3066 jme_clear_pm(jme); 3223 jme_clear_pm(jme);
3067 pci_restore_state(pdev); 3224 pci_restore_state(pdev);
3068 3225
3069 if (test_bit(JME_FLAG_SSET, &jme->flags)) { 3226 jme_phy_on(jme);
3070 jme_phy_on(jme); 3227 if (test_bit(JME_FLAG_SSET, &jme->flags))
3071 jme_set_settings(netdev, &jme->old_ecmd); 3228 jme_set_settings(netdev, &jme->old_ecmd);
3072 } else { 3229 else
3073 jme_reset_phy_processor(jme); 3230 jme_reset_phy_processor(jme);
3074 }
3075 3231
3076 jme_start_irq(jme); 3232 jme_start_irq(jme);
3077 netif_device_attach(netdev); 3233 netif_device_attach(netdev);
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index eac09264bf2a..8bf30451e821 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -26,7 +26,7 @@
26#define __JME_H_INCLUDED__ 26#define __JME_H_INCLUDED__
27 27
28#define DRV_NAME "jme" 28#define DRV_NAME "jme"
29#define DRV_VERSION "1.0.7" 29#define DRV_VERSION "1.0.8"
30#define PFX DRV_NAME ": " 30#define PFX DRV_NAME ": "
31 31
32#define PCI_DEVICE_ID_JMICRON_JMC250 0x0250 32#define PCI_DEVICE_ID_JMICRON_JMC250 0x0250
@@ -103,6 +103,37 @@ enum jme_spi_op_bits {
103#define HALF_US 500 /* 500 ns */ 103#define HALF_US 500 /* 500 ns */
104#define JMESPIIOCTL SIOCDEVPRIVATE 104#define JMESPIIOCTL SIOCDEVPRIVATE
105 105
106#define PCI_PRIV_PE1 0xE4
107
108enum pci_priv_pe1_bit_masks {
109 PE1_ASPMSUPRT = 0x00000003, /*
110 * RW:
111 * Aspm_support[1:0]
112 * (R/W Port of 5C[11:10])
113 */
114 PE1_MULTIFUN = 0x00000004, /* RW: Multi_fun_bit */
115 PE1_RDYDMA = 0x00000008, /* RO: ~link.rdy_for_dma */
116 PE1_ASPMOPTL = 0x00000030, /* RW: link.rx10s_option[1:0] */
117 PE1_ASPMOPTH = 0x000000C0, /* RW: 10_req=[3]?HW:[2] */
118 PE1_GPREG0 = 0x0000FF00, /*
119 * SRW:
120 * Cfg_gp_reg0
121 * [7:6] phy_giga BG control
122 * [5] CREQ_N as CREQ_N1 (CPPE# as CREQ#)
123 * [4:0] Reserved
124 */
125 PE1_GPREG0_PBG = 0x0000C000, /* phy_giga BG control */
126 PE1_GPREG1 = 0x00FF0000, /* RW: Cfg_gp_reg1 */
127 PE1_REVID = 0xFF000000, /* RO: Rev ID */
128};
129
130enum pci_priv_pe1_values {
131 PE1_GPREG0_ENBG = 0x00000000, /* en BG */
132 PE1_GPREG0_PDD3COLD = 0x00004000, /* giga_PD + d3cold */
133 PE1_GPREG0_PDPCIESD = 0x00008000, /* giga_PD + pcie_shutdown */
134 PE1_GPREG0_PDPCIEIDDQ = 0x0000C000, /* giga_PD + pcie_iddq */
135};
136
106/* 137/*
107 * Dynamic(adaptive)/Static PCC values 138 * Dynamic(adaptive)/Static PCC values
108 */ 139 */
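These masks and values pair up as field and setting: jme_new_phy_on() and jme_new_phy_off(), earlier in the diff, clear the whole PE1_GPREG0_PBG field with its mask and then OR in one of the enum values. The intended usage, with the PCI config-space access reduced to a plain variable:

#include <stdint.h>
#include <stdio.h>

#define PE1_GPREG0_PBG      0x0000C000u	/* phy_giga BG control field */
#define PE1_GPREG0_ENBG     0x00000000u
#define PE1_GPREG0_PDD3COLD 0x00004000u

int main(void)
{
	uint32_t pe1 = 0x0000C000;	/* pretend config dword at 0xE4 */

	pe1 &= ~PE1_GPREG0_PBG;		/* clear the 2-bit field */
	pe1 |= PE1_GPREG0_PDD3COLD;	/* select giga_PD + d3cold */
	printf("PE1=0x%08x\n", pe1);	/* 0x00004000 */
	return 0;
}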
@@ -403,6 +434,7 @@ struct jme_adapter {
403 u32 reg_rxmcs; 434 u32 reg_rxmcs;
404 u32 reg_ghc; 435 u32 reg_ghc;
405 u32 reg_pmcs; 436 u32 reg_pmcs;
437 u32 reg_gpreg1;
406 u32 phylink; 438 u32 phylink;
407 u32 tx_ring_size; 439 u32 tx_ring_size;
408 u32 tx_ring_mask; 440 u32 tx_ring_mask;
@@ -411,8 +443,10 @@ struct jme_adapter {
411 u32 rx_ring_mask; 443 u32 rx_ring_mask;
412 u8 mrrs; 444 u8 mrrs;
413 unsigned int fpgaver; 445 unsigned int fpgaver;
414 unsigned int chiprev; 446 u8 chiprev;
415 u8 rev; 447 u8 chip_main_rev;
448 u8 chip_sub_rev;
449 u8 pcirev;
416 u32 msg_enable; 450 u32 msg_enable;
417 struct ethtool_cmd old_ecmd; 451 struct ethtool_cmd old_ecmd;
418 unsigned int old_mtu; 452 unsigned int old_mtu;
@@ -497,6 +531,7 @@ enum jme_iomap_regs {
497 JME_PMCS = JME_MAC | 0x60, /* Power Management Control/Stat */ 531 JME_PMCS = JME_MAC | 0x60, /* Power Management Control/Stat */
498 532
499 533
534 JME_PHY_PWR = JME_PHY | 0x24, /* New PHY Power Ctrl Register */
500 JME_PHY_CS = JME_PHY | 0x28, /* PHY Ctrl and Status Register */ 535 JME_PHY_CS = JME_PHY | 0x28, /* PHY Ctrl and Status Register */
501 JME_PHY_LINK = JME_PHY | 0x30, /* PHY Link Status Register */ 536 JME_PHY_LINK = JME_PHY | 0x30, /* PHY Link Status Register */
502 JME_SMBCSR = JME_PHY | 0x40, /* SMB Control and Status */ 537 JME_SMBCSR = JME_PHY | 0x40, /* SMB Control and Status */
@@ -624,6 +659,14 @@ enum jme_txtrhd_shifts {
624 TXTRHD_TXRL_SHIFT = 0, 659 TXTRHD_TXRL_SHIFT = 0,
625}; 660};
626 661
662enum jme_txtrhd_values {
663 TXTRHD_FULLDUPLEX = 0x00000000,
664 TXTRHD_HALFDUPLEX = TXTRHD_TXPEN |
665 ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
666 TXTRHD_TXREN |
667 ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL),
668};
669
627/* 670/*
628 * RX Control/Status Bits 671 * RX Control/Status Bits
629 */ 672 */
@@ -779,6 +822,8 @@ static inline u32 smi_phy_addr(int x)
779 */ 822 */
780enum jme_ghc_bit_mask { 823enum jme_ghc_bit_mask {
781 GHC_SWRST = 0x40000000, 824 GHC_SWRST = 0x40000000,
825 GHC_TO_CLK_SRC = 0x00C00000,
826 GHC_TXMAC_CLK_SRC = 0x00300000,
782 GHC_DPX = 0x00000040, 827 GHC_DPX = 0x00000040,
783 GHC_SPEED = 0x00000030, 828 GHC_SPEED = 0x00000030,
784 GHC_LINK_POLL = 0x00000001, 829 GHC_LINK_POLL = 0x00000001,
@@ -833,6 +878,21 @@ enum jme_pmcs_bit_masks {
833}; 878};
834 879
835/* 880/*
881 * New PHY Power Control Register
882 */
883enum jme_phy_pwr_bit_masks {
884 PHY_PWR_DWN1SEL = 0x01000000, /* Phy_giga.p_PWR_DOWN1_SEL */
885 PHY_PWR_DWN1SW = 0x02000000, /* Phy_giga.p_PWR_DOWN1_SW */
886 PHY_PWR_DWN2 = 0x04000000, /* Phy_giga.p_PWR_DOWN2 */
887 PHY_PWR_CLKSEL = 0x08000000, /*
888 * XTL_OUT Clock select
889 * (an internal free-running clock)
890 * 0: xtl_out = phy_giga.A_XTL25_O
891 * 1: xtl_out = phy_giga.PD_OSC
892 */
893};
894
895/*
836 * Giga PHY Status Registers 896 * Giga PHY Status Registers
837 */ 897 */
838enum jme_phy_link_bit_mask { 898enum jme_phy_link_bit_mask {
@@ -942,18 +1002,17 @@ enum jme_gpreg0_vals {
942 1002
943/* 1003/*
944 * General Purpose REG-1 1004 * General Purpose REG-1
945 * Note: All theses bits defined here are for
946 * Chip mode revision 0x11 only
947 */ 1005 */
948enum jme_gpreg1_masks { 1006enum jme_gpreg1_bit_masks {
1007 GPREG1_RXCLKOFF = 0x04000000,
1008 GPREG1_PCREQN = 0x00020000,
1009 GPREG1_HALFMODEPATCH = 0x00000040, /* For Chip revision 0x11 only */
1010 GPREG1_RSSPATCH = 0x00000020, /* For Chip revision 0x11 only */
949 GPREG1_INTRDELAYUNIT = 0x00000018, 1011 GPREG1_INTRDELAYUNIT = 0x00000018,
950 GPREG1_INTRDELAYENABLE = 0x00000007, 1012 GPREG1_INTRDELAYENABLE = 0x00000007,
951}; 1013};
952 1014
953enum jme_gpreg1_vals { 1015enum jme_gpreg1_vals {
954 GPREG1_RSSPATCH = 0x00000040,
955 GPREG1_HALFMODEPATCH = 0x00000020,
956
957 GPREG1_INTDLYUNIT_16NS = 0x00000000, 1016 GPREG1_INTDLYUNIT_16NS = 0x00000000,
958 GPREG1_INTDLYUNIT_256NS = 0x00000008, 1017 GPREG1_INTDLYUNIT_256NS = 0x00000008,
959 GPREG1_INTDLYUNIT_1US = 0x00000010, 1018 GPREG1_INTDLYUNIT_1US = 0x00000010,
@@ -967,7 +1026,7 @@ enum jme_gpreg1_vals {
967 GPREG1_INTDLYEN_6U = 0x00000006, 1026 GPREG1_INTDLYEN_6U = 0x00000006,
968 GPREG1_INTDLYEN_7U = 0x00000007, 1027 GPREG1_INTDLYEN_7U = 0x00000007,
969 1028
970 GPREG1_DEFAULT = 0x00000000, 1029 GPREG1_DEFAULT = GPREG1_PCREQN,
971}; 1030};
972 1031
973/* 1032/*
@@ -1184,16 +1243,22 @@ enum jme_phy_reg17_vals {
1184/* 1243/*
1185 * Workaround 1244 * Workaround
1186 */ 1245 */
1187static inline int is_buggy250(unsigned short device, unsigned int chiprev) 1246static inline int is_buggy250(unsigned short device, u8 chiprev)
1188{ 1247{
1189 return device == PCI_DEVICE_ID_JMICRON_JMC250 && chiprev == 0x11; 1248 return device == PCI_DEVICE_ID_JMICRON_JMC250 && chiprev == 0x11;
1190} 1249}
1191 1250
1251static inline int new_phy_power_ctrl(u8 chip_main_rev)
1252{
1253 return chip_main_rev >= 5;
1254}
1255
1192/* 1256/*
1193 * Function prototypes 1257 * Function prototypes
1194 */ 1258 */
1195static int jme_set_settings(struct net_device *netdev, 1259static int jme_set_settings(struct net_device *netdev,
1196 struct ethtool_cmd *ecmd); 1260 struct ethtool_cmd *ecmd);
1261static void jme_set_unicastaddr(struct net_device *netdev);
1197static void jme_set_multi(struct net_device *netdev); 1262static void jme_set_multi(struct net_device *netdev);
1198 1263
1199#endif 1264#endif
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 2d9663a1c54d..ea0dc451da9c 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -129,10 +129,6 @@ static u32 always_on(struct net_device *dev)
129 129
130static const struct ethtool_ops loopback_ethtool_ops = { 130static const struct ethtool_ops loopback_ethtool_ops = {
131 .get_link = always_on, 131 .get_link = always_on,
132 .set_tso = ethtool_op_set_tso,
133 .get_tx_csum = always_on,
134 .get_sg = always_on,
135 .get_rx_csum = always_on,
136}; 132};
137 133
138static int loopback_dev_init(struct net_device *dev) 134static int loopback_dev_init(struct net_device *dev)
@@ -169,9 +165,12 @@ static void loopback_setup(struct net_device *dev)
169 dev->type = ARPHRD_LOOPBACK; /* 0x0001*/ 165 dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
170 dev->flags = IFF_LOOPBACK; 166 dev->flags = IFF_LOOPBACK;
171 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 167 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
168 dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO;
172 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST 169 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
173 | NETIF_F_TSO 170 | NETIF_F_ALL_TSO
171 | NETIF_F_UFO
174 | NETIF_F_NO_CSUM 172 | NETIF_F_NO_CSUM
173 | NETIF_F_RXCSUM
175 | NETIF_F_HIGHDMA 174 | NETIF_F_HIGHDMA
176 | NETIF_F_LLTX 175 | NETIF_F_LLTX
177 | NETIF_F_NETNS_LOCAL; 176 | NETIF_F_NETNS_LOCAL;
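The loopback conversion moves TSO/UFO into hw_features, the mask of what ethtool may toggle, while features keeps the currently-enabled set (NETIF_F_NO_CSUM and friends stay fixed). A toy model of that split, with arbitrary flag values rather than the kernel's:

#include <stdint.h>
#include <stdio.h>

#define F_SG  0x01u
#define F_TSO 0x02u
#define F_UFO 0x04u

struct dev {
	uint32_t hw_features;	/* togglable via ethtool */
	uint32_t features;	/* currently enabled */
};

static void set_features(struct dev *d, uint32_t wanted)
{
	/* bits outside hw_features are forced to their current state */
	uint32_t fixed = d->features & ~d->hw_features;

	d->features = (wanted & d->hw_features) | fixed;
}

int main(void)
{
	struct dev lo = { .hw_features = F_TSO | F_UFO,
			  .features = F_SG | F_TSO | F_UFO };

	set_features(&lo, 0);			/* try to turn all off */
	printf("features=0x%x\n", lo.features);	/* F_SG stays on */
	return 0;
}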
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 5933621ac3ff..2300e4599520 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -39,7 +39,7 @@ struct macvtap_queue {
39 struct socket sock; 39 struct socket sock;
40 struct socket_wq wq; 40 struct socket_wq wq;
41 int vnet_hdr_sz; 41 int vnet_hdr_sz;
42 struct macvlan_dev *vlan; 42 struct macvlan_dev __rcu *vlan;
43 struct file *file; 43 struct file *file;
44 unsigned int flags; 44 unsigned int flags;
45}; 45};
@@ -141,7 +141,8 @@ static void macvtap_put_queue(struct macvtap_queue *q)
141 struct macvlan_dev *vlan; 141 struct macvlan_dev *vlan;
142 142
143 spin_lock(&macvtap_lock); 143 spin_lock(&macvtap_lock);
144 vlan = rcu_dereference(q->vlan); 144 vlan = rcu_dereference_protected(q->vlan,
145 lockdep_is_held(&macvtap_lock));
145 if (vlan) { 146 if (vlan) {
146 int index = get_slot(vlan, q); 147 int index = get_slot(vlan, q);
147 148
@@ -219,7 +220,8 @@ static void macvtap_del_queues(struct net_device *dev)
219 /* macvtap_put_queue can free some slots, so go through all slots */ 220 /* macvtap_put_queue can free some slots, so go through all slots */
220 spin_lock(&macvtap_lock); 221 spin_lock(&macvtap_lock);
221 for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) { 222 for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) {
222 q = rcu_dereference(vlan->taps[i]); 223 q = rcu_dereference_protected(vlan->taps[i],
224 lockdep_is_held(&macvtap_lock));
223 if (q) { 225 if (q) {
224 qlist[j++] = q; 226 qlist[j++] = q;
225 rcu_assign_pointer(vlan->taps[i], NULL); 227 rcu_assign_pointer(vlan->taps[i], NULL);
@@ -569,7 +571,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
569 } 571 }
570 572
571 rcu_read_lock_bh(); 573 rcu_read_lock_bh();
572 vlan = rcu_dereference(q->vlan); 574 vlan = rcu_dereference_bh(q->vlan);
573 if (vlan) 575 if (vlan)
574 macvlan_start_xmit(skb, vlan->dev); 576 macvlan_start_xmit(skb, vlan->dev);
575 else 577 else
@@ -583,7 +585,7 @@ err_kfree:
583 585
584err: 586err:
585 rcu_read_lock_bh(); 587 rcu_read_lock_bh();
586 vlan = rcu_dereference(q->vlan); 588 vlan = rcu_dereference_bh(q->vlan);
587 if (vlan) 589 if (vlan)
588 vlan->dev->stats.tx_dropped++; 590 vlan->dev->stats.tx_dropped++;
589 rcu_read_unlock_bh(); 591 rcu_read_unlock_bh();
@@ -631,7 +633,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
631 ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len); 633 ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
632 634
633 rcu_read_lock_bh(); 635 rcu_read_lock_bh();
634 vlan = rcu_dereference(q->vlan); 636 vlan = rcu_dereference_bh(q->vlan);
635 if (vlan) 637 if (vlan)
636 macvlan_count_rx(vlan, len, ret == 0, 0); 638 macvlan_count_rx(vlan, len, ret == 0, 0);
637 rcu_read_unlock_bh(); 639 rcu_read_unlock_bh();
@@ -727,7 +729,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
727 729
728 case TUNGETIFF: 730 case TUNGETIFF:
729 rcu_read_lock_bh(); 731 rcu_read_lock_bh();
730 vlan = rcu_dereference(q->vlan); 732 vlan = rcu_dereference_bh(q->vlan);
731 if (vlan) 733 if (vlan)
732 dev_hold(vlan->dev); 734 dev_hold(vlan->dev);
733 rcu_read_unlock_bh(); 735 rcu_read_unlock_bh();
@@ -736,7 +738,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
736 return -ENOLINK; 738 return -ENOLINK;
737 739
738 ret = 0; 740 ret = 0;
739 if (copy_to_user(&ifr->ifr_name, q->vlan->dev->name, IFNAMSIZ) || 741 if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
740 put_user(q->flags, &ifr->ifr_flags)) 742 put_user(q->flags, &ifr->ifr_flags))
741 ret = -EFAULT; 743 ret = -EFAULT;
742 dev_put(vlan->dev); 744 dev_put(vlan->dev);
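The macvtap changes are annotation-driven: marking the pointer __rcu lets sparse verify that each access path uses the matching accessor, rcu_dereference_protected() where macvtap_lock is held and rcu_dereference_bh() inside the BH read-side sections. A generic kernel-style sketch of the pattern (not macvtap's structures, and not verified against any particular tree):

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item { int v; };

static DEFINE_SPINLOCK(item_lock);
static struct item __rcu *cur_item;

/* writer path: the lock, not a read-side section, protects the load */
static void replace_item(struct item *newi)
{
	struct item *old;

	spin_lock(&item_lock);
	old = rcu_dereference_protected(cur_item,
					lockdep_is_held(&item_lock));
	rcu_assign_pointer(cur_item, newi);
	spin_unlock(&item_lock);
	if (old) {
		synchronize_rcu_bh();	/* wait out the BH readers */
		kfree(old);
	}
}

/* reader path: matches the rcu_read_lock_bh() sections in the hunk */
static int read_item(void)
{
	struct item *it;
	int v = -1;

	rcu_read_lock_bh();
	it = rcu_dereference_bh(cur_item);
	if (it)
		v = it->v;
	rcu_read_unlock_bh();
	return v;
}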
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 4ffdc18fcb8a..2765a3ce9c24 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1286,6 +1286,21 @@ static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
1286 { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/ 1286 { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
1287 { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ 1287 { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
1288 { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */ 1288 { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
1289 { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
1290 { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
1291 { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
1292 { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
1293 { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
1294 { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
1295 { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
1296 { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
1297 { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
1298 { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
1299 { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
1300 { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
1301 { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
1302 { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
1303 { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
1289 { 0, } 1304 { 0, }
1290}; 1305};
1291 1306
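Each of the new IDs relies on PCI_VDEVICE(), which, from its definition in <linux/pci.h>, fills in an exact vendor/device match and wildcards the subsystem IDs. One entry written out long-hand would look roughly like:

#include <linux/pci.h>

static const struct pci_device_id connectx3_entry = {
	.vendor    = PCI_VENDOR_ID_MELLANOX,
	.device    = 0x1003,		/* MT27500 Family [ConnectX-3] */
	.subvendor = PCI_ANY_ID,	/* any subsystem vendor */
	.subdevice = PCI_ANY_ID,	/* any subsystem device */
};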
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index ea5cfe2c3a04..a7f2eed9a08a 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -253,7 +253,7 @@ struct myri10ge_priv {
253 unsigned long serial_number; 253 unsigned long serial_number;
254 int vendor_specific_offset; 254 int vendor_specific_offset;
255 int fw_multicast_support; 255 int fw_multicast_support;
256 unsigned long features; 256 u32 features;
257 u32 max_tso6; 257 u32 max_tso6;
258 u32 read_dma; 258 u32 read_dma;
259 u32 write_dma; 259 u32 write_dma;
@@ -1776,7 +1776,7 @@ static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled)
1776static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled) 1776static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled)
1777{ 1777{
1778 struct myri10ge_priv *mgp = netdev_priv(netdev); 1778 struct myri10ge_priv *mgp = netdev_priv(netdev);
1779 unsigned long flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO); 1779 u32 flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);
1780 1780
1781 if (tso_enabled) 1781 if (tso_enabled)
1782 netdev->features |= flags; 1782 netdev->features |= flags;
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 2541321bad82..9fb59d3f9c92 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -4489,6 +4489,9 @@ static int niu_alloc_channels(struct niu *np)
4489{ 4489{
4490 struct niu_parent *parent = np->parent; 4490 struct niu_parent *parent = np->parent;
4491 int first_rx_channel, first_tx_channel; 4491 int first_rx_channel, first_tx_channel;
4492 int num_rx_rings, num_tx_rings;
4493 struct rx_ring_info *rx_rings;
4494 struct tx_ring_info *tx_rings;
4492 int i, port, err; 4495 int i, port, err;
4493 4496
4494 port = np->port; 4497 port = np->port;
@@ -4498,18 +4501,21 @@ static int niu_alloc_channels(struct niu *np)
4498 first_tx_channel += parent->txchan_per_port[i]; 4501 first_tx_channel += parent->txchan_per_port[i];
4499 } 4502 }
4500 4503
4501 np->num_rx_rings = parent->rxchan_per_port[port]; 4504 num_rx_rings = parent->rxchan_per_port[port];
4502 np->num_tx_rings = parent->txchan_per_port[port]; 4505 num_tx_rings = parent->txchan_per_port[port];
4503 4506
4504 netif_set_real_num_rx_queues(np->dev, np->num_rx_rings); 4507 rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
4505 netif_set_real_num_tx_queues(np->dev, np->num_tx_rings); 4508 GFP_KERNEL);
4506
4507 np->rx_rings = kcalloc(np->num_rx_rings, sizeof(struct rx_ring_info),
4508 GFP_KERNEL);
4509 err = -ENOMEM; 4509 err = -ENOMEM;
4510 if (!np->rx_rings) 4510 if (!rx_rings)
4511 goto out_err; 4511 goto out_err;
4512 4512
4513 np->num_rx_rings = num_rx_rings;
4514 smp_wmb();
4515 np->rx_rings = rx_rings;
4516
4517 netif_set_real_num_rx_queues(np->dev, num_rx_rings);
4518
4513 for (i = 0; i < np->num_rx_rings; i++) { 4519 for (i = 0; i < np->num_rx_rings; i++) {
4514 struct rx_ring_info *rp = &np->rx_rings[i]; 4520 struct rx_ring_info *rp = &np->rx_rings[i];
4515 4521
@@ -4538,12 +4544,18 @@ static int niu_alloc_channels(struct niu *np)
4538 return err; 4544 return err;
4539 } 4545 }
4540 4546
4541 np->tx_rings = kcalloc(np->num_tx_rings, sizeof(struct tx_ring_info), 4547 tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
4542 GFP_KERNEL); 4548 GFP_KERNEL);
4543 err = -ENOMEM; 4549 err = -ENOMEM;
4544 if (!np->tx_rings) 4550 if (!tx_rings)
4545 goto out_err; 4551 goto out_err;
4546 4552
4553 np->num_tx_rings = num_tx_rings;
4554 smp_wmb();
4555 np->tx_rings = tx_rings;
4556
4557 netif_set_real_num_tx_queues(np->dev, num_tx_rings);
4558
4547 for (i = 0; i < np->num_tx_rings; i++) { 4559 for (i = 0; i < np->num_tx_rings; i++) {
4548 struct tx_ring_info *rp = &np->tx_rings[i]; 4560 struct tx_ring_info *rp = &np->tx_rings[i];
4549 4561
@@ -6246,11 +6258,17 @@ static void niu_sync_mac_stats(struct niu *np)
6246static void niu_get_rx_stats(struct niu *np) 6258static void niu_get_rx_stats(struct niu *np)
6247{ 6259{
6248 unsigned long pkts, dropped, errors, bytes; 6260 unsigned long pkts, dropped, errors, bytes;
6261 struct rx_ring_info *rx_rings;
6249 int i; 6262 int i;
6250 6263
6251 pkts = dropped = errors = bytes = 0; 6264 pkts = dropped = errors = bytes = 0;
6265
6266 rx_rings = ACCESS_ONCE(np->rx_rings);
6267 if (!rx_rings)
6268 goto no_rings;
6269
6252 for (i = 0; i < np->num_rx_rings; i++) { 6270 for (i = 0; i < np->num_rx_rings; i++) {
6253 struct rx_ring_info *rp = &np->rx_rings[i]; 6271 struct rx_ring_info *rp = &rx_rings[i];
6254 6272
6255 niu_sync_rx_discard_stats(np, rp, 0); 6273 niu_sync_rx_discard_stats(np, rp, 0);
6256 6274
@@ -6259,6 +6277,8 @@ static void niu_get_rx_stats(struct niu *np)
6259 dropped += rp->rx_dropped; 6277 dropped += rp->rx_dropped;
6260 errors += rp->rx_errors; 6278 errors += rp->rx_errors;
6261 } 6279 }
6280
6281no_rings:
6262 np->dev->stats.rx_packets = pkts; 6282 np->dev->stats.rx_packets = pkts;
6263 np->dev->stats.rx_bytes = bytes; 6283 np->dev->stats.rx_bytes = bytes;
6264 np->dev->stats.rx_dropped = dropped; 6284 np->dev->stats.rx_dropped = dropped;
@@ -6268,16 +6288,24 @@ static void niu_get_rx_stats(struct niu *np)
6268static void niu_get_tx_stats(struct niu *np) 6288static void niu_get_tx_stats(struct niu *np)
6269{ 6289{
6270 unsigned long pkts, errors, bytes; 6290 unsigned long pkts, errors, bytes;
6291 struct tx_ring_info *tx_rings;
6271 int i; 6292 int i;
6272 6293
6273 pkts = errors = bytes = 0; 6294 pkts = errors = bytes = 0;
6295
6296 tx_rings = ACCESS_ONCE(np->tx_rings);
6297 if (!tx_rings)
6298 goto no_rings;
6299
6274 for (i = 0; i < np->num_tx_rings; i++) { 6300 for (i = 0; i < np->num_tx_rings; i++) {
6275 struct tx_ring_info *rp = &np->tx_rings[i]; 6301 struct tx_ring_info *rp = &tx_rings[i];
6276 6302
6277 pkts += rp->tx_packets; 6303 pkts += rp->tx_packets;
6278 bytes += rp->tx_bytes; 6304 bytes += rp->tx_bytes;
6279 errors += rp->tx_errors; 6305 errors += rp->tx_errors;
6280 } 6306 }
6307
6308no_rings:
6281 np->dev->stats.tx_packets = pkts; 6309 np->dev->stats.tx_packets = pkts;
6282 np->dev->stats.tx_bytes = bytes; 6310 np->dev->stats.tx_bytes = bytes;
6283 np->dev->stats.tx_errors = errors; 6311 np->dev->stats.tx_errors = errors;
@@ -6287,9 +6315,10 @@ static struct net_device_stats *niu_get_stats(struct net_device *dev)
6287{ 6315{
6288 struct niu *np = netdev_priv(dev); 6316 struct niu *np = netdev_priv(dev);
6289 6317
6290 niu_get_rx_stats(np); 6318 if (netif_running(dev)) {
6291 niu_get_tx_stats(np); 6319 niu_get_rx_stats(np);
6292 6320 niu_get_tx_stats(np);
6321 }
6293 return &dev->stats; 6322 return &dev->stats;
6294} 6323}
6295 6324
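The niu fix is an ordering discipline: the rings are allocated and counted, smp_wmb() makes those stores visible, and only then is the pointer published; the lockless stats reader snapshots the pointer once with ACCESS_ONCE() and bails out when it is NULL. Reduced to a generic kernel-style skeleton, not niu's actual structures:

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct ring { unsigned long packets; };

static struct ring *rings;
static int num_rings;

static int alloc_rings(int n)
{
	struct ring *r = kcalloc(n, sizeof(*r), GFP_KERNEL);

	if (!r)
		return -ENOMEM;
	num_rings = n;
	smp_wmb();		/* count and contents before the pointer */
	rings = r;		/* publish */
	return 0;
}

static unsigned long count_packets(void)
{
	struct ring *r = ACCESS_ONCE(rings);	/* one snapshot */
	unsigned long sum = 0;
	int i;

	if (!r)			/* not allocated (or already torn down) */
		return 0;
	for (i = 0; i < num_rings; i++)
		sum += r[i].packets;
	return sum;
}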
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 84134c766f3a..a41b2cf4d917 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1988,12 +1988,11 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
1988 } 1988 }
1989 1989
1990 ndev = alloc_etherdev(sizeof(struct ns83820)); 1990 ndev = alloc_etherdev(sizeof(struct ns83820));
1991 dev = PRIV(ndev);
1992
1993 err = -ENOMEM; 1991 err = -ENOMEM;
1994 if (!dev) 1992 if (!ndev)
1995 goto out; 1993 goto out;
1996 1994
1995 dev = PRIV(ndev);
1997 dev->ndev = ndev; 1996 dev->ndev = ndev;
1998 1997
1999 spin_lock_init(&dev->rx_info.lock); 1998 spin_lock_init(&dev->rx_info.lock);
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h
index a0c26a99520f..e1e33c80fb25 100644
--- a/drivers/net/pch_gbe/pch_gbe.h
+++ b/drivers/net/pch_gbe/pch_gbe.h
@@ -73,7 +73,7 @@ struct pch_gbe_regs {
73 struct pch_gbe_regs_mac_adr mac_adr[16]; 73 struct pch_gbe_regs_mac_adr mac_adr[16];
74 u32 ADDR_MASK; 74 u32 ADDR_MASK;
75 u32 MIIM; 75 u32 MIIM;
76 u32 reserve2; 76 u32 MAC_ADDR_LOAD;
77 u32 RGMII_ST; 77 u32 RGMII_ST;
78 u32 RGMII_CTRL; 78 u32 RGMII_CTRL;
79 u32 reserve3[3]; 79 u32 reserve3[3];
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index d7355306a738..b99e90aca37d 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -29,6 +29,7 @@ const char pch_driver_version[] = DRV_VERSION;
29#define PCH_GBE_SHORT_PKT 64 29#define PCH_GBE_SHORT_PKT 64
30#define DSC_INIT16 0xC000 30#define DSC_INIT16 0xC000
31#define PCH_GBE_DMA_ALIGN 0 31#define PCH_GBE_DMA_ALIGN 0
32#define PCH_GBE_DMA_PADDING 2
32#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ 33#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */
33#define PCH_GBE_COPYBREAK_DEFAULT 256 34#define PCH_GBE_COPYBREAK_DEFAULT 256
34#define PCH_GBE_PCI_BAR 1 35#define PCH_GBE_PCI_BAR 1
@@ -88,6 +89,12 @@ static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
88static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); 89static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
89static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, 90static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
90 int data); 91 int data);
92
93inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
94{
95 iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
96}
97
91/** 98/**
92 * pch_gbe_mac_read_mac_addr - Read MAC address 99 * pch_gbe_mac_read_mac_addr - Read MAC address
93 * @hw: Pointer to the HW structure 100 * @hw: Pointer to the HW structure
@@ -519,7 +526,9 @@ static void pch_gbe_reset_task(struct work_struct *work)
519 struct pch_gbe_adapter *adapter; 526 struct pch_gbe_adapter *adapter;
520 adapter = container_of(work, struct pch_gbe_adapter, reset_task); 527 adapter = container_of(work, struct pch_gbe_adapter, reset_task);
521 528
529 rtnl_lock();
522 pch_gbe_reinit_locked(adapter); 530 pch_gbe_reinit_locked(adapter);
531 rtnl_unlock();
523} 532}
524 533
525/** 534/**
@@ -528,14 +537,8 @@ static void pch_gbe_reset_task(struct work_struct *work)
528 */ 537 */
529void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter) 538void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
530{ 539{
531 struct net_device *netdev = adapter->netdev; 540 pch_gbe_down(adapter);
532 541 pch_gbe_up(adapter);
533 rtnl_lock();
534 if (netif_running(netdev)) {
535 pch_gbe_down(adapter);
536 pch_gbe_up(adapter);
537 }
538 rtnl_unlock();
539} 542}
540 543
541/** 544/**
@@ -1369,16 +1372,13 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1369 struct pch_gbe_buffer *buffer_info; 1372 struct pch_gbe_buffer *buffer_info;
1370 struct pch_gbe_rx_desc *rx_desc; 1373 struct pch_gbe_rx_desc *rx_desc;
1371 u32 length; 1374 u32 length;
1372 unsigned char tmp_packet[ETH_HLEN];
1373 unsigned int i; 1375 unsigned int i;
1374 unsigned int cleaned_count = 0; 1376 unsigned int cleaned_count = 0;
1375 bool cleaned = false; 1377 bool cleaned = false;
1376 struct sk_buff *skb; 1378 struct sk_buff *skb, *new_skb;
1377 u8 dma_status; 1379 u8 dma_status;
1378 u16 gbec_status; 1380 u16 gbec_status;
1379 u32 tcp_ip_status; 1381 u32 tcp_ip_status;
1380 u8 skb_copy_flag = 0;
1381 u8 skb_padding_flag = 0;
1382 1382
1383 i = rx_ring->next_to_clean; 1383 i = rx_ring->next_to_clean;
1384 1384
@@ -1422,55 +1422,70 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1422 pr_err("Receive CRC Error\n"); 1422 pr_err("Receive CRC Error\n");
1423 } else { 1423 } else {
1424 /* get receive length */ 1424 /* get receive length */
1425 /* length convert[-3], padding[-2] */ 1425 /* length convert[-3] */
1426 length = (rx_desc->rx_words_eob) - 3 - 2; 1426 length = (rx_desc->rx_words_eob) - 3;
1427 1427
1428 /* Decide the data conversion method */ 1428 /* Decide the data conversion method */
1429 if (!adapter->rx_csum) { 1429 if (!adapter->rx_csum) {
1430 /* [Header:14][payload] */ 1430 /* [Header:14][payload] */
1431 skb_padding_flag = 0; 1431 if (NET_IP_ALIGN) {
1432 skb_copy_flag = 1; 1432 /* Because alignment differs,
1433 * the new_skb is newly allocated,
1434 * and data is copied to new_skb.*/
1435 new_skb = netdev_alloc_skb(netdev,
1436 length + NET_IP_ALIGN);
1437 if (!new_skb) {
1438 /* drop error */
1439 pr_err("New skb allocation "
1440 "Error\n");
1441 goto dorrop;
1442 }
1443 skb_reserve(new_skb, NET_IP_ALIGN);
1444 memcpy(new_skb->data, skb->data,
1445 length);
1446 skb = new_skb;
1447 } else {
1448 /* Use the DMA buffer as the SKB as-is. */
1449 buffer_info->skb = NULL;
1450 }
1433 } else { 1451 } else {
1434 /* [Header:14][padding:2][payload] */ 1452 /* [Header:14][padding:2][payload] */
1435 skb_padding_flag = 1; 1453 /* The length includes padding length */
1436 if (length < copybreak) 1454 length = length - PCH_GBE_DMA_PADDING;
1437 skb_copy_flag = 1; 1455 if ((length < copybreak) ||
1438 else 1456 (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
1439 skb_copy_flag = 0; 1457 /* Because alignment differs,
1440 } 1458 * the new_skb is newly allocated,
1441 1459 * and data is copied to new_skb.
1442 /* Data conversion */ 1460 * Padding data is deleted
1443 if (skb_copy_flag) { /* recycle skb */ 1461 * at the time of a copy.*/
1444 struct sk_buff *new_skb; 1462 new_skb = netdev_alloc_skb(netdev,
1445 new_skb = 1463 length + NET_IP_ALIGN);
1446 netdev_alloc_skb(netdev, 1464 if (!new_skb) {
1447 length + NET_IP_ALIGN); 1465 /* drop error */
1448 if (new_skb) { 1466 pr_err("New skb allocation "
1449 if (!skb_padding_flag) { 1467 "Error\n");
1450 skb_reserve(new_skb, 1468 goto dorrop;
1451 NET_IP_ALIGN);
1452 } 1469 }
1470 skb_reserve(new_skb, NET_IP_ALIGN);
1453 memcpy(new_skb->data, skb->data, 1471 memcpy(new_skb->data, skb->data,
1454 length); 1472 ETH_HLEN);
1455 /* save the skb 1473 memcpy(&new_skb->data[ETH_HLEN],
1456 * in buffer_info as good */ 1474 &skb->data[ETH_HLEN +
1475 PCH_GBE_DMA_PADDING],
1476 length - ETH_HLEN);
1457 skb = new_skb; 1477 skb = new_skb;
1458 } else if (!skb_padding_flag) { 1478 } else {
1459 /* dorrop error */ 1479 /* Padding data is deleted
1460 pr_err("New skb allocation Error\n"); 1480 * by moving header data.*/
1461 goto dorrop; 1481 memmove(&skb->data[PCH_GBE_DMA_PADDING],
1482 &skb->data[0], ETH_HLEN);
1483 skb_reserve(skb, NET_IP_ALIGN);
1484 buffer_info->skb = NULL;
1462 } 1485 }
1463 } else {
1464 buffer_info->skb = NULL;
1465 } 1486 }
1466 if (skb_padding_flag) { 1487 /* The length includes FCS length */
1467 memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN); 1488 length = length - ETH_FCS_LEN;
1468 memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0],
1469 ETH_HLEN);
1470 skb_reserve(skb, NET_IP_ALIGN);
1471
1472 }
1473
1474 /* update status of driver */ 1489 /* update status of driver */
1475 adapter->stats.rx_bytes += length; 1490 adapter->stats.rx_bytes += length;
1476 adapter->stats.rx_packets++; 1491 adapter->stats.rx_packets++;
@@ -2247,7 +2262,7 @@ static void pch_gbe_remove(struct pci_dev *pdev)
2247 struct net_device *netdev = pci_get_drvdata(pdev); 2262 struct net_device *netdev = pci_get_drvdata(pdev);
2248 struct pch_gbe_adapter *adapter = netdev_priv(netdev); 2263 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2249 2264
2250 flush_scheduled_work(); 2265 cancel_work_sync(&adapter->reset_task);
2251 unregister_netdev(netdev); 2266 unregister_netdev(netdev);
2252 2267
2253 pch_gbe_hal_phy_hw_reset(&adapter->hw); 2268 pch_gbe_hal_phy_hw_reset(&adapter->hw);
@@ -2322,6 +2337,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2322 netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO; 2337 netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
2323 pch_gbe_set_ethtool_ops(netdev); 2338 pch_gbe_set_ethtool_ops(netdev);
2324 2339
2340 pch_gbe_mac_load_mac_addr(&adapter->hw);
2325 pch_gbe_mac_reset_hw(&adapter->hw); 2341 pch_gbe_mac_reset_hw(&adapter->hw);
2326 2342
2327 /* setup the private structure */ 2343 /* setup the private structure */
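When NET_IP_ALIGN equals the controller's 2-byte DMA padding, the rewritten receive path above deletes the padding in place by sliding the 14-byte Ethernet header forward instead of copying the whole frame. That move, as a standalone model:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_HLEN    14
#define DMA_PADDING 2

int main(void)
{
	/* [header:14][padding:2][payload...] as DMA'd by the NIC */
	uint8_t buf[32] = "HHHHHHHHHHHHHHPPpayload";
	uint8_t *data = buf;

	/* move the header over the padding; the payload stays put */
	memmove(data + DMA_PADDING, data, ETH_HLEN);
	data += DMA_PADDING;	/* equivalent of skb_reserve() */

	printf("%.14s|%s\n", data, data + ETH_HLEN);	/* HHHH...|payload */
	return 0;
}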
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 1f42f6ac8551..d3cb77205863 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -1488,12 +1488,10 @@ static void ei_rx_overrun(struct net_device *dev)
1488 1488
1489 /* 1489 /*
1490 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total. 1490 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
1491 * Early datasheets said to poll the reset bit, but now they say that 1491 * We wait at least 2ms.
1492 * it "is not a reliable indicator and subsequently should be ignored."
1493 * We wait at least 10ms.
1494 */ 1492 */
1495 1493
1496 mdelay(10); 1494 mdelay(2);
1497 1495
1498 /* 1496 /*
1499 * Reset RBCR[01] back to zero as per magic incantation. 1497 * Reset RBCR[01] back to zero as per magic incantation.
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 35fda5ac8120..392a6c4b72e5 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -77,7 +77,6 @@ config NATIONAL_PHY
77 Currently supports the DP83865 PHY. 77 Currently supports the DP83865 PHY.
78 78
79config STE10XP 79config STE10XP
80 depends on PHYLIB
81 tristate "Driver for STMicroelectronics STe10Xp PHYs" 80 tristate "Driver for STMicroelectronics STe10Xp PHYs"
82 ---help--- 81 ---help---
83 This is the driver for the STe100p and STe101p PHYs. 82 This is the driver for the STe100p and STe101p PHYs.
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 0fd1678bc5a9..590f902deb6b 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -19,13 +19,7 @@
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/phy.h> 21#include <linux/phy.h>
22 22#include <linux/micrel_phy.h>
23#define PHY_ID_KSZ9021 0x00221611
24#define PHY_ID_KS8737 0x00221720
25#define PHY_ID_KS8041 0x00221510
26#define PHY_ID_KS8051 0x00221550
27/* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */
28#define PHY_ID_KS8001 0x0022161A
29 23
30/* general Interrupt control/status reg in vendor specific block. */ 24/* general Interrupt control/status reg in vendor specific block. */
31#define MII_KSZPHY_INTCS 0x1B 25#define MII_KSZPHY_INTCS 0x1B
@@ -46,6 +40,7 @@
46#define KSZPHY_CTRL_INT_ACTIVE_HIGH (1 << 9) 40#define KSZPHY_CTRL_INT_ACTIVE_HIGH (1 << 9)
47#define KSZ9021_CTRL_INT_ACTIVE_HIGH (1 << 14) 41#define KSZ9021_CTRL_INT_ACTIVE_HIGH (1 << 14)
48#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14) 42#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14)
43#define KSZ8051_RMII_50MHZ_CLK (1 << 7)
49 44
50static int kszphy_ack_interrupt(struct phy_device *phydev) 45static int kszphy_ack_interrupt(struct phy_device *phydev)
51{ 46{
@@ -106,6 +101,19 @@ static int kszphy_config_init(struct phy_device *phydev)
106 return 0; 101 return 0;
107} 102}
108 103
104static int ks8051_config_init(struct phy_device *phydev)
105{
106 int regval;
107
108 if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
109 regval = phy_read(phydev, MII_KSZPHY_CTRL);
110 regval |= KSZ8051_RMII_50MHZ_CLK;
111 phy_write(phydev, MII_KSZPHY_CTRL, regval);
112 }
113
114 return 0;
115}
116
109static struct phy_driver ks8737_driver = { 117static struct phy_driver ks8737_driver = {
110 .phy_id = PHY_ID_KS8737, 118 .phy_id = PHY_ID_KS8737,
111 .phy_id_mask = 0x00fffff0, 119 .phy_id_mask = 0x00fffff0,
@@ -142,7 +150,7 @@ static struct phy_driver ks8051_driver = {
142 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause 150 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
143 | SUPPORTED_Asym_Pause), 151 | SUPPORTED_Asym_Pause),
144 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 152 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
145 .config_init = kszphy_config_init, 153 .config_init = ks8051_config_init,
146 .config_aneg = genphy_config_aneg, 154 .config_aneg = genphy_config_aneg,
147 .read_status = genphy_read_status, 155 .read_status = genphy_read_status,
148 .ack_interrupt = kszphy_ack_interrupt, 156 .ack_interrupt = kszphy_ack_interrupt,
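The MICREL_PHY_50MHZ_CLK flag consumed by ks8051_config_init() is expected to come from the attaching MAC or board code via phy_device->dev_flags. A hedged sketch of the producer side; the exact attach call and where this runs vary by driver and kernel version:

#include <linux/phy.h>
#include <linux/micrel_phy.h>

static void board_fixup_phy(struct phy_device *phydev)
{
	/* picked up by ks8051_config_init() when config_init runs */
	phydev->dev_flags |= MICREL_PHY_50MHZ_CLK;
}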
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index c7a6c4466978..9f6d670748d1 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -592,8 +592,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
592 ppp_release(NULL, file); 592 ppp_release(NULL, file);
593 err = 0; 593 err = 0;
594 } else 594 } else
595 printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n", 595 pr_warn("PPPIOCDETACH file->f_count=%ld\n",
596 atomic_long_read(&file->f_count)); 596 atomic_long_read(&file->f_count));
597 mutex_unlock(&ppp_mutex); 597 mutex_unlock(&ppp_mutex);
598 return err; 598 return err;
599 } 599 }
@@ -630,7 +630,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
630 630
631 if (pf->kind != INTERFACE) { 631 if (pf->kind != INTERFACE) {
632 /* can't happen */ 632 /* can't happen */
633 printk(KERN_ERR "PPP: not interface or channel??\n"); 633 pr_err("PPP: not interface or channel??\n");
634 return -EINVAL; 634 return -EINVAL;
635 } 635 }
636 636
@@ -704,7 +704,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
704 } 704 }
705 vj = slhc_init(val2+1, val+1); 705 vj = slhc_init(val2+1, val+1);
706 if (!vj) { 706 if (!vj) {
707 printk(KERN_ERR "PPP: no memory (VJ compressor)\n"); 707 netdev_err(ppp->dev,
708 "PPP: no memory (VJ compressor)\n");
708 err = -ENOMEM; 709 err = -ENOMEM;
709 break; 710 break;
710 } 711 }
@@ -898,17 +899,17 @@ static int __init ppp_init(void)
898{ 899{
899 int err; 900 int err;
900 901
901 printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n"); 902 pr_info("PPP generic driver version " PPP_VERSION "\n");
902 903
903 err = register_pernet_device(&ppp_net_ops); 904 err = register_pernet_device(&ppp_net_ops);
904 if (err) { 905 if (err) {
905 printk(KERN_ERR "failed to register PPP pernet device (%d)\n", err); 906 pr_err("failed to register PPP pernet device (%d)\n", err);
906 goto out; 907 goto out;
907 } 908 }
908 909
909 err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops); 910 err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
910 if (err) { 911 if (err) {
911 printk(KERN_ERR "failed to register PPP device (%d)\n", err); 912 pr_err("failed to register PPP device (%d)\n", err);
912 goto out_net; 913 goto out_net;
913 } 914 }
914 915
@@ -1078,7 +1079,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
1078 new_skb = alloc_skb(new_skb_size, GFP_ATOMIC); 1079 new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
1079 if (!new_skb) { 1080 if (!new_skb) {
1080 if (net_ratelimit()) 1081 if (net_ratelimit())
1081 printk(KERN_ERR "PPP: no memory (comp pkt)\n"); 1082 netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
1082 return NULL; 1083 return NULL;
1083 } 1084 }
1084 if (ppp->dev->hard_header_len > PPP_HDRLEN) 1085 if (ppp->dev->hard_header_len > PPP_HDRLEN)
@@ -1108,7 +1109,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
1108 * the same number. 1109 * the same number.
1109 */ 1110 */
1110 if (net_ratelimit()) 1111 if (net_ratelimit())
1111 printk(KERN_ERR "ppp: compressor dropped pkt\n"); 1112 netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
1112 kfree_skb(skb); 1113 kfree_skb(skb);
1113 kfree_skb(new_skb); 1114 kfree_skb(new_skb);
1114 new_skb = NULL; 1115 new_skb = NULL;
@@ -1138,7 +1139,9 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1138 if (ppp->pass_filter && 1139 if (ppp->pass_filter &&
1139 sk_run_filter(skb, ppp->pass_filter) == 0) { 1140 sk_run_filter(skb, ppp->pass_filter) == 0) {
1140 if (ppp->debug & 1) 1141 if (ppp->debug & 1)
1141 printk(KERN_DEBUG "PPP: outbound frame not passed\n"); 1142 netdev_printk(KERN_DEBUG, ppp->dev,
1143 "PPP: outbound frame "
1144 "not passed\n");
1142 kfree_skb(skb); 1145 kfree_skb(skb);
1143 return; 1146 return;
1144 } 1147 }
@@ -1164,7 +1167,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1164 new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2, 1167 new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
1165 GFP_ATOMIC); 1168 GFP_ATOMIC);
1166 if (!new_skb) { 1169 if (!new_skb) {
1167 printk(KERN_ERR "PPP: no memory (VJ comp pkt)\n"); 1170 netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n");
1168 goto drop; 1171 goto drop;
1169 } 1172 }
1170 skb_reserve(new_skb, ppp->dev->hard_header_len - 2); 1173 skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
@@ -1202,7 +1205,9 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1202 proto != PPP_LCP && proto != PPP_CCP) { 1205 proto != PPP_LCP && proto != PPP_CCP) {
1203 if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) { 1206 if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
1204 if (net_ratelimit()) 1207 if (net_ratelimit())
1205 printk(KERN_ERR "ppp: compression required but down - pkt dropped.\n"); 1208 netdev_err(ppp->dev,
1209 "ppp: compression required but "
1210 "down - pkt dropped.\n");
1206 goto drop; 1211 goto drop;
1207 } 1212 }
1208 skb = pad_compress_skb(ppp, skb); 1213 skb = pad_compress_skb(ppp, skb);
@@ -1505,7 +1510,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1505 noskb: 1510 noskb:
1506 spin_unlock_bh(&pch->downl); 1511 spin_unlock_bh(&pch->downl);
1507 if (ppp->debug & 1) 1512 if (ppp->debug & 1)
1508 printk(KERN_ERR "PPP: no memory (fragment)\n"); 1513 netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
1509 ++ppp->dev->stats.tx_errors; 1514 ++ppp->dev->stats.tx_errors;
1510 ++ppp->nxseq; 1515 ++ppp->nxseq;
1511 return 1; /* abandon the frame */ 1516 return 1; /* abandon the frame */
@@ -1686,7 +1691,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1686 /* copy to a new sk_buff with more tailroom */ 1691 /* copy to a new sk_buff with more tailroom */
1687 ns = dev_alloc_skb(skb->len + 128); 1692 ns = dev_alloc_skb(skb->len + 128);
1688 if (!ns) { 1693 if (!ns) {
1689 printk(KERN_ERR"PPP: no memory (VJ decomp)\n"); 1694 netdev_err(ppp->dev, "PPP: no memory "
1695 "(VJ decomp)\n");
1690 goto err; 1696 goto err;
1691 } 1697 }
1692 skb_reserve(ns, 2); 1698 skb_reserve(ns, 2);
@@ -1699,7 +1705,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1699 1705
1700 len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2); 1706 len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
1701 if (len <= 0) { 1707 if (len <= 0) {
1702 printk(KERN_DEBUG "PPP: VJ decompression error\n"); 1708 netdev_printk(KERN_DEBUG, ppp->dev,
1709 "PPP: VJ decompression error\n");
1703 goto err; 1710 goto err;
1704 } 1711 }
1705 len += 2; 1712 len += 2;
@@ -1721,7 +1728,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1721 goto err; 1728 goto err;
1722 1729
1723 if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) { 1730 if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
1724 printk(KERN_ERR "PPP: VJ uncompressed error\n"); 1731 netdev_err(ppp->dev, "PPP: VJ uncompressed error\n");
1725 goto err; 1732 goto err;
1726 } 1733 }
1727 proto = PPP_IP; 1734 proto = PPP_IP;
@@ -1762,8 +1769,9 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1762 if (ppp->pass_filter && 1769 if (ppp->pass_filter &&
1763 sk_run_filter(skb, ppp->pass_filter) == 0) { 1770 sk_run_filter(skb, ppp->pass_filter) == 0) {
1764 if (ppp->debug & 1) 1771 if (ppp->debug & 1)
1765 printk(KERN_DEBUG "PPP: inbound frame " 1772 netdev_printk(KERN_DEBUG, ppp->dev,
1766 "not passed\n"); 1773 "PPP: inbound frame "
1774 "not passed\n");
1767 kfree_skb(skb); 1775 kfree_skb(skb);
1768 return; 1776 return;
1769 } 1777 }
@@ -1821,7 +1829,8 @@ ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
1821 1829
1822 ns = dev_alloc_skb(obuff_size); 1830 ns = dev_alloc_skb(obuff_size);
1823 if (!ns) { 1831 if (!ns) {
1824 printk(KERN_ERR "ppp_decompress_frame: no memory\n"); 1832 netdev_err(ppp->dev, "ppp_decompress_frame: "
1833 "no memory\n");
1825 goto err; 1834 goto err;
1826 } 1835 }
1827 /* the decompressor still expects the A/C bytes in the hdr */ 1836 /* the decompressor still expects the A/C bytes in the hdr */
@@ -1989,7 +1998,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
1989 u32 seq = ppp->nextseq; 1998 u32 seq = ppp->nextseq;
1990 u32 minseq = ppp->minseq; 1999 u32 minseq = ppp->minseq;
1991 struct sk_buff_head *list = &ppp->mrq; 2000 struct sk_buff_head *list = &ppp->mrq;
1992 struct sk_buff *p, *next; 2001 struct sk_buff *p, *tmp;
1993 struct sk_buff *head, *tail; 2002 struct sk_buff *head, *tail;
1994 struct sk_buff *skb = NULL; 2003 struct sk_buff *skb = NULL;
1995 int lost = 0, len = 0; 2004 int lost = 0, len = 0;
@@ -1998,13 +2007,15 @@ ppp_mp_reconstruct(struct ppp *ppp)
1998 return NULL; 2007 return NULL;
1999 head = list->next; 2008 head = list->next;
2000 tail = NULL; 2009 tail = NULL;
2001 for (p = head; p != (struct sk_buff *) list; p = next) { 2010 skb_queue_walk_safe(list, p, tmp) {
2002 next = p->next; 2011 again:
2003 if (seq_before(PPP_MP_CB(p)->sequence, seq)) { 2012 if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
2004 /* this can't happen, anyway ignore the skb */ 2013 /* this can't happen, anyway ignore the skb */
2005 printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n", 2014 netdev_err(ppp->dev, "ppp_mp_reconstruct bad "
2006 PPP_MP_CB(p)->sequence, seq); 2015 "seq %u < %u\n",
2007 head = next; 2016 PPP_MP_CB(p)->sequence, seq);
2017 __skb_unlink(p, list);
2018 kfree_skb(p);
2008 continue; 2019 continue;
2009 } 2020 }
2010 if (PPP_MP_CB(p)->sequence != seq) { 2021 if (PPP_MP_CB(p)->sequence != seq) {
@@ -2016,8 +2027,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
2016 lost = 1; 2027 lost = 1;
2017 seq = seq_before(minseq, PPP_MP_CB(p)->sequence)? 2028 seq = seq_before(minseq, PPP_MP_CB(p)->sequence)?
2018 minseq + 1: PPP_MP_CB(p)->sequence; 2029 minseq + 1: PPP_MP_CB(p)->sequence;
2019 next = p; 2030 goto again;
2020 continue;
2021 } 2031 }
2022 2032
2023 /* 2033 /*
@@ -2042,17 +2052,9 @@ ppp_mp_reconstruct(struct ppp *ppp)
2042 (PPP_MP_CB(head)->BEbits & B)) { 2052 (PPP_MP_CB(head)->BEbits & B)) {
2043 if (len > ppp->mrru + 2) { 2053 if (len > ppp->mrru + 2) {
2044 ++ppp->dev->stats.rx_length_errors; 2054 ++ppp->dev->stats.rx_length_errors;
2045 printk(KERN_DEBUG "PPP: reconstructed packet" 2055 netdev_printk(KERN_DEBUG, ppp->dev,
2046 " is too long (%d)\n", len); 2056 "PPP: reconstructed packet"
2047 } else if (p == head) { 2057 " is too long (%d)\n", len);
2048 /* fragment is complete packet - reuse skb */
2049 tail = p;
2050 skb = skb_get(p);
2051 break;
2052 } else if ((skb = dev_alloc_skb(len)) == NULL) {
2053 ++ppp->dev->stats.rx_missed_errors;
2054 printk(KERN_DEBUG "PPP: no memory for "
2055 "reconstructed packet");
2056 } else { 2058 } else {
2057 tail = p; 2059 tail = p;
2058 break; 2060 break;
@@ -2065,9 +2067,17 @@ ppp_mp_reconstruct(struct ppp *ppp)
2065 * and we haven't found a complete valid packet yet, 2067 * and we haven't found a complete valid packet yet,
2066 * we can discard up to and including this fragment. 2068 * we can discard up to and including this fragment.
2067 */ 2069 */
2068 if (PPP_MP_CB(p)->BEbits & E) 2070 if (PPP_MP_CB(p)->BEbits & E) {
2069 head = next; 2071 struct sk_buff *tmp2;
2070 2072
2073 skb_queue_reverse_walk_from_safe(list, p, tmp2) {
2074 __skb_unlink(p, list);
2075 kfree_skb(p);
2076 }
2077 head = skb_peek(list);
2078 if (!head)
2079 break;
2080 }
2071 ++seq; 2081 ++seq;
2072 } 2082 }
2073 2083
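The discard path above leans on skb_queue_reverse_walk_from_safe(), added to skbuff.h alongside this patch: starting at p it walks back toward the queue head, caching ->prev so each node can be unlinked and freed in turn; skb_peek() then re-reads the new head (NULL ends the scan). Its shape is essentially the following, paraphrased rather than quoted:

        #define skb_queue_reverse_walk_from_safe(queue, skb, tmp)       \
                for (tmp = (skb)->prev;                                 \
                     (skb) != (struct sk_buff *)(queue);                \
                     (skb) = (tmp), (tmp) = (skb)->prev)
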
@@ -2077,26 +2087,37 @@ ppp_mp_reconstruct(struct ppp *ppp)
2077 signal a receive error. */ 2087 signal a receive error. */
2078 if (PPP_MP_CB(head)->sequence != ppp->nextseq) { 2088 if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
2079 if (ppp->debug & 1) 2089 if (ppp->debug & 1)
2080 printk(KERN_DEBUG " missed pkts %u..%u\n", 2090 netdev_printk(KERN_DEBUG, ppp->dev,
2081 ppp->nextseq, 2091 " missed pkts %u..%u\n",
2082 PPP_MP_CB(head)->sequence-1); 2092 ppp->nextseq,
2093 PPP_MP_CB(head)->sequence-1);
2083 ++ppp->dev->stats.rx_dropped; 2094 ++ppp->dev->stats.rx_dropped;
2084 ppp_receive_error(ppp); 2095 ppp_receive_error(ppp);
2085 } 2096 }
2086 2097
2087 if (head != tail) 2098 skb = head;
2088 /* copy to a single skb */ 2099 if (head != tail) {
2089 for (p = head; p != tail->next; p = p->next) 2100 struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list;
2090 skb_copy_bits(p, 0, skb_put(skb, p->len), p->len); 2101 p = skb_queue_next(list, head);
2091 ppp->nextseq = PPP_MP_CB(tail)->sequence + 1; 2102 __skb_unlink(skb, list);
2092 head = tail->next; 2103 skb_queue_walk_from_safe(list, p, tmp) {
2093 } 2104 __skb_unlink(p, list);
2105 *fragpp = p;
2106 p->next = NULL;
2107 fragpp = &p->next;
2108
2109 skb->len += p->len;
2110 skb->data_len += p->len;
2111 skb->truesize += p->len;
2112
2113 if (p == tail)
2114 break;
2115 }
2116 } else {
2117 __skb_unlink(skb, list);
2118 }
2094 2119
2095 /* Discard all the skbuffs that we have copied the data out of 2120 ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
2096 or that we can't use. */
2097 while ((p = list->next) != head) {
2098 __skb_unlink(p, list);
2099 kfree_skb(p);
2100 } 2121 }
2101 2122
2102 return skb; 2123 return skb;
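The rewritten assembly above is the heart of the patch: instead of allocating one large skb and copying every fragment into it (with an rx_missed_errors fallback when dev_alloc_skb() failed), the head fragment becomes the packet and the rest are chained onto its frag_list. A reduced sketch of the chaining and the accounting it must keep consistent, with head and frag standing in for the skbs being unlinked above:

        struct sk_buff **fragpp = &skb_shinfo(head)->frag_list;

        *fragpp = frag;                 /* append to the fragment chain */
        frag->next = NULL;
        fragpp = &frag->next;

        head->len      += frag->len;    /* total packet length */
        head->data_len += frag->len;    /* bytes held outside the linear area */
        head->truesize += frag->len;    /* socket memory accounting */

This makes reassembly O(1) per fragment and removes the old trailing loop that freed the copied-out skbs.
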
@@ -2617,8 +2638,8 @@ ppp_create_interface(struct net *net, int unit, int *retp)
2617 ret = register_netdev(dev); 2638 ret = register_netdev(dev);
2618 if (ret != 0) { 2639 if (ret != 0) {
2619 unit_put(&pn->units_idr, unit); 2640 unit_put(&pn->units_idr, unit);
2620 printk(KERN_ERR "PPP: couldn't register device %s (%d)\n", 2641 netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
2621 dev->name, ret); 2642 dev->name, ret);
2622 goto out2; 2643 goto out2;
2623 } 2644 }
2624 2645
@@ -2690,9 +2711,9 @@ static void ppp_destroy_interface(struct ppp *ppp)
2690 2711
2691 if (!ppp->file.dead || ppp->n_channels) { 2712 if (!ppp->file.dead || ppp->n_channels) {
2692 /* "can't happen" */ 2713 /* "can't happen" */
2693 printk(KERN_ERR "ppp: destroying ppp struct %p but dead=%d " 2714 netdev_err(ppp->dev, "ppp: destroying ppp struct %p "
2694 "n_channels=%d !\n", ppp, ppp->file.dead, 2715 "but dead=%d n_channels=%d !\n",
2695 ppp->n_channels); 2716 ppp, ppp->file.dead, ppp->n_channels);
2696 return; 2717 return;
2697 } 2718 }
2698 2719
@@ -2834,8 +2855,7 @@ static void ppp_destroy_channel(struct channel *pch)
2834 2855
2835 if (!pch->file.dead) { 2856 if (!pch->file.dead) {
2836 /* "can't happen" */ 2857 /* "can't happen" */
2837 printk(KERN_ERR "ppp: destroying undead channel %p !\n", 2858 pr_err("ppp: destroying undead channel %p !\n", pch);
2838 pch);
2839 return; 2859 return;
2840 } 2860 }
2841 skb_queue_purge(&pch->file.xq); 2861 skb_queue_purge(&pch->file.xq);
@@ -2847,7 +2867,7 @@ static void __exit ppp_cleanup(void)
2847{ 2867{
2848 /* should never happen */ 2868 /* should never happen */
2849 if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count)) 2869 if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
2850 printk(KERN_ERR "PPP: removing module but units remain!\n"); 2870 pr_err("PPP: removing module but units remain!\n");
2851 unregister_chrdev(PPP_MAJOR, "ppp"); 2871 unregister_chrdev(PPP_MAJOR, "ppp");
2852 device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0)); 2872 device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
2853 class_destroy(ppp_class); 2873 class_destroy(ppp_class);
@@ -2865,7 +2885,7 @@ static int __unit_alloc(struct idr *p, void *ptr, int n)
2865 2885
2866again: 2886again:
2867 if (!idr_pre_get(p, GFP_KERNEL)) { 2887 if (!idr_pre_get(p, GFP_KERNEL)) {
2868 printk(KERN_ERR "PPP: No free memory for idr\n"); 2888 pr_err("PPP: No free memory for idr\n");
2869 return -ENOMEM; 2889 return -ENOMEM;
2870 } 2890 }
2871 2891
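The remaining ppp_generic.c hunks are one mechanical conversion: printk(KERN_ERR ...) becomes netdev_err() wherever a struct net_device is at hand, and pr_err() where there is none (module cleanup, channels not attached to a unit, idr allocation). The resulting convention in two lines:

        netdev_err(ppp->dev, "no memory\n");    /* prefixed with driver and ifname */
        pr_err("PPP: removing module but units remain!\n");     /* bare prefix only */
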
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index bde7d61f1930..469ab0b7ce31 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -973,7 +973,8 @@ static void __rtl8169_check_link_status(struct net_device *dev,
973 if (pm) 973 if (pm)
974 pm_request_resume(&tp->pci_dev->dev); 974 pm_request_resume(&tp->pci_dev->dev);
975 netif_carrier_on(dev); 975 netif_carrier_on(dev);
976 netif_info(tp, ifup, dev, "link up\n"); 976 if (net_ratelimit())
977 netif_info(tp, ifup, dev, "link up\n");
977 } else { 978 } else {
978 netif_carrier_off(dev); 979 netif_carrier_off(dev);
979 netif_info(tp, ifdown, dev, "link down\n"); 980 netif_info(tp, ifdown, dev, "link down\n");
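The net_ratelimit() guard added above bounds how often the link-up message can fire: the helper returns nonzero only while the global network printk rate limit has credit, so a flapping link cannot flood the log. The idiom in general form:

        if (net_ratelimit())            /* false once the burst allowance is spent */
                pr_info("noisy event\n");
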
@@ -3189,6 +3190,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3189 if (pci_dev_run_wake(pdev)) 3190 if (pci_dev_run_wake(pdev))
3190 pm_runtime_put_noidle(&pdev->dev); 3191 pm_runtime_put_noidle(&pdev->dev);
3191 3192
3193 netif_carrier_off(dev);
3194
3192out: 3195out:
3193 return rc; 3196 return rc;
3194 3197
@@ -3757,7 +3760,8 @@ static void rtl_hw_start_8168(struct net_device *dev)
3757 RTL_W16(IntrMitigate, 0x5151); 3760 RTL_W16(IntrMitigate, 0x5151);
3758 3761
3759 /* Work around for RxFIFO overflow. */ 3762 /* Work around for RxFIFO overflow. */
3760 if (tp->mac_version == RTL_GIGA_MAC_VER_11) { 3763 if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
3764 tp->mac_version == RTL_GIGA_MAC_VER_22) {
3761 tp->intr_event |= RxFIFOOver | PCSTimeout; 3765 tp->intr_event |= RxFIFOOver | PCSTimeout;
3762 tp->intr_event &= ~RxOverflow; 3766 tp->intr_event &= ~RxOverflow;
3763 } 3767 }
@@ -4639,12 +4643,33 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
4639 break; 4643 break;
4640 } 4644 }
4641 4645
4642 /* Work around for rx fifo overflow */ 4646 if (unlikely(status & RxFIFOOver)) {
4643 if (unlikely(status & RxFIFOOver) && 4647 switch (tp->mac_version) {
4644 (tp->mac_version == RTL_GIGA_MAC_VER_11)) { 4648 /* Work around for rx fifo overflow */
4645 netif_stop_queue(dev); 4649 case RTL_GIGA_MAC_VER_11:
4646 rtl8169_tx_timeout(dev); 4650 case RTL_GIGA_MAC_VER_22:
4647 break; 4651 case RTL_GIGA_MAC_VER_26:
4652 netif_stop_queue(dev);
4653 rtl8169_tx_timeout(dev);
4654 goto done;
4655 /* Testers needed. */
4656 case RTL_GIGA_MAC_VER_17:
4657 case RTL_GIGA_MAC_VER_19:
4658 case RTL_GIGA_MAC_VER_20:
4659 case RTL_GIGA_MAC_VER_21:
4660 case RTL_GIGA_MAC_VER_23:
4661 case RTL_GIGA_MAC_VER_24:
4662 case RTL_GIGA_MAC_VER_27:
4663 case RTL_GIGA_MAC_VER_28:
4664 /* Experimental science. Pktgen proof. */
4665 case RTL_GIGA_MAC_VER_12:
4666 case RTL_GIGA_MAC_VER_25:
4667 if (status == RxFIFOOver)
4668 goto done;
4669 break;
4670 default:
4671 break;
4672 }
4648 } 4673 }
4649 4674
4650 if (unlikely(status & SYSErr)) { 4675 if (unlikely(status & SYSErr)) {
@@ -4680,7 +4705,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
4680 (status & RxFIFOOver) ? (status | RxOverflow) : status); 4705 (status & RxFIFOOver) ? (status | RxOverflow) : status);
4681 status = RTL_R16(IntrStatus); 4706 status = RTL_R16(IntrStatus);
4682 } 4707 }
4683 4708done:
4684 return IRQ_RETVAL(handled); 4709 return IRQ_RETVAL(handled);
4685} 4710}
4686 4711
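Two easily conflated tests appear in the reworked RxFIFOOver handling: the outer check is a bit test, while the inner status == RxFIFOOver is strict equality, meaning overflow is the only pending source and the handler may jump straight to done with nothing else to service. In miniature:

        u16 status = RTL_R16(IntrStatus);
        bool overflow_pending = status & RxFIFOOver;    /* overflow among the causes */
        bool overflow_only    = status == RxFIFOOver;   /* overflow and nothing else */
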
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 002bac743843..35b7bc52a2d1 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -21,6 +21,7 @@
21#include <linux/ethtool.h> 21#include <linux/ethtool.h>
22#include <linux/topology.h> 22#include <linux/topology.h>
23#include <linux/gfp.h> 23#include <linux/gfp.h>
24#include <linux/cpu_rmap.h>
24#include "net_driver.h" 25#include "net_driver.h"
25#include "efx.h" 26#include "efx.h"
26#include "nic.h" 27#include "nic.h"
@@ -307,6 +308,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
307 channel->irq_mod_score = 0; 308 channel->irq_mod_score = 0;
308 } 309 }
309 310
311 efx_filter_rfs_expire(channel);
312
310 /* There is no race here; although napi_disable() will 313 /* There is no race here; although napi_disable() will
311 * only wait for napi_complete(), this isn't a problem 314 * only wait for napi_complete(), this isn't a problem
312 * since efx_channel_processed() will have no effect if 315 * since efx_channel_processed() will have no effect if
@@ -673,7 +676,7 @@ static void efx_fini_channels(struct efx_nic *efx)
673 676
674 efx_for_each_channel_rx_queue(rx_queue, channel) 677 efx_for_each_channel_rx_queue(rx_queue, channel)
675 efx_fini_rx_queue(rx_queue); 678 efx_fini_rx_queue(rx_queue);
676 efx_for_each_channel_tx_queue(tx_queue, channel) 679 efx_for_each_possible_channel_tx_queue(tx_queue, channel)
677 efx_fini_tx_queue(tx_queue); 680 efx_fini_tx_queue(tx_queue);
678 efx_fini_eventq(channel); 681 efx_fini_eventq(channel);
679 } 682 }
@@ -689,7 +692,7 @@ static void efx_remove_channel(struct efx_channel *channel)
689 692
690 efx_for_each_channel_rx_queue(rx_queue, channel) 693 efx_for_each_channel_rx_queue(rx_queue, channel)
691 efx_remove_rx_queue(rx_queue); 694 efx_remove_rx_queue(rx_queue);
692 efx_for_each_channel_tx_queue(tx_queue, channel) 695 efx_for_each_possible_channel_tx_queue(tx_queue, channel)
693 efx_remove_tx_queue(tx_queue); 696 efx_remove_tx_queue(tx_queue);
694 efx_remove_eventq(channel); 697 efx_remove_eventq(channel);
695} 698}
@@ -1175,10 +1178,32 @@ static int efx_wanted_channels(void)
1175 return count; 1178 return count;
1176} 1179}
1177 1180
1181static int
1182efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
1183{
1184#ifdef CONFIG_RFS_ACCEL
1185 int i, rc;
1186
1187 efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
1188 if (!efx->net_dev->rx_cpu_rmap)
1189 return -ENOMEM;
1190 for (i = 0; i < efx->n_rx_channels; i++) {
1191 rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
1192 xentries[i].vector);
1193 if (rc) {
1194 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
1195 efx->net_dev->rx_cpu_rmap = NULL;
1196 return rc;
1197 }
1198 }
1199#endif
1200 return 0;
1201}
1202
1178/* Probe the number and type of interrupts we are able to obtain, and 1203/* Probe the number and type of interrupts we are able to obtain, and
1179 * the resulting numbers of channels and RX queues. 1204 * the resulting numbers of channels and RX queues.
1180 */ 1205 */
1181static void efx_probe_interrupts(struct efx_nic *efx) 1206static int efx_probe_interrupts(struct efx_nic *efx)
1182{ 1207{
1183 int max_channels = 1208 int max_channels =
1184 min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS); 1209 min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
@@ -1220,6 +1245,11 @@ static void efx_probe_interrupts(struct efx_nic *efx)
1220 efx->n_tx_channels = efx->n_channels; 1245 efx->n_tx_channels = efx->n_channels;
1221 efx->n_rx_channels = efx->n_channels; 1246 efx->n_rx_channels = efx->n_channels;
1222 } 1247 }
1248 rc = efx_init_rx_cpu_rmap(efx, xentries);
1249 if (rc) {
1250 pci_disable_msix(efx->pci_dev);
1251 return rc;
1252 }
1223 for (i = 0; i < n_channels; i++) 1253 for (i = 0; i < n_channels; i++)
1224 efx_get_channel(efx, i)->irq = 1254 efx_get_channel(efx, i)->irq =
1225 xentries[i].vector; 1255 xentries[i].vector;
@@ -1253,6 +1283,8 @@ static void efx_probe_interrupts(struct efx_nic *efx)
1253 efx->n_tx_channels = 1; 1283 efx->n_tx_channels = 1;
1254 efx->legacy_irq = efx->pci_dev->irq; 1284 efx->legacy_irq = efx->pci_dev->irq;
1255 } 1285 }
1286
1287 return 0;
1256} 1288}
1257 1289
1258static void efx_remove_interrupts(struct efx_nic *efx) 1290static void efx_remove_interrupts(struct efx_nic *efx)
@@ -1271,21 +1303,8 @@ static void efx_remove_interrupts(struct efx_nic *efx)
1271 1303
1272static void efx_set_channels(struct efx_nic *efx) 1304static void efx_set_channels(struct efx_nic *efx)
1273{ 1305{
1274 struct efx_channel *channel;
1275 struct efx_tx_queue *tx_queue;
1276
1277 efx->tx_channel_offset = 1306 efx->tx_channel_offset =
1278 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; 1307 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
1279
1280 /* Channel pointers were set in efx_init_struct() but we now
1281 * need to clear them for TX queues in any RX-only channels. */
1282 efx_for_each_channel(channel, efx) {
1283 if (channel->channel - efx->tx_channel_offset >=
1284 efx->n_tx_channels) {
1285 efx_for_each_channel_tx_queue(tx_queue, channel)
1286 tx_queue->channel = NULL;
1287 }
1288 }
1289} 1308}
1290 1309
1291static int efx_probe_nic(struct efx_nic *efx) 1310static int efx_probe_nic(struct efx_nic *efx)
@@ -1302,7 +1321,9 @@ static int efx_probe_nic(struct efx_nic *efx)
1302 1321
1303 /* Determine the number of channels and queues by trying to hook 1322 /* Determine the number of channels and queues by trying to hook
1304 * in MSI-X interrupts. */ 1323 * in MSI-X interrupts. */
1305 efx_probe_interrupts(efx); 1324 rc = efx_probe_interrupts(efx);
1325 if (rc)
1326 goto fail;
1306 1327
1307 if (efx->n_channels > 1) 1328 if (efx->n_channels > 1)
1308 get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); 1329 get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
@@ -1317,6 +1338,10 @@ static int efx_probe_nic(struct efx_nic *efx)
1317 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true); 1338 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
1318 1339
1319 return 0; 1340 return 0;
1341
1342fail:
1343 efx->type->remove(efx);
1344 return rc;
1320} 1345}
1321 1346
1322static void efx_remove_nic(struct efx_nic *efx) 1347static void efx_remove_nic(struct efx_nic *efx)
@@ -1531,9 +1556,9 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
1531 efx->irq_rx_adaptive = rx_adaptive; 1556 efx->irq_rx_adaptive = rx_adaptive;
1532 efx->irq_rx_moderation = rx_ticks; 1557 efx->irq_rx_moderation = rx_ticks;
1533 efx_for_each_channel(channel, efx) { 1558 efx_for_each_channel(channel, efx) {
1534 if (efx_channel_get_rx_queue(channel)) 1559 if (efx_channel_has_rx_queue(channel))
1535 channel->irq_moderation = rx_ticks; 1560 channel->irq_moderation = rx_ticks;
1536 else if (efx_channel_get_tx_queue(channel, 0)) 1561 else if (efx_channel_has_tx_queues(channel))
1537 channel->irq_moderation = tx_ticks; 1562 channel->irq_moderation = tx_ticks;
1538 } 1563 }
1539} 1564}
@@ -1849,6 +1874,10 @@ static const struct net_device_ops efx_netdev_ops = {
1849#ifdef CONFIG_NET_POLL_CONTROLLER 1874#ifdef CONFIG_NET_POLL_CONTROLLER
1850 .ndo_poll_controller = efx_netpoll, 1875 .ndo_poll_controller = efx_netpoll,
1851#endif 1876#endif
1877 .ndo_setup_tc = efx_setup_tc,
1878#ifdef CONFIG_RFS_ACCEL
1879 .ndo_rx_flow_steer = efx_filter_rfs,
1880#endif
1852}; 1881};
1853 1882
1854static void efx_update_name(struct efx_nic *efx) 1883static void efx_update_name(struct efx_nic *efx)
@@ -1910,10 +1939,8 @@ static int efx_register_netdev(struct efx_nic *efx)
1910 1939
1911 efx_for_each_channel(channel, efx) { 1940 efx_for_each_channel(channel, efx) {
1912 struct efx_tx_queue *tx_queue; 1941 struct efx_tx_queue *tx_queue;
1913 efx_for_each_channel_tx_queue(tx_queue, channel) { 1942 efx_for_each_channel_tx_queue(tx_queue, channel)
1914 tx_queue->core_txq = netdev_get_tx_queue( 1943 efx_init_tx_queue_core_txq(tx_queue);
1915 efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
1916 }
1917 } 1944 }
1918 1945
1919 /* Always start with carrier off; PHY events will detect the link */ 1946 /* Always start with carrier off; PHY events will detect the link */
@@ -2288,6 +2315,10 @@ static void efx_fini_struct(struct efx_nic *efx)
2288 */ 2315 */
2289static void efx_pci_remove_main(struct efx_nic *efx) 2316static void efx_pci_remove_main(struct efx_nic *efx)
2290{ 2317{
2318#ifdef CONFIG_RFS_ACCEL
2319 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
2320 efx->net_dev->rx_cpu_rmap = NULL;
2321#endif
2291 efx_nic_fini_interrupt(efx); 2322 efx_nic_fini_interrupt(efx);
2292 efx_fini_channels(efx); 2323 efx_fini_channels(efx);
2293 efx_fini_port(efx); 2324 efx_fini_port(efx);
@@ -2401,7 +2432,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2401 int i, rc; 2432 int i, rc;
2402 2433
2403 /* Allocate and initialise a struct net_device and struct efx_nic */ 2434 /* Allocate and initialise a struct net_device and struct efx_nic */
2404 net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES); 2435 net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
2436 EFX_MAX_RX_QUEUES);
2405 if (!net_dev) 2437 if (!net_dev)
2406 return -ENOMEM; 2438 return -ENOMEM;
2407 net_dev->features |= (type->offload_features | NETIF_F_SG | 2439 net_dev->features |= (type->offload_features | NETIF_F_SG |
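alloc_etherdev_mqs() differs from the older alloc_etherdev_mq() in taking separate TX and RX subqueue counts, which is what lets the driver size TX for EFX_MAX_CORE_TX_QUEUES (now larger, see the net_driver.h hunk below) independently of EFX_MAX_RX_QUEUES. The call shape:

        net_dev = alloc_etherdev_mqs(sizeof(struct efx_nic),
                                     EFX_MAX_CORE_TX_QUEUES,    /* TX subqueues */
                                     EFX_MAX_RX_QUEUES);        /* RX subqueues */
        if (!net_dev)
                return -ENOMEM;
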
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index d43a7e5212b1..cbce62b9c996 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -29,6 +29,7 @@
29extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue); 29extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
30extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue); 30extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
31extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue); 31extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
32extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
32extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); 33extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
33extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue); 34extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
34extern netdev_tx_t 35extern netdev_tx_t
@@ -36,6 +37,7 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
36extern netdev_tx_t 37extern netdev_tx_t
37efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); 38efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
38extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); 39extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
40extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
39 41
40/* RX */ 42/* RX */
41extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); 43extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -74,6 +76,21 @@ extern int efx_filter_remove_filter(struct efx_nic *efx,
74 struct efx_filter_spec *spec); 76 struct efx_filter_spec *spec);
75extern void efx_filter_clear_rx(struct efx_nic *efx, 77extern void efx_filter_clear_rx(struct efx_nic *efx,
76 enum efx_filter_priority priority); 78 enum efx_filter_priority priority);
79#ifdef CONFIG_RFS_ACCEL
80extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
81 u16 rxq_index, u32 flow_id);
82extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
83static inline void efx_filter_rfs_expire(struct efx_channel *channel)
84{
85 if (channel->rfs_filters_added >= 60 &&
86 __efx_filter_rfs_expire(channel->efx, 100))
87 channel->rfs_filters_added -= 60;
88}
89#define efx_filter_rfs_enabled() 1
90#else
91static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
92#define efx_filter_rfs_enabled() 0
93#endif
77 94
78/* Channels */ 95/* Channels */
79extern void efx_process_channel_now(struct efx_channel *channel); 96extern void efx_process_channel_now(struct efx_channel *channel);
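The efx_filter_rfs_expire() inline above amortises expiry work: nothing is attempted until roughly 60 filters have accumulated on the channel, each NAPI poll may then scan up to 100 filter-table entries, and a successful scan subtracts 60 rather than resetting, so sustained insertion keeps triggering periodic scans (__efx_filter_rfs_expire() returns false when it cannot take the lock, in which case the counter is left alone). A worked trace under that reading, with a hypothetical burst of insertions:

        channel->rfs_filters_added = 185;       /* hypothetical burst */
        /* poll 1: 185 >= 60, scan up to 100 entries, counter -> 125 */
        /* poll 2: 125 >= 60, scan again,             counter ->  65 */
        /* poll 3:  65 >= 60, scan again,             counter ->   5 */
        /* poll 4:   5 <  60, no expiry work until more insertions  */
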
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 0e8bb19ed60d..272cfe724e1b 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -502,7 +502,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
502static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable) 502static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
503{ 503{
504 struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev); 504 struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev);
505 unsigned long features; 505 u32 features;
506 506
507 features = NETIF_F_TSO; 507 features = NETIF_F_TSO;
508 if (efx->type->offload_features & NETIF_F_V6_CSUM) 508 if (efx->type->offload_features & NETIF_F_V6_CSUM)
@@ -519,7 +519,7 @@ static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
519static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable) 519static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
520{ 520{
521 struct efx_nic *efx = netdev_priv(net_dev); 521 struct efx_nic *efx = netdev_priv(net_dev);
522 unsigned long features = efx->type->offload_features & NETIF_F_ALL_CSUM; 522 u32 features = efx->type->offload_features & NETIF_F_ALL_CSUM;
523 523
524 if (enable) 524 if (enable)
525 net_dev->features |= features; 525 net_dev->features |= features;
@@ -631,7 +631,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
631 /* Find lowest IRQ moderation across all used TX queues */ 631 /* Find lowest IRQ moderation across all used TX queues */
632 coalesce->tx_coalesce_usecs_irq = ~((u32) 0); 632 coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
633 efx_for_each_channel(channel, efx) { 633 efx_for_each_channel(channel, efx) {
634 if (!efx_channel_get_tx_queue(channel, 0)) 634 if (!efx_channel_has_tx_queues(channel))
635 continue; 635 continue;
636 if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) { 636 if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
637 if (channel->channel < efx->n_rx_channels) 637 if (channel->channel < efx->n_rx_channels)
@@ -676,8 +676,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
676 676
677 /* If the channel is shared only allow RX parameters to be set */ 677 /* If the channel is shared only allow RX parameters to be set */
678 efx_for_each_channel(channel, efx) { 678 efx_for_each_channel(channel, efx) {
679 if (efx_channel_get_rx_queue(channel) && 679 if (efx_channel_has_rx_queue(channel) &&
680 efx_channel_get_tx_queue(channel, 0) && 680 efx_channel_has_tx_queues(channel) &&
681 tx_usecs) { 681 tx_usecs) {
682 netif_err(efx, drv, efx->net_dev, "Channel is shared. " 682 netif_err(efx, drv, efx->net_dev, "Channel is shared. "
683 "Only RX coalescing may be set\n"); 683 "Only RX coalescing may be set\n");
diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c
index d4722c41c4ce..95a980fd63d5 100644
--- a/drivers/net/sfc/filter.c
+++ b/drivers/net/sfc/filter.c
@@ -8,6 +8,7 @@
8 */ 8 */
9 9
10#include <linux/in.h> 10#include <linux/in.h>
11#include <net/ip.h>
11#include "efx.h" 12#include "efx.h"
12#include "filter.h" 13#include "filter.h"
13#include "io.h" 14#include "io.h"
@@ -27,6 +28,10 @@
27 */ 28 */
28#define FILTER_CTL_SRCH_MAX 200 29#define FILTER_CTL_SRCH_MAX 200
29 30
31/* Don't try very hard to find space for performance hints, as this is
32 * counter-productive. */
33#define FILTER_CTL_SRCH_HINT_MAX 5
34
30enum efx_filter_table_id { 35enum efx_filter_table_id {
31 EFX_FILTER_TABLE_RX_IP = 0, 36 EFX_FILTER_TABLE_RX_IP = 0,
32 EFX_FILTER_TABLE_RX_MAC, 37 EFX_FILTER_TABLE_RX_MAC,
@@ -47,6 +52,10 @@ struct efx_filter_table {
47struct efx_filter_state { 52struct efx_filter_state {
48 spinlock_t lock; 53 spinlock_t lock;
49 struct efx_filter_table table[EFX_FILTER_TABLE_COUNT]; 54 struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
55#ifdef CONFIG_RFS_ACCEL
56 u32 *rps_flow_id;
57 unsigned rps_expire_index;
58#endif
50}; 59};
51 60
52/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit 61/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
@@ -325,15 +334,16 @@ static int efx_filter_search(struct efx_filter_table *table,
325 struct efx_filter_spec *spec, u32 key, 334 struct efx_filter_spec *spec, u32 key,
326 bool for_insert, int *depth_required) 335 bool for_insert, int *depth_required)
327{ 336{
328 unsigned hash, incr, filter_idx, depth; 337 unsigned hash, incr, filter_idx, depth, depth_max;
329 struct efx_filter_spec *cmp; 338 struct efx_filter_spec *cmp;
330 339
331 hash = efx_filter_hash(key); 340 hash = efx_filter_hash(key);
332 incr = efx_filter_increment(key); 341 incr = efx_filter_increment(key);
342 depth_max = (spec->priority <= EFX_FILTER_PRI_HINT ?
343 FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX);
333 344
334 for (depth = 1, filter_idx = hash & (table->size - 1); 345 for (depth = 1, filter_idx = hash & (table->size - 1);
335 depth <= FILTER_CTL_SRCH_MAX && 346 depth <= depth_max && test_bit(filter_idx, table->used_bitmap);
336 test_bit(filter_idx, table->used_bitmap);
337 ++depth) { 347 ++depth) {
338 cmp = &table->spec[filter_idx]; 348 cmp = &table->spec[filter_idx];
339 if (efx_filter_equal(spec, cmp)) 349 if (efx_filter_equal(spec, cmp))
@@ -342,7 +352,7 @@ static int efx_filter_search(struct efx_filter_table *table,
342 } 352 }
343 if (!for_insert) 353 if (!for_insert)
344 return -ENOENT; 354 return -ENOENT;
345 if (depth > FILTER_CTL_SRCH_MAX) 355 if (depth > depth_max)
346 return -EBUSY; 356 return -EBUSY;
347found: 357found:
348 *depth_required = depth; 358 *depth_required = depth;
@@ -562,6 +572,13 @@ int efx_probe_filters(struct efx_nic *efx)
562 spin_lock_init(&state->lock); 572 spin_lock_init(&state->lock);
563 573
564 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 574 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
575#ifdef CONFIG_RFS_ACCEL
576 state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
577 sizeof(*state->rps_flow_id),
578 GFP_KERNEL);
579 if (!state->rps_flow_id)
580 goto fail;
581#endif
565 table = &state->table[EFX_FILTER_TABLE_RX_IP]; 582 table = &state->table[EFX_FILTER_TABLE_RX_IP];
566 table->id = EFX_FILTER_TABLE_RX_IP; 583 table->id = EFX_FILTER_TABLE_RX_IP;
567 table->offset = FR_BZ_RX_FILTER_TBL0; 584 table->offset = FR_BZ_RX_FILTER_TBL0;
@@ -607,5 +624,97 @@ void efx_remove_filters(struct efx_nic *efx)
607 kfree(state->table[table_id].used_bitmap); 624 kfree(state->table[table_id].used_bitmap);
608 vfree(state->table[table_id].spec); 625 vfree(state->table[table_id].spec);
609 } 626 }
627#ifdef CONFIG_RFS_ACCEL
628 kfree(state->rps_flow_id);
629#endif
610 kfree(state); 630 kfree(state);
611} 631}
632
633#ifdef CONFIG_RFS_ACCEL
634
635int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
636 u16 rxq_index, u32 flow_id)
637{
638 struct efx_nic *efx = netdev_priv(net_dev);
639 struct efx_channel *channel;
640 struct efx_filter_state *state = efx->filter_state;
641 struct efx_filter_spec spec;
642 const struct iphdr *ip;
643 const __be16 *ports;
644 int nhoff;
645 int rc;
646
647 nhoff = skb_network_offset(skb);
648
649 if (skb->protocol != htons(ETH_P_IP))
650 return -EPROTONOSUPPORT;
651
652 /* RFS must validate the IP header length before calling us */
653 EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + sizeof(*ip)));
654 ip = (const struct iphdr *)(skb->data + nhoff);
655 if (ip->frag_off & htons(IP_MF | IP_OFFSET))
656 return -EPROTONOSUPPORT;
657 EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + 4 * ip->ihl + 4));
658 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
659
660 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
661 rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
662 ip->daddr, ports[1], ip->saddr, ports[0]);
663 if (rc)
664 return rc;
665
666 rc = efx_filter_insert_filter(efx, &spec, true);
667 if (rc < 0)
668 return rc;
669
670 /* Remember this so we can check whether to expire the filter later */
671 state->rps_flow_id[rc] = flow_id;
672 channel = efx_get_channel(efx, skb_get_rx_queue(skb));
673 ++channel->rfs_filters_added;
674
675 netif_info(efx, rx_status, efx->net_dev,
676 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
677 (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
678 &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
679 rxq_index, flow_id, rc);
680
681 return rc;
682}
683
684bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
685{
686 struct efx_filter_state *state = efx->filter_state;
687 struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
688 unsigned mask = table->size - 1;
689 unsigned index;
690 unsigned stop;
691
692 if (!spin_trylock_bh(&state->lock))
693 return false;
694
695 index = state->rps_expire_index;
696 stop = (index + quota) & mask;
697
698 while (index != stop) {
699 if (test_bit(index, table->used_bitmap) &&
700 table->spec[index].priority == EFX_FILTER_PRI_HINT &&
701 rps_may_expire_flow(efx->net_dev,
702 table->spec[index].dmaq_id,
703 state->rps_flow_id[index], index)) {
704 netif_info(efx, rx_status, efx->net_dev,
705 "expiring filter %d [flow %u]\n",
706 index, state->rps_flow_id[index]);
707 efx_filter_table_clear_entry(efx, table, index);
708 }
709 index = (index + 1) & mask;
710 }
711
712 state->rps_expire_index = stop;
713 if (table->used == 0)
714 efx_filter_table_reset_search_depth(table);
715
716 spin_unlock_bh(&state->lock);
717 return true;
718}
719
720#endif /* CONFIG_RFS_ACCEL */
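efx_filter_rfs() above implements the accelerated-RFS driver contract: the stack hands over a flow (skb, desired RX queue, flow id), the driver programs a hardware filter and returns a non-negative filter id, and that id is later fed back through rps_may_expire_flow() so the expiry scan can distinguish stale filters from live ones. A skeletal sketch of the callback shape, with insert_hw_filter() and remember_flow() as hypothetical driver helpers:

        static int example_rx_flow_steer(struct net_device *dev,
                                         const struct sk_buff *skb,
                                         u16 rxq_index, u32 flow_id)
        {
                /* hypothetical: program a hardware steering filter */
                int filter_id = insert_hw_filter(dev, skb, rxq_index);

                if (filter_id >= 0)
                        /* hypothetical: save flow_id for rps_may_expire_flow() */
                        remember_flow(dev, filter_id, flow_id);
                return filter_id;       /* < 0 on error, else the filter id */
        }
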
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 28df8665256a..15b9068e5b87 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -63,10 +63,12 @@
63/* Checksum generation is a per-queue option in hardware, so each 63/* Checksum generation is a per-queue option in hardware, so each
64 * queue visible to the networking core is backed by two hardware TX 64 * queue visible to the networking core is backed by two hardware TX
65 * queues. */ 65 * queues. */
66#define EFX_MAX_CORE_TX_QUEUES EFX_MAX_CHANNELS 66#define EFX_MAX_TX_TC 2
67#define EFX_TXQ_TYPE_OFFLOAD 1 67#define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
68#define EFX_TXQ_TYPES 2 68#define EFX_TXQ_TYPE_OFFLOAD 1 /* flag */
69#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES) 69#define EFX_TXQ_TYPE_HIGHPRI 2 /* flag */
70#define EFX_TXQ_TYPES 4
71#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
70 72
71/** 73/**
72 * struct efx_special_buffer - An Efx special buffer 74 * struct efx_special_buffer - An Efx special buffer
@@ -140,6 +142,7 @@ struct efx_tx_buffer {
140 * @buffer: The software buffer ring 142 * @buffer: The software buffer ring
141 * @txd: The hardware descriptor ring 143 * @txd: The hardware descriptor ring
142 * @ptr_mask: The size of the ring minus 1. 144 * @ptr_mask: The size of the ring minus 1.
145 * @initialised: Has hardware queue been initialised?
143 * @flushed: Used when handling queue flushing 146 * @flushed: Used when handling queue flushing
144 * @read_count: Current read pointer. 147 * @read_count: Current read pointer.
145 * This is the number of buffers that have been removed from both rings. 148 * This is the number of buffers that have been removed from both rings.
@@ -182,6 +185,7 @@ struct efx_tx_queue {
182 struct efx_tx_buffer *buffer; 185 struct efx_tx_buffer *buffer;
183 struct efx_special_buffer txd; 186 struct efx_special_buffer txd;
184 unsigned int ptr_mask; 187 unsigned int ptr_mask;
188 bool initialised;
185 enum efx_flush_state flushed; 189 enum efx_flush_state flushed;
186 190
187 /* Members used mainly on the completion path */ 191 /* Members used mainly on the completion path */
@@ -358,6 +362,9 @@ struct efx_channel {
358 362
359 unsigned int irq_count; 363 unsigned int irq_count;
360 unsigned int irq_mod_score; 364 unsigned int irq_mod_score;
365#ifdef CONFIG_RFS_ACCEL
366 unsigned int rfs_filters_added;
367#endif
361 368
362 int rx_alloc_level; 369 int rx_alloc_level;
363 int rx_alloc_push_pages; 370 int rx_alloc_push_pages;
@@ -377,7 +384,7 @@ struct efx_channel {
377 bool rx_pkt_csummed; 384 bool rx_pkt_csummed;
378 385
379 struct efx_rx_queue rx_queue; 386 struct efx_rx_queue rx_queue;
380 struct efx_tx_queue tx_queue[2]; 387 struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
381}; 388};
382 389
383enum efx_led_mode { 390enum efx_led_mode {
@@ -906,7 +913,7 @@ struct efx_nic_type {
906 unsigned int phys_addr_channels; 913 unsigned int phys_addr_channels;
907 unsigned int tx_dc_base; 914 unsigned int tx_dc_base;
908 unsigned int rx_dc_base; 915 unsigned int rx_dc_base;
909 unsigned long offload_features; 916 u32 offload_features;
910 u32 reset_world_flags; 917 u32 reset_world_flags;
911}; 918};
912 919
@@ -938,18 +945,40 @@ efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
938 return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type]; 945 return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
939} 946}
940 947
948static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
949{
950 return channel->channel - channel->efx->tx_channel_offset <
951 channel->efx->n_tx_channels;
952}
953
941static inline struct efx_tx_queue * 954static inline struct efx_tx_queue *
942efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type) 955efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
943{ 956{
944 struct efx_tx_queue *tx_queue = channel->tx_queue; 957 EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
945 EFX_BUG_ON_PARANOID(type >= EFX_TXQ_TYPES); 958 type >= EFX_TXQ_TYPES);
946 return tx_queue->channel ? tx_queue + type : NULL; 959 return &channel->tx_queue[type];
960}
961
962static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
963{
964 return !(tx_queue->efx->net_dev->num_tc < 2 &&
965 tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
947} 966}
948 967
949/* Iterate over all TX queues belonging to a channel */ 968/* Iterate over all TX queues belonging to a channel */
950#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \ 969#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
951 for (_tx_queue = efx_channel_get_tx_queue(channel, 0); \ 970 if (!efx_channel_has_tx_queues(_channel)) \
952 _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \ 971 ; \
972 else \
973 for (_tx_queue = (_channel)->tx_queue; \
974 _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
975 efx_tx_queue_used(_tx_queue); \
976 _tx_queue++)
977
978/* Iterate over all possible TX queues belonging to a channel */
979#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel) \
980 for (_tx_queue = (_channel)->tx_queue; \
981 _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
953 _tx_queue++) 982 _tx_queue++)
954 983
955static inline struct efx_rx_queue * 984static inline struct efx_rx_queue *
@@ -959,18 +988,26 @@ efx_get_rx_queue(struct efx_nic *efx, unsigned index)
959 return &efx->channel[index]->rx_queue; 988 return &efx->channel[index]->rx_queue;
960} 989}
961 990
991static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
992{
993 return channel->channel < channel->efx->n_rx_channels;
994}
995
962static inline struct efx_rx_queue * 996static inline struct efx_rx_queue *
963efx_channel_get_rx_queue(struct efx_channel *channel) 997efx_channel_get_rx_queue(struct efx_channel *channel)
964{ 998{
965 return channel->channel < channel->efx->n_rx_channels ? 999 EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
966 &channel->rx_queue : NULL; 1000 return &channel->rx_queue;
967} 1001}
968 1002
969/* Iterate over all RX queues belonging to a channel */ 1003/* Iterate over all RX queues belonging to a channel */
970#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \ 1004#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
971 for (_rx_queue = efx_channel_get_rx_queue(channel); \ 1005 if (!efx_channel_has_rx_queue(_channel)) \
972 _rx_queue; \ 1006 ; \
973 _rx_queue = NULL) 1007 else \
1008 for (_rx_queue = &(_channel)->rx_queue; \
1009 _rx_queue; \
1010 _rx_queue = NULL)
974 1011
975static inline struct efx_channel * 1012static inline struct efx_channel *
976efx_rx_queue_channel(struct efx_rx_queue *rx_queue) 1013efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
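The reworked iterators above use a classic macro idiom: a leading dangling if lets the macro degrade to a no-op when the channel has no queues of the given kind, while the whole expansion still binds as a single statement, so efx_for_each_channel_tx_queue(q, ch) body; stays well-formed even inside an unbraced if/else at the call site. Distilled, with hypothetical names:

        #define for_each_used_txq(_q, _ch)                              \
                if (!channel_has_txqs(_ch))     /* hypothetical test */ \
                        ;                       /* no queues: no-op */  \
                else                                                    \
                        for (_q = (_ch)->txq;                           \
                             _q < (_ch)->txq + NTYPES; _q++)
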
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index da386599ab68..1d0b8b6f25c4 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -445,8 +445,8 @@ int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
445 445
446void efx_nic_init_tx(struct efx_tx_queue *tx_queue) 446void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
447{ 447{
448 efx_oword_t tx_desc_ptr;
449 struct efx_nic *efx = tx_queue->efx; 448 struct efx_nic *efx = tx_queue->efx;
449 efx_oword_t reg;
450 450
451 tx_queue->flushed = FLUSH_NONE; 451 tx_queue->flushed = FLUSH_NONE;
452 452
@@ -454,7 +454,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
454 efx_init_special_buffer(efx, &tx_queue->txd); 454 efx_init_special_buffer(efx, &tx_queue->txd);
455 455
456 /* Push TX descriptor ring to card */ 456 /* Push TX descriptor ring to card */
457 EFX_POPULATE_OWORD_10(tx_desc_ptr, 457 EFX_POPULATE_OWORD_10(reg,
458 FRF_AZ_TX_DESCQ_EN, 1, 458 FRF_AZ_TX_DESCQ_EN, 1,
459 FRF_AZ_TX_ISCSI_DDIG_EN, 0, 459 FRF_AZ_TX_ISCSI_DDIG_EN, 0,
460 FRF_AZ_TX_ISCSI_HDIG_EN, 0, 460 FRF_AZ_TX_ISCSI_HDIG_EN, 0,
@@ -470,17 +470,15 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
470 470
471 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 471 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
472 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; 472 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
473 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum); 473 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
474 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS, 474 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
475 !csum); 475 !csum);
476 } 476 }
477 477
478 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 478 efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
479 tx_queue->queue); 479 tx_queue->queue);
480 480
481 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { 481 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
482 efx_oword_t reg;
483
484 /* Only 128 bits in this register */ 482 /* Only 128 bits in this register */
485 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128); 483 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
486 484
@@ -491,6 +489,16 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
491 set_bit_le(tx_queue->queue, (void *)&reg); 489 set_bit_le(tx_queue->queue, (void *)&reg);
492 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG); 490 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
493 } 491 }
492
493 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
494 EFX_POPULATE_OWORD_1(reg,
495 FRF_BZ_TX_PACE,
496 (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
497 FFE_BZ_TX_PACE_OFF :
498 FFE_BZ_TX_PACE_RESERVED);
499 efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
500 tx_queue->queue);
501 }
494} 502}
495 503
496static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue) 504static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
@@ -1238,8 +1246,10 @@ int efx_nic_flush_queues(struct efx_nic *efx)
1238 1246
1239 /* Flush all tx queues in parallel */ 1247 /* Flush all tx queues in parallel */
1240 efx_for_each_channel(channel, efx) { 1248 efx_for_each_channel(channel, efx) {
1241 efx_for_each_channel_tx_queue(tx_queue, channel) 1249 efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1242 efx_flush_tx_queue(tx_queue); 1250 if (tx_queue->initialised)
1251 efx_flush_tx_queue(tx_queue);
1252 }
1243 } 1253 }
1244 1254
1245 /* The hardware supports four concurrent rx flushes, each of which may 1255 /* The hardware supports four concurrent rx flushes, each of which may
@@ -1262,8 +1272,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
1262 ++rx_pending; 1272 ++rx_pending;
1263 } 1273 }
1264 } 1274 }
1265 efx_for_each_channel_tx_queue(tx_queue, channel) { 1275 efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1266 if (tx_queue->flushed != FLUSH_DONE) 1276 if (tx_queue->initialised &&
1277 tx_queue->flushed != FLUSH_DONE)
1267 ++tx_pending; 1278 ++tx_pending;
1268 } 1279 }
1269 } 1280 }
@@ -1278,8 +1289,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
1278 /* Mark the queues as all flushed. We're going to return failure 1289 /* Mark the queues as all flushed. We're going to return failure
1279 * leading to a reset, or fake up success anyway */ 1290 * leading to a reset, or fake up success anyway */
1280 efx_for_each_channel(channel, efx) { 1291 efx_for_each_channel(channel, efx) {
1281 efx_for_each_channel_tx_queue(tx_queue, channel) { 1292 efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1282 if (tx_queue->flushed != FLUSH_DONE) 1293 if (tx_queue->initialised &&
1294 tx_queue->flushed != FLUSH_DONE)
1283 netif_err(efx, hw, efx->net_dev, 1295 netif_err(efx, hw, efx->net_dev,
1284 "tx queue %d flush command timed out\n", 1296 "tx queue %d flush command timed out\n",
1285 tx_queue->queue); 1297 tx_queue->queue);
@@ -1682,6 +1694,19 @@ void efx_nic_init_common(struct efx_nic *efx)
1682 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 1694 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1683 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); 1695 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1684 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); 1696 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1697
1698 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1699 EFX_POPULATE_OWORD_4(temp,
1700 /* Default values */
1701 FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1702 FRF_BZ_TX_PACE_SB_AF, 0xb,
1703 FRF_BZ_TX_PACE_FB_BASE, 0,
1704 /* Allow large pace values in the
1705 * fast bin. */
1706 FRF_BZ_TX_PACE_BIN_TH,
1707 FFE_BZ_TX_PACE_RESERVED);
1708 efx_writeo(efx, &temp, FR_BZ_TX_PACE);
1709 }
1685} 1710}
1686 1711
1687/* Register dump */ 1712/* Register dump */
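The TX_PACE_TBL write in the efx_nic_init_tx() hunk pairs with the regs.h definitions in the next file: high-priority queues get FFE_BZ_TX_PACE_OFF (0) and standard queues get FFE_BZ_TX_PACE_RESERVED (21), a nominally reserved value that, per the regs.h comment, lands the queue in the fast bin with a pace value of zero. Reduced to its decision (no behavioural claims beyond the quoted comments):

        unsigned int pace = (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
                        FFE_BZ_TX_PACE_OFF :            /* 0 */
                        FFE_BZ_TX_PACE_RESERVED;        /* 21: ">20 -> fast bin" */
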
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h
index 96430ed81c36..8227de62014f 100644
--- a/drivers/net/sfc/regs.h
+++ b/drivers/net/sfc/regs.h
@@ -2907,6 +2907,12 @@
2907#define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44 2907#define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44
2908#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16 2908#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16
2909 2909
2910/* TX_PACE_TBL */
2911/* Values >20 are documented as reserved, but will result in a queue going
2912 * into the fast bin with a pace value of zero. */
2913#define FFE_BZ_TX_PACE_OFF 0
2914#define FFE_BZ_TX_PACE_RESERVED 21
2915
2910/* DRIVER_EV */ 2916/* DRIVER_EV */
2911/* Sub-fields of an RX flush completion event */ 2917/* Sub-fields of an RX flush completion event */
2912#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12 2918#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 0ebfb99f1299..f936892aa423 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -644,7 +644,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
644 goto out; 644 goto out;
645 } 645 }
646 646
647 /* Test both types of TX queue */ 647 /* Test all enabled types of TX queue */
648 efx_for_each_channel_tx_queue(tx_queue, channel) { 648 efx_for_each_channel_tx_queue(tx_queue, channel) {
649 state->offload_csum = (tx_queue->queue & 649 state->offload_csum = (tx_queue->queue &
650 EFX_TXQ_TYPE_OFFLOAD); 650 EFX_TXQ_TYPE_OFFLOAD);
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 2f5e9da657bf..1a51653bb92b 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -336,17 +336,91 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
336{ 336{
337 struct efx_nic *efx = netdev_priv(net_dev); 337 struct efx_nic *efx = netdev_priv(net_dev);
338 struct efx_tx_queue *tx_queue; 338 struct efx_tx_queue *tx_queue;
339 unsigned index, type;
339 340
340 if (unlikely(efx->port_inhibited)) 341 if (unlikely(efx->port_inhibited))
341 return NETDEV_TX_BUSY; 342 return NETDEV_TX_BUSY;
342 343
343 tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb), 344 index = skb_get_queue_mapping(skb);
344 skb->ip_summed == CHECKSUM_PARTIAL ? 345 type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
345 EFX_TXQ_TYPE_OFFLOAD : 0); 346 if (index >= efx->n_tx_channels) {
347 index -= efx->n_tx_channels;
348 type |= EFX_TXQ_TYPE_HIGHPRI;
349 }
350 tx_queue = efx_get_tx_queue(efx, index, type);
346 351
347 return efx_enqueue_skb(tx_queue, skb); 352 return efx_enqueue_skb(tx_queue, skb);
348} 353}
349 354
355void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
356{
357 struct efx_nic *efx = tx_queue->efx;
358
359 /* Must be inverse of queue lookup in efx_hard_start_xmit() */
360 tx_queue->core_txq =
361 netdev_get_tx_queue(efx->net_dev,
362 tx_queue->queue / EFX_TXQ_TYPES +
363 ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
364 efx->n_tx_channels : 0));
365}
366
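efx_init_tx_queue_core_txq() must invert the lookup in efx_hard_start_xmit() exactly. Assuming hardware queues are numbered channel * EFX_TXQ_TYPES + type flags, which the division and bit tests above imply, a worked example with n_tx_channels = 4 and EFX_TXQ_TYPES = 4:

        unsigned int core, index, type;

        /* hw queue 11: channel 11 / 4 = 2, flags 11 & 3 = OFFLOAD | HIGHPRI */
        core = 11 / EFX_TXQ_TYPES + 4;  /* HIGHPRI set, so + n_tx_channels = 6 */

        /* inverse, as in efx_hard_start_xmit(), for a CHECKSUM_PARTIAL skb: */
        index = 6;                      /* skb_get_queue_mapping() */
        type = EFX_TXQ_TYPE_OFFLOAD;
        if (index >= 4) {               /* >= n_tx_channels */
                index -= 4;             /* back to channel 2 */
                type |= EFX_TXQ_TYPE_HIGHPRI;   /* flags 3 recovered */
        }
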
367int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
368{
369 struct efx_nic *efx = netdev_priv(net_dev);
370 struct efx_channel *channel;
371 struct efx_tx_queue *tx_queue;
372 unsigned tc;
373 int rc;
374
375 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
376 return -EINVAL;
377
378 if (num_tc == net_dev->num_tc)
379 return 0;
380
381 for (tc = 0; tc < num_tc; tc++) {
382 net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
383 net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
384 }
385
386 if (num_tc > net_dev->num_tc) {
387 /* Initialise high-priority queues as necessary */
388 efx_for_each_channel(channel, efx) {
389 efx_for_each_possible_channel_tx_queue(tx_queue,
390 channel) {
391 if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
392 continue;
393 if (!tx_queue->buffer) {
394 rc = efx_probe_tx_queue(tx_queue);
395 if (rc)
396 return rc;
397 }
398 if (!tx_queue->initialised)
399 efx_init_tx_queue(tx_queue);
400 efx_init_tx_queue_core_txq(tx_queue);
401 }
402 }
403 } else {
404 /* Reduce number of classes before number of queues */
405 net_dev->num_tc = num_tc;
406 }
407
408 rc = netif_set_real_num_tx_queues(net_dev,
409 max_t(int, num_tc, 1) *
410 efx->n_tx_channels);
411 if (rc)
412 return rc;
413
414 /* Do not destroy high-priority queues when they become
415 * unused. We would have to flush them first, and it is
416 * fairly difficult to flush a subset of TX queues. Leave
417 * it to efx_fini_channels().
418 */
419
420 net_dev->num_tc = num_tc;
421 return 0;
422}
423
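efx_setup_tc() maps each traffic class onto a contiguous block of core TX queues: class tc covers [tc * n_tx_channels, (tc + 1) * n_tx_channels). With n_tx_channels = 4 and num_tc = 2 the loop above produces:

        net_dev->tc_to_txq[0].offset = 0;       /* core queues 0..3: normal   */
        net_dev->tc_to_txq[0].count  = 4;
        net_dev->tc_to_txq[1].offset = 4;       /* core queues 4..7: HIGHPRI  */
        net_dev->tc_to_txq[1].count  = 4;
        /* netif_set_real_num_tx_queues(net_dev, max(num_tc, 1) * 4) -> 8 */
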
350void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) 424void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
351{ 425{
352 unsigned fill_level; 426 unsigned fill_level;
@@ -430,6 +504,8 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
430 504
431 /* Set up TX descriptor ring */ 505 /* Set up TX descriptor ring */
432 efx_nic_init_tx(tx_queue); 506 efx_nic_init_tx(tx_queue);
507
508 tx_queue->initialised = true;
433} 509}
434 510
435void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) 511void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -452,9 +528,14 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
452 528
453void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) 529void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
454{ 530{
531 if (!tx_queue->initialised)
532 return;
533
455 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, 534 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
456 "shutting down TX queue %d\n", tx_queue->queue); 535 "shutting down TX queue %d\n", tx_queue->queue);
457 536
537 tx_queue->initialised = false;
538
458 /* Flush TX queue, remove descriptor ring */ 539 /* Flush TX queue, remove descriptor ring */
459 efx_nic_fini_tx(tx_queue); 540 efx_nic_fini_tx(tx_queue);
460 541
@@ -466,6 +547,9 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
466 547
467void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) 548void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
468{ 549{
550 if (!tx_queue->buffer)
551 return;
552
469 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, 553 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
470 "destroying TX queue %d\n", tx_queue->queue); 554 "destroying TX queue %d\n", tx_queue->queue);
471 efx_nic_remove_tx(tx_queue); 555 efx_nic_remove_tx(tx_queue);
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 819c1750e2ab..095e52580884 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -32,10 +32,17 @@
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/pm_runtime.h> 33#include <linux/pm_runtime.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/ethtool.h>
35#include <asm/cacheflush.h> 36#include <asm/cacheflush.h>
36 37
37#include "sh_eth.h" 38#include "sh_eth.h"
38 39
40#define SH_ETH_DEF_MSG_ENABLE \
41 (NETIF_MSG_LINK | \
42 NETIF_MSG_TIMER | \
43 NETIF_MSG_RX_ERR| \
44 NETIF_MSG_TX_ERR)
45
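SH_ETH_DEF_MSG_ENABLE seeds the standard netif message-level machinery: msg_enable is a bitmask of NETIF_MSG_* classes, each netif_msg_<class>() helper tests one bit, and userspace can adjust the mask through the get/set_msglevel ethtool ops added further down. The gating pattern, as used throughout the error paths in this patch:

        if (netif_msg_rx_err(mdp))      /* true iff NETIF_MSG_RX_ERR is set */
                dev_err(&ndev->dev, "Receive FIFO Overflow\n");
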
39/* There is CPU dependent code */ 46/* There is CPU dependent code */
40#if defined(CONFIG_CPU_SUBTYPE_SH7724) 47#if defined(CONFIG_CPU_SUBTYPE_SH7724)
41#define SH_ETH_RESET_DEFAULT 1 48#define SH_ETH_RESET_DEFAULT 1
@@ -817,6 +824,20 @@ static int sh_eth_rx(struct net_device *ndev)
817 return 0; 824 return 0;
818} 825}
819 826
827static void sh_eth_rcv_snd_disable(u32 ioaddr)
828{
829 /* disable tx and rx */
830 writel(readl(ioaddr + ECMR) &
831 ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
832}
833
834static void sh_eth_rcv_snd_enable(u32 ioaddr)
835{
836 /* enable tx and rx */
837 writel(readl(ioaddr + ECMR) |
838 (ECMR_RE | ECMR_TE), ioaddr + ECMR);
839}
840
820/* error control function */ 841/* error control function */
821static void sh_eth_error(struct net_device *ndev, int intr_status) 842static void sh_eth_error(struct net_device *ndev, int intr_status)
822{ 843{
@@ -843,11 +864,9 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
843 if (mdp->ether_link_active_low) 864 if (mdp->ether_link_active_low)
844 link_stat = ~link_stat; 865 link_stat = ~link_stat;
845 } 866 }
846 if (!(link_stat & PHY_ST_LINK)) { 867 if (!(link_stat & PHY_ST_LINK))
847 /* Link Down : disable tx and rx */ 868 sh_eth_rcv_snd_disable(ioaddr);
848 writel(readl(ioaddr + ECMR) & 869 else {
849 ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
850 } else {
851 /* Link Up */ 870 /* Link Up */
852 writel(readl(ioaddr + EESIPR) & 871 writel(readl(ioaddr + EESIPR) &
853 ~DMAC_M_ECI, ioaddr + EESIPR); 872 ~DMAC_M_ECI, ioaddr + EESIPR);
@@ -857,8 +876,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
857 writel(readl(ioaddr + EESIPR) | 876 writel(readl(ioaddr + EESIPR) |
858 DMAC_M_ECI, ioaddr + EESIPR); 877 DMAC_M_ECI, ioaddr + EESIPR);
859 /* enable tx and rx */ 878 /* enable tx and rx */
860 writel(readl(ioaddr + ECMR) | 879 sh_eth_rcv_snd_enable(ioaddr);
861 (ECMR_RE | ECMR_TE), ioaddr + ECMR);
862 } 880 }
863 } 881 }
864 } 882 }
@@ -867,6 +885,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
867 /* Write buck end. unused write back interrupt */ 885 /* Write buck end. unused write back interrupt */
868 if (intr_status & EESR_TABT) /* Transmit Abort int */ 886 if (intr_status & EESR_TABT) /* Transmit Abort int */
869 mdp->stats.tx_aborted_errors++; 887 mdp->stats.tx_aborted_errors++;
888 if (netif_msg_tx_err(mdp))
889 dev_err(&ndev->dev, "Transmit Abort\n");
870 } 890 }
871 891
872 if (intr_status & EESR_RABT) { 892 if (intr_status & EESR_RABT) {
@@ -874,14 +894,23 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
874 if (intr_status & EESR_RFRMER) { 894 if (intr_status & EESR_RFRMER) {
875 /* Receive Frame Overflow int */ 895 /* Receive Frame Overflow int */
876 mdp->stats.rx_frame_errors++; 896 mdp->stats.rx_frame_errors++;
877 dev_err(&ndev->dev, "Receive Frame Overflow\n"); 897 if (netif_msg_rx_err(mdp))
898 dev_err(&ndev->dev, "Receive Abort\n");
878 } 899 }
879 } 900 }
880 901
881 if (!mdp->cd->no_ade) { 902 if (intr_status & EESR_TDE) {
882 if (intr_status & EESR_ADE && intr_status & EESR_TDE && 903 /* Transmit Descriptor Empty int */
883 intr_status & EESR_TFE) 904 mdp->stats.tx_fifo_errors++;
884 mdp->stats.tx_fifo_errors++; 905 if (netif_msg_tx_err(mdp))
906 dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
907 }
908
909 if (intr_status & EESR_TFE) {
910 /* FIFO under flow */
911 mdp->stats.tx_fifo_errors++;
912 if (netif_msg_tx_err(mdp))
913 dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
885 } 914 }
886 915
887 if (intr_status & EESR_RDE) { 916 if (intr_status & EESR_RDE) {
@@ -890,12 +919,22 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
890 919
891 if (readl(ioaddr + EDRRR) ^ EDRRR_R) 920 if (readl(ioaddr + EDRRR) ^ EDRRR_R)
892 writel(EDRRR_R, ioaddr + EDRRR); 921 writel(EDRRR_R, ioaddr + EDRRR);
893 dev_err(&ndev->dev, "Receive Descriptor Empty\n"); 922 if (netif_msg_rx_err(mdp))
923 dev_err(&ndev->dev, "Receive Descriptor Empty\n");
894 } 924 }
925
895 if (intr_status & EESR_RFE) { 926 if (intr_status & EESR_RFE) {
896 /* Receive FIFO Overflow int */ 927 /* Receive FIFO Overflow int */
897 mdp->stats.rx_fifo_errors++; 928 mdp->stats.rx_fifo_errors++;
898 dev_err(&ndev->dev, "Receive FIFO Overflow\n"); 929 if (netif_msg_rx_err(mdp))
930 dev_err(&ndev->dev, "Receive FIFO Overflow\n");
931 }
932
933 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
934 /* Address Error */
935 mdp->stats.tx_fifo_errors++;
936 if (netif_msg_tx_err(mdp))
937 dev_err(&ndev->dev, "Address Error\n");
899 } 938 }
900 939
901 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE; 940 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
@@ -1012,7 +1051,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
1012 mdp->duplex = -1; 1051 mdp->duplex = -1;
1013 } 1052 }
1014 1053
1015 if (new_state) 1054 if (new_state && netif_msg_link(mdp))
1016 phy_print_status(phydev); 1055 phy_print_status(phydev);
1017} 1056}
1018 1057
@@ -1063,6 +1102,132 @@ static int sh_eth_phy_start(struct net_device *ndev)
1063 return 0; 1102 return 0;
1064} 1103}
1065 1104
1105static int sh_eth_get_settings(struct net_device *ndev,
1106 struct ethtool_cmd *ecmd)
1107{
1108 struct sh_eth_private *mdp = netdev_priv(ndev);
1109 unsigned long flags;
1110 int ret;
1111
1112 spin_lock_irqsave(&mdp->lock, flags);
1113 ret = phy_ethtool_gset(mdp->phydev, ecmd);
1114 spin_unlock_irqrestore(&mdp->lock, flags);
1115
1116 return ret;
1117}
1118
1119static int sh_eth_set_settings(struct net_device *ndev,
1120 struct ethtool_cmd *ecmd)
1121{
1122 struct sh_eth_private *mdp = netdev_priv(ndev);
1123 unsigned long flags;
1124 int ret;
1125 u32 ioaddr = ndev->base_addr;
1126
1127 spin_lock_irqsave(&mdp->lock, flags);
1128
1129 /* disable tx and rx */
1130 sh_eth_rcv_snd_disable(ioaddr);
1131
1132 ret = phy_ethtool_sset(mdp->phydev, ecmd);
1133 if (ret)
1134 goto error_exit;
1135
1136 if (ecmd->duplex == DUPLEX_FULL)
1137 mdp->duplex = 1;
1138 else
1139 mdp->duplex = 0;
1140
1141 if (mdp->cd->set_duplex)
1142 mdp->cd->set_duplex(ndev);
1143
1144error_exit:
1145 mdelay(1);
1146
1147 /* enable tx and rx */
1148 sh_eth_rcv_snd_enable(ioaddr);
1149
1150 spin_unlock_irqrestore(&mdp->lock, flags);
1151
1152 return ret;
1153}
1154
1155static int sh_eth_nway_reset(struct net_device *ndev)
1156{
1157 struct sh_eth_private *mdp = netdev_priv(ndev);
1158 unsigned long flags;
1159 int ret;
1160
1161 spin_lock_irqsave(&mdp->lock, flags);
1162 ret = phy_start_aneg(mdp->phydev);
1163 spin_unlock_irqrestore(&mdp->lock, flags);
1164
1165 return ret;
1166}
1167
1168static u32 sh_eth_get_msglevel(struct net_device *ndev)
1169{
1170 struct sh_eth_private *mdp = netdev_priv(ndev);
1171 return mdp->msg_enable;
1172}
1173
1174static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
1175{
1176 struct sh_eth_private *mdp = netdev_priv(ndev);
1177 mdp->msg_enable = value;
1178}
1179
1180static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
1181 "rx_current", "tx_current",
1182 "rx_dirty", "tx_dirty",
1183};
1184#define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats)
1185
1186static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
1187{
1188 switch (sset) {
1189 case ETH_SS_STATS:
1190 return SH_ETH_STATS_LEN;
1191 default:
1192 return -EOPNOTSUPP;
1193 }
1194}
1195
1196static void sh_eth_get_ethtool_stats(struct net_device *ndev,
1197 struct ethtool_stats *stats, u64 *data)
1198{
1199 struct sh_eth_private *mdp = netdev_priv(ndev);
1200 int i = 0;
1201
1202 /* device-specific stats */
1203 data[i++] = mdp->cur_rx;
1204 data[i++] = mdp->cur_tx;
1205 data[i++] = mdp->dirty_rx;
1206 data[i++] = mdp->dirty_tx;
1207}
1208
1209static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1210{
1211 switch (stringset) {
1212 case ETH_SS_STATS:
1213 memcpy(data, *sh_eth_gstrings_stats,
1214 sizeof(sh_eth_gstrings_stats));
1215 break;
1216 }
1217}
1218
1219static struct ethtool_ops sh_eth_ethtool_ops = {
1220 .get_settings = sh_eth_get_settings,
1221 .set_settings = sh_eth_set_settings,
1222 .nway_reset = sh_eth_nway_reset,
1223 .get_msglevel = sh_eth_get_msglevel,
1224 .set_msglevel = sh_eth_set_msglevel,
1225 .get_link = ethtool_op_get_link,
1226 .get_strings = sh_eth_get_strings,
1227 .get_ethtool_stats = sh_eth_get_ethtool_stats,
1228 .get_sset_count = sh_eth_get_sset_count,
1229};
1230
1066/* network device open function */ 1231/* network device open function */
1067static int sh_eth_open(struct net_device *ndev) 1232static int sh_eth_open(struct net_device *ndev)
1068{ 1233{
@@ -1073,8 +1238,8 @@ static int sh_eth_open(struct net_device *ndev)
1073 1238
1074 ret = request_irq(ndev->irq, sh_eth_interrupt, 1239 ret = request_irq(ndev->irq, sh_eth_interrupt,
1075#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \ 1240#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
1076 defined(CONFIG_CPU_SUBTYPE_SH7764) || \ 1241 defined(CONFIG_CPU_SUBTYPE_SH7764) || \
1077 defined(CONFIG_CPU_SUBTYPE_SH7757) 1242 defined(CONFIG_CPU_SUBTYPE_SH7757)
1078 IRQF_SHARED, 1243 IRQF_SHARED,
1079#else 1244#else
1080 0, 1245 0,
@@ -1123,8 +1288,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
1123 1288
1124 netif_stop_queue(ndev); 1289 netif_stop_queue(ndev);
1125 1290
1126 /* worning message out. */ 1291 if (netif_msg_timer(mdp))
1127 printk(KERN_WARNING "%s: transmit timed out, status %8.8x," 1292 dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
1128 " resetting...\n", ndev->name, (int)readl(ioaddr + EESR)); 1293 " resetting...\n", ndev->name, (int)readl(ioaddr + EESR));
1129 1294
1130 /* tx_errors count up */ 1295 /* tx_errors count up */
@@ -1167,6 +1332,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1167 spin_lock_irqsave(&mdp->lock, flags); 1332 spin_lock_irqsave(&mdp->lock, flags);
1168 if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) { 1333 if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
1169 if (!sh_eth_txfree(ndev)) { 1334 if (!sh_eth_txfree(ndev)) {
1335 if (netif_msg_tx_queued(mdp))
1336 dev_warn(&ndev->dev, "TxFD exhausted.\n");
1170 netif_stop_queue(ndev); 1337 netif_stop_queue(ndev);
1171 spin_unlock_irqrestore(&mdp->lock, flags); 1338 spin_unlock_irqrestore(&mdp->lock, flags);
1172 return NETDEV_TX_BUSY; 1339 return NETDEV_TX_BUSY;
@@ -1497,8 +1664,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1497 1664
1498 /* set function */ 1665 /* set function */
1499 ndev->netdev_ops = &sh_eth_netdev_ops; 1666 ndev->netdev_ops = &sh_eth_netdev_ops;
1667 SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
1500 ndev->watchdog_timeo = TX_TIMEOUT; 1668 ndev->watchdog_timeo = TX_TIMEOUT;
1501 1669
1670 /* debug message level */
1671 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
1502 mdp->post_rx = POST_RX >> (devno << 1); 1672 mdp->post_rx = POST_RX >> (devno << 1);
1503 mdp->post_fw = POST_FW >> (devno << 1); 1673 mdp->post_fw = POST_FW >> (devno << 1);
1504 1674
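The sh_eth hunks above follow the stock netif_msg_* pattern: keep a
NETIF_MSG_* bitmask in the private struct, expose it through
.get_msglevel/.set_msglevel, gate the driver's own prints on it, and
hook the ops up with SET_ETHTOOL_OPS() at probe time. A minimal sketch
of that plumbing, with hypothetical foo_* names and a made-up
FOO_DEF_MSG_ENABLE default standing in for the driver's own:

    #include <linux/netdevice.h>
    #include <linux/ethtool.h>

    #define FOO_DEF_MSG_ENABLE (NETIF_MSG_LINK | NETIF_MSG_TIMER)

    struct foo_priv {
            u32 msg_enable;         /* NETIF_MSG_* bits, set via ethtool */
    };

    static u32 foo_get_msglevel(struct net_device *ndev)
    {
            struct foo_priv *priv = netdev_priv(ndev);
            return priv->msg_enable;
    }

    static void foo_set_msglevel(struct net_device *ndev, u32 value)
    {
            struct foo_priv *priv = netdev_priv(ndev);
            priv->msg_enable = value;
    }

    static void foo_link_change(struct net_device *ndev, bool up)
    {
            struct foo_priv *priv = netdev_priv(ndev);

            /* print only when the user enabled NETIF_MSG_LINK */
            if (netif_msg_link(priv))
                    netdev_info(ndev, "link %s\n", up ? "up" : "down");
    }

    static const struct ethtool_ops foo_ethtool_ops = {
            .get_msglevel   = foo_get_msglevel,
            .set_msglevel   = foo_set_msglevel,
            .get_link       = ethtool_op_get_link,
    };

    static void foo_init_ethtool(struct net_device *ndev)
    {
            struct foo_priv *priv = netdev_priv(ndev);

            SET_ETHTOOL_OPS(ndev, &foo_ethtool_ops);
            priv->msg_enable = netif_msg_init(-1, FOO_DEF_MSG_ENABLE);
    }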
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 5976d1d51df1..640e368ebeee 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1777,6 +1777,7 @@ static int sis900_rx(struct net_device *net_dev)
1777 "cur_rx:%4.4d, dirty_rx:%4.4d\n", 1777 "cur_rx:%4.4d, dirty_rx:%4.4d\n",
1778 net_dev->name, sis_priv->cur_rx, 1778 net_dev->name, sis_priv->cur_rx,
1779 sis_priv->dirty_rx); 1779 sis_priv->dirty_rx);
1780 dev_kfree_skb(skb);
1780 break; 1781 break;
1781 } 1782 }
1782 1783
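The sis900 one-liner plugs a leak: when the receive loop bails out of
its error branch, the sk_buff it had just taken for the ring slot was
never freed. A hypothetical refill loop showing the rule the fix
enforces (the foo_* name and buffer size are made up, not sis900 code):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/pci.h>

    static int foo_refill_rx_ring(struct pci_dev *pdev,
                                  struct net_device *dev,
                                  struct sk_buff **skbs,
                                  dma_addr_t *dma, int entries)
    {
            int i;

            for (i = 0; i < entries; i++) {
                    struct sk_buff *skb = netdev_alloc_skb(dev, 1536);

                    if (!skb)
                            return -ENOMEM;

                    dma[i] = pci_map_single(pdev, skb->data, 1536,
                                            PCI_DMA_FROMDEVICE);
                    if (pci_dma_mapping_error(pdev, dma[i])) {
                            /* every early exit frees its own skb */
                            dev_kfree_skb(skb);
                            return -EIO;
                    }
                    skbs[i] = skb;
            }
            return 0;
    }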
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 726df611ee17..43654a3bb0ec 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -81,6 +81,7 @@ static const char version[] =
81#include <linux/ethtool.h> 81#include <linux/ethtool.h>
82#include <linux/mii.h> 82#include <linux/mii.h>
83#include <linux/workqueue.h> 83#include <linux/workqueue.h>
84#include <linux/of.h>
84 85
85#include <linux/netdevice.h> 86#include <linux/netdevice.h>
86#include <linux/etherdevice.h> 87#include <linux/etherdevice.h>
@@ -2394,6 +2395,15 @@ static int smc_drv_resume(struct device *dev)
2394 return 0; 2395 return 0;
2395} 2396}
2396 2397
2398#ifdef CONFIG_OF
2399static const struct of_device_id smc91x_match[] = {
2400 { .compatible = "smsc,lan91c94", },
2401 { .compatible = "smsc,lan91c111", },
2402 {},
 2403};
2404MODULE_DEVICE_TABLE(of, smc91x_match);
2405#endif
2406
2397static struct dev_pm_ops smc_drv_pm_ops = { 2407static struct dev_pm_ops smc_drv_pm_ops = {
2398 .suspend = smc_drv_suspend, 2408 .suspend = smc_drv_suspend,
2399 .resume = smc_drv_resume, 2409 .resume = smc_drv_resume,
@@ -2406,6 +2416,9 @@ static struct platform_driver smc_driver = {
2406 .name = CARDNAME, 2416 .name = CARDNAME,
2407 .owner = THIS_MODULE, 2417 .owner = THIS_MODULE,
2408 .pm = &smc_drv_pm_ops, 2418 .pm = &smc_drv_pm_ops,
2419#ifdef CONFIG_OF
2420 .of_match_table = smc91x_match,
2421#endif
2409 }, 2422 },
2410}; 2423};
2411 2424
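The smc91x change is the standard two-step for device-tree probing of a
platform driver: a CONFIG_OF-guarded of_device_id table exported with
MODULE_DEVICE_TABLE(), plus an .of_match_table pointer in the driver.
A bare-bones sketch under a made-up "acme,foo-eth" binding:

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/of.h>

    static int __devinit foo_probe(struct platform_device *pdev)
    {
            return 0;       /* real resource setup goes here */
    }

    #ifdef CONFIG_OF
    static const struct of_device_id foo_match[] = {
            { .compatible = "acme,foo-eth", },
            { /* sentinel */ },
    };
    MODULE_DEVICE_TABLE(of, foo_match);
    #endif

    static struct platform_driver foo_driver = {
            .probe = foo_probe,
            .driver = {
                    .name = "foo-eth",
                    .owner = THIS_MODULE,
    #ifdef CONFIG_OF
                    .of_match_table = foo_match,
    #endif
            },
    };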
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 34a0af3837f9..0e5f03135b50 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1560,8 +1560,10 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1560 1560
1561 priv->hw = device; 1561 priv->hw = device;
1562 1562
1563 if (device_can_wakeup(priv->device)) 1563 if (device_can_wakeup(priv->device)) {
1564 priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ 1564 priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
1565 enable_irq_wake(dev->irq);
1566 }
1565 1567
1566 return 0; 1568 return 0;
1567} 1569}
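The stmmac hunk ties WoL to the platform's wakeup capability and arms
the interrupt as a wakeup source. The matching ethtool side of such a
scheme usually looks like the hypothetical handlers below (the foo_*
names and the WAKE_MAGIC-only policy are assumptions, not stmmac code):

    #include <linux/netdevice.h>
    #include <linux/ethtool.h>
    #include <linux/device.h>
    #include <linux/pm_wakeup.h>

    struct foo_priv {
            struct device *device;  /* the underlying struct device */
            u32 wolopts;
    };

    static void foo_get_wol(struct net_device *dev,
                            struct ethtool_wolinfo *wol)
    {
            struct foo_priv *priv = netdev_priv(dev);

            wol->supported = device_can_wakeup(priv->device) ?
                             WAKE_MAGIC : 0;
            wol->wolopts = priv->wolopts;
    }

    static int foo_set_wol(struct net_device *dev,
                           struct ethtool_wolinfo *wol)
    {
            struct foo_priv *priv = netdev_priv(dev);

            if (!device_can_wakeup(priv->device))
                    return -EOPNOTSUPP;
            if (wol->wolopts & ~WAKE_MAGIC)
                    return -EINVAL;

            priv->wolopts = wol->wolopts;
            /* keep the PM core's view in sync with the user's choice */
            device_set_wakeup_enable(priv->device, priv->wolopts != 0);
            return 0;
    }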
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 1c5408f83937..c1a344829b54 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -320,28 +320,28 @@ static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
320 320
321 if (txmac_stat & MAC_TXSTAT_URUN) { 321 if (txmac_stat & MAC_TXSTAT_URUN) {
322 netdev_err(dev, "TX MAC xmit underrun\n"); 322 netdev_err(dev, "TX MAC xmit underrun\n");
323 gp->net_stats.tx_fifo_errors++; 323 dev->stats.tx_fifo_errors++;
324 } 324 }
325 325
326 if (txmac_stat & MAC_TXSTAT_MPE) { 326 if (txmac_stat & MAC_TXSTAT_MPE) {
327 netdev_err(dev, "TX MAC max packet size error\n"); 327 netdev_err(dev, "TX MAC max packet size error\n");
328 gp->net_stats.tx_errors++; 328 dev->stats.tx_errors++;
329 } 329 }
330 330
331 /* The rest are all cases of one of the 16-bit TX 331 /* The rest are all cases of one of the 16-bit TX
332 * counters expiring. 332 * counters expiring.
333 */ 333 */
334 if (txmac_stat & MAC_TXSTAT_NCE) 334 if (txmac_stat & MAC_TXSTAT_NCE)
335 gp->net_stats.collisions += 0x10000; 335 dev->stats.collisions += 0x10000;
336 336
337 if (txmac_stat & MAC_TXSTAT_ECE) { 337 if (txmac_stat & MAC_TXSTAT_ECE) {
338 gp->net_stats.tx_aborted_errors += 0x10000; 338 dev->stats.tx_aborted_errors += 0x10000;
339 gp->net_stats.collisions += 0x10000; 339 dev->stats.collisions += 0x10000;
340 } 340 }
341 341
342 if (txmac_stat & MAC_TXSTAT_LCE) { 342 if (txmac_stat & MAC_TXSTAT_LCE) {
343 gp->net_stats.tx_aborted_errors += 0x10000; 343 dev->stats.tx_aborted_errors += 0x10000;
344 gp->net_stats.collisions += 0x10000; 344 dev->stats.collisions += 0x10000;
345 } 345 }
346 346
347 /* We do not keep track of MAC_TXSTAT_FCE and 347 /* We do not keep track of MAC_TXSTAT_FCE and
@@ -469,20 +469,20 @@ static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
469 u32 smac = readl(gp->regs + MAC_SMACHINE); 469 u32 smac = readl(gp->regs + MAC_SMACHINE);
470 470
471 netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac); 471 netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
472 gp->net_stats.rx_over_errors++; 472 dev->stats.rx_over_errors++;
473 gp->net_stats.rx_fifo_errors++; 473 dev->stats.rx_fifo_errors++;
474 474
475 ret = gem_rxmac_reset(gp); 475 ret = gem_rxmac_reset(gp);
476 } 476 }
477 477
478 if (rxmac_stat & MAC_RXSTAT_ACE) 478 if (rxmac_stat & MAC_RXSTAT_ACE)
479 gp->net_stats.rx_frame_errors += 0x10000; 479 dev->stats.rx_frame_errors += 0x10000;
480 480
481 if (rxmac_stat & MAC_RXSTAT_CCE) 481 if (rxmac_stat & MAC_RXSTAT_CCE)
482 gp->net_stats.rx_crc_errors += 0x10000; 482 dev->stats.rx_crc_errors += 0x10000;
483 483
484 if (rxmac_stat & MAC_RXSTAT_LCE) 484 if (rxmac_stat & MAC_RXSTAT_LCE)
485 gp->net_stats.rx_length_errors += 0x10000; 485 dev->stats.rx_length_errors += 0x10000;
486 486
487 /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE 487 /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
488 * events. 488 * events.
@@ -594,7 +594,7 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat
594 if (netif_msg_rx_err(gp)) 594 if (netif_msg_rx_err(gp))
595 printk(KERN_DEBUG "%s: no buffer for rx frame\n", 595 printk(KERN_DEBUG "%s: no buffer for rx frame\n",
596 gp->dev->name); 596 gp->dev->name);
597 gp->net_stats.rx_dropped++; 597 dev->stats.rx_dropped++;
598 } 598 }
599 599
600 if (gem_status & GREG_STAT_RXTAGERR) { 600 if (gem_status & GREG_STAT_RXTAGERR) {
@@ -602,7 +602,7 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat
602 if (netif_msg_rx_err(gp)) 602 if (netif_msg_rx_err(gp))
603 printk(KERN_DEBUG "%s: corrupt rx tag framing\n", 603 printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
604 gp->dev->name); 604 gp->dev->name);
605 gp->net_stats.rx_errors++; 605 dev->stats.rx_errors++;
606 606
607 goto do_reset; 607 goto do_reset;
608 } 608 }
@@ -684,7 +684,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
684 break; 684 break;
685 } 685 }
686 gp->tx_skbs[entry] = NULL; 686 gp->tx_skbs[entry] = NULL;
687 gp->net_stats.tx_bytes += skb->len; 687 dev->stats.tx_bytes += skb->len;
688 688
689 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { 689 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
690 txd = &gp->init_block->txd[entry]; 690 txd = &gp->init_block->txd[entry];
@@ -696,7 +696,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
696 entry = NEXT_TX(entry); 696 entry = NEXT_TX(entry);
697 } 697 }
698 698
699 gp->net_stats.tx_packets++; 699 dev->stats.tx_packets++;
700 dev_kfree_skb_irq(skb); 700 dev_kfree_skb_irq(skb);
701 } 701 }
702 gp->tx_old = entry; 702 gp->tx_old = entry;
@@ -738,6 +738,7 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit)
738 738
739static int gem_rx(struct gem *gp, int work_to_do) 739static int gem_rx(struct gem *gp, int work_to_do)
740{ 740{
741 struct net_device *dev = gp->dev;
741 int entry, drops, work_done = 0; 742 int entry, drops, work_done = 0;
742 u32 done; 743 u32 done;
743 __sum16 csum; 744 __sum16 csum;
@@ -782,15 +783,15 @@ static int gem_rx(struct gem *gp, int work_to_do)
782 783
783 len = (status & RXDCTRL_BUFSZ) >> 16; 784 len = (status & RXDCTRL_BUFSZ) >> 16;
784 if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) { 785 if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
785 gp->net_stats.rx_errors++; 786 dev->stats.rx_errors++;
786 if (len < ETH_ZLEN) 787 if (len < ETH_ZLEN)
787 gp->net_stats.rx_length_errors++; 788 dev->stats.rx_length_errors++;
788 if (len & RXDCTRL_BAD) 789 if (len & RXDCTRL_BAD)
789 gp->net_stats.rx_crc_errors++; 790 dev->stats.rx_crc_errors++;
790 791
791 /* We'll just return it to GEM. */ 792 /* We'll just return it to GEM. */
792 drop_it: 793 drop_it:
793 gp->net_stats.rx_dropped++; 794 dev->stats.rx_dropped++;
794 goto next; 795 goto next;
795 } 796 }
796 797
@@ -843,8 +844,8 @@ static int gem_rx(struct gem *gp, int work_to_do)
843 844
844 netif_receive_skb(skb); 845 netif_receive_skb(skb);
845 846
846 gp->net_stats.rx_packets++; 847 dev->stats.rx_packets++;
847 gp->net_stats.rx_bytes += len; 848 dev->stats.rx_bytes += len;
848 849
849 next: 850 next:
850 entry = NEXT_RX(entry); 851 entry = NEXT_RX(entry);
@@ -2472,7 +2473,6 @@ static int gem_resume(struct pci_dev *pdev)
2472static struct net_device_stats *gem_get_stats(struct net_device *dev) 2473static struct net_device_stats *gem_get_stats(struct net_device *dev)
2473{ 2474{
2474 struct gem *gp = netdev_priv(dev); 2475 struct gem *gp = netdev_priv(dev);
2475 struct net_device_stats *stats = &gp->net_stats;
2476 2476
2477 spin_lock_irq(&gp->lock); 2477 spin_lock_irq(&gp->lock);
2478 spin_lock(&gp->tx_lock); 2478 spin_lock(&gp->tx_lock);
@@ -2481,17 +2481,17 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
2481 * so we shield against this 2481 * so we shield against this
2482 */ 2482 */
2483 if (gp->running) { 2483 if (gp->running) {
2484 stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR); 2484 dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
2485 writel(0, gp->regs + MAC_FCSERR); 2485 writel(0, gp->regs + MAC_FCSERR);
2486 2486
2487 stats->rx_frame_errors += readl(gp->regs + MAC_AERR); 2487 dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
2488 writel(0, gp->regs + MAC_AERR); 2488 writel(0, gp->regs + MAC_AERR);
2489 2489
2490 stats->rx_length_errors += readl(gp->regs + MAC_LERR); 2490 dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
2491 writel(0, gp->regs + MAC_LERR); 2491 writel(0, gp->regs + MAC_LERR);
2492 2492
2493 stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL); 2493 dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
2494 stats->collisions += 2494 dev->stats.collisions +=
2495 (readl(gp->regs + MAC_ECOLL) + 2495 (readl(gp->regs + MAC_ECOLL) +
2496 readl(gp->regs + MAC_LCOLL)); 2496 readl(gp->regs + MAC_LCOLL));
2497 writel(0, gp->regs + MAC_ECOLL); 2497 writel(0, gp->regs + MAC_ECOLL);
@@ -2501,7 +2501,7 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
2501 spin_unlock(&gp->tx_lock); 2501 spin_unlock(&gp->tx_lock);
2502 spin_unlock_irq(&gp->lock); 2502 spin_unlock_irq(&gp->lock);
2503 2503
2504 return &gp->net_stats; 2504 return &dev->stats;
2505} 2505}
2506 2506
2507static int gem_set_mac_address(struct net_device *dev, void *addr) 2507static int gem_set_mac_address(struct net_device *dev, void *addr)
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h
index 19905460def6..ede017872367 100644
--- a/drivers/net/sungem.h
+++ b/drivers/net/sungem.h
@@ -994,7 +994,6 @@ struct gem {
994 u32 status; 994 u32 status;
995 995
996 struct napi_struct napi; 996 struct napi_struct napi;
997 struct net_device_stats net_stats;
998 997
999 int tx_fifo_sz; 998 int tx_fifo_sz;
1000 int rx_fifo_sz; 999 int rx_fifo_sz;
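The sungem conversion leans on the fact that struct net_device has
embedded a struct net_device_stats (dev->stats) since 2.6.26, so the
driver-private copy and all the field-by-field shadowing can simply
go. A sketch of the resulting shape (foo_* is hypothetical):

    #include <linux/netdevice.h>

    static void foo_rx_done(struct net_device *dev, unsigned int len)
    {
            /* hot paths bump the embedded counters directly */
            dev->stats.rx_packets++;
            dev->stats.rx_bytes += len;
    }

    static struct net_device_stats *foo_get_stats(struct net_device *dev)
    {
            /* fold in hardware-maintained counters here if any,
             * then hand back the embedded structure */
            return &dev->stats;
    }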
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 7841a8f69998..6be418591df9 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4,7 +4,7 @@
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) 5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc. 6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2010 Broadcom Corporation. 7 * Copyright (C) 2005-2011 Broadcom Corporation.
8 * 8 *
9 * Firmware is: 9 * Firmware is:
10 * Derived from proprietary unpublished source code, 10 * Derived from proprietary unpublished source code,
@@ -60,20 +60,14 @@
60#define BAR_0 0 60#define BAR_0 0
61#define BAR_2 2 61#define BAR_2 2
62 62
63#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
64#define TG3_VLAN_TAG_USED 1
65#else
66#define TG3_VLAN_TAG_USED 0
67#endif
68
69#include "tg3.h" 63#include "tg3.h"
70 64
71#define DRV_MODULE_NAME "tg3" 65#define DRV_MODULE_NAME "tg3"
72#define TG3_MAJ_NUM 3 66#define TG3_MAJ_NUM 3
73#define TG3_MIN_NUM 116 67#define TG3_MIN_NUM 117
74#define DRV_MODULE_VERSION \ 68#define DRV_MODULE_VERSION \
75 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) 69 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
76#define DRV_MODULE_RELDATE "December 3, 2010" 70#define DRV_MODULE_RELDATE "January 25, 2011"
77 71
78#define TG3_DEF_MAC_MODE 0 72#define TG3_DEF_MAC_MODE 0
79#define TG3_DEF_RX_MODE 0 73#define TG3_DEF_RX_MODE 0
@@ -134,9 +128,6 @@
134 TG3_TX_RING_SIZE) 128 TG3_TX_RING_SIZE)
135#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) 129#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
136 130
137#define TG3_RX_DMA_ALIGN 16
138#define TG3_RX_HEADROOM ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)
139
140#define TG3_DMA_BYTE_ENAB 64 131#define TG3_DMA_BYTE_ENAB 64
141 132
142#define TG3_RX_STD_DMA_SZ 1536 133#define TG3_RX_STD_DMA_SZ 1536
@@ -1785,9 +1776,29 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1785 tg3_phy_cl45_read(tp, MDIO_MMD_AN, 1776 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1786 TG3_CL45_D7_EEERES_STAT, &val); 1777 TG3_CL45_D7_EEERES_STAT, &val);
1787 1778
1788 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || 1779 switch (val) {
1789 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) 1780 case TG3_CL45_D7_EEERES_STAT_LP_1000T:
1781 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
1782 case ASIC_REV_5717:
1783 case ASIC_REV_5719:
1784 case ASIC_REV_57765:
1785 /* Enable SM_DSP clock and tx 6dB coding. */
1786 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1787 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1788 MII_TG3_AUXCTL_ACTL_TX_6DB;
1789 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1790
1791 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
1792
1793 /* Turn off SM_DSP clock. */
1794 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1795 MII_TG3_AUXCTL_ACTL_TX_6DB;
1796 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1797 }
1798 /* Fallthrough */
1799 case TG3_CL45_D7_EEERES_STAT_LP_100TX:
1790 tp->setlpicnt = 2; 1800 tp->setlpicnt = 2;
1801 }
1791 } 1802 }
1792 1803
1793 if (!tp->setlpicnt) { 1804 if (!tp->setlpicnt) {
@@ -2977,11 +2988,19 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
2977 MII_TG3_AUXCTL_ACTL_TX_6DB; 2988 MII_TG3_AUXCTL_ACTL_TX_6DB;
2978 tg3_writephy(tp, MII_TG3_AUX_CTRL, val); 2989 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2979 2990
2980 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 2991 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2981 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && 2992 case ASIC_REV_5717:
2982 !tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) 2993 case ASIC_REV_57765:
2983 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, 2994 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
2984 val | MII_TG3_DSP_CH34TP2_HIBW01); 2995 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
2996 MII_TG3_DSP_CH34TP2_HIBW01);
2997 /* Fall through */
2998 case ASIC_REV_5719:
2999 val = MII_TG3_DSP_TAP26_ALNOKO |
3000 MII_TG3_DSP_TAP26_RMRXSTO |
3001 MII_TG3_DSP_TAP26_OPCSINPT;
3002 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3003 }
2985 3004
2986 val = 0; 3005 val = 0;
2987 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 3006 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
@@ -4722,8 +4741,6 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4722 struct sk_buff *skb; 4741 struct sk_buff *skb;
4723 dma_addr_t dma_addr; 4742 dma_addr_t dma_addr;
4724 u32 opaque_key, desc_idx, *post_ptr; 4743 u32 opaque_key, desc_idx, *post_ptr;
4725 bool hw_vlan __maybe_unused = false;
4726 u16 vtag __maybe_unused = 0;
4727 4744
4728 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 4745 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4729 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 4746 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
@@ -4782,12 +4799,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4782 tg3_recycle_rx(tnapi, tpr, opaque_key, 4799 tg3_recycle_rx(tnapi, tpr, opaque_key,
4783 desc_idx, *post_ptr); 4800 desc_idx, *post_ptr);
4784 4801
4785 copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN + 4802 copy_skb = netdev_alloc_skb(tp->dev, len +
4786 TG3_RAW_IP_ALIGN); 4803 TG3_RAW_IP_ALIGN);
4787 if (copy_skb == NULL) 4804 if (copy_skb == NULL)
4788 goto drop_it_no_recycle; 4805 goto drop_it_no_recycle;
4789 4806
4790 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN); 4807 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4791 skb_put(copy_skb, len); 4808 skb_put(copy_skb, len);
4792 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 4809 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4793 skb_copy_from_linear_data(skb, copy_skb->data, len); 4810 skb_copy_from_linear_data(skb, copy_skb->data, len);
@@ -4814,30 +4831,11 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4814 } 4831 }
4815 4832
4816 if (desc->type_flags & RXD_FLAG_VLAN && 4833 if (desc->type_flags & RXD_FLAG_VLAN &&
4817 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) { 4834 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4818 vtag = desc->err_vlan & RXD_VLAN_MASK; 4835 __vlan_hwaccel_put_tag(skb,
4819#if TG3_VLAN_TAG_USED 4836 desc->err_vlan & RXD_VLAN_MASK);
4820 if (tp->vlgrp)
4821 hw_vlan = true;
4822 else
4823#endif
4824 {
4825 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
4826 __skb_push(skb, VLAN_HLEN);
4827
4828 memmove(ve, skb->data + VLAN_HLEN,
4829 ETH_ALEN * 2);
4830 ve->h_vlan_proto = htons(ETH_P_8021Q);
4831 ve->h_vlan_TCI = htons(vtag);
4832 }
4833 }
4834 4837
4835#if TG3_VLAN_TAG_USED 4838 napi_gro_receive(&tnapi->napi, skb);
4836 if (hw_vlan)
4837 vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
4838 else
4839#endif
4840 napi_gro_receive(&tnapi->napi, skb);
4841 4839
4842 received++; 4840 received++;
4843 budget--; 4841 budget--;
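The rx hunk above replaces tg3's hand-rolled tag reinsertion with the
hwaccel helpers: record the hardware-stripped TCI on the skb and let
the core decide whether to deliver to a vlan device or splice the
header back in. A sketch against the 2.6.38-era API, where
__vlan_hwaccel_put_tag() still takes a bare TCI and no protocol
argument (foo_* is hypothetical):

    #include <linux/if_vlan.h>
    #include <linux/netdevice.h>

    static void foo_rx_deliver(struct napi_struct *napi,
                               struct sk_buff *skb,
                               bool tagged, u16 vlan_tci)
    {
            if (tagged)
                    __vlan_hwaccel_put_tag(skb, vlan_tci);
            napi_gro_receive(napi, skb);
    }

The tx direction becomes symmetric: with NETIF_F_HW_VLAN_TX now always
advertised, the driver can test vlan_tx_tag_present(skb) and copy
vlan_tx_tag_get(skb) into the descriptor flags unconditionally, as the
tg3_start_xmit hunks further down do.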
@@ -5740,11 +5738,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5740 base_flags |= TXD_FLAG_TCPUDP_CSUM; 5738 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5741 } 5739 }
5742 5740
5743#if TG3_VLAN_TAG_USED
5744 if (vlan_tx_tag_present(skb)) 5741 if (vlan_tx_tag_present(skb))
5745 base_flags |= (TXD_FLAG_VLAN | 5742 base_flags |= (TXD_FLAG_VLAN |
5746 (vlan_tx_tag_get(skb) << 16)); 5743 (vlan_tx_tag_get(skb) << 16));
5747#endif
5748 5744
5749 len = skb_headlen(skb); 5745 len = skb_headlen(skb);
5750 5746
@@ -5986,11 +5982,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5986 } 5982 }
5987 } 5983 }
5988 } 5984 }
5989#if TG3_VLAN_TAG_USED 5985
5990 if (vlan_tx_tag_present(skb)) 5986 if (vlan_tx_tag_present(skb))
5991 base_flags |= (TXD_FLAG_VLAN | 5987 base_flags |= (TXD_FLAG_VLAN |
5992 (vlan_tx_tag_get(skb) << 16)); 5988 (vlan_tx_tag_get(skb) << 16));
5993#endif
5994 5989
5995 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && 5990 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5996 !mss && skb->len > VLAN_ETH_FRAME_LEN) 5991 !mss && skb->len > VLAN_ETH_FRAME_LEN)
@@ -7834,7 +7829,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7834 TG3_CPMU_DBTMR1_LNKIDLE_2047US); 7829 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7835 7830
7836 tw32_f(TG3_CPMU_EEE_DBTMR2, 7831 tw32_f(TG3_CPMU_EEE_DBTMR2,
7837 TG3_CPMU_DBTMR1_APE_TX_2047US | 7832 TG3_CPMU_DBTMR2_APE_TX_2047US |
7838 TG3_CPMU_DBTMR2_TXIDXEQ_2047US); 7833 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7839 } 7834 }
7840 7835
@@ -8108,8 +8103,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8108 /* Program the jumbo buffer descriptor ring control 8103 /* Program the jumbo buffer descriptor ring control
8109 * blocks on those devices that have them. 8104 * blocks on those devices that have them.
8110 */ 8105 */
8111 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && 8106 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8112 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { 8107 ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
8108 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))) {
8113 /* Setup replenish threshold. */ 8109 /* Setup replenish threshold. */
8114 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8); 8110 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
8115 8111
@@ -8227,8 +8223,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8227 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { 8223 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
8228 val = tr32(TG3_RDMA_RSRVCTRL_REG); 8224 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8229 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { 8225 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
8230 val &= ~TG3_RDMA_RSRVCTRL_TXMRGN_MASK; 8226 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8231 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B; 8227 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8228 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8229 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8230 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8231 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8232 } 8232 }
8233 tw32(TG3_RDMA_RSRVCTRL_REG, 8233 tw32(TG3_RDMA_RSRVCTRL_REG,
8234 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 8234 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
@@ -8350,7 +8350,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8350 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 8350 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8351 udelay(100); 8351 udelay(100);
8352 8352
8353 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) { 8353 if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
8354 tp->irq_cnt > 1) {
8354 val = tr32(MSGINT_MODE); 8355 val = tr32(MSGINT_MODE);
8355 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE; 8356 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8356 tw32(MSGINT_MODE, val); 8357 tw32(MSGINT_MODE, val);
@@ -9090,7 +9091,8 @@ static void tg3_ints_init(struct tg3 *tp)
9090 9091
9091 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) { 9092 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
9092 u32 msi_mode = tr32(MSGINT_MODE); 9093 u32 msi_mode = tr32(MSGINT_MODE);
9093 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) 9094 if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
9095 tp->irq_cnt > 1)
9094 msi_mode |= MSGINT_MODE_MULTIVEC_EN; 9096 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9095 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); 9097 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9096 } 9098 }
@@ -9532,17 +9534,10 @@ static void __tg3_set_rx_mode(struct net_device *dev)
9532 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | 9534 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9533 RX_MODE_KEEP_VLAN_TAG); 9535 RX_MODE_KEEP_VLAN_TAG);
9534 9536
9537#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9535 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG 9538 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9536 * flag clear. 9539 * flag clear.
9537 */ 9540 */
9538#if TG3_VLAN_TAG_USED
9539 if (!tp->vlgrp &&
9540 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9541 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9542#else
9543 /* By definition, VLAN is disabled always in this
9544 * case.
9545 */
9546 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) 9541 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9547 rx_mode |= RX_MODE_KEEP_VLAN_TAG; 9542 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9548#endif 9543#endif
@@ -10873,13 +10868,16 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10873 if (loopback_mode == TG3_MAC_LOOPBACK) { 10868 if (loopback_mode == TG3_MAC_LOOPBACK) {
10874 /* HW errata - mac loopback fails in some cases on 5780. 10869 /* HW errata - mac loopback fails in some cases on 5780.
10875 * Normal traffic and PHY loopback are not affected by 10870 * Normal traffic and PHY loopback are not affected by
10876 * errata. 10871 * errata. Also, the MAC loopback test is deprecated for
10872 * all newer ASIC revisions.
10877 */ 10873 */
10878 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) 10874 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10875 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
10879 return 0; 10876 return 0;
10880 10877
10881 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | 10878 mac_mode = tp->mac_mode &
10882 MAC_MODE_PORT_INT_LPBACK; 10879 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
10880 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
10883 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 10881 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10884 mac_mode |= MAC_MODE_LINK_POLARITY; 10882 mac_mode |= MAC_MODE_LINK_POLARITY;
10885 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) 10883 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
@@ -10901,7 +10899,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10901 tg3_writephy(tp, MII_BMCR, val); 10899 tg3_writephy(tp, MII_BMCR, val);
10902 udelay(40); 10900 udelay(40);
10903 10901
10904 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; 10902 mac_mode = tp->mac_mode &
10903 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
10905 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 10904 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
10906 tg3_writephy(tp, MII_TG3_FET_PTEST, 10905 tg3_writephy(tp, MII_TG3_FET_PTEST,
10907 MII_TG3_FET_PTEST_FRC_TX_LINK | 10906 MII_TG3_FET_PTEST_FRC_TX_LINK |
@@ -10929,6 +10928,13 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10929 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 10928 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10930 } 10929 }
10931 tw32(MAC_MODE, mac_mode); 10930 tw32(MAC_MODE, mac_mode);
10931
10932 /* Wait for link */
10933 for (i = 0; i < 100; i++) {
10934 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
10935 break;
10936 mdelay(1);
10937 }
10932 } else { 10938 } else {
10933 return -EINVAL; 10939 return -EINVAL;
10934 } 10940 }
@@ -11035,14 +11041,19 @@ out:
11035static int tg3_test_loopback(struct tg3 *tp) 11041static int tg3_test_loopback(struct tg3 *tp)
11036{ 11042{
11037 int err = 0; 11043 int err = 0;
11038 u32 cpmuctrl = 0; 11044 u32 eee_cap, cpmuctrl = 0;
11039 11045
11040 if (!netif_running(tp->dev)) 11046 if (!netif_running(tp->dev))
11041 return TG3_LOOPBACK_FAILED; 11047 return TG3_LOOPBACK_FAILED;
11042 11048
11049 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11050 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11051
11043 err = tg3_reset_hw(tp, 1); 11052 err = tg3_reset_hw(tp, 1);
11044 if (err) 11053 if (err) {
11045 return TG3_LOOPBACK_FAILED; 11054 err = TG3_LOOPBACK_FAILED;
11055 goto done;
11056 }
11046 11057
11047 /* Turn off gphy autopowerdown. */ 11058 /* Turn off gphy autopowerdown. */
11048 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 11059 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
@@ -11062,8 +11073,10 @@ static int tg3_test_loopback(struct tg3 *tp)
11062 udelay(10); 11073 udelay(10);
11063 } 11074 }
11064 11075
11065 if (status != CPMU_MUTEX_GNT_DRIVER) 11076 if (status != CPMU_MUTEX_GNT_DRIVER) {
11066 return TG3_LOOPBACK_FAILED; 11077 err = TG3_LOOPBACK_FAILED;
11078 goto done;
11079 }
11067 11080
11068 /* Turn off link-based power management. */ 11081 /* Turn off link-based power management. */
11069 cpmuctrl = tr32(TG3_CPMU_CTRL); 11082 cpmuctrl = tr32(TG3_CPMU_CTRL);
@@ -11092,6 +11105,9 @@ static int tg3_test_loopback(struct tg3 *tp)
11092 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 11105 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11093 tg3_phy_toggle_apd(tp, true); 11106 tg3_phy_toggle_apd(tp, true);
11094 11107
11108done:
11109 tp->phy_flags |= eee_cap;
11110
11095 return err; 11111 return err;
11096} 11112}
11097 11113
@@ -11198,7 +11214,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11198 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 11214 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11199 break; /* We have no PHY */ 11215 break; /* We have no PHY */
11200 11216
11201 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 11217 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
11218 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
11219 !netif_running(dev)))
11202 return -EAGAIN; 11220 return -EAGAIN;
11203 11221
11204 spin_lock_bh(&tp->lock); 11222 spin_lock_bh(&tp->lock);
@@ -11214,7 +11232,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11214 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 11232 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11215 break; /* We have no PHY */ 11233 break; /* We have no PHY */
11216 11234
11217 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 11235 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
11236 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
11237 !netif_running(dev)))
11218 return -EAGAIN; 11238 return -EAGAIN;
11219 11239
11220 spin_lock_bh(&tp->lock); 11240 spin_lock_bh(&tp->lock);
@@ -11230,31 +11250,6 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11230 return -EOPNOTSUPP; 11250 return -EOPNOTSUPP;
11231} 11251}
11232 11252
11233#if TG3_VLAN_TAG_USED
11234static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
11235{
11236 struct tg3 *tp = netdev_priv(dev);
11237
11238 if (!netif_running(dev)) {
11239 tp->vlgrp = grp;
11240 return;
11241 }
11242
11243 tg3_netif_stop(tp);
11244
11245 tg3_full_lock(tp, 0);
11246
11247 tp->vlgrp = grp;
11248
11249 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
11250 __tg3_set_rx_mode(dev);
11251
11252 tg3_netif_start(tp);
11253
11254 tg3_full_unlock(tp);
11255}
11256#endif
11257
11258static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 11253static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11259{ 11254{
11260 struct tg3 *tp = netdev_priv(dev); 11255 struct tg3 *tp = netdev_priv(dev);
@@ -12468,9 +12463,11 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12468 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN; 12463 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
12469 } 12464 }
12470done: 12465done:
12471 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP); 12466 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
12472 device_set_wakeup_enable(&tp->pdev->dev, 12467 device_set_wakeup_enable(&tp->pdev->dev,
12473 tp->tg3_flags & TG3_FLAG_WOL_ENABLE); 12468 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
12469 else
12470 device_set_wakeup_capable(&tp->pdev->dev, false);
12474} 12471}
12475 12472
12476static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd) 12473static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
@@ -13066,9 +13063,7 @@ static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13066 13063
13067static void inline vlan_features_add(struct net_device *dev, unsigned long flags) 13064static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
13068{ 13065{
13069#if TG3_VLAN_TAG_USED
13070 dev->vlan_features |= flags; 13066 dev->vlan_features |= flags;
13071#endif
13072} 13067}
13073 13068
13074static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) 13069static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
@@ -13325,7 +13320,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13325 } 13320 }
13326 13321
13327 /* Determine TSO capabilities */ 13322 /* Determine TSO capabilities */
13328 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) 13323 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13324 ; /* Do nothing. HW bug. */
13325 else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13329 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3; 13326 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
13330 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || 13327 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13331 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 13328 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
@@ -13376,7 +13373,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13376 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG; 13373 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
13377 } 13374 }
13378 13375
13379 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) 13376 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
13377 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13380 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG; 13378 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
13381 13379
13382 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || 13380 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
@@ -13394,42 +13392,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13394 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; 13392 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13395 13393
13396 tp->pcie_readrq = 4096; 13394 tp->pcie_readrq = 4096;
13397 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { 13395 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13398 u16 word; 13396 tp->pcie_readrq = 2048;
13399
13400 pci_read_config_word(tp->pdev,
13401 tp->pcie_cap + PCI_EXP_LNKSTA,
13402 &word);
13403 switch (word & PCI_EXP_LNKSTA_CLS) {
13404 case PCI_EXP_LNKSTA_CLS_2_5GB:
13405 word &= PCI_EXP_LNKSTA_NLW;
13406 word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
13407 switch (word) {
13408 case 2:
13409 tp->pcie_readrq = 2048;
13410 break;
13411 case 4:
13412 tp->pcie_readrq = 1024;
13413 break;
13414 }
13415 break;
13416
13417 case PCI_EXP_LNKSTA_CLS_5_0GB:
13418 word &= PCI_EXP_LNKSTA_NLW;
13419 word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
13420 switch (word) {
13421 case 1:
13422 tp->pcie_readrq = 2048;
13423 break;
13424 case 2:
13425 tp->pcie_readrq = 1024;
13426 break;
13427 case 4:
13428 tp->pcie_readrq = 512;
13429 break;
13430 }
13431 }
13432 }
13433 13397
13434 pcie_set_readrq(tp->pdev, tp->pcie_readrq); 13398 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13435 13399
@@ -13861,11 +13825,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13861 else 13825 else
13862 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; 13826 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
13863 13827
13864 tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM; 13828 tp->rx_offset = NET_IP_ALIGN;
13865 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; 13829 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
13866 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && 13830 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13867 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) { 13831 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
13868 tp->rx_offset -= NET_IP_ALIGN; 13832 tp->rx_offset = 0;
13869#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 13833#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13870 tp->rx_copy_thresh = ~(u16)0; 13834 tp->rx_copy_thresh = ~(u16)0;
13871#endif 13835#endif
@@ -14629,9 +14593,6 @@ static const struct net_device_ops tg3_netdev_ops = {
14629 .ndo_do_ioctl = tg3_ioctl, 14593 .ndo_do_ioctl = tg3_ioctl,
14630 .ndo_tx_timeout = tg3_tx_timeout, 14594 .ndo_tx_timeout = tg3_tx_timeout,
14631 .ndo_change_mtu = tg3_change_mtu, 14595 .ndo_change_mtu = tg3_change_mtu,
14632#if TG3_VLAN_TAG_USED
14633 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14634#endif
14635#ifdef CONFIG_NET_POLL_CONTROLLER 14596#ifdef CONFIG_NET_POLL_CONTROLLER
14636 .ndo_poll_controller = tg3_poll_controller, 14597 .ndo_poll_controller = tg3_poll_controller,
14637#endif 14598#endif
@@ -14648,9 +14609,6 @@ static const struct net_device_ops tg3_netdev_ops_dma_bug = {
14648 .ndo_do_ioctl = tg3_ioctl, 14609 .ndo_do_ioctl = tg3_ioctl,
14649 .ndo_tx_timeout = tg3_tx_timeout, 14610 .ndo_tx_timeout = tg3_tx_timeout,
14650 .ndo_change_mtu = tg3_change_mtu, 14611 .ndo_change_mtu = tg3_change_mtu,
14651#if TG3_VLAN_TAG_USED
14652 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14653#endif
14654#ifdef CONFIG_NET_POLL_CONTROLLER 14612#ifdef CONFIG_NET_POLL_CONTROLLER
14655 .ndo_poll_controller = tg3_poll_controller, 14613 .ndo_poll_controller = tg3_poll_controller,
14656#endif 14614#endif
@@ -14700,9 +14658,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14700 14658
14701 SET_NETDEV_DEV(dev, &pdev->dev); 14659 SET_NETDEV_DEV(dev, &pdev->dev);
14702 14660
14703#if TG3_VLAN_TAG_USED
14704 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 14661 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
14705#endif
14706 14662
14707 tp = netdev_priv(dev); 14663 tp = netdev_priv(dev);
14708 tp->pdev = pdev; 14664 tp->pdev = pdev;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index d62c8d937c82..73884b69b749 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -4,7 +4,7 @@
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com) 5 * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc. 6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2007-2010 Broadcom Corporation. 7 * Copyright (C) 2007-2011 Broadcom Corporation.
8 */ 8 */
9 9
10#ifndef _T3_H 10#ifndef _T3_H
@@ -141,6 +141,7 @@
141#define CHIPREV_ID_57780_A1 0x57780001 141#define CHIPREV_ID_57780_A1 0x57780001
142#define CHIPREV_ID_5717_A0 0x05717000 142#define CHIPREV_ID_5717_A0 0x05717000
143#define CHIPREV_ID_57765_A0 0x57785000 143#define CHIPREV_ID_57765_A0 0x57785000
144#define CHIPREV_ID_5719_A0 0x05719000
144#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12) 145#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
145#define ASIC_REV_5700 0x07 146#define ASIC_REV_5700 0x07
146#define ASIC_REV_5701 0x00 147#define ASIC_REV_5701 0x00
@@ -1105,7 +1106,7 @@
1105#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000 1106#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000
1106#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000070ff 1107#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000070ff
1107#define TG3_CPMU_EEE_DBTMR2 0x000036b8 1108#define TG3_CPMU_EEE_DBTMR2 0x000036b8
1108#define TG3_CPMU_DBTMR1_APE_TX_2047US 0x07ff0000 1109#define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000
1109#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000070ff 1110#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000070ff
1110#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc 1111#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc
1111#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000 1112#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000
@@ -1333,6 +1334,10 @@
1333 1334
1334#define TG3_RDMA_RSRVCTRL_REG 0x00004900 1335#define TG3_RDMA_RSRVCTRL_REG 0x00004900
1335#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004 1336#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004
1337#define TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K 0x00000c00
1338#define TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK 0x00000ff0
1339#define TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K 0x000c0000
1340#define TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK 0x000ff000
1336#define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000 1341#define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000
1337#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000 1342#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000
1338/* 0x4904 --> 0x4910 unused */ 1343/* 0x4904 --> 0x4910 unused */
@@ -2108,6 +2113,10 @@
2108 2113
2109#define MII_TG3_DSP_TAP1 0x0001 2114#define MII_TG3_DSP_TAP1 0x0001
2110#define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007 2115#define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007
2116#define MII_TG3_DSP_TAP26 0x001a
2117#define MII_TG3_DSP_TAP26_ALNOKO 0x0001
2118#define MII_TG3_DSP_TAP26_RMRXSTO 0x0002
2119#define MII_TG3_DSP_TAP26_OPCSINPT 0x0004
2111#define MII_TG3_DSP_AADJ1CH0 0x001f 2120#define MII_TG3_DSP_AADJ1CH0 0x001f
2112#define MII_TG3_DSP_CH34TP2 0x4022 2121#define MII_TG3_DSP_CH34TP2 0x4022
2113#define MII_TG3_DSP_CH34TP2_HIBW01 0x0010 2122#define MII_TG3_DSP_CH34TP2_HIBW01 0x0010
@@ -2808,9 +2817,6 @@ struct tg3 {
2808 u32 rx_std_max_post; 2817 u32 rx_std_max_post;
2809 u32 rx_offset; 2818 u32 rx_offset;
2810 u32 rx_pkt_map_sz; 2819 u32 rx_pkt_map_sz;
2811#if TG3_VLAN_TAG_USED
2812 struct vlan_group *vlgrp;
2813#endif
2814 2820
2815 2821
2816 /* begin "everything else" cacheline(s) section */ 2822 /* begin "everything else" cacheline(s) section */
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index f8e463cd8ecc..e48a80885343 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -63,45 +63,45 @@
63 * - Other minor stuff 63 * - Other minor stuff
64 * 64 *
65 * v1.4 Feb 10, 2000 - Updated with more changes required after Dave's 65 * v1.4 Feb 10, 2000 - Updated with more changes required after Dave's
66 * network cleanup in 2.3.43pre7 (Tigran & myself) 66 * network cleanup in 2.3.43pre7 (Tigran & myself)
67 * - Minor stuff. 67 * - Minor stuff.
68 * 68 *
69 * v1.5 March 22, 2000 - Fixed another timer bug that would hang the driver 69 * v1.5 March 22, 2000 - Fixed another timer bug that would hang the
70 * if no cable/link were present. 70 * driver if no cable/link were present.
71 * - Cosmetic changes. 71 * - Cosmetic changes.
72 * - TODO: Port completely to new PCI/DMA API 72 * - TODO: Port completely to new PCI/DMA API
73 * Auto-Neg fallback. 73 * Auto-Neg fallback.
74 * 74 *
75 * v1.6 April 04, 2000 - Fixed driver support for kernel-parameters. Haven't 75 * v1.6 April 04, 2000 - Fixed driver support for kernel-parameters.
76 * tested it though, as the kernel support is currently 76 * Haven't tested it though, as the kernel support
77 * broken (2.3.99p4p3). 77 * is currently broken (2.3.99p4p3).
78 * - Updated tlan.txt accordingly. 78 * - Updated tlan.txt accordingly.
79 * - Adjusted minimum/maximum frame length. 79 * - Adjusted minimum/maximum frame length.
80 * - There is now a TLAN website up at 80 * - There is now a TLAN website up at
81 * http://hp.sourceforge.net/ 81 * http://hp.sourceforge.net/
82 * 82 *
83 * v1.7 April 07, 2000 - Started to implement custom ioctls. Driver now 83 * v1.7 April 07, 2000 - Started to implement custom ioctls. Driver now
84 * reports PHY information when used with Donald 84 * reports PHY information when used with Donald
85 * Beckers userspace MII diagnostics utility. 85 * Beckers userspace MII diagnostics utility.
86 * 86 *
87 * v1.8 April 23, 2000 - Fixed support for forced speed/duplex settings. 87 * v1.8 April 23, 2000 - Fixed support for forced speed/duplex settings.
88 * - Added link information to Auto-Neg and forced 88 * - Added link information to Auto-Neg and forced
89 * modes. When NIC operates with auto-neg the driver 89 * modes. When NIC operates with auto-neg the driver
90 * will report Link speed & duplex modes as well as 90 * will report Link speed & duplex modes as well as
91 * link partner abilities. When forced link is used, 91 * link partner abilities. When forced link is used,
92 * the driver will report status of the established 92 * the driver will report status of the established
93 * link. 93 * link.
94 * Please read tlan.txt for additional information. 94 * Please read tlan.txt for additional information.
95 * - Removed call to check_region(), and used 95 * - Removed call to check_region(), and used
96 * return value of request_region() instead. 96 * return value of request_region() instead.
97 * 97 *
98 * v1.8a May 28, 2000 - Minor updates. 98 * v1.8a May 28, 2000 - Minor updates.
99 * 99 *
100 * v1.9 July 25, 2000 - Fixed a few remaining Full-Duplex issues. 100 * v1.9 July 25, 2000 - Fixed a few remaining Full-Duplex issues.
101 * - Updated with timer fixes from Andrew Morton. 101 * - Updated with timer fixes from Andrew Morton.
102 * - Fixed module race in TLan_Open. 102 * - Fixed module race in TLan_Open.
103 * - Added routine to monitor PHY status. 103 * - Added routine to monitor PHY status.
104 * - Added activity led support for Proliant devices. 104 * - Added activity led support for Proliant devices.
105 * 105 *
106 * v1.10 Aug 30, 2000 - Added support for EISA based tlan controllers 106 * v1.10 Aug 30, 2000 - Added support for EISA based tlan controllers
107 * like the Compaq NetFlex3/E. 107 * like the Compaq NetFlex3/E.
@@ -111,8 +111,8 @@
111 * hardware probe is done with kernel API and 111 * hardware probe is done with kernel API and
112 * TLan_EisaProbe. 112 * TLan_EisaProbe.
113 * - Adjusted debug information for probing. 113 * - Adjusted debug information for probing.
114 * - Fixed bug that would cause general debug information 114 * - Fixed bug that would cause general debug
115 * to be printed after driver removal. 115 * information to be printed after driver removal.
116 * - Added transmit timeout handling. 116 * - Added transmit timeout handling.
117 * - Fixed OOM return values in tlan_probe. 117 * - Fixed OOM return values in tlan_probe.
118 * - Fixed possible mem leak in tlan_exit 118 * - Fixed possible mem leak in tlan_exit
@@ -136,8 +136,8 @@
136 * 136 *
137 * v1.12 Oct 12, 2000 - Minor fixes (memleak, init, etc.) 137 * v1.12 Oct 12, 2000 - Minor fixes (memleak, init, etc.)
138 * 138 *
139 * v1.13 Nov 28, 2000 - Stop flooding console with auto-neg issues 139 * v1.13 Nov 28, 2000 - Stop flooding console with auto-neg issues
140 * when link can't be established. 140 * when link can't be established.
141 * - Added the bbuf option as a kernel parameter. 141 * - Added the bbuf option as a kernel parameter.
142 * - Fixed ioaddr probe bug. 142 * - Fixed ioaddr probe bug.
143 * - Fixed stupid deadlock with MII interrupts. 143 * - Fixed stupid deadlock with MII interrupts.
@@ -147,28 +147,30 @@
147 * TLAN v1.0 silicon. This needs to be investigated 147 * TLAN v1.0 silicon. This needs to be investigated
148 * further. 148 * further.
149 * 149 *
150 * v1.14 Dec 16, 2000 - Added support for servicing multiple frames per. 150 * v1.14 Dec 16, 2000 - Added support for servicing multiple frames per.
151 * interrupt. Thanks goes to 151 * interrupt. Thanks goes to
152 * Adam Keys <adam@ti.com> 152 * Adam Keys <adam@ti.com>
153 * Denis Beaudoin <dbeaudoin@ti.com> 153 * Denis Beaudoin <dbeaudoin@ti.com>
154 * for providing the patch. 154 * for providing the patch.
155 * - Fixed auto-neg output when using multiple 155 * - Fixed auto-neg output when using multiple
156 * adapters. 156 * adapters.
157 * - Converted to use new taskq interface. 157 * - Converted to use new taskq interface.
158 * 158 *
159 * v1.14a Jan 6, 2001 - Minor adjustments (spinlocks, etc.) 159 * v1.14a Jan 6, 2001 - Minor adjustments (spinlocks, etc.)
160 * 160 *
161 * Samuel Chessman <chessman@tux.org> New Maintainer! 161 * Samuel Chessman <chessman@tux.org> New Maintainer!
162 * 162 *
163 * v1.15 Apr 4, 2002 - Correct operation when aui=1 to be 163 * v1.15 Apr 4, 2002 - Correct operation when aui=1 to be
164 * 10T half duplex no loopback 164 * 10T half duplex no loopback
165 * Thanks to Gunnar Eikman 165 * Thanks to Gunnar Eikman
166 * 166 *
167 * Sakari Ailus <sakari.ailus@iki.fi>: 167 * Sakari Ailus <sakari.ailus@iki.fi>:
168 * 168 *
169 * v1.15a Dec 15 2008 - Remove bbuf support, it doesn't work anyway. 169 * v1.15a Dec 15 2008 - Remove bbuf support, it doesn't work anyway.
170 * v1.16 Jan 6 2011 - Make checkpatch.pl happy.
171 * v1.17 Jan 6 2011 - Add suspend/resume support.
170 * 172 *
171 *******************************************************************************/ 173 ******************************************************************************/
172 174
173#include <linux/module.h> 175#include <linux/module.h>
174#include <linux/init.h> 176#include <linux/init.h>
@@ -185,13 +187,11 @@
185 187
186#include "tlan.h" 188#include "tlan.h"
187 189
188typedef u32 (TLanIntVectorFunc)( struct net_device *, u16 );
189
190 190
191/* For removing EISA devices */ 191/* For removing EISA devices */
192static struct net_device *TLan_Eisa_Devices; 192static struct net_device *tlan_eisa_devices;
193 193
194static int TLanDevicesInstalled; 194static int tlan_devices_installed;
195 195
196/* Set speed, duplex and aui settings */ 196/* Set speed, duplex and aui settings */
197static int aui[MAX_TLAN_BOARDS]; 197static int aui[MAX_TLAN_BOARDS];
@@ -202,7 +202,8 @@ module_param_array(aui, int, NULL, 0);
202module_param_array(duplex, int, NULL, 0); 202module_param_array(duplex, int, NULL, 0);
203module_param_array(speed, int, NULL, 0); 203module_param_array(speed, int, NULL, 0);
204MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)"); 204MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
205MODULE_PARM_DESC(duplex, "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)"); 205MODULE_PARM_DESC(duplex,
206 "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
206MODULE_PARM_DESC(speed, "ThunderLAN port speen setting(s) (0,10,100)"); 207MODULE_PARM_DESC(speed, "ThunderLAN port speen setting(s) (0,10,100)");
207 208
208MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>"); 209MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
@@ -218,139 +219,144 @@ static int debug;
218module_param(debug, int, 0); 219module_param(debug, int, 0);
219MODULE_PARM_DESC(debug, "ThunderLAN debug mask"); 220MODULE_PARM_DESC(debug, "ThunderLAN debug mask");
220 221
221static const char TLanSignature[] = "TLAN"; 222static const char tlan_signature[] = "TLAN";
222static const char tlan_banner[] = "ThunderLAN driver v1.15a\n"; 223static const char tlan_banner[] = "ThunderLAN driver v1.17\n";
223static int tlan_have_pci; 224static int tlan_have_pci;
224static int tlan_have_eisa; 225static int tlan_have_eisa;
225 226
226static const char *media[] = { 227static const char * const media[] = {
227 "10BaseT-HD ", "10BaseT-FD ","100baseTx-HD ", 228 "10BaseT-HD", "10BaseT-FD", "100baseTx-HD",
228 "100baseTx-FD", "100baseT4", NULL 229 "100BaseTx-FD", "100BaseT4", NULL
229}; 230};
230 231
231static struct board { 232static struct board {
232 const char *deviceLabel; 233 const char *device_label;
233 u32 flags; 234 u32 flags;
234 u16 addrOfs; 235 u16 addr_ofs;
235} board_info[] = { 236} board_info[] = {
236 { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 237 { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
237 { "Compaq Netelligent 10/100 TX PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 238 { "Compaq Netelligent 10/100 TX PCI UTP",
239 TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
238 { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 }, 240 { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
239 { "Compaq NetFlex-3/P", 241 { "Compaq NetFlex-3/P",
240 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, 242 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
241 { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 }, 243 { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
242 { "Compaq Netelligent Integrated 10/100 TX UTP", 244 { "Compaq Netelligent Integrated 10/100 TX UTP",
243 TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 245 TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
244 { "Compaq Netelligent Dual 10/100 TX PCI UTP", TLAN_ADAPTER_NONE, 0x83 }, 246 { "Compaq Netelligent Dual 10/100 TX PCI UTP",
245 { "Compaq Netelligent 10/100 TX Embedded UTP", TLAN_ADAPTER_NONE, 0x83 }, 247 TLAN_ADAPTER_NONE, 0x83 },
248 { "Compaq Netelligent 10/100 TX Embedded UTP",
249 TLAN_ADAPTER_NONE, 0x83 },
246 { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 }, 250 { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
247 { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xF8 }, 251 { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
248 { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xF8 }, 252 { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
249 { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 253 { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
250 { "Compaq Netelligent 10 T/2 PCI UTP/Coax", TLAN_ADAPTER_NONE, 0x83 }, 254 { "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
251 { "Compaq NetFlex-3/E", 255 { "Compaq NetFlex-3/E",
252 TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */ 256 TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */
253 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, 257 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
254 { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */ 258 { "Compaq NetFlex-3/E",
259 TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
255}; 260};
256 261
257static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = { 262static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = {
258 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10, 263 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
259 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 264 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
260 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100, 265 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
261 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, 266 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
262 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I, 267 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
263 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, 268 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
264 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER, 269 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
265 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 }, 270 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
266 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B, 271 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
267 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, 272 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
268 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI, 273 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
269 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 }, 274 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
270 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D, 275 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
271 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 }, 276 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
272 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I, 277 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
273 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 }, 278 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
274 { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183, 279 { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
275 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, 280 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
276 { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325, 281 { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
277 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 }, 282 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
278 { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326, 283 { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
279 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 }, 284 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
280 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100, 285 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
281 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 }, 286 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
282 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2, 287 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
283 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 }, 288 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
284 { 0,} 289 { 0,}
285}; 290};
286MODULE_DEVICE_TABLE(pci, tlan_pci_tbl); 291MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);
287 292
288static void TLan_EisaProbe( void ); 293static void tlan_eisa_probe(void);
289static void TLan_Eisa_Cleanup( void ); 294static void tlan_eisa_cleanup(void);
290static int TLan_Init( struct net_device * ); 295static int tlan_init(struct net_device *);
291static int TLan_Open( struct net_device *dev ); 296static int tlan_open(struct net_device *dev);
292static netdev_tx_t TLan_StartTx( struct sk_buff *, struct net_device *); 297static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *);
293static irqreturn_t TLan_HandleInterrupt( int, void *); 298static irqreturn_t tlan_handle_interrupt(int, void *);
294static int TLan_Close( struct net_device *); 299static int tlan_close(struct net_device *);
295static struct net_device_stats *TLan_GetStats( struct net_device *); 300static struct net_device_stats *tlan_get_stats(struct net_device *);
296static void TLan_SetMulticastList( struct net_device *); 301static void tlan_set_multicast_list(struct net_device *);
297static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd); 302static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
298static int TLan_probe1( struct pci_dev *pdev, long ioaddr, 303static int tlan_probe1(struct pci_dev *pdev, long ioaddr,
299 int irq, int rev, const struct pci_device_id *ent); 304 int irq, int rev, const struct pci_device_id *ent);
300static void TLan_tx_timeout( struct net_device *dev); 305static void tlan_tx_timeout(struct net_device *dev);
301static void TLan_tx_timeout_work(struct work_struct *work); 306static void tlan_tx_timeout_work(struct work_struct *work);
302static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent); 307static int tlan_init_one(struct pci_dev *pdev,
303 308 const struct pci_device_id *ent);
304static u32 TLan_HandleTxEOF( struct net_device *, u16 ); 309
305static u32 TLan_HandleStatOverflow( struct net_device *, u16 ); 310static u32 tlan_handle_tx_eof(struct net_device *, u16);
306static u32 TLan_HandleRxEOF( struct net_device *, u16 ); 311static u32 tlan_handle_stat_overflow(struct net_device *, u16);
307static u32 TLan_HandleDummy( struct net_device *, u16 ); 312static u32 tlan_handle_rx_eof(struct net_device *, u16);
308static u32 TLan_HandleTxEOC( struct net_device *, u16 ); 313static u32 tlan_handle_dummy(struct net_device *, u16);
309static u32 TLan_HandleStatusCheck( struct net_device *, u16 ); 314static u32 tlan_handle_tx_eoc(struct net_device *, u16);
310static u32 TLan_HandleRxEOC( struct net_device *, u16 ); 315static u32 tlan_handle_status_check(struct net_device *, u16);
311 316static u32 tlan_handle_rx_eoc(struct net_device *, u16);
312static void TLan_Timer( unsigned long ); 317
313 318static void tlan_timer(unsigned long);
314static void TLan_ResetLists( struct net_device * ); 319
315static void TLan_FreeLists( struct net_device * ); 320static void tlan_reset_lists(struct net_device *);
316static void TLan_PrintDio( u16 ); 321static void tlan_free_lists(struct net_device *);
317static void TLan_PrintList( TLanList *, char *, int ); 322static void tlan_print_dio(u16);
318static void TLan_ReadAndClearStats( struct net_device *, int ); 323static void tlan_print_list(struct tlan_list *, char *, int);
319static void TLan_ResetAdapter( struct net_device * ); 324static void tlan_read_and_clear_stats(struct net_device *, int);
320static void TLan_FinishReset( struct net_device * ); 325static void tlan_reset_adapter(struct net_device *);
321static void TLan_SetMac( struct net_device *, int areg, char *mac ); 326static void tlan_finish_reset(struct net_device *);
322 327static void tlan_set_mac(struct net_device *, int areg, char *mac);
323static void TLan_PhyPrint( struct net_device * ); 328
324static void TLan_PhyDetect( struct net_device * ); 329static void tlan_phy_print(struct net_device *);
325static void TLan_PhyPowerDown( struct net_device * ); 330static void tlan_phy_detect(struct net_device *);
326static void TLan_PhyPowerUp( struct net_device * ); 331static void tlan_phy_power_down(struct net_device *);
327static void TLan_PhyReset( struct net_device * ); 332static void tlan_phy_power_up(struct net_device *);
328static void TLan_PhyStartLink( struct net_device * ); 333static void tlan_phy_reset(struct net_device *);
329static void TLan_PhyFinishAutoNeg( struct net_device * ); 334static void tlan_phy_start_link(struct net_device *);
335static void tlan_phy_finish_auto_neg(struct net_device *);
330#ifdef MONITOR 336#ifdef MONITOR
331static void TLan_PhyMonitor( struct net_device * ); 337static void tlan_phy_monitor(struct net_device *);
332#endif 338#endif
333 339
334/* 340/*
335static int TLan_PhyNop( struct net_device * ); 341 static int tlan_phy_nop(struct net_device *);
336static int TLan_PhyInternalCheck( struct net_device * ); 342 static int tlan_phy_internal_check(struct net_device *);
337static int TLan_PhyInternalService( struct net_device * ); 343 static int tlan_phy_internal_service(struct net_device *);
338static int TLan_PhyDp83840aCheck( struct net_device * ); 344 static int tlan_phy_dp83840a_check(struct net_device *);
339*/ 345*/
340 346
341static bool TLan_MiiReadReg( struct net_device *, u16, u16, u16 * ); 347static bool tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
342static void TLan_MiiSendData( u16, u32, unsigned ); 348static void tlan_mii_send_data(u16, u32, unsigned);
343static void TLan_MiiSync( u16 ); 349static void tlan_mii_sync(u16);
344static void TLan_MiiWriteReg( struct net_device *, u16, u16, u16 ); 350static void tlan_mii_write_reg(struct net_device *, u16, u16, u16);
345 351
346static void TLan_EeSendStart( u16 ); 352static void tlan_ee_send_start(u16);
347static int TLan_EeSendByte( u16, u8, int ); 353static int tlan_ee_send_byte(u16, u8, int);
348static void TLan_EeReceiveByte( u16, u8 *, int ); 354static void tlan_ee_receive_byte(u16, u8 *, int);
349static int TLan_EeReadByte( struct net_device *, u8, u8 * ); 355static int tlan_ee_read_byte(struct net_device *, u8, u8 *);
350 356
351 357
352static inline void 358static inline void
353TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb) 359tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
354{ 360{
355 unsigned long addr = (unsigned long)skb; 361 unsigned long addr = (unsigned long)skb;
356 tag->buffer[9].address = addr; 362 tag->buffer[9].address = addr;
@@ -358,7 +364,7 @@ TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb)
358} 364}
359 365
360static inline struct sk_buff * 366static inline struct sk_buff *
361TLan_GetSKB( const struct tlan_list_tag *tag) 367tlan_get_skb(const struct tlan_list *tag)
362{ 368{
363 unsigned long addr; 369 unsigned long addr;
364 370
@@ -367,50 +373,50 @@ TLan_GetSKB( const struct tlan_list_tag *tag)
367 return (struct sk_buff *) addr; 373 return (struct sk_buff *) addr;
368} 374}
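The pair of helpers above stashes the host's sk_buff pointer in trailing buffer slots of the hardware list, which the adapter ignores for a single-fragment transfer. A minimal sketch of the idea, with the slot layout assumed (the elided hunk lines split the pointer the same way for 64-bit builds):

	struct sk_buff;			/* opaque here; kernel type */

	struct tlan_buffer {		/* assumed per-slot layout; u32 from <linux/types.h> */
		u32 count;
		u32 address;
	};

	/* spread a possibly-64-bit pointer over two 32-bit address fields;
	 * ">> 16 >> 16" keeps the shift well-defined on 32-bit builds */
	static inline void sketch_store_skb(struct tlan_buffer *slot,
					    struct sk_buff *skb)
	{
		unsigned long addr = (unsigned long)skb;

		slot[0].address = (u32)addr;
		slot[1].address = (u32)(addr >> 16 >> 16);
	}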
369 375
370 376static u32
371static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = { 377(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = {
372 NULL, 378 NULL,
373 TLan_HandleTxEOF, 379 tlan_handle_tx_eof,
374 TLan_HandleStatOverflow, 380 tlan_handle_stat_overflow,
375 TLan_HandleRxEOF, 381 tlan_handle_rx_eof,
376 TLan_HandleDummy, 382 tlan_handle_dummy,
377 TLan_HandleTxEOC, 383 tlan_handle_tx_eoc,
378 TLan_HandleStatusCheck, 384 tlan_handle_status_check,
379 TLan_HandleRxEOC 385 tlan_handle_rx_eoc
380}; 386};
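This table turns interrupt dispatch into one indexed call: the 3-bit type field decoded from TLAN_HOST_INT selects the handler directly, exactly as tlan_handle_interrupt() does further down:

	type = (host_int & TLAN_HI_IT_MASK) >> 2;	/* 0..7 */
	if (type)	/* slot 0 is NULL: no interrupt pending */
		ack = tlan_int_vector[type](dev, host_int);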
381 387
382static inline void 388static inline void
383TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type ) 389tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
384{ 390{
385 TLanPrivateInfo *priv = netdev_priv(dev); 391 struct tlan_priv *priv = netdev_priv(dev);
386 unsigned long flags = 0; 392 unsigned long flags = 0;
387 393
388 if (!in_irq()) 394 if (!in_irq())
389 spin_lock_irqsave(&priv->lock, flags); 395 spin_lock_irqsave(&priv->lock, flags);
390 if ( priv->timer.function != NULL && 396 if (priv->timer.function != NULL &&
391 priv->timerType != TLAN_TIMER_ACTIVITY ) { 397 priv->timer_type != TLAN_TIMER_ACTIVITY) {
392 if (!in_irq()) 398 if (!in_irq())
393 spin_unlock_irqrestore(&priv->lock, flags); 399 spin_unlock_irqrestore(&priv->lock, flags);
394 return; 400 return;
395 } 401 }
396 priv->timer.function = TLan_Timer; 402 priv->timer.function = tlan_timer;
397 if (!in_irq()) 403 if (!in_irq())
398 spin_unlock_irqrestore(&priv->lock, flags); 404 spin_unlock_irqrestore(&priv->lock, flags);
399 405
400 priv->timer.data = (unsigned long) dev; 406 priv->timer.data = (unsigned long) dev;
401 priv->timerSetAt = jiffies; 407 priv->timer_set_at = jiffies;
402 priv->timerType = type; 408 priv->timer_type = type;
403 mod_timer(&priv->timer, jiffies + ticks); 409 mod_timer(&priv->timer, jiffies + ticks);
404 410
405} /* TLan_SetTimer */ 411}
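A hypothetical call site for the helper above, assuming the TLAN_TIMER_* constants from tlan.h; because of the guard at the top, an activity request quietly yields to any other one-shot timer already pending:

	/* illustrative only; constants assumed from tlan.h */
	tlan_set_timer(dev, TLAN_TIMER_ACT_DELAY, TLAN_TIMER_ACTIVITY);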
406 412
407 413
408/***************************************************************************** 414/*****************************************************************************
409****************************************************************************** 415******************************************************************************
410 416
411 ThunderLAN Driver Primary Functions 417ThunderLAN driver primary functions
412 418
413 These functions are more or less common to all Linux network drivers. 419these functions are more or less common to all linux network drivers.
414 420
415****************************************************************************** 421******************************************************************************
416*****************************************************************************/ 422*****************************************************************************/
@@ -419,49 +425,117 @@ TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type )
419 425
420 426
421 427
422 /*************************************************************** 428/***************************************************************
423 * tlan_remove_one 429 * tlan_remove_one
424 * 430 *
425 * Returns: 431 * Returns:
426 * Nothing 432 * Nothing
427 * Parms: 433 * Parms:
428 * None 434 * None
429 * 435 *
430 * Goes through the TLanDevices list and frees the device 436 * Goes through the TLanDevices list and frees the device
431 * structs and memory associated with each device (lists 437 * structs and memory associated with each device (lists
432 * and buffers). It also unreserves the IO port regions 438 * and buffers). It also unreserves the IO port regions
433 * associated with this device. 439 * associated with this device.
434 * 440 *
435 **************************************************************/ 441 **************************************************************/
436 442
437 443
438static void __devexit tlan_remove_one( struct pci_dev *pdev) 444static void __devexit tlan_remove_one(struct pci_dev *pdev)
439{ 445{
440 struct net_device *dev = pci_get_drvdata( pdev ); 446 struct net_device *dev = pci_get_drvdata(pdev);
441 TLanPrivateInfo *priv = netdev_priv(dev); 447 struct tlan_priv *priv = netdev_priv(dev);
442 448
443 unregister_netdev( dev ); 449 unregister_netdev(dev);
444 450
445 if ( priv->dmaStorage ) { 451 if (priv->dma_storage) {
446 pci_free_consistent(priv->pciDev, 452 pci_free_consistent(priv->pci_dev,
447 priv->dmaSize, priv->dmaStorage, 453 priv->dma_size, priv->dma_storage,
448 priv->dmaStorageDMA ); 454 priv->dma_storage_dma);
449 } 455 }
450 456
451#ifdef CONFIG_PCI 457#ifdef CONFIG_PCI
452 pci_release_regions(pdev); 458 pci_release_regions(pdev);
453#endif 459#endif
454 460
455 free_netdev( dev ); 461 free_netdev(dev);
462
463 pci_set_drvdata(pdev, NULL);
464}
465
466static void tlan_start(struct net_device *dev)
467{
468 tlan_reset_lists(dev);
469 /* NOTE: It might not be necessary to read the stats before a
470 reset if you don't care what the values are.
471 */
472 tlan_read_and_clear_stats(dev, TLAN_IGNORE);
473 tlan_reset_adapter(dev);
474 netif_wake_queue(dev);
475}
476
477static void tlan_stop(struct net_device *dev)
478{
479 struct tlan_priv *priv = netdev_priv(dev);
480
481 tlan_read_and_clear_stats(dev, TLAN_RECORD);
482 outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
483 /* Reset and power down phy */
484 tlan_reset_adapter(dev);
485 if (priv->timer.function != NULL) {
486 del_timer_sync(&priv->timer);
487 priv->timer.function = NULL;
488 }
489}
490
491#ifdef CONFIG_PM
492
493static int tlan_suspend(struct pci_dev *pdev, pm_message_t state)
494{
495 struct net_device *dev = pci_get_drvdata(pdev);
496
497 if (netif_running(dev))
498 tlan_stop(dev);
499
500 netif_device_detach(dev);
501 pci_save_state(pdev);
502 pci_disable_device(pdev);
503 pci_wake_from_d3(pdev, false);
504 pci_set_power_state(pdev, PCI_D3hot);
456 505
457 pci_set_drvdata( pdev, NULL ); 506 return 0;
458} 507}
459 508
509static int tlan_resume(struct pci_dev *pdev)
510{
511 struct net_device *dev = pci_get_drvdata(pdev);
512
513 pci_set_power_state(pdev, PCI_D0);
514 pci_restore_state(pdev);
515 pci_enable_wake(pdev, 0, 0);
516 netif_device_attach(dev);
517
518 if (netif_running(dev))
519 tlan_start(dev);
520
521 return 0;
522}
523
524#else /* CONFIG_PM */
525
526#define tlan_suspend NULL
527#define tlan_resume NULL
528
529#endif /* CONFIG_PM */
530
531
460static struct pci_driver tlan_driver = { 532static struct pci_driver tlan_driver = {
461 .name = "tlan", 533 .name = "tlan",
462 .id_table = tlan_pci_tbl, 534 .id_table = tlan_pci_tbl,
463 .probe = tlan_init_one, 535 .probe = tlan_init_one,
464 .remove = __devexit_p(tlan_remove_one), 536 .remove = __devexit_p(tlan_remove_one),
537 .suspend = tlan_suspend,
538 .resume = tlan_resume,
465}; 539};
466 540
467static int __init tlan_probe(void) 541static int __init tlan_probe(void)
@@ -482,13 +556,13 @@ static int __init tlan_probe(void)
482 } 556 }
483 557
484 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n"); 558 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
485 TLan_EisaProbe(); 559 tlan_eisa_probe();
486 560
487 printk(KERN_INFO "TLAN: %d device%s installed, PCI: %d EISA: %d\n", 561 printk(KERN_INFO "TLAN: %d device%s installed, PCI: %d EISA: %d\n",
488 TLanDevicesInstalled, TLanDevicesInstalled == 1 ? "" : "s", 562 tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s",
489 tlan_have_pci, tlan_have_eisa); 563 tlan_have_pci, tlan_have_eisa);
490 564
491 if (TLanDevicesInstalled == 0) { 565 if (tlan_devices_installed == 0) {
492 rc = -ENODEV; 566 rc = -ENODEV;
493 goto err_out_pci_unreg; 567 goto err_out_pci_unreg;
494 } 568 }
@@ -501,39 +575,39 @@ err_out_pci_free:
501} 575}
502 576
503 577
504static int __devinit tlan_init_one( struct pci_dev *pdev, 578static int __devinit tlan_init_one(struct pci_dev *pdev,
505 const struct pci_device_id *ent) 579 const struct pci_device_id *ent)
506{ 580{
507 return TLan_probe1( pdev, -1, -1, 0, ent); 581 return tlan_probe1(pdev, -1, -1, 0, ent);
508} 582}
509 583
510 584
511/* 585/*
512 *************************************************************** 586***************************************************************
513 * tlan_probe1 587* tlan_probe1
514 * 588*
515 * Returns: 589* Returns:
516 * 0 on success, error code on error 590* 0 on success, error code on error
517 * Parms: 591* Parms:
518 * none 592* none
519 * 593*
520 * The name is lower case to fit in with all the rest of 594* The name is lower case to fit in with all the rest of
521 * the netcard_probe names. This function looks for 595* the netcard_probe names. This function looks for
522 * another TLan based adapter, setting it up with the 596* another TLan based adapter, setting it up with the
523 * allocated device struct if one is found. 597* allocated device struct if one is found.
524 * tlan_probe has been ported to the new net API and 598* tlan_probe has been ported to the new net API and
525 * now allocates its own device structure. This function 599* now allocates its own device structure. This function
526 * is also used by modules. 600* is also used by modules.
527 * 601*
528 **************************************************************/ 602**************************************************************/
529 603
530static int __devinit TLan_probe1(struct pci_dev *pdev, 604static int __devinit tlan_probe1(struct pci_dev *pdev,
531 long ioaddr, int irq, int rev, 605 long ioaddr, int irq, int rev,
532 const struct pci_device_id *ent ) 606 const struct pci_device_id *ent)
533{ 607{
534 608
535 struct net_device *dev; 609 struct net_device *dev;
536 TLanPrivateInfo *priv; 610 struct tlan_priv *priv;
537 u16 device_id; 611 u16 device_id;
538 int reg, rc = -ENODEV; 612 int reg, rc = -ENODEV;
539 613
@@ -543,7 +617,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
543 if (rc) 617 if (rc)
544 return rc; 618 return rc;
545 619
546 rc = pci_request_regions(pdev, TLanSignature); 620 rc = pci_request_regions(pdev, tlan_signature);
547 if (rc) { 621 if (rc) {
548 printk(KERN_ERR "TLAN: Could not reserve IO regions\n"); 622 printk(KERN_ERR "TLAN: Could not reserve IO regions\n");
549 goto err_out; 623 goto err_out;
@@ -551,7 +625,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
551 } 625 }
552#endif /* CONFIG_PCI */ 626#endif /* CONFIG_PCI */
553 627
554 dev = alloc_etherdev(sizeof(TLanPrivateInfo)); 628 dev = alloc_etherdev(sizeof(struct tlan_priv));
555 if (dev == NULL) { 629 if (dev == NULL) {
556 printk(KERN_ERR "TLAN: Could not allocate memory for device.\n"); 630 printk(KERN_ERR "TLAN: Could not allocate memory for device.\n");
557 rc = -ENOMEM; 631 rc = -ENOMEM;
@@ -561,26 +635,28 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
561 635
562 priv = netdev_priv(dev); 636 priv = netdev_priv(dev);
563 637
564 priv->pciDev = pdev; 638 priv->pci_dev = pdev;
565 priv->dev = dev; 639 priv->dev = dev;
566 640
567 /* Is this a PCI device? */ 641 /* Is this a PCI device? */
568 if (pdev) { 642 if (pdev) {
569 u32 pci_io_base = 0; 643 u32 pci_io_base = 0;
570 644
571 priv->adapter = &board_info[ent->driver_data]; 645 priv->adapter = &board_info[ent->driver_data];
572 646
573 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 647 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
574 if (rc) { 648 if (rc) {
575 printk(KERN_ERR "TLAN: No suitable PCI mapping available.\n"); 649 printk(KERN_ERR
650 "TLAN: No suitable PCI mapping available.\n");
576 goto err_out_free_dev; 651 goto err_out_free_dev;
577 } 652 }
578 653
579 for ( reg= 0; reg <= 5; reg ++ ) { 654 for (reg = 0; reg <= 5; reg++) {
580 if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) { 655 if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
581 pci_io_base = pci_resource_start(pdev, reg); 656 pci_io_base = pci_resource_start(pdev, reg);
582 TLAN_DBG( TLAN_DEBUG_GNRL, "IO mapping is available at %x.\n", 657 TLAN_DBG(TLAN_DEBUG_GNRL,
583 pci_io_base); 658 "IO mapping is available at %x.\n",
659 pci_io_base);
584 break; 660 break;
585 } 661 }
586 } 662 }
@@ -592,7 +668,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
592 668
593 dev->base_addr = pci_io_base; 669 dev->base_addr = pci_io_base;
594 dev->irq = pdev->irq; 670 dev->irq = pdev->irq;
595 priv->adapterRev = pdev->revision; 671 priv->adapter_rev = pdev->revision;
596 pci_set_master(pdev); 672 pci_set_master(pdev);
597 pci_set_drvdata(pdev, dev); 673 pci_set_drvdata(pdev, dev);
598 674
@@ -602,11 +678,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
602 device_id = inw(ioaddr + EISA_ID2); 678 device_id = inw(ioaddr + EISA_ID2);
603 priv->is_eisa = 1; 679 priv->is_eisa = 1;
604 if (device_id == 0x20F1) { 680 if (device_id == 0x20F1) {
605 priv->adapter = &board_info[13]; /* NetFlex-3/E */ 681 priv->adapter = &board_info[13]; /* NetFlex-3/E */
606 priv->adapterRev = 23; /* TLAN 2.3 */ 682 priv->adapter_rev = 23; /* TLAN 2.3 */
607 } else { 683 } else {
608 priv->adapter = &board_info[14]; 684 priv->adapter = &board_info[14];
609 priv->adapterRev = 10; /* TLAN 1.0 */ 685 priv->adapter_rev = 10; /* TLAN 1.0 */
610 } 686 }
611 dev->base_addr = ioaddr; 687 dev->base_addr = ioaddr;
612 dev->irq = irq; 688 dev->irq = irq;
@@ -620,11 +696,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
620 priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0 696 priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0
621 : (dev->mem_start & 0x18) >> 3; 697 : (dev->mem_start & 0x18) >> 3;
622 698
623 if (priv->speed == 0x1) { 699 if (priv->speed == 0x1)
624 priv->speed = TLAN_SPEED_10; 700 priv->speed = TLAN_SPEED_10;
625 } else if (priv->speed == 0x2) { 701 else if (priv->speed == 0x2)
626 priv->speed = TLAN_SPEED_100; 702 priv->speed = TLAN_SPEED_100;
627 } 703
628 debug = priv->debug = dev->mem_end; 704 debug = priv->debug = dev->mem_end;
629 } else { 705 } else {
630 priv->aui = aui[boards_found]; 706 priv->aui = aui[boards_found];
@@ -635,11 +711,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
635 711
636 /* This will be used when we get an adapter error from 712 /* This will be used when we get an adapter error from
637 * within our irq handler */ 713 * within our irq handler */
638 INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work); 714 INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work);
639 715
640 spin_lock_init(&priv->lock); 716 spin_lock_init(&priv->lock);
641 717
642 rc = TLan_Init(dev); 718 rc = tlan_init(dev);
643 if (rc) { 719 if (rc) {
644 printk(KERN_ERR "TLAN: Could not set up device.\n"); 720 printk(KERN_ERR "TLAN: Could not set up device.\n");
645 goto err_out_free_dev; 721 goto err_out_free_dev;
@@ -652,29 +728,29 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
652 } 728 }
653 729
654 730
655 TLanDevicesInstalled++; 731 tlan_devices_installed++;
656 boards_found++; 732 boards_found++;
657 733
658 /* pdev is NULL if this is an EISA device */ 734 /* pdev is NULL if this is an EISA device */
659 if (pdev) 735 if (pdev)
660 tlan_have_pci++; 736 tlan_have_pci++;
661 else { 737 else {
662 priv->nextDevice = TLan_Eisa_Devices; 738 priv->next_device = tlan_eisa_devices;
663 TLan_Eisa_Devices = dev; 739 tlan_eisa_devices = dev;
664 tlan_have_eisa++; 740 tlan_have_eisa++;
665 } 741 }
666 742
667 printk(KERN_INFO "TLAN: %s irq=%2d, io=%04x, %s, Rev. %d\n", 743 printk(KERN_INFO "TLAN: %s irq=%2d, io=%04x, %s, Rev. %d\n",
668 dev->name, 744 dev->name,
669 (int) dev->irq, 745 (int) dev->irq,
670 (int) dev->base_addr, 746 (int) dev->base_addr,
671 priv->adapter->deviceLabel, 747 priv->adapter->device_label,
672 priv->adapterRev); 748 priv->adapter_rev);
673 return 0; 749 return 0;
674 750
675err_out_uninit: 751err_out_uninit:
676 pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage, 752 pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage,
677 priv->dmaStorageDMA ); 753 priv->dma_storage_dma);
678err_out_free_dev: 754err_out_free_dev:
679 free_netdev(dev); 755 free_netdev(dev);
680err_out_regions: 756err_out_regions:
@@ -689,22 +765,23 @@ err_out:
689} 765}
690 766
691 767
692static void TLan_Eisa_Cleanup(void) 768static void tlan_eisa_cleanup(void)
693{ 769{
694 struct net_device *dev; 770 struct net_device *dev;
695 TLanPrivateInfo *priv; 771 struct tlan_priv *priv;
696 772
697 while( tlan_have_eisa ) { 773 while (tlan_have_eisa) {
698 dev = TLan_Eisa_Devices; 774 dev = tlan_eisa_devices;
699 priv = netdev_priv(dev); 775 priv = netdev_priv(dev);
700 if (priv->dmaStorage) { 776 if (priv->dma_storage) {
701 pci_free_consistent(priv->pciDev, priv->dmaSize, 777 pci_free_consistent(priv->pci_dev, priv->dma_size,
702 priv->dmaStorage, priv->dmaStorageDMA ); 778 priv->dma_storage,
779 priv->dma_storage_dma);
703 } 780 }
704 release_region( dev->base_addr, 0x10); 781 release_region(dev->base_addr, 0x10);
705 unregister_netdev( dev ); 782 unregister_netdev(dev);
706 TLan_Eisa_Devices = priv->nextDevice; 783 tlan_eisa_devices = priv->next_device;
707 free_netdev( dev ); 784 free_netdev(dev);
708 tlan_have_eisa--; 785 tlan_have_eisa--;
709 } 786 }
710} 787}
@@ -715,7 +792,7 @@ static void __exit tlan_exit(void)
715 pci_unregister_driver(&tlan_driver); 792 pci_unregister_driver(&tlan_driver);
716 793
717 if (tlan_have_eisa) 794 if (tlan_have_eisa)
718 TLan_Eisa_Cleanup(); 795 tlan_eisa_cleanup();
719 796
720} 797}
721 798
@@ -726,24 +803,24 @@ module_exit(tlan_exit);
726 803
727 804
728 805
729 /************************************************************** 806/**************************************************************
730 * TLan_EisaProbe 807 * tlan_eisa_probe
731 * 808 *
732 * Returns: 0 on success, 1 otherwise 809 * Returns: 0 on success, 1 otherwise
733 * 810 *
734 * Parms: None 811 * Parms: None
735 * 812 *
736 * 813 *
737 * This function probes for EISA devices and calls 814 * This function probes for EISA devices and calls
738 * TLan_probe1 when one is found. 815 * tlan_probe1 when one is found.
739 * 816 *
740 *************************************************************/ 817 *************************************************************/
741 818
742static void __init TLan_EisaProbe (void) 819static void __init tlan_eisa_probe(void)
743{ 820{
744 long ioaddr; 821 long ioaddr;
745 int rc = -ENODEV; 822 int rc = -ENODEV;
746 int irq; 823 int irq;
747 u16 device_id; 824 u16 device_id;
748 825
749 if (!EISA_bus) { 826 if (!EISA_bus) {
@@ -754,15 +831,16 @@ static void __init TLan_EisaProbe (void)
754 /* Loop through all slots of the EISA bus */ 831 /* Loop through all slots of the EISA bus */
755 for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) { 832 for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
756 833
757 TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n", 834 TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
758 (int) ioaddr + 0xC80, inw(ioaddr + EISA_ID)); 835 (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID));
759 TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n", 836 TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
760 (int) ioaddr + 0xC82, inw(ioaddr + EISA_ID2)); 837 (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2));
761 838
762 839
763 TLAN_DBG(TLAN_DEBUG_PROBE, "Probing for EISA adapter at IO: 0x%4x : ", 840 TLAN_DBG(TLAN_DEBUG_PROBE,
764 (int) ioaddr); 841 "Probing for EISA adapter at IO: 0x%4x : ",
765 if (request_region(ioaddr, 0x10, TLanSignature) == NULL) 842 (int) ioaddr);
843 if (request_region(ioaddr, 0x10, tlan_signature) == NULL)
766 goto out; 844 goto out;
767 845
768 if (inw(ioaddr + EISA_ID) != 0x110E) { 846 if (inw(ioaddr + EISA_ID) != 0x110E) {
@@ -772,326 +850,326 @@ static void __init TLan_EisaProbe (void)
772 850
773 device_id = inw(ioaddr + EISA_ID2); 851 device_id = inw(ioaddr + EISA_ID2);
774 if (device_id != 0x20F1 && device_id != 0x40F1) { 852 if (device_id != 0x20F1 && device_id != 0x40F1) {
775 release_region (ioaddr, 0x10); 853 release_region(ioaddr, 0x10);
776 goto out; 854 goto out;
777 } 855 }
778 856
779 if (inb(ioaddr + EISA_CR) != 0x1) { /* Check if adapter is enabled */ 857 /* check if adapter is enabled */
780 release_region (ioaddr, 0x10); 858 if (inb(ioaddr + EISA_CR) != 0x1) {
859 release_region(ioaddr, 0x10);
781 goto out2; 860 goto out2;
782 } 861 }
783 862
784 if (debug == 0x10) 863 if (debug == 0x10)
785 printk("Found one\n"); 864 printk(KERN_INFO "Found one\n");
786 865
787 866
788 /* Get irq from board */ 867 /* Get irq from board */
789 switch (inb(ioaddr + 0xCC0)) { 868 switch (inb(ioaddr + 0xcc0)) {
790 case(0x10): 869 case(0x10):
791 irq=5; 870 irq = 5;
792 break; 871 break;
793 case(0x20): 872 case(0x20):
794 irq=9; 873 irq = 9;
795 break; 874 break;
796 case(0x40): 875 case(0x40):
797 irq=10; 876 irq = 10;
798 break; 877 break;
799 case(0x80): 878 case(0x80):
800 irq=11; 879 irq = 11;
801 break; 880 break;
802 default: 881 default:
803 goto out; 882 goto out;
804 } 883 }
805 884
806 885
807 /* Setup the newly found eisa adapter */ 886 /* Setup the newly found eisa adapter */
808 rc = TLan_probe1( NULL, ioaddr, irq, 887 rc = tlan_probe1(NULL, ioaddr, irq,
809 12, NULL); 888 12, NULL);
810 continue; 889 continue;
811 890
812 out: 891out:
813 if (debug == 0x10) 892 if (debug == 0x10)
814 printk("None found\n"); 893 printk(KERN_INFO "None found\n");
815 continue; 894 continue;
816 895
817 out2: if (debug == 0x10) 896out2:
818 printk("Card found but it is not enabled, skipping\n"); 897 if (debug == 0x10)
819 continue; 898 printk(KERN_INFO "Card found but it is not enabled, skipping\n");
899 continue;
820 900
821 } 901 }
822 902
823} /* TLan_EisaProbe */ 903}
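The 0x110E test above matches Compaq's EISA maker code. EISA IDs pack three letters into 15 bits, five bits per letter with 'A' = 1, stored most-significant byte first, so the little-endian inw() returns the bytes swapped. Worked decode:

	('C'-'@') << 10 | ('P'-'@') << 5 | ('Q'-'@')
		= (3 << 10) | (16 << 5) | 17 = 0x0e11	/* "CPQ" */

0x0E11 is also Compaq's PCI vendor ID; byte-swapped by the 16-bit read it appears as the 0x110E the probe compares against.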
824 904
825#ifdef CONFIG_NET_POLL_CONTROLLER 905#ifdef CONFIG_NET_POLL_CONTROLLER
826static void TLan_Poll(struct net_device *dev) 906static void tlan_poll(struct net_device *dev)
827{ 907{
828 disable_irq(dev->irq); 908 disable_irq(dev->irq);
829 TLan_HandleInterrupt(dev->irq, dev); 909 tlan_handle_interrupt(dev->irq, dev);
830 enable_irq(dev->irq); 910 enable_irq(dev->irq);
831} 911}
832#endif 912#endif
833 913
834static const struct net_device_ops TLan_netdev_ops = { 914static const struct net_device_ops tlan_netdev_ops = {
835 .ndo_open = TLan_Open, 915 .ndo_open = tlan_open,
836 .ndo_stop = TLan_Close, 916 .ndo_stop = tlan_close,
837 .ndo_start_xmit = TLan_StartTx, 917 .ndo_start_xmit = tlan_start_tx,
838 .ndo_tx_timeout = TLan_tx_timeout, 918 .ndo_tx_timeout = tlan_tx_timeout,
839 .ndo_get_stats = TLan_GetStats, 919 .ndo_get_stats = tlan_get_stats,
840 .ndo_set_multicast_list = TLan_SetMulticastList, 920 .ndo_set_multicast_list = tlan_set_multicast_list,
841 .ndo_do_ioctl = TLan_ioctl, 921 .ndo_do_ioctl = tlan_ioctl,
842 .ndo_change_mtu = eth_change_mtu, 922 .ndo_change_mtu = eth_change_mtu,
843 .ndo_set_mac_address = eth_mac_addr, 923 .ndo_set_mac_address = eth_mac_addr,
844 .ndo_validate_addr = eth_validate_addr, 924 .ndo_validate_addr = eth_validate_addr,
845#ifdef CONFIG_NET_POLL_CONTROLLER 925#ifdef CONFIG_NET_POLL_CONTROLLER
846 .ndo_poll_controller = TLan_Poll, 926 .ndo_poll_controller = tlan_poll,
847#endif 927#endif
848}; 928};
849 929
850 930
851 931
852 /*************************************************************** 932/***************************************************************
853 * TLan_Init 933 * tlan_init
854 * 934 *
855 * Returns: 935 * Returns:
856 * 0 on success, error code otherwise. 936 * 0 on success, error code otherwise.
857 * Parms: 937 * Parms:
858 * dev The structure of the device to be 938 * dev The structure of the device to be
859 * init'ed. 939 * init'ed.
860 * 940 *
861 * This function completes the initialization of the 941 * This function completes the initialization of the
862 * device structure and driver. It reserves the IO 942 * device structure and driver. It reserves the IO
863 * addresses, allocates memory for the lists and bounce 943 * addresses, allocates memory for the lists and bounce
864 * buffers, retrieves the MAC address from the eeprom 944 * buffers, retrieves the MAC address from the eeprom
865 * and assigns the device's methods. 945 * and assigns the device's methods.
866 * 946 *
867 **************************************************************/ 947 **************************************************************/
868 948
869static int TLan_Init( struct net_device *dev ) 949static int tlan_init(struct net_device *dev)
870{ 950{
871 int dma_size; 951 int dma_size;
872 int err; 952 int err;
873 int i; 953 int i;
874 TLanPrivateInfo *priv; 954 struct tlan_priv *priv;
875 955
876 priv = netdev_priv(dev); 956 priv = netdev_priv(dev);
877 957
878 dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS ) 958 dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
879 * ( sizeof(TLanList) ); 959 * (sizeof(struct tlan_list));
880 priv->dmaStorage = pci_alloc_consistent(priv->pciDev, 960 priv->dma_storage = pci_alloc_consistent(priv->pci_dev,
881 dma_size, &priv->dmaStorageDMA); 961 dma_size,
882 priv->dmaSize = dma_size; 962 &priv->dma_storage_dma);
883 963 priv->dma_size = dma_size;
884 if ( priv->dmaStorage == NULL ) { 964
885 printk(KERN_ERR "TLAN: Could not allocate lists and buffers for %s.\n", 965 if (priv->dma_storage == NULL) {
886 dev->name ); 966 printk(KERN_ERR
967 "TLAN: Could not allocate lists and buffers for %s.\n",
968 dev->name);
887 return -ENOMEM; 969 return -ENOMEM;
888 } 970 }
889 memset( priv->dmaStorage, 0, dma_size ); 971 memset(priv->dma_storage, 0, dma_size);
890 priv->rxList = (TLanList *) ALIGN((unsigned long)priv->dmaStorage, 8); 972 priv->rx_list = (struct tlan_list *)
891 priv->rxListDMA = ALIGN(priv->dmaStorageDMA, 8); 973 ALIGN((unsigned long)priv->dma_storage, 8);
892 priv->txList = priv->rxList + TLAN_NUM_RX_LISTS; 974 priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
893 priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS; 975 priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS;
976 priv->tx_list_dma =
977 priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;
894 978
895 err = 0; 979 err = 0;
896 for ( i = 0; i < 6 ; i++ ) 980 for (i = 0; i < 6 ; i++)
897 err |= TLan_EeReadByte( dev, 981 err |= tlan_ee_read_byte(dev,
898 (u8) priv->adapter->addrOfs + i, 982 (u8) priv->adapter->addr_ofs + i,
899 (u8 *) &dev->dev_addr[i] ); 983 (u8 *) &dev->dev_addr[i]);
900 if ( err ) { 984 if (err) {
901 printk(KERN_ERR "TLAN: %s: Error reading MAC from eeprom: %d\n", 985 printk(KERN_ERR "TLAN: %s: Error reading MAC from eeprom: %d\n",
902 dev->name, 986 dev->name,
903 err ); 987 err);
904 } 988 }
905 dev->addr_len = 6; 989 dev->addr_len = 6;
906 990
907 netif_carrier_off(dev); 991 netif_carrier_off(dev);
908 992
909 /* Device methods */ 993 /* Device methods */
910 dev->netdev_ops = &TLan_netdev_ops; 994 dev->netdev_ops = &tlan_netdev_ops;
911 dev->watchdog_timeo = TX_TIMEOUT; 995 dev->watchdog_timeo = TX_TIMEOUT;
912 996
913 return 0; 997 return 0;
914 998
915} /* TLan_Init */ 999}
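A note on the carve-up above: one coherent allocation backs both rings, rx descriptors first, and the CPU and bus views are aligned by 8 in lockstep so plain index arithmetic names the same descriptor on both sides. Sketch of the invariant, assuming struct tlan_list is the hardware descriptor from tlan.h:

	/* for any i, these refer to the same descriptor: */
	struct tlan_list *entry     = priv->rx_list + i;
	dma_addr_t        entry_dma = priv->rx_list_dma
				    + i * sizeof(struct tlan_list);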
916 1000
917 1001
918 1002
919 1003
920 /*************************************************************** 1004/***************************************************************
921 * TLan_Open 1005 * tlan_open
922 * 1006 *
923 * Returns: 1007 * Returns:
924 * 0 on success, error code otherwise. 1008 * 0 on success, error code otherwise.
925 * Parms: 1009 * Parms:
926 * dev Structure of device to be opened. 1010 * dev Structure of device to be opened.
927 * 1011 *
928 * This routine puts the driver and TLAN adapter in a 1012 * This routine puts the driver and TLAN adapter in a
929 * state where it is ready to send and receive packets. 1013 * state where it is ready to send and receive packets.
930 * It allocates the IRQ, resets and brings the adapter 1014 * It allocates the IRQ, resets and brings the adapter
931 * out of reset, and allows interrupts. It also delays 1015 * out of reset, and allows interrupts. It also delays
932 * the startup for autonegotiation or sends a Rx GO 1016 * the startup for autonegotiation or sends a Rx GO
933 * command to the adapter, as appropriate. 1017 * command to the adapter, as appropriate.
934 * 1018 *
935 **************************************************************/ 1019 **************************************************************/
936 1020
937static int TLan_Open( struct net_device *dev ) 1021static int tlan_open(struct net_device *dev)
938{ 1022{
939 TLanPrivateInfo *priv = netdev_priv(dev); 1023 struct tlan_priv *priv = netdev_priv(dev);
940 int err; 1024 int err;
941 1025
942 priv->tlanRev = TLan_DioRead8( dev->base_addr, TLAN_DEF_REVISION ); 1026 priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION);
943 err = request_irq( dev->irq, TLan_HandleInterrupt, IRQF_SHARED, 1027 err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED,
944 dev->name, dev ); 1028 dev->name, dev);
945 1029
946 if ( err ) { 1030 if (err) {
947 pr_err("TLAN: Cannot open %s because IRQ %d is already in use.\n", 1031 pr_err("TLAN: Cannot open %s because IRQ %d is already in use.\n",
948 dev->name, dev->irq ); 1032 dev->name, dev->irq);
949 return err; 1033 return err;
950 } 1034 }
951 1035
952 init_timer(&priv->timer); 1036 init_timer(&priv->timer);
953 netif_start_queue(dev);
954 1037
955 /* NOTE: It might not be necessary to read the stats before a 1038 tlan_start(dev);
956 reset if you don't care what the values are.
957 */
958 TLan_ResetLists( dev );
959 TLan_ReadAndClearStats( dev, TLAN_IGNORE );
960 TLan_ResetAdapter( dev );
961 1039
962 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n", 1040 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
963 dev->name, priv->tlanRev ); 1041 dev->name, priv->tlan_rev);
964 1042
965 return 0; 1043 return 0;
966 1044
967} /* TLan_Open */ 1045}
968 1046
969 1047
970 1048
971 /************************************************************** 1049/**************************************************************
972 * TLan_ioctl 1050 * tlan_ioctl
973 * 1051 *
974 * Returns: 1052 * Returns:
975 * 0 on success, error code otherwise 1053 * 0 on success, error code otherwise
976 * Params: 1054 * Params:
977 * dev structure of device to receive ioctl. 1055 * dev structure of device to receive ioctl.
978 * 1056 *
979 * rq ifreq structure to hold userspace data. 1057 * rq ifreq structure to hold userspace data.
980 * 1058 *
981 * cmd ioctl command. 1059 * cmd ioctl command.
982 * 1060 *
983 * 1061 *
984 *************************************************************/ 1062 *************************************************************/
985 1063
986static int TLan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1064static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
987{ 1065{
988 TLanPrivateInfo *priv = netdev_priv(dev); 1066 struct tlan_priv *priv = netdev_priv(dev);
989 struct mii_ioctl_data *data = if_mii(rq); 1067 struct mii_ioctl_data *data = if_mii(rq);
990 u32 phy = priv->phy[priv->phyNum]; 1068 u32 phy = priv->phy[priv->phy_num];
991 1069
992 if (!priv->phyOnline) 1070 if (!priv->phy_online)
993 return -EAGAIN; 1071 return -EAGAIN;
994 1072
995 switch(cmd) { 1073 switch (cmd) {
996 case SIOCGMIIPHY: /* Get address of MII PHY in use. */ 1074 case SIOCGMIIPHY: /* get address of MII PHY in use. */
997 data->phy_id = phy; 1075 data->phy_id = phy;
998 1076
999 1077
1000 case SIOCGMIIREG: /* Read MII PHY register. */ 1078 case SIOCGMIIREG: /* read MII PHY register. */
1001 TLan_MiiReadReg(dev, data->phy_id & 0x1f, 1079 tlan_mii_read_reg(dev, data->phy_id & 0x1f,
1002 data->reg_num & 0x1f, &data->val_out); 1080 data->reg_num & 0x1f, &data->val_out);
1003 return 0; 1081 return 0;
1004 1082
1005 1083
1006 case SIOCSMIIREG: /* Write MII PHY register. */ 1084 case SIOCSMIIREG: /* write MII PHY register. */
1007 TLan_MiiWriteReg(dev, data->phy_id & 0x1f, 1085 tlan_mii_write_reg(dev, data->phy_id & 0x1f,
1008 data->reg_num & 0x1f, data->val_in); 1086 data->reg_num & 0x1f, data->val_in);
1009 return 0; 1087 return 0;
1010 default: 1088 default:
1011 return -EOPNOTSUPP; 1089 return -EOPNOTSUPP;
1012 } 1090 }
1013} /* tlan_ioctl */ 1091}
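For illustration, a hypothetical userspace sketch (not part of this driver) exercising the SIOCGMIIPHY/SIOCGMIIREG path above, in the style of mii-tool; note the driver returns -EAGAIN until the PHY is online:

	#include <string.h>
	#include <unistd.h>
	#include <net/if.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <linux/mii.h>
	#include <linux/sockios.h>

	static int read_mii(const char *ifname, int reg, unsigned int *val)
	{
		struct ifreq ifr;
		struct mii_ioctl_data *mii =
			(struct mii_ioctl_data *)&ifr.ifr_data;
		int fd = socket(AF_INET, SOCK_DGRAM, 0), rc = -1;

		if (fd < 0)
			return -1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		if (!ioctl(fd, SIOCGMIIPHY, &ifr)) {	/* fills phy_id */
			mii->reg_num = reg;
			if (!ioctl(fd, SIOCGMIIREG, &ifr)) {
				*val = mii->val_out;	/* via tlan_mii_read_reg() */
				rc = 0;
			}
		}
		close(fd);
		return rc;
	}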
1014 1092
1015 1093
1016 /*************************************************************** 1094/***************************************************************
1017 * TLan_tx_timeout 1095 * tlan_tx_timeout
1018 * 1096 *
1019 * Returns: nothing 1097 * Returns: nothing
1020 * 1098 *
1021 * Params: 1099 * Params:
1022 * dev structure of device which timed out 1100 * dev structure of device which timed out
1023 * during transmit. 1101 * during transmit.
1024 * 1102 *
1025 **************************************************************/ 1103 **************************************************************/
1026 1104
1027static void TLan_tx_timeout(struct net_device *dev) 1105static void tlan_tx_timeout(struct net_device *dev)
1028{ 1106{
1029 1107
1030 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name); 1108 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
1031 1109
1032 /* OK, so we timed out; let's see what we can do about it...*/ 1110 /* OK, so we timed out; let's see what we can do about it...*/
1033 TLan_FreeLists( dev ); 1111 tlan_free_lists(dev);
1034 TLan_ResetLists( dev ); 1112 tlan_reset_lists(dev);
1035 TLan_ReadAndClearStats( dev, TLAN_IGNORE ); 1113 tlan_read_and_clear_stats(dev, TLAN_IGNORE);
1036 TLan_ResetAdapter( dev ); 1114 tlan_reset_adapter(dev);
1037 dev->trans_start = jiffies; /* prevent tx timeout */ 1115 dev->trans_start = jiffies; /* prevent tx timeout */
1038 netif_wake_queue( dev ); 1116 netif_wake_queue(dev);
1039 1117
1040} 1118}
1041 1119
1042 1120
1043 /*************************************************************** 1121/***************************************************************
1044 * TLan_tx_timeout_work 1122 * tlan_tx_timeout_work
1045 * 1123 *
1046 * Returns: nothing 1124 * Returns: nothing
1047 * 1125 *
1048 * Params: 1126 * Params:
1049 * work work item of device which timed out 1127 * work work item of device which timed out
1050 * 1128 *
1051 **************************************************************/ 1129 **************************************************************/
1052 1130
1053static void TLan_tx_timeout_work(struct work_struct *work) 1131static void tlan_tx_timeout_work(struct work_struct *work)
1054{ 1132{
1055 TLanPrivateInfo *priv = 1133 struct tlan_priv *priv =
1056 container_of(work, TLanPrivateInfo, tlan_tqueue); 1134 container_of(work, struct tlan_priv, tlan_tqueue);
1057 1135
1058 TLan_tx_timeout(priv->dev); 1136 tlan_tx_timeout(priv->dev);
1059} 1137}
1060 1138
1061 1139
1062 1140
1063 /*************************************************************** 1141/***************************************************************
1064 * TLan_StartTx 1142 * tlan_start_tx
1065 * 1143 *
1066 * Returns: 1144 * Returns:
1067 * 0 on success, non-zero on failure. 1145 * 0 on success, non-zero on failure.
1068 * Parms: 1146 * Parms:
1069 * skb A pointer to the sk_buff containing the 1147 * skb A pointer to the sk_buff containing the
1070 * frame to be sent. 1148 * frame to be sent.
1071 * dev The device to send the data on. 1149 * dev The device to send the data on.
1072 * 1150 *
1073 * This function adds a frame to the Tx list to be sent 1151 * This function adds a frame to the Tx list to be sent
1074 * ASAP. First it verifies that the adapter is ready and 1152 * ASAP. First it verifies that the adapter is ready and
1075 * there is room in the queue. Then it sets up the next 1153 * there is room in the queue. Then it sets up the next
1076 * available list, copies the frame to the corresponding 1154 * available list, copies the frame to the corresponding
1077 * buffer. If the adapter Tx channel is idle, it gives 1155 * buffer. If the adapter Tx channel is idle, it gives
1078 * the adapter a Tx Go command on the list, otherwise it 1156 * the adapter a Tx Go command on the list, otherwise it
1079 * sets the forward address of the previous list to point 1157 * sets the forward address of the previous list to point
1080 * to this one. Then it frees the sk_buff. 1158 * to this one. Then it frees the sk_buff.
1081 * 1159 *
1082 **************************************************************/ 1160 **************************************************************/
1083 1161
1084static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev ) 1162static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
1085{ 1163{
1086 TLanPrivateInfo *priv = netdev_priv(dev); 1164 struct tlan_priv *priv = netdev_priv(dev);
1087 dma_addr_t tail_list_phys; 1165 dma_addr_t tail_list_phys;
1088 TLanList *tail_list; 1166 struct tlan_list *tail_list;
1089 unsigned long flags; 1167 unsigned long flags;
1090 unsigned int txlen; 1168 unsigned int txlen;
1091 1169
1092 if ( ! priv->phyOnline ) { 1170 if (!priv->phy_online) {
1093 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n", 1171 TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
1094 dev->name ); 1172 dev->name);
1095 dev_kfree_skb_any(skb); 1173 dev_kfree_skb_any(skb);
1096 return NETDEV_TX_OK; 1174 return NETDEV_TX_OK;
1097 } 1175 }
@@ -1100,218 +1178,214 @@ static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
1100 return NETDEV_TX_OK; 1178 return NETDEV_TX_OK;
1101 txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE); 1179 txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);
1102 1180
1103 tail_list = priv->txList + priv->txTail; 1181 tail_list = priv->tx_list + priv->tx_tail;
1104 tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail; 1182 tail_list_phys =
1183 priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail;
1105 1184
1106 if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) { 1185 if (tail_list->c_stat != TLAN_CSTAT_UNUSED) {
1107 TLAN_DBG( TLAN_DEBUG_TX, 1186 TLAN_DBG(TLAN_DEBUG_TX,
1108 "TRANSMIT: %s is busy (Head=%d Tail=%d)\n", 1187 "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
1109 dev->name, priv->txHead, priv->txTail ); 1188 dev->name, priv->tx_head, priv->tx_tail);
1110 netif_stop_queue(dev); 1189 netif_stop_queue(dev);
1111 priv->txBusyCount++; 1190 priv->tx_busy_count++;
1112 return NETDEV_TX_BUSY; 1191 return NETDEV_TX_BUSY;
1113 } 1192 }
1114 1193
1115 tail_list->forward = 0; 1194 tail_list->forward = 0;
1116 1195
1117 tail_list->buffer[0].address = pci_map_single(priv->pciDev, 1196 tail_list->buffer[0].address = pci_map_single(priv->pci_dev,
1118 skb->data, txlen, 1197 skb->data, txlen,
1119 PCI_DMA_TODEVICE); 1198 PCI_DMA_TODEVICE);
1120 TLan_StoreSKB(tail_list, skb); 1199 tlan_store_skb(tail_list, skb);
1121 1200
1122 tail_list->frameSize = (u16) txlen; 1201 tail_list->frame_size = (u16) txlen;
1123 tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen; 1202 tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
1124 tail_list->buffer[1].count = 0; 1203 tail_list->buffer[1].count = 0;
1125 tail_list->buffer[1].address = 0; 1204 tail_list->buffer[1].address = 0;
1126 1205
1127 spin_lock_irqsave(&priv->lock, flags); 1206 spin_lock_irqsave(&priv->lock, flags);
1128 tail_list->cStat = TLAN_CSTAT_READY; 1207 tail_list->c_stat = TLAN_CSTAT_READY;
1129 if ( ! priv->txInProgress ) { 1208 if (!priv->tx_in_progress) {
1130 priv->txInProgress = 1; 1209 priv->tx_in_progress = 1;
1131 TLAN_DBG( TLAN_DEBUG_TX, 1210 TLAN_DBG(TLAN_DEBUG_TX,
1132 "TRANSMIT: Starting TX on buffer %d\n", priv->txTail ); 1211 "TRANSMIT: Starting TX on buffer %d\n",
1133 outl( tail_list_phys, dev->base_addr + TLAN_CH_PARM ); 1212 priv->tx_tail);
1134 outl( TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD ); 1213 outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM);
1214 outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD);
1135 } else { 1215 } else {
1136 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Adding buffer %d to TX channel\n", 1216 TLAN_DBG(TLAN_DEBUG_TX,
1137 priv->txTail ); 1217 "TRANSMIT: Adding buffer %d to TX channel\n",
1138 if ( priv->txTail == 0 ) { 1218 priv->tx_tail);
1139 ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward 1219 if (priv->tx_tail == 0) {
1220 (priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward
1140 = tail_list_phys; 1221 = tail_list_phys;
1141 } else { 1222 } else {
1142 ( priv->txList + ( priv->txTail - 1 ) )->forward 1223 (priv->tx_list + (priv->tx_tail - 1))->forward
1143 = tail_list_phys; 1224 = tail_list_phys;
1144 } 1225 }
1145 } 1226 }
1146 spin_unlock_irqrestore(&priv->lock, flags); 1227 spin_unlock_irqrestore(&priv->lock, flags);
1147 1228
1148 CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS ); 1229 CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS);
1149 1230
1150 return NETDEV_TX_OK; 1231 return NETDEV_TX_OK;
1151 1232
1152} /* TLan_StartTx */ 1233}
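CIRC_INC is presumably the wrap-around increment from tlan.h; under that assumption the tail bookkeeping above reduces to:

	/* assumed definition (tlan.h): ring index modulo ring size */
	#define CIRC_INC(a, b)	if (++(a) >= (b)) (a) = 0

tx_tail advances here as frames are queued and tx_head advances in tlan_handle_tx_eof() as the adapter completes them; the tail reaching a descriptor whose c_stat is not TLAN_CSTAT_UNUSED is exactly the busy case that stops the queue above.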
1153 1234
1154 1235
1155 1236
1156 1237
1157 /*************************************************************** 1238/***************************************************************
1158 * TLan_HandleInterrupt 1239 * tlan_handle_interrupt
1159 * 1240 *
1160 * Returns: 1241 * Returns:
1161 * Nothing 1242 * Nothing
1162 * Parms: 1243 * Parms:
1163 * irq The line on which the interrupt 1244 * irq The line on which the interrupt
1164 * occurred. 1245 * occurred.
1165 * dev_id A pointer to the device assigned to 1246 * dev_id A pointer to the device assigned to
1166 * this irq line. 1247 * this irq line.
1167 * 1248 *
1168 * This function handles an interrupt generated by its 1249 * This function handles an interrupt generated by its
1169 * assigned TLAN adapter. The function deactivates 1250 * assigned TLAN adapter. The function deactivates
1170 * interrupts on its adapter, records the type of 1251 * interrupts on its adapter, records the type of
1171 * interrupt, executes the appropriate subhandler, and 1252 * interrupt, executes the appropriate subhandler, and
1172 * acknowledges the interrupt to the adapter (thus 1253 * acknowledges the interrupt to the adapter (thus
1173 * re-enabling adapter interrupts). 1254 * re-enabling adapter interrupts).
1174 * 1255 *
1175 **************************************************************/ 1256 **************************************************************/
1176 1257
1177static irqreturn_t TLan_HandleInterrupt(int irq, void *dev_id) 1258static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
1178{ 1259{
1179 struct net_device *dev = dev_id; 1260 struct net_device *dev = dev_id;
1180 TLanPrivateInfo *priv = netdev_priv(dev); 1261 struct tlan_priv *priv = netdev_priv(dev);
1181 u16 host_int; 1262 u16 host_int;
1182 u16 type; 1263 u16 type;
1183 1264
1184 spin_lock(&priv->lock); 1265 spin_lock(&priv->lock);
1185 1266
1186 host_int = inw( dev->base_addr + TLAN_HOST_INT ); 1267 host_int = inw(dev->base_addr + TLAN_HOST_INT);
1187 type = ( host_int & TLAN_HI_IT_MASK ) >> 2; 1268 type = (host_int & TLAN_HI_IT_MASK) >> 2;
1188 if ( type ) { 1269 if (type) {
1189 u32 ack; 1270 u32 ack;
1190 u32 host_cmd; 1271 u32 host_cmd;
1191 1272
1192 outw( host_int, dev->base_addr + TLAN_HOST_INT ); 1273 outw(host_int, dev->base_addr + TLAN_HOST_INT);
1193 ack = TLanIntVector[type]( dev, host_int ); 1274 ack = tlan_int_vector[type](dev, host_int);
1194 1275
1195 if ( ack ) { 1276 if (ack) {
1196 host_cmd = TLAN_HC_ACK | ack | ( type << 18 ); 1277 host_cmd = TLAN_HC_ACK | ack | (type << 18);
1197 outl( host_cmd, dev->base_addr + TLAN_HOST_CMD ); 1278 outl(host_cmd, dev->base_addr + TLAN_HOST_CMD);
1198 } 1279 }
1199 } 1280 }
1200 1281
1201 spin_unlock(&priv->lock); 1282 spin_unlock(&priv->lock);
1202 1283
1203 return IRQ_RETVAL(type); 1284 return IRQ_RETVAL(type);
1204} /* TLan_HandleInterrupts */ 1285}
1205 1286
1206 1287
1207 1288
1208 1289
1209 /*************************************************************** 1290/***************************************************************
1210 * TLan_Close 1291 * tlan_close
1211 * 1292 *
1212 * Returns: 1293 * Returns:
1213 * An error code. 1294 * An error code.
1214 * Parms: 1295 * Parms:
1215 * dev The device structure of the device to 1296 * dev The device structure of the device to
1216 * close. 1297 * close.
1217 * 1298 *
1218 * This function shuts down the adapter. It records any 1299 * This function shuts down the adapter. It records any
1219 * stats, puts the adapter into reset state, deactivates 1300 * stats, puts the adapter into reset state, deactivates
1220 * its timer as needed, and frees the irq it is using. 1301 * its timer as needed, and frees the irq it is using.
1221 * 1302 *
1222 **************************************************************/ 1303 **************************************************************/
1223 1304
1224static int TLan_Close(struct net_device *dev) 1305static int tlan_close(struct net_device *dev)
1225{ 1306{
1226 TLanPrivateInfo *priv = netdev_priv(dev); 1307 struct tlan_priv *priv = netdev_priv(dev);
1227 1308
1228 netif_stop_queue(dev);
1229 priv->neg_be_verbose = 0; 1309 priv->neg_be_verbose = 0;
1310 tlan_stop(dev);
1230 1311
1231 TLan_ReadAndClearStats( dev, TLAN_RECORD ); 1312 free_irq(dev->irq, dev);
1232 outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD ); 1313 tlan_free_lists(dev);
1233 if ( priv->timer.function != NULL ) { 1314 TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name);
1234 del_timer_sync( &priv->timer );
1235 priv->timer.function = NULL;
1236 }
1237
1238 free_irq( dev->irq, dev );
1239 TLan_FreeLists( dev );
1240 TLAN_DBG( TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name );
1241 1315
1242 return 0; 1316 return 0;
1243 1317
1244} /* TLan_Close */ 1318}
1245 1319
1246 1320
1247 1321
1248 1322
1249 /*************************************************************** 1323/***************************************************************
1250 * TLan_GetStats 1324 * tlan_get_stats
1251 * 1325 *
1252 * Returns: 1326 * Returns:
1253 * A pointer to the device's statistics structure. 1327 * A pointer to the device's statistics structure.
1254 * Parms: 1328 * Parms:
1255 * dev The device structure to return the 1329 * dev The device structure to return the
1256 * stats for. 1330 * stats for.
1257 * 1331 *
1258 * This function updates the device's statistics by reading 1332 * This function updates the device's statistics by reading
1259 * the TLAN chip's onboard registers. Then it returns the 1333 * the TLAN chip's onboard registers. Then it returns the
1260 * address of the statistics structure. 1334 * address of the statistics structure.
1261 * 1335 *
1262 **************************************************************/ 1336 **************************************************************/
1263 1337
1264static struct net_device_stats *TLan_GetStats( struct net_device *dev ) 1338static struct net_device_stats *tlan_get_stats(struct net_device *dev)
1265{ 1339{
1266 TLanPrivateInfo *priv = netdev_priv(dev); 1340 struct tlan_priv *priv = netdev_priv(dev);
1267 int i; 1341 int i;
1268 1342
1269 /* Should we only read stats if the device is open? */ 1343 /* Should we only read stats if the device is open? */
1270 TLan_ReadAndClearStats( dev, TLAN_RECORD ); 1344 tlan_read_and_clear_stats(dev, TLAN_RECORD);
1271 1345
1272 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name, 1346 TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
1273 priv->rxEocCount ); 1347 priv->rx_eoc_count);
1274 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name, 1348 TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
1275 priv->txBusyCount ); 1349 priv->tx_busy_count);
1276 if ( debug & TLAN_DEBUG_GNRL ) { 1350 if (debug & TLAN_DEBUG_GNRL) {
1277 TLan_PrintDio( dev->base_addr ); 1351 tlan_print_dio(dev->base_addr);
1278 TLan_PhyPrint( dev ); 1352 tlan_phy_print(dev);
1279 } 1353 }
1280 if ( debug & TLAN_DEBUG_LIST ) { 1354 if (debug & TLAN_DEBUG_LIST) {
1281 for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) 1355 for (i = 0; i < TLAN_NUM_RX_LISTS; i++)
1282 TLan_PrintList( priv->rxList + i, "RX", i ); 1356 tlan_print_list(priv->rx_list + i, "RX", i);
1283 for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) 1357 for (i = 0; i < TLAN_NUM_TX_LISTS; i++)
1284 TLan_PrintList( priv->txList + i, "TX", i ); 1358 tlan_print_list(priv->tx_list + i, "TX", i);
1285 } 1359 }
1286 1360
1287 return &dev->stats; 1361 return &dev->stats;
1288 1362
1289} /* TLan_GetStats */ 1363}
1290 1364
1291 1365
1292 1366
1293 1367
1294 /*************************************************************** 1368/***************************************************************
1295 * TLan_SetMulticastList 1369 * tlan_set_multicast_list
1296 * 1370 *
1297 * Returns: 1371 * Returns:
1298 * Nothing 1372 * Nothing
1299 * Parms: 1373 * Parms:
1300 * dev The device structure to set the 1374 * dev The device structure to set the
1301 * multicast list for. 1375 * multicast list for.
1302 * 1376 *
1303 * This function sets the TLAN adaptor to various receive 1377 * This function sets the TLAN adaptor to various receive
1304 * modes. If the IFF_PROMISC flag is set, promiscuous 1378 * modes. If the IFF_PROMISC flag is set, promiscuous
1305 * mode is activated. Otherwise, promiscuous mode is 1379 * mode is activated. Otherwise, promiscuous mode is
1306 * turned off. If the IFF_ALLMULTI flag is set, then 1380 * turned off. If the IFF_ALLMULTI flag is set, then
1307 * the hash table is set to receive all group addresses. 1381 * the hash table is set to receive all group addresses.
1308 * Otherwise, the first three multicast addresses are 1382 * Otherwise, the first three multicast addresses are
1309 * stored in AREG_1-3, and the rest are selected via the 1383 * stored in AREG_1-3, and the rest are selected via the
1310 * hash table, as necessary. 1384 * hash table, as necessary.
1311 * 1385 *
1312 **************************************************************/ 1386 **************************************************************/
1313 1387
1314static void TLan_SetMulticastList( struct net_device *dev ) 1388static void tlan_set_multicast_list(struct net_device *dev)
1315{ 1389{
1316 struct netdev_hw_addr *ha; 1390 struct netdev_hw_addr *ha;
1317 u32 hash1 = 0; 1391 u32 hash1 = 0;
@@ -1320,53 +1394,56 @@ static void TLan_SetMulticastList( struct net_device *dev )
1320 u32 offset; 1394 u32 offset;
1321 u8 tmp; 1395 u8 tmp;
1322 1396
1323 if ( dev->flags & IFF_PROMISC ) { 1397 if (dev->flags & IFF_PROMISC) {
1324 tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD ); 1398 tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
1325 TLan_DioWrite8( dev->base_addr, 1399 tlan_dio_write8(dev->base_addr,
1326 TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF ); 1400 TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF);
1327 } else { 1401 } else {
1328 tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD ); 1402 tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
1329 TLan_DioWrite8( dev->base_addr, 1403 tlan_dio_write8(dev->base_addr,
1330 TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF ); 1404 TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF);
1331 if ( dev->flags & IFF_ALLMULTI ) { 1405 if (dev->flags & IFF_ALLMULTI) {
1332 for ( i = 0; i < 3; i++ ) 1406 for (i = 0; i < 3; i++)
1333 TLan_SetMac( dev, i + 1, NULL ); 1407 tlan_set_mac(dev, i + 1, NULL);
1334 TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, 0xFFFFFFFF ); 1408 tlan_dio_write32(dev->base_addr, TLAN_HASH_1,
1335 TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF ); 1409 0xffffffff);
1410 tlan_dio_write32(dev->base_addr, TLAN_HASH_2,
1411 0xffffffff);
1336 } else { 1412 } else {
1337 i = 0; 1413 i = 0;
1338 netdev_for_each_mc_addr(ha, dev) { 1414 netdev_for_each_mc_addr(ha, dev) {
1339 if ( i < 3 ) { 1415 if (i < 3) {
1340 TLan_SetMac( dev, i + 1, 1416 tlan_set_mac(dev, i + 1,
1341 (char *) &ha->addr); 1417 (char *) &ha->addr);
1342 } else { 1418 } else {
1343 offset = TLan_HashFunc((u8 *)&ha->addr); 1419 offset =
1344 if ( offset < 32 ) 1420 tlan_hash_func((u8 *)&ha->addr);
1345 hash1 |= ( 1 << offset ); 1421 if (offset < 32)
1422 hash1 |= (1 << offset);
1346 else 1423 else
1347 hash2 |= ( 1 << ( offset - 32 ) ); 1424 hash2 |= (1 << (offset - 32));
1348 } 1425 }
1349 i++; 1426 i++;
1350 } 1427 }
1351 for ( ; i < 3; i++ ) 1428 for ( ; i < 3; i++)
1352 TLan_SetMac( dev, i + 1, NULL ); 1429 tlan_set_mac(dev, i + 1, NULL);
1353 TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, hash1 ); 1430 tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1);
1354 TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, hash2 ); 1431 tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2);
1355 } 1432 }
1356 } 1433 }
1357 1434
1358} /* TLan_SetMulticastList */ 1435}
1359 1436
1360 1437
1361 1438
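The multicast path above keeps the first three group addresses in AREG_1-3 and folds the rest into a 64-bit hash split across TLAN_HASH_1/TLAN_HASH_2. A minimal standalone sketch of that split, assuming a simple XOR-fold to a 6-bit offset (the driver's real hash is tlan_hash_func() in tlan.h; demo_hash here is purely illustrative):

	/* Illustrative only: the real 6-bit hash is tlan_hash_func() in tlan.h.
	 * This XOR-fold just shows how the offset selects one bit in the two
	 * 32-bit hash registers.
	 */
	#include <stdint.h>
	#include <stdio.h>

	static unsigned int demo_hash(const uint8_t *addr)	/* hypothetical */
	{
		unsigned int h = 0;
		int i;

		for (i = 0; i < 6; i++)
			h ^= addr[i];
		return (h ^ (h >> 6)) & 0x3f;	/* fold to a 6-bit offset, 0..63 */
	}

	int main(void)
	{
		const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
		uint32_t hash1 = 0, hash2 = 0;
		unsigned int offset = demo_hash(mc);

		if (offset < 32)		/* offsets 0..31 land in TLAN_HASH_1 */
			hash1 |= 1u << offset;
		else				/* offsets 32..63 land in TLAN_HASH_2 */
			hash2 |= 1u << (offset - 32);

		printf("offset=%u hash1=%08x hash2=%08x\n", offset, hash1, hash2);
		return 0;
	}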
1362/***************************************************************************** 1439/*****************************************************************************
1363****************************************************************************** 1440******************************************************************************
1364 1441
1365 ThunderLAN Driver Interrupt Vectors and Table 1442ThunderLAN driver interrupt vectors and table
1366 1443
1367 Please see Chap. 4, "Interrupt Handling" of the "ThunderLAN 1444please see chap. 4, "Interrupt Handling" of the "ThunderLAN
1368 Programmer's Guide" for more information on handling interrupts 1445Programmer's Guide" for more information on handling interrupts
1369 generated by TLAN based adapters. 1446generated by TLAN based adapters.
1370 1447
1371****************************************************************************** 1448******************************************************************************
1372*****************************************************************************/ 1449*****************************************************************************/
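The handlers that follow are dispatched through a function-pointer table indexed by the interrupt type carried in HOST_INT. A sketch of that pattern, under stated assumptions: the table name, size, and bit-field layout here are illustrative, not the driver's actual definitions.

	/* Sketch of the dispatch pattern used below; the real table is built
	 * from the tlan_handle_* functions elsewhere in this file.
	 */
	#include <stdint.h>

	struct net_device;			/* opaque here */

	typedef uint32_t (demo_int_handler)(struct net_device *dev,
					    uint16_t host_int);

	static uint32_t demo_invalid(struct net_device *dev, uint16_t host_int)
	{
		(void)dev; (void)host_int;
		return 1;			/* ack and ignore */
	}

	static demo_int_handler *demo_int_vector[8] = {
		demo_invalid,			/* 0: spurious */
		demo_invalid,			/* 1: would be tlan_handle_tx_eof */
		/* ... one slot per interrupt type ... */
	};

	static uint32_t demo_dispatch(struct net_device *dev, uint16_t host_int)
	{
		unsigned int type = (host_int >> 5) & 0x7;   /* assumed layout */

		return demo_int_vector[type]
			? demo_int_vector[type](dev, host_int) : 1;
	}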
@@ -1374,46 +1451,48 @@ static void TLan_SetMulticastList( struct net_device *dev )
1374 1451
1375 1452
1376 1453
1377 /*************************************************************** 1454/***************************************************************
1378 * TLan_HandleTxEOF 1455 * tlan_handle_tx_eof
1379 * 1456 *
1380 * Returns: 1457 * Returns:
1381 * 1 1458 * 1
1382 * Parms: 1459 * Parms:
1383 * dev Device assigned the IRQ that was 1460 * dev Device assigned the IRQ that was
1384 * raised. 1461 * raised.
1385 * host_int The contents of the HOST_INT 1462 * host_int The contents of the HOST_INT
1386 * port. 1463 * port.
1387 * 1464 *
1388 * This function handles Tx EOF interrupts which are raised 1465 * This function handles Tx EOF interrupts which are raised
1389 * by the adapter when it has completed sending the 1466 * by the adapter when it has completed sending the
1390 * contents of a buffer. It determines which list/buffer 1467 * contents of a buffer. It determines which list/buffer
1391 * was completed and resets it. If the buffer was the last 1468 * was completed and resets it. If the buffer was the last
1392 * in the channel (EOC), then the function checks to see if 1469 * in the channel (EOC), then the function checks to see if
1393 * another buffer is ready to send, and if so, sends a Tx 1470 * another buffer is ready to send, and if so, sends a Tx
1394 * Go command. Finally, the driver activates/continues the 1471 * Go command. Finally, the driver activates/continues the
1395 * activity LED. 1472 * activity LED.
1396 * 1473 *
1397 **************************************************************/ 1474 **************************************************************/
1398 1475
1399static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int ) 1476static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
1400{ 1477{
1401 TLanPrivateInfo *priv = netdev_priv(dev); 1478 struct tlan_priv *priv = netdev_priv(dev);
1402 int eoc = 0; 1479 int eoc = 0;
1403 TLanList *head_list; 1480 struct tlan_list *head_list;
1404 dma_addr_t head_list_phys; 1481 dma_addr_t head_list_phys;
1405 u32 ack = 0; 1482 u32 ack = 0;
1406 u16 tmpCStat; 1483 u16 tmp_c_stat;
1407 1484
1408 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n", 1485 TLAN_DBG(TLAN_DEBUG_TX,
1409 priv->txHead, priv->txTail ); 1486 "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
1410 head_list = priv->txList + priv->txHead; 1487 priv->tx_head, priv->tx_tail);
1488 head_list = priv->tx_list + priv->tx_head;
1411 1489
1412 while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) { 1490 while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
1413 struct sk_buff *skb = TLan_GetSKB(head_list); 1491 && (ack < 255)) {
1492 struct sk_buff *skb = tlan_get_skb(head_list);
1414 1493
1415 ack++; 1494 ack++;
1416 pci_unmap_single(priv->pciDev, head_list->buffer[0].address, 1495 pci_unmap_single(priv->pci_dev, head_list->buffer[0].address,
1417 max(skb->len, 1496 max(skb->len,
1418 (unsigned int)TLAN_MIN_FRAME_SIZE), 1497 (unsigned int)TLAN_MIN_FRAME_SIZE),
1419 PCI_DMA_TODEVICE); 1498 PCI_DMA_TODEVICE);
@@ -1421,304 +1500,313 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
1421 head_list->buffer[8].address = 0; 1500 head_list->buffer[8].address = 0;
1422 head_list->buffer[9].address = 0; 1501 head_list->buffer[9].address = 0;
1423 1502
1424 if ( tmpCStat & TLAN_CSTAT_EOC ) 1503 if (tmp_c_stat & TLAN_CSTAT_EOC)
1425 eoc = 1; 1504 eoc = 1;
1426 1505
1427 dev->stats.tx_bytes += head_list->frameSize; 1506 dev->stats.tx_bytes += head_list->frame_size;
1428 1507
1429 head_list->cStat = TLAN_CSTAT_UNUSED; 1508 head_list->c_stat = TLAN_CSTAT_UNUSED;
1430 netif_start_queue(dev); 1509 netif_start_queue(dev);
1431 CIRC_INC( priv->txHead, TLAN_NUM_TX_LISTS ); 1510 CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS);
1432 head_list = priv->txList + priv->txHead; 1511 head_list = priv->tx_list + priv->tx_head;
1433 } 1512 }
1434 1513
1435 if (!ack) 1514 if (!ack)
1436 printk(KERN_INFO "TLAN: Received interrupt for uncompleted TX frame.\n"); 1515 printk(KERN_INFO
1437 1516 "TLAN: Received interrupt for uncompleted TX frame.\n");
1438 if ( eoc ) { 1517
1439 TLAN_DBG( TLAN_DEBUG_TX, 1518 if (eoc) {
1440 "TRANSMIT: Handling TX EOC (Head=%d Tail=%d)\n", 1519 TLAN_DBG(TLAN_DEBUG_TX,
1441 priv->txHead, priv->txTail ); 1520 "TRANSMIT: handling TX EOC (Head=%d Tail=%d)\n",
1442 head_list = priv->txList + priv->txHead; 1521 priv->tx_head, priv->tx_tail);
1443 head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead; 1522 head_list = priv->tx_list + priv->tx_head;
1444 if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) { 1523 head_list_phys = priv->tx_list_dma
1445 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM ); 1524 + sizeof(struct tlan_list)*priv->tx_head;
1525 if ((head_list->c_stat & TLAN_CSTAT_READY)
1526 == TLAN_CSTAT_READY) {
1527 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
1446 ack |= TLAN_HC_GO; 1528 ack |= TLAN_HC_GO;
1447 } else { 1529 } else {
1448 priv->txInProgress = 0; 1530 priv->tx_in_progress = 0;
1449 } 1531 }
1450 } 1532 }
1451 1533
1452 if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) { 1534 if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
1453 TLan_DioWrite8( dev->base_addr, 1535 tlan_dio_write8(dev->base_addr,
1454 TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT ); 1536 TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
1455 if ( priv->timer.function == NULL ) { 1537 if (priv->timer.function == NULL) {
1456 priv->timer.function = TLan_Timer; 1538 priv->timer.function = tlan_timer;
1457 priv->timer.data = (unsigned long) dev; 1539 priv->timer.data = (unsigned long) dev;
1458 priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY; 1540 priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
1459 priv->timerSetAt = jiffies; 1541 priv->timer_set_at = jiffies;
1460 priv->timerType = TLAN_TIMER_ACTIVITY; 1542 priv->timer_type = TLAN_TIMER_ACTIVITY;
1461 add_timer(&priv->timer); 1543 add_timer(&priv->timer);
1462 } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) { 1544 } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
1463 priv->timerSetAt = jiffies; 1545 priv->timer_set_at = jiffies;
1464 } 1546 }
1465 } 1547 }
1466 1548
1467 return ack; 1549 return ack;
1468 1550
1469} /* TLan_HandleTxEOF */ 1551}
1470 1552
1471 1553
1472 1554
1473 1555
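The loop above walks the Tx ring by advancing tx_head with CIRC_INC. A minimal sketch of the assumed wrap-on-increment semantics (the real macro and TLAN_NUM_TX_LISTS live in tlan.h; the DEMO_ names and ring size here are illustrative):

	#include <stdio.h>

	#define DEMO_NUM_TX_LISTS 64		/* assumed ring size */
	#define DEMO_CIRC_INC(idx, size) \
		do { if (++(idx) >= (size)) (idx) = 0; } while (0)

	int main(void)
	{
		int head = DEMO_NUM_TX_LISTS - 2;
		int i;

		for (i = 0; i < 4; i++) {
			printf("head=%d\n", head);	/* 62, 63, 0, 1 */
			DEMO_CIRC_INC(head, DEMO_NUM_TX_LISTS);
		}
		return 0;
	}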
1474 /*************************************************************** 1556/***************************************************************
1475 * TLan_HandleStatOverflow 1557 * tlan_handle_stat_overflow
1476 * 1558 *
1477 * Returns: 1559 * Returns:
1478 * 1 1560 * 1
1479 * Parms: 1561 * Parms:
1480 * dev Device assigned the IRQ that was 1562 * dev Device assigned the IRQ that was
1481 * raised. 1563 * raised.
1482 * host_int The contents of the HOST_INT 1564 * host_int The contents of the HOST_INT
1483 * port. 1565 * port.
1484 * 1566 *
1485 * This function handles the Statistics Overflow interrupt 1567 * This function handles the Statistics Overflow interrupt
1486 * which means that one or more of the TLAN statistics 1568 * which means that one or more of the TLAN statistics
1487 * registers has reached 1/2 capacity and needs to be read. 1569 * registers has reached 1/2 capacity and needs to be read.
1488 * 1570 *
1489 **************************************************************/ 1571 **************************************************************/
1490 1572
1491static u32 TLan_HandleStatOverflow( struct net_device *dev, u16 host_int ) 1573static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int)
1492{ 1574{
1493 TLan_ReadAndClearStats( dev, TLAN_RECORD ); 1575 tlan_read_and_clear_stats(dev, TLAN_RECORD);
1494 1576
1495 return 1; 1577 return 1;
1496 1578
1497} /* TLan_HandleStatOverflow */ 1579}
1498 1580
1499 1581
1500 1582
1501 1583
1502 /*************************************************************** 1584/***************************************************************
1503 * TLan_HandleRxEOF 1585 * tlan_handle_rx_eof
1504 * 1586 *
1505 * Returns: 1587 * Returns:
1506 * 1 1588 * 1
1507 * Parms: 1589 * Parms:
1508 * dev Device assigned the IRQ that was 1590 * dev Device assigned the IRQ that was
1509 * raised. 1591 * raised.
1510 * host_int The contents of the HOST_INT 1592 * host_int The contents of the HOST_INT
1511 * port. 1593 * port.
1512 * 1594 *
1513 * This function handles the Rx EOF interrupt which 1595 * This function handles the Rx EOF interrupt which
1514 * indicates a frame has been received by the adapter from 1596 * indicates a frame has been received by the adapter from
1515 * the net and the frame has been transferred to memory. 1597 * the net and the frame has been transferred to memory.
1516 * The function determines the bounce buffer the frame has 1598 * The function determines the bounce buffer the frame has
1517 * been loaded into, creates a new sk_buff big enough to 1599 * been loaded into, creates a new sk_buff big enough to
1518 * hold the frame, and sends it to the protocol stack. It 1600 * hold the frame, and sends it to the protocol stack. It
1519 * then resets the used buffer and appends it to the end 1601 * then resets the used buffer and appends it to the end
1520 * of the list. If the frame was the last in the Rx 1602 * of the list. If the frame was the last in the Rx
1521 * channel (EOC), the function restarts the receive channel 1603 * channel (EOC), the function restarts the receive channel
1522 * by sending an Rx Go command to the adapter. Then it 1604 * by sending an Rx Go command to the adapter. Then it
1523 * activates/continues the activity LED. 1605 * activates/continues the activity LED.
1524 * 1606 *
1525 **************************************************************/ 1607 **************************************************************/
1526 1608
1527static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int ) 1609static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
1528{ 1610{
1529 TLanPrivateInfo *priv = netdev_priv(dev); 1611 struct tlan_priv *priv = netdev_priv(dev);
1530 u32 ack = 0; 1612 u32 ack = 0;
1531 int eoc = 0; 1613 int eoc = 0;
1532 TLanList *head_list; 1614 struct tlan_list *head_list;
1533 struct sk_buff *skb; 1615 struct sk_buff *skb;
1534 TLanList *tail_list; 1616 struct tlan_list *tail_list;
1535 u16 tmpCStat; 1617 u16 tmp_c_stat;
1536 dma_addr_t head_list_phys; 1618 dma_addr_t head_list_phys;
1537 1619
1538 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOF (Head=%d Tail=%d)\n", 1620 TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: handling RX EOF (Head=%d Tail=%d)\n",
1539 priv->rxHead, priv->rxTail ); 1621 priv->rx_head, priv->rx_tail);
1540 head_list = priv->rxList + priv->rxHead; 1622 head_list = priv->rx_list + priv->rx_head;
1541 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; 1623 head_list_phys =
1624 priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head;
1542 1625
1543 while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) { 1626 while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
1544 dma_addr_t frameDma = head_list->buffer[0].address; 1627 && (ack < 255)) {
1545 u32 frameSize = head_list->frameSize; 1628 dma_addr_t frame_dma = head_list->buffer[0].address;
1629 u32 frame_size = head_list->frame_size;
1546 struct sk_buff *new_skb; 1630 struct sk_buff *new_skb;
1547 1631
1548 ack++; 1632 ack++;
1549 if (tmpCStat & TLAN_CSTAT_EOC) 1633 if (tmp_c_stat & TLAN_CSTAT_EOC)
1550 eoc = 1; 1634 eoc = 1;
1551 1635
1552 new_skb = netdev_alloc_skb_ip_align(dev, 1636 new_skb = netdev_alloc_skb_ip_align(dev,
1553 TLAN_MAX_FRAME_SIZE + 5); 1637 TLAN_MAX_FRAME_SIZE + 5);
1554 if ( !new_skb ) 1638 if (!new_skb)
1555 goto drop_and_reuse; 1639 goto drop_and_reuse;
1556 1640
1557 skb = TLan_GetSKB(head_list); 1641 skb = tlan_get_skb(head_list);
1558 pci_unmap_single(priv->pciDev, frameDma, 1642 pci_unmap_single(priv->pci_dev, frame_dma,
1559 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); 1643 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
1560 skb_put( skb, frameSize ); 1644 skb_put(skb, frame_size);
1561 1645
1562 dev->stats.rx_bytes += frameSize; 1646 dev->stats.rx_bytes += frame_size;
1563 1647
1564 skb->protocol = eth_type_trans( skb, dev ); 1648 skb->protocol = eth_type_trans(skb, dev);
1565 netif_rx( skb ); 1649 netif_rx(skb);
1566 1650
1567 head_list->buffer[0].address = pci_map_single(priv->pciDev, 1651 head_list->buffer[0].address =
1568 new_skb->data, 1652 pci_map_single(priv->pci_dev, new_skb->data,
1569 TLAN_MAX_FRAME_SIZE, 1653 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
1570 PCI_DMA_FROMDEVICE);
1571 1654
1572 TLan_StoreSKB(head_list, new_skb); 1655 tlan_store_skb(head_list, new_skb);
1573drop_and_reuse: 1656drop_and_reuse:
1574 head_list->forward = 0; 1657 head_list->forward = 0;
1575 head_list->cStat = 0; 1658 head_list->c_stat = 0;
1576 tail_list = priv->rxList + priv->rxTail; 1659 tail_list = priv->rx_list + priv->rx_tail;
1577 tail_list->forward = head_list_phys; 1660 tail_list->forward = head_list_phys;
1578 1661
1579 CIRC_INC( priv->rxHead, TLAN_NUM_RX_LISTS ); 1662 CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS);
1580 CIRC_INC( priv->rxTail, TLAN_NUM_RX_LISTS ); 1663 CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS);
1581 head_list = priv->rxList + priv->rxHead; 1664 head_list = priv->rx_list + priv->rx_head;
1582 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; 1665 head_list_phys = priv->rx_list_dma
1666 + sizeof(struct tlan_list)*priv->rx_head;
1583 } 1667 }
1584 1668
1585 if (!ack) 1669 if (!ack)
1586 printk(KERN_INFO "TLAN: Received interrupt for uncompleted RX frame.\n"); 1670 printk(KERN_INFO
1587 1671 "TLAN: Received interrupt for uncompleted RX frame.\n");
1588 1672
1589 if ( eoc ) { 1673
1590 TLAN_DBG( TLAN_DEBUG_RX, 1674 if (eoc) {
1591 "RECEIVE: Handling RX EOC (Head=%d Tail=%d)\n", 1675 TLAN_DBG(TLAN_DEBUG_RX,
1592 priv->rxHead, priv->rxTail ); 1676 "RECEIVE: handling RX EOC (Head=%d Tail=%d)\n",
1593 head_list = priv->rxList + priv->rxHead; 1677 priv->rx_head, priv->rx_tail);
1594 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; 1678 head_list = priv->rx_list + priv->rx_head;
1595 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM ); 1679 head_list_phys = priv->rx_list_dma
1680 + sizeof(struct tlan_list)*priv->rx_head;
1681 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
1596 ack |= TLAN_HC_GO | TLAN_HC_RT; 1682 ack |= TLAN_HC_GO | TLAN_HC_RT;
1597 priv->rxEocCount++; 1683 priv->rx_eoc_count++;
1598 } 1684 }
1599 1685
1600 if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) { 1686 if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
1601 TLan_DioWrite8( dev->base_addr, 1687 tlan_dio_write8(dev->base_addr,
1602 TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT ); 1688 TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
1603 if ( priv->timer.function == NULL ) { 1689 if (priv->timer.function == NULL) {
1604 priv->timer.function = TLan_Timer; 1690 priv->timer.function = tlan_timer;
1605 priv->timer.data = (unsigned long) dev; 1691 priv->timer.data = (unsigned long) dev;
1606 priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY; 1692 priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
1607 priv->timerSetAt = jiffies; 1693 priv->timer_set_at = jiffies;
1608 priv->timerType = TLAN_TIMER_ACTIVITY; 1694 priv->timer_type = TLAN_TIMER_ACTIVITY;
1609 add_timer(&priv->timer); 1695 add_timer(&priv->timer);
1610 } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) { 1696 } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
1611 priv->timerSetAt = jiffies; 1697 priv->timer_set_at = jiffies;
1612 } 1698 }
1613 } 1699 }
1614 1700
1615 return ack; 1701 return ack;
1616 1702
1617} /* TLan_HandleRxEOF */ 1703}
1618 1704
1619 1705
1620 1706
1621 1707
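The Rx EOF handler above never leaves a descriptor without a buffer: it hands the filled skb up, maps a fresh one into the same descriptor, and chains the recycled descriptor onto the ring tail. A sketch of that swap-and-requeue pattern with simplified stand-in types (demo_deliver stands in for netif_rx; no DMA mapping shown):

	#include <stddef.h>
	#include <stdlib.h>

	struct demo_list {
		unsigned int forward;		/* phys addr of next descriptor */
		void *buf;
	};

	static void demo_deliver(void *buf) { free(buf); }

	static int demo_recycle(struct demo_list *head, struct demo_list *tail,
				unsigned int head_phys, size_t frame_max)
	{
		void *fresh = malloc(frame_max);

		if (!fresh)
			goto drop_and_reuse;	/* keep old buffer, drop frame */
		demo_deliver(head->buf);
		head->buf = fresh;
	drop_and_reuse:
		head->forward = 0;		/* recycled descriptor ends chain */
		tail->forward = head_phys;	/* old tail now points at it */
		return fresh != NULL;
	}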
1622 /*************************************************************** 1708/***************************************************************
1623 * TLan_HandleDummy 1709 * tlan_handle_dummy
1624 * 1710 *
1625 * Returns: 1711 * Returns:
1626 * 1 1712 * 1
1627 * Parms: 1713 * Parms:
1628 * dev Device assigned the IRQ that was 1714 * dev Device assigned the IRQ that was
1629 * raised. 1715 * raised.
1630 * host_int The contents of the HOST_INT 1716 * host_int The contents of the HOST_INT
1631 * port. 1717 * port.
1632 * 1718 *
1633 * This function handles the Dummy interrupt, which is 1719 * This function handles the Dummy interrupt, which is
1634 * raised whenever a test interrupt is generated by setting 1720 * raised whenever a test interrupt is generated by setting
1635 * the Req_Int bit of HOST_CMD to 1. 1721 * the Req_Int bit of HOST_CMD to 1.
1636 * 1722 *
1637 **************************************************************/ 1723 **************************************************************/
1638 1724
1639static u32 TLan_HandleDummy( struct net_device *dev, u16 host_int ) 1725static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int)
1640{ 1726{
1641 printk( "TLAN: Test interrupt on %s.\n", dev->name ); 1727 pr_info("TLAN: Test interrupt on %s.\n", dev->name);
1642 return 1; 1728 return 1;
1643 1729
1644} /* TLan_HandleDummy */ 1730}
1645 1731
1646 1732
1647 1733
1648 1734
1649 /*************************************************************** 1735/***************************************************************
1650 * TLan_HandleTxEOC 1736 * tlan_handle_tx_eoc
1651 * 1737 *
1652 * Returns: 1738 * Returns:
1653 * 1 1739 * 1
1654 * Parms: 1740 * Parms:
1655 * dev Device assigned the IRQ that was 1741 * dev Device assigned the IRQ that was
1656 * raised. 1742 * raised.
1657 * host_int The contents of the HOST_INT 1743 * host_int The contents of the HOST_INT
1658 * port. 1744 * port.
1659 * 1745 *
1660 * This driver is structured to determine EOC occurrences by 1746 * This driver is structured to determine EOC occurrences by
1661 * reading the CSTAT member of the list structure. Tx EOC 1747 * reading the CSTAT member of the list structure. Tx EOC
1662 * interrupts are disabled via the DIO INTDIS register. 1748 * interrupts are disabled via the DIO INTDIS register.
1663 * However, TLAN chips before revision 3.0 didn't have this 1749 * However, TLAN chips before revision 3.0 didn't have this
1664 * functionality, so process EOC events if this is the 1750 * functionality, so process EOC events if this is the
1665 * case. 1751 * case.
1666 * 1752 *
1667 **************************************************************/ 1753 **************************************************************/
1668 1754
1669static u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int ) 1755static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
1670{ 1756{
1671 TLanPrivateInfo *priv = netdev_priv(dev); 1757 struct tlan_priv *priv = netdev_priv(dev);
1672 TLanList *head_list; 1758 struct tlan_list *head_list;
1673 dma_addr_t head_list_phys; 1759 dma_addr_t head_list_phys;
1674 u32 ack = 1; 1760 u32 ack = 1;
1675 1761
1676 host_int = 0; 1762 host_int = 0;
1677 if ( priv->tlanRev < 0x30 ) { 1763 if (priv->tlan_rev < 0x30) {
1678 TLAN_DBG( TLAN_DEBUG_TX, 1764 TLAN_DBG(TLAN_DEBUG_TX,
1679 "TRANSMIT: Handling TX EOC (Head=%d Tail=%d) -- IRQ\n", 1765 "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
1680 priv->txHead, priv->txTail ); 1766 priv->tx_head, priv->tx_tail);
1681 head_list = priv->txList + priv->txHead; 1767 head_list = priv->tx_list + priv->tx_head;
1682 head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead; 1768 head_list_phys = priv->tx_list_dma
1683 if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) { 1769 + sizeof(struct tlan_list)*priv->tx_head;
1770 if ((head_list->c_stat & TLAN_CSTAT_READY)
1771 == TLAN_CSTAT_READY) {
1684 netif_stop_queue(dev); 1772 netif_stop_queue(dev);
1685 outl( head_list_phys, dev->base_addr + TLAN_CH_PARM ); 1773 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
1686 ack |= TLAN_HC_GO; 1774 ack |= TLAN_HC_GO;
1687 } else { 1775 } else {
1688 priv->txInProgress = 0; 1776 priv->tx_in_progress = 0;
1689 } 1777 }
1690 } 1778 }
1691 1779
1692 return ack; 1780 return ack;
1693 1781
1694} /* TLan_HandleTxEOC */ 1782}
1695 1783
1696 1784
1697 1785
1698 1786
1699 /*************************************************************** 1787/***************************************************************
1700 * TLan_HandleStatusCheck 1788 * tlan_handle_status_check
1701 * 1789 *
1702 * Returns: 1790 * Returns:
1703 * 0 if Adapter check, 1 if Network Status check. 1791 * 0 if Adapter check, 1 if Network Status check.
1704 * Parms: 1792 * Parms:
1705 * dev Device assigned the IRQ that was 1793 * dev Device assigned the IRQ that was
1706 * raised. 1794 * raised.
1707 * host_int The contents of the HOST_INT 1795 * host_int The contents of the HOST_INT
1708 * port. 1796 * port.
1709 * 1797 *
1710 * This function handles Adapter Check/Network Status 1798 * This function handles Adapter Check/Network Status
1711 * interrupts generated by the adapter. It checks the 1799 * interrupts generated by the adapter. It checks the
1712 * vector in the HOST_INT register to determine if it is 1800 * vector in the HOST_INT register to determine if it is
1713 * an Adapter Check interrupt. If so, it resets the 1801 * an Adapter Check interrupt. If so, it resets the
1714 * adapter. Otherwise it clears the status registers 1802 * adapter. Otherwise it clears the status registers
1715 * and services the PHY. 1803 * and services the PHY.
1716 * 1804 *
1717 **************************************************************/ 1805 **************************************************************/
1718 1806
1719static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int ) 1807static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
1720{ 1808{
1721 TLanPrivateInfo *priv = netdev_priv(dev); 1809 struct tlan_priv *priv = netdev_priv(dev);
1722 u32 ack; 1810 u32 ack;
1723 u32 error; 1811 u32 error;
1724 u8 net_sts; 1812 u8 net_sts;
@@ -1727,92 +1815,94 @@ static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
1727 u16 tlphy_sts; 1815 u16 tlphy_sts;
1728 1816
1729 ack = 1; 1817 ack = 1;
1730 if ( host_int & TLAN_HI_IV_MASK ) { 1818 if (host_int & TLAN_HI_IV_MASK) {
1731 netif_stop_queue( dev ); 1819 netif_stop_queue(dev);
1732 error = inl( dev->base_addr + TLAN_CH_PARM ); 1820 error = inl(dev->base_addr + TLAN_CH_PARM);
1733 printk( "TLAN: %s: Adaptor Error = 0x%x\n", dev->name, error ); 1821 pr_info("TLAN: %s: Adaptor Error = 0x%x\n", dev->name, error);
1734 TLan_ReadAndClearStats( dev, TLAN_RECORD ); 1822 tlan_read_and_clear_stats(dev, TLAN_RECORD);
1735 outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD ); 1823 outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
1736 1824
1737 schedule_work(&priv->tlan_tqueue); 1825 schedule_work(&priv->tlan_tqueue);
1738 1826
1739 netif_wake_queue(dev); 1827 netif_wake_queue(dev);
1740 ack = 0; 1828 ack = 0;
1741 } else { 1829 } else {
1742 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name ); 1830 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name);
1743 phy = priv->phy[priv->phyNum]; 1831 phy = priv->phy[priv->phy_num];
1744 1832
1745 net_sts = TLan_DioRead8( dev->base_addr, TLAN_NET_STS ); 1833 net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS);
1746 if ( net_sts ) { 1834 if (net_sts) {
1747 TLan_DioWrite8( dev->base_addr, TLAN_NET_STS, net_sts ); 1835 tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts);
1748 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n", 1836 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
1749 dev->name, (unsigned) net_sts ); 1837 dev->name, (unsigned) net_sts);
1750 } 1838 }
1751 if ( ( net_sts & TLAN_NET_STS_MIRQ ) && ( priv->phyNum == 0 ) ) { 1839 if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) {
1752 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_STS, &tlphy_sts ); 1840 tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
1753 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl ); 1841 tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
1754 if ( ! ( tlphy_sts & TLAN_TS_POLOK ) && 1842 if (!(tlphy_sts & TLAN_TS_POLOK) &&
1755 ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) { 1843 !(tlphy_ctl & TLAN_TC_SWAPOL)) {
1756 tlphy_ctl |= TLAN_TC_SWAPOL; 1844 tlphy_ctl |= TLAN_TC_SWAPOL;
1757 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl); 1845 tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
1758 } else if ( ( tlphy_sts & TLAN_TS_POLOK ) && 1846 tlphy_ctl);
1759 ( tlphy_ctl & TLAN_TC_SWAPOL ) ) { 1847 } else if ((tlphy_sts & TLAN_TS_POLOK) &&
1760 tlphy_ctl &= ~TLAN_TC_SWAPOL; 1848 (tlphy_ctl & TLAN_TC_SWAPOL)) {
1761 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl); 1849 tlphy_ctl &= ~TLAN_TC_SWAPOL;
1762 } 1850 tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
1763 1851 tlphy_ctl);
1764 if (debug) {
1765 TLan_PhyPrint( dev );
1766 } 1852 }
1853
1854 if (debug)
1855 tlan_phy_print(dev);
1767 } 1856 }
1768 } 1857 }
1769 1858
1770 return ack; 1859 return ack;
1771 1860
1772} /* TLan_HandleStatusCheck */ 1861}
1773 1862
1774 1863
1775 1864
1776 1865
1777 /*************************************************************** 1866/***************************************************************
1778 * TLan_HandleRxEOC 1867 * tlan_handle_rx_eoc
1779 * 1868 *
1780 * Returns: 1869 * Returns:
1781 * 1 1870 * 1
1782 * Parms: 1871 * Parms:
1783 * dev Device assigned the IRQ that was 1872 * dev Device assigned the IRQ that was
1784 * raised. 1873 * raised.
1785 * host_int The contents of the HOST_INT 1874 * host_int The contents of the HOST_INT
1786 * port. 1875 * port.
1787 * 1876 *
1788 * This driver is structured to determine EOC occurrences by 1877 * This driver is structured to determine EOC occurrences by
1789 * reading the CSTAT member of the list structure. Rx EOC 1878 * reading the CSTAT member of the list structure. Rx EOC
1790 * interrupts are disabled via the DIO INTDIS register. 1879 * interrupts are disabled via the DIO INTDIS register.
1791 * However, TLAN chips before revision 3.0 didn't have this 1880 * However, TLAN chips before revision 3.0 didn't have this
1792 * CSTAT member or an INTDIS register, so if this chip is 1881 * CSTAT member or an INTDIS register, so if this chip is
1793 * pre-3.0, process EOC interrupts normally. 1882 * pre-3.0, process EOC interrupts normally.
1794 * 1883 *
1795 **************************************************************/ 1884 **************************************************************/
1796 1885
1797static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int ) 1886static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int)
1798{ 1887{
1799 TLanPrivateInfo *priv = netdev_priv(dev); 1888 struct tlan_priv *priv = netdev_priv(dev);
1800 dma_addr_t head_list_phys; 1889 dma_addr_t head_list_phys;
1801 u32 ack = 1; 1890 u32 ack = 1;
1802 1891
1803 if ( priv->tlanRev < 0x30 ) { 1892 if (priv->tlan_rev < 0x30) {
1804 TLAN_DBG( TLAN_DEBUG_RX, 1893 TLAN_DBG(TLAN_DEBUG_RX,
1805 "RECEIVE: Handling RX EOC (Head=%d Tail=%d) -- IRQ\n", 1894 "RECEIVE: Handling RX EOC (head=%d tail=%d) -- IRQ\n",
1806 priv->rxHead, priv->rxTail ); 1895 priv->rx_head, priv->rx_tail);
1807 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; 1896 head_list_phys = priv->rx_list_dma
1808 outl( head_list_phys, dev->base_addr + TLAN_CH_PARM ); 1897 + sizeof(struct tlan_list)*priv->rx_head;
1898 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
1809 ack |= TLAN_HC_GO | TLAN_HC_RT; 1899 ack |= TLAN_HC_GO | TLAN_HC_RT;
1810 priv->rxEocCount++; 1900 priv->rx_eoc_count++;
1811 } 1901 }
1812 1902
1813 return ack; 1903 return ack;
1814 1904
1815} /* TLan_HandleRxEOC */ 1905}
1816 1906
1817 1907
1818 1908
@@ -1820,98 +1910,98 @@ static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
1820/***************************************************************************** 1910/*****************************************************************************
1821****************************************************************************** 1911******************************************************************************
1822 1912
1823 ThunderLAN Driver Timer Function 1913ThunderLAN driver timer function
1824 1914
1825****************************************************************************** 1915******************************************************************************
1826*****************************************************************************/ 1916*****************************************************************************/
1827 1917
1828 1918
1829 /*************************************************************** 1919/***************************************************************
1830 * TLan_Timer 1920 * tlan_timer
1831 * 1921 *
1832 * Returns: 1922 * Returns:
1833 * Nothing 1923 * Nothing
1834 * Parms: 1924 * Parms:
1835 * data A value given to add timer when 1925 * data A value given to add timer when
1836 * add_timer was called. 1926 * add_timer was called.
1837 * 1927 *
1838 * This function handles timed functionality for the 1928 * This function handles timed functionality for the
1839 * TLAN driver. The two current timer uses are for 1929 * TLAN driver. The two current timer uses are for
1840 * delaying for autonegotiation and driving the ACT LED. 1930 * delaying for autonegotiation and driving the ACT LED.
1841 * - Autonegotiation requires being allowed about 1931 * - Autonegotiation requires being allowed about
1842 * 2 1/2 seconds before attempting to transmit a 1932 * 2 1/2 seconds before attempting to transmit a
1843 * packet. It would be a very bad thing to hang 1933 * packet. It would be a very bad thing to hang
1844 * the kernel this long, so the driver doesn't 1934 * the kernel this long, so the driver doesn't
1845 * allow transmission 'til after this time, for 1935 * allow transmission 'til after this time, for
1846 * certain PHYs. It would be much nicer if all 1936 * certain PHYs. It would be much nicer if all
1847 * PHYs were interrupt-capable like the internal 1937 * PHYs were interrupt-capable like the internal
1848 * PHY. 1938 * PHY.
1849 * - The ACT LED, which shows adapter activity, is 1939 * - The ACT LED, which shows adapter activity, is
1850 * driven by the driver, and so must be left on 1940 * driven by the driver, and so must be left on
1851 * for a short period to power up the LED so it 1941 * for a short period to power up the LED so it
1852 * can be seen. This delay can be changed by 1942 * can be seen. This delay can be changed by
1853 * changing the TLAN_TIMER_ACT_DELAY in tlan.h, 1943 * changing the TLAN_TIMER_ACT_DELAY in tlan.h,
1854 * if desired. 100 ms produces a slightly 1944 * if desired. 100 ms produces a slightly
1855 * sluggish response. 1945 * sluggish response.
1856 * 1946 *
1857 **************************************************************/ 1947 **************************************************************/
1858 1948
1859static void TLan_Timer( unsigned long data ) 1949static void tlan_timer(unsigned long data)
1860{ 1950{
1861 struct net_device *dev = (struct net_device *) data; 1951 struct net_device *dev = (struct net_device *) data;
1862 TLanPrivateInfo *priv = netdev_priv(dev); 1952 struct tlan_priv *priv = netdev_priv(dev);
1863 u32 elapsed; 1953 u32 elapsed;
1864 unsigned long flags = 0; 1954 unsigned long flags = 0;
1865 1955
1866 priv->timer.function = NULL; 1956 priv->timer.function = NULL;
1867 1957
1868 switch ( priv->timerType ) { 1958 switch (priv->timer_type) {
1869#ifdef MONITOR 1959#ifdef MONITOR
1870 case TLAN_TIMER_LINK_BEAT: 1960 case TLAN_TIMER_LINK_BEAT:
1871 TLan_PhyMonitor( dev ); 1961 tlan_phy_monitor(dev);
1872 break; 1962 break;
1873#endif 1963#endif
1874 case TLAN_TIMER_PHY_PDOWN: 1964 case TLAN_TIMER_PHY_PDOWN:
1875 TLan_PhyPowerDown( dev ); 1965 tlan_phy_power_down(dev);
1876 break; 1966 break;
1877 case TLAN_TIMER_PHY_PUP: 1967 case TLAN_TIMER_PHY_PUP:
1878 TLan_PhyPowerUp( dev ); 1968 tlan_phy_power_up(dev);
1879 break; 1969 break;
1880 case TLAN_TIMER_PHY_RESET: 1970 case TLAN_TIMER_PHY_RESET:
1881 TLan_PhyReset( dev ); 1971 tlan_phy_reset(dev);
1882 break; 1972 break;
1883 case TLAN_TIMER_PHY_START_LINK: 1973 case TLAN_TIMER_PHY_START_LINK:
1884 TLan_PhyStartLink( dev ); 1974 tlan_phy_start_link(dev);
1885 break; 1975 break;
1886 case TLAN_TIMER_PHY_FINISH_AN: 1976 case TLAN_TIMER_PHY_FINISH_AN:
1887 TLan_PhyFinishAutoNeg( dev ); 1977 tlan_phy_finish_auto_neg(dev);
1888 break; 1978 break;
1889 case TLAN_TIMER_FINISH_RESET: 1979 case TLAN_TIMER_FINISH_RESET:
1890 TLan_FinishReset( dev ); 1980 tlan_finish_reset(dev);
1891 break; 1981 break;
1892 case TLAN_TIMER_ACTIVITY: 1982 case TLAN_TIMER_ACTIVITY:
1893 spin_lock_irqsave(&priv->lock, flags); 1983 spin_lock_irqsave(&priv->lock, flags);
1894 if ( priv->timer.function == NULL ) { 1984 if (priv->timer.function == NULL) {
1895 elapsed = jiffies - priv->timerSetAt; 1985 elapsed = jiffies - priv->timer_set_at;
1896 if ( elapsed >= TLAN_TIMER_ACT_DELAY ) { 1986 if (elapsed >= TLAN_TIMER_ACT_DELAY) {
1897 TLan_DioWrite8( dev->base_addr, 1987 tlan_dio_write8(dev->base_addr,
1898 TLAN_LED_REG, TLAN_LED_LINK ); 1988 TLAN_LED_REG, TLAN_LED_LINK);
1899 } else { 1989 } else {
1900 priv->timer.function = TLan_Timer; 1990 priv->timer.function = tlan_timer;
1901 priv->timer.expires = priv->timerSetAt 1991 priv->timer.expires = priv->timer_set_at
1902 + TLAN_TIMER_ACT_DELAY; 1992 + TLAN_TIMER_ACT_DELAY;
1903 spin_unlock_irqrestore(&priv->lock, flags); 1993 spin_unlock_irqrestore(&priv->lock, flags);
1904 add_timer( &priv->timer ); 1994 add_timer(&priv->timer);
1905 break; 1995 break;
1906 }
1907 } 1996 }
1908 spin_unlock_irqrestore(&priv->lock, flags); 1997 }
1909 break; 1998 spin_unlock_irqrestore(&priv->lock, flags);
1910 default: 1999 break;
1911 break; 2000 default:
2001 break;
1912 } 2002 }
1913 2003
1914} /* TLan_Timer */ 2004}
1915 2005
1916 2006
1917 2007
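One timer serves every delayed action in this driver, so a new use may only be armed when none is pending (timer.function == NULL), while an already-armed activity timer is simply re-stamped, as the EOF handlers above do. A sketch of that sharing discipline with simplified stand-in types (the real driver uses struct timer_list and add_timer):

	#include <stddef.h>

	struct demo_timer {
		void (*function)(unsigned long);
		unsigned long expires;
	};

	enum { DEMO_TIMER_NONE, DEMO_TIMER_ACTIVITY };

	struct demo_priv {
		struct demo_timer timer;
		unsigned long timer_set_at;
		int timer_type;
	};

	static void demo_timer_fn(unsigned long data) { (void)data; }

	static void demo_arm_activity(struct demo_priv *priv, unsigned long now,
				      unsigned long delay)
	{
		if (priv->timer.function == NULL) {
			priv->timer.function = demo_timer_fn;
			priv->timer.expires = now + delay;
			priv->timer_set_at = now;
			priv->timer_type = DEMO_TIMER_ACTIVITY;
			/* add_timer(&priv->timer) in the real driver */
		} else if (priv->timer_type == DEMO_TIMER_ACTIVITY) {
			priv->timer_set_at = now;	/* extend LED period */
		}
	}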
@@ -1919,39 +2009,39 @@ static void TLan_Timer( unsigned long data )
1919/***************************************************************************** 2009/*****************************************************************************
1920****************************************************************************** 2010******************************************************************************
1921 2011
1922 ThunderLAN Driver Adapter Related Routines 2012ThunderLAN driver adapter related routines
1923 2013
1924****************************************************************************** 2014******************************************************************************
1925*****************************************************************************/ 2015*****************************************************************************/
1926 2016
1927 2017
1928 /*************************************************************** 2018/***************************************************************
1929 * TLan_ResetLists 2019 * tlan_reset_lists
1930 * 2020 *
1931 * Returns: 2021 * Returns:
1932 * Nothing 2022 * Nothing
1933 * Parms: 2023 * Parms:
1934 * dev The device structure with the list 2024 * dev The device structure with the list
1935 * stuctures to be reset. 2025 * stuctures to be reset.
1936 * 2026 *
1937 * This routine sets the variables associated with managing 2027 * This routine sets the variables associated with managing
1938 * the TLAN lists to their initial values. 2028 * the TLAN lists to their initial values.
1939 * 2029 *
1940 **************************************************************/ 2030 **************************************************************/
1941 2031
1942static void TLan_ResetLists( struct net_device *dev ) 2032static void tlan_reset_lists(struct net_device *dev)
1943{ 2033{
1944 TLanPrivateInfo *priv = netdev_priv(dev); 2034 struct tlan_priv *priv = netdev_priv(dev);
1945 int i; 2035 int i;
1946 TLanList *list; 2036 struct tlan_list *list;
1947 dma_addr_t list_phys; 2037 dma_addr_t list_phys;
1948 struct sk_buff *skb; 2038 struct sk_buff *skb;
1949 2039
1950 priv->txHead = 0; 2040 priv->tx_head = 0;
1951 priv->txTail = 0; 2041 priv->tx_tail = 0;
1952 for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) { 2042 for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
1953 list = priv->txList + i; 2043 list = priv->tx_list + i;
1954 list->cStat = TLAN_CSTAT_UNUSED; 2044 list->c_stat = TLAN_CSTAT_UNUSED;
1955 list->buffer[0].address = 0; 2045 list->buffer[0].address = 0;
1956 list->buffer[2].count = 0; 2046 list->buffer[2].count = 0;
1957 list->buffer[2].address = 0; 2047 list->buffer[2].address = 0;
@@ -1959,169 +2049,169 @@ static void TLan_ResetLists( struct net_device *dev )
1959 list->buffer[9].address = 0; 2049 list->buffer[9].address = 0;
1960 } 2050 }
1961 2051
1962 priv->rxHead = 0; 2052 priv->rx_head = 0;
1963 priv->rxTail = TLAN_NUM_RX_LISTS - 1; 2053 priv->rx_tail = TLAN_NUM_RX_LISTS - 1;
1964 for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) { 2054 for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
1965 list = priv->rxList + i; 2055 list = priv->rx_list + i;
1966 list_phys = priv->rxListDMA + sizeof(TLanList) * i; 2056 list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i;
1967 list->cStat = TLAN_CSTAT_READY; 2057 list->c_stat = TLAN_CSTAT_READY;
1968 list->frameSize = TLAN_MAX_FRAME_SIZE; 2058 list->frame_size = TLAN_MAX_FRAME_SIZE;
1969 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER; 2059 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
1970 skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5); 2060 skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
1971 if ( !skb ) { 2061 if (!skb) {
1972 pr_err("TLAN: out of memory for received data.\n" ); 2062 pr_err("TLAN: out of memory for received data.\n");
1973 break; 2063 break;
1974 } 2064 }
1975 2065
1976 list->buffer[0].address = pci_map_single(priv->pciDev, 2066 list->buffer[0].address = pci_map_single(priv->pci_dev,
1977 skb->data, 2067 skb->data,
1978 TLAN_MAX_FRAME_SIZE, 2068 TLAN_MAX_FRAME_SIZE,
1979 PCI_DMA_FROMDEVICE); 2069 PCI_DMA_FROMDEVICE);
1980 TLan_StoreSKB(list, skb); 2070 tlan_store_skb(list, skb);
1981 list->buffer[1].count = 0; 2071 list->buffer[1].count = 0;
1982 list->buffer[1].address = 0; 2072 list->buffer[1].address = 0;
1983 list->forward = list_phys + sizeof(TLanList); 2073 list->forward = list_phys + sizeof(struct tlan_list);
1984 } 2074 }
1985 2075
1986 /* in case ran out of memory early, clear bits */ 2076 /* in case ran out of memory early, clear bits */
1987 while (i < TLAN_NUM_RX_LISTS) { 2077 while (i < TLAN_NUM_RX_LISTS) {
1988 TLan_StoreSKB(priv->rxList + i, NULL); 2078 tlan_store_skb(priv->rx_list + i, NULL);
1989 ++i; 2079 ++i;
1990 } 2080 }
1991 list->forward = 0; 2081 list->forward = 0;
1992 2082
1993} /* TLan_ResetLists */ 2083}
1994 2084
1995 2085
1996static void TLan_FreeLists( struct net_device *dev ) 2086static void tlan_free_lists(struct net_device *dev)
1997{ 2087{
1998 TLanPrivateInfo *priv = netdev_priv(dev); 2088 struct tlan_priv *priv = netdev_priv(dev);
1999 int i; 2089 int i;
2000 TLanList *list; 2090 struct tlan_list *list;
2001 struct sk_buff *skb; 2091 struct sk_buff *skb;
2002 2092
2003 for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) { 2093 for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
2004 list = priv->txList + i; 2094 list = priv->tx_list + i;
2005 skb = TLan_GetSKB(list); 2095 skb = tlan_get_skb(list);
2006 if ( skb ) { 2096 if (skb) {
2007 pci_unmap_single( 2097 pci_unmap_single(
2008 priv->pciDev, 2098 priv->pci_dev,
2009 list->buffer[0].address, 2099 list->buffer[0].address,
2010 max(skb->len, 2100 max(skb->len,
2011 (unsigned int)TLAN_MIN_FRAME_SIZE), 2101 (unsigned int)TLAN_MIN_FRAME_SIZE),
2012 PCI_DMA_TODEVICE); 2102 PCI_DMA_TODEVICE);
2013 dev_kfree_skb_any( skb ); 2103 dev_kfree_skb_any(skb);
2014 list->buffer[8].address = 0; 2104 list->buffer[8].address = 0;
2015 list->buffer[9].address = 0; 2105 list->buffer[9].address = 0;
2016 } 2106 }
2017 } 2107 }
2018 2108
2019 for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) { 2109 for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
2020 list = priv->rxList + i; 2110 list = priv->rx_list + i;
2021 skb = TLan_GetSKB(list); 2111 skb = tlan_get_skb(list);
2022 if ( skb ) { 2112 if (skb) {
2023 pci_unmap_single(priv->pciDev, 2113 pci_unmap_single(priv->pci_dev,
2024 list->buffer[0].address, 2114 list->buffer[0].address,
2025 TLAN_MAX_FRAME_SIZE, 2115 TLAN_MAX_FRAME_SIZE,
2026 PCI_DMA_FROMDEVICE); 2116 PCI_DMA_FROMDEVICE);
2027 dev_kfree_skb_any( skb ); 2117 dev_kfree_skb_any(skb);
2028 list->buffer[8].address = 0; 2118 list->buffer[8].address = 0;
2029 list->buffer[9].address = 0; 2119 list->buffer[9].address = 0;
2030 } 2120 }
2031 } 2121 }
2032} /* TLan_FreeLists */ 2122}
2033 2123
2034 2124
2035 2125
2036 2126
2037 /*************************************************************** 2127/***************************************************************
2038 * TLan_PrintDio 2128 * tlan_print_dio
2039 * 2129 *
2040 * Returns: 2130 * Returns:
2041 * Nothing 2131 * Nothing
2042 * Parms: 2132 * Parms:
2043 * io_base Base IO port of the device of 2133 * io_base Base IO port of the device of
2044 * which to print DIO registers. 2134 * which to print DIO registers.
2045 * 2135 *
2046 * This function prints out all the internal (DIO) 2136 * This function prints out all the internal (DIO)
2047 * registers of a TLAN chip. 2137 * registers of a TLAN chip.
2048 * 2138 *
2049 **************************************************************/ 2139 **************************************************************/
2050 2140
2051static void TLan_PrintDio( u16 io_base ) 2141static void tlan_print_dio(u16 io_base)
2052{ 2142{
2053 u32 data0, data1; 2143 u32 data0, data1;
2054 int i; 2144 int i;
2055 2145
2056 printk( "TLAN: Contents of internal registers for io base 0x%04hx.\n", 2146 pr_info("TLAN: Contents of internal registers for io base 0x%04hx.\n",
2057 io_base ); 2147 io_base);
2058 printk( "TLAN: Off. +0 +4\n" ); 2148 pr_info("TLAN: Off. +0 +4\n");
2059 for ( i = 0; i < 0x4C; i+= 8 ) { 2149 for (i = 0; i < 0x4C; i += 8) {
2060 data0 = TLan_DioRead32( io_base, i ); 2150 data0 = tlan_dio_read32(io_base, i);
2061 data1 = TLan_DioRead32( io_base, i + 0x4 ); 2151 data1 = tlan_dio_read32(io_base, i + 0x4);
2062 printk( "TLAN: 0x%02x 0x%08x 0x%08x\n", i, data0, data1 ); 2152 pr_info("TLAN: 0x%02x 0x%08x 0x%08x\n", i, data0, data1);
2063 } 2153 }
2064 2154
2065} /* TLan_PrintDio */ 2155}
2066 2156
2067 2157
2068 2158
2069 2159
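The DIO registers printed above are reached indirectly: the register offset is written to the DIO address port, then the data port is read, exactly as tlan_print_dio and the statistics code below do. A sketch of that access pattern; the port offsets here are illustrative stand-ins (see tlan.h for the real values), and the glibc outw/inl pair assumes x86 with port permissions granted via ioperm/iopl.

	#include <stdint.h>
	#if defined(__linux__) && defined(__x86_64__)
	#include <sys/io.h>

	#define DEMO_DIO_ADR  0x08	/* assumed offset of TLAN_DIO_ADR */
	#define DEMO_DIO_DATA 0x0c	/* assumed offset of TLAN_DIO_DATA */

	static uint32_t demo_dio_read32(uint16_t io_base, uint16_t reg)
	{
		outw(reg, io_base + DEMO_DIO_ADR);	/* select register */
		return inl(io_base + DEMO_DIO_DATA);	/* read 32 bits */
	}
	#endif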
2070 /*************************************************************** 2160/***************************************************************
2071 * TLan_PrintList 2161 * tlan_print_list
2072 * 2162 *
2073 * Returns: 2163 * Returns:
2074 * Nothing 2164 * Nothing
2075 * Parms: 2165 * Parms:
2076 * list A pointer to the TLanList structure to 2166 * list A pointer to the struct tlan_list structure to
2077 * be printed. 2167 * be printed.
2078 * type A string to designate type of list, 2168 * type A string to designate type of list,
2079 * "Rx" or "Tx". 2169 * "Rx" or "Tx".
2080 * num The index of the list. 2170 * num The index of the list.
2081 * 2171 *
2082 * This function prints out the contents of the list 2172 * This function prints out the contents of the list
2083 * pointed to by the list parameter. 2173 * pointed to by the list parameter.
2084 * 2174 *
2085 **************************************************************/ 2175 **************************************************************/
2086 2176
2087static void TLan_PrintList( TLanList *list, char *type, int num) 2177static void tlan_print_list(struct tlan_list *list, char *type, int num)
2088{ 2178{
2089 int i; 2179 int i;
2090 2180
2091 printk( "TLAN: %s List %d at %p\n", type, num, list ); 2181 pr_info("TLAN: %s List %d at %p\n", type, num, list);
2092 printk( "TLAN: Forward = 0x%08x\n", list->forward ); 2182 pr_info("TLAN: Forward = 0x%08x\n", list->forward);
2093 printk( "TLAN: CSTAT = 0x%04hx\n", list->cStat ); 2183 pr_info("TLAN: CSTAT = 0x%04hx\n", list->c_stat);
2094 printk( "TLAN: Frame Size = 0x%04hx\n", list->frameSize ); 2184 pr_info("TLAN: Frame Size = 0x%04hx\n", list->frame_size);
2095 /* for ( i = 0; i < 10; i++ ) { */ 2185 /* for (i = 0; i < 10; i++) { */
2096 for ( i = 0; i < 2; i++ ) { 2186 for (i = 0; i < 2; i++) {
2097 printk( "TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n", 2187 pr_info("TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
2098 i, list->buffer[i].count, list->buffer[i].address ); 2188 i, list->buffer[i].count, list->buffer[i].address);
2099 } 2189 }
2100 2190
2101} /* TLan_PrintList */ 2191}
2102 2192
2103 2193
2104 2194
2105 2195
2106 /*************************************************************** 2196/***************************************************************
2107 * TLan_ReadAndClearStats 2197 * tlan_read_and_clear_stats
2108 * 2198 *
2109 * Returns: 2199 * Returns:
2110 * Nothing 2200 * Nothing
2111 * Parms: 2201 * Parms:
2112 * dev Pointer to device structure of adapter 2202 * dev Pointer to device structure of adapter
2113 * to which to read stats. 2203 * to which to read stats.
2114 * record Flag indicating whether to add 2204 * record Flag indicating whether to add
2115 * the values read to dev->stats. 2205 * the values read to dev->stats.
2116 * This function reads all the internal status registers 2206 * This function reads all the internal status registers
2117 * of the TLAN chip, which clears them as a side effect. 2207 * of the TLAN chip, which clears them as a side effect.
2118 * It then either adds the values to the device's status 2208 * It then either adds the values to the device's status
2119 * struct, or discards them, depending on whether record 2209 * struct, or discards them, depending on whether record
2120 * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0). 2210 * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0).
2121 * 2211 *
2122 **************************************************************/ 2212 **************************************************************/
2123 2213
2124static void TLan_ReadAndClearStats( struct net_device *dev, int record ) 2214static void tlan_read_and_clear_stats(struct net_device *dev, int record)
2125{ 2215{
2126 u32 tx_good, tx_under; 2216 u32 tx_good, tx_under;
2127 u32 rx_good, rx_over; 2217 u32 rx_good, rx_over;
@@ -2129,41 +2219,42 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record )
2129 u32 multi_col, single_col; 2219 u32 multi_col, single_col;
2130 u32 excess_col, late_col, loss; 2220 u32 excess_col, late_col, loss;
2131 2221
2132 outw( TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR ); 2222 outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR);
2133 tx_good = inb( dev->base_addr + TLAN_DIO_DATA ); 2223 tx_good = inb(dev->base_addr + TLAN_DIO_DATA);
2134 tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8; 2224 tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
2135 tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16; 2225 tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
2136 tx_under = inb( dev->base_addr + TLAN_DIO_DATA + 3 ); 2226 tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3);
2137 2227
2138 outw( TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR ); 2228 outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR);
2139 rx_good = inb( dev->base_addr + TLAN_DIO_DATA ); 2229 rx_good = inb(dev->base_addr + TLAN_DIO_DATA);
2140 rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8; 2230 rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
2141 rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16; 2231 rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
2142 rx_over = inb( dev->base_addr + TLAN_DIO_DATA + 3 ); 2232 rx_over = inb(dev->base_addr + TLAN_DIO_DATA + 3);
2143 2233
2144 outw( TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR ); 2234 outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR);
2145 def_tx = inb( dev->base_addr + TLAN_DIO_DATA ); 2235 def_tx = inb(dev->base_addr + TLAN_DIO_DATA);
2146 def_tx += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8; 2236 def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
2147 crc = inb( dev->base_addr + TLAN_DIO_DATA + 2 ); 2237 crc = inb(dev->base_addr + TLAN_DIO_DATA + 2);
2148 code = inb( dev->base_addr + TLAN_DIO_DATA + 3 ); 2238 code = inb(dev->base_addr + TLAN_DIO_DATA + 3);
2149 2239
2150 outw( TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR ); 2240 outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
2151 multi_col = inb( dev->base_addr + TLAN_DIO_DATA ); 2241 multi_col = inb(dev->base_addr + TLAN_DIO_DATA);
2152 multi_col += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8; 2242 multi_col += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
2153 single_col = inb( dev->base_addr + TLAN_DIO_DATA + 2 ); 2243 single_col = inb(dev->base_addr + TLAN_DIO_DATA + 2);
2154 single_col += inb( dev->base_addr + TLAN_DIO_DATA + 3 ) << 8; 2244 single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8;
2155 2245
2156 outw( TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR ); 2246 outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
2157 excess_col = inb( dev->base_addr + TLAN_DIO_DATA ); 2247 excess_col = inb(dev->base_addr + TLAN_DIO_DATA);
2158 late_col = inb( dev->base_addr + TLAN_DIO_DATA + 1 ); 2248 late_col = inb(dev->base_addr + TLAN_DIO_DATA + 1);
2159 loss = inb( dev->base_addr + TLAN_DIO_DATA + 2 ); 2249 loss = inb(dev->base_addr + TLAN_DIO_DATA + 2);
2160 2250
2161 if ( record ) { 2251 if (record) {
2162 dev->stats.rx_packets += rx_good; 2252 dev->stats.rx_packets += rx_good;
2163 dev->stats.rx_errors += rx_over + crc + code; 2253 dev->stats.rx_errors += rx_over + crc + code;
2164 dev->stats.tx_packets += tx_good; 2254 dev->stats.tx_packets += tx_good;
2165 dev->stats.tx_errors += tx_under + loss; 2255 dev->stats.tx_errors += tx_under + loss;
2166 dev->stats.collisions += multi_col + single_col + excess_col + late_col; 2256 dev->stats.collisions += multi_col
2257 + single_col + excess_col + late_col;
2167 2258
2168 dev->stats.rx_over_errors += rx_over; 2259 dev->stats.rx_over_errors += rx_over;
2169 dev->stats.rx_crc_errors += crc; 2260 dev->stats.rx_crc_errors += crc;
@@ -2173,39 +2264,39 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record )
2173 dev->stats.tx_carrier_errors += loss; 2264 dev->stats.tx_carrier_errors += loss;
2174 } 2265 }
2175 2266
2176} /* TLan_ReadAndClearStats */ 2267}
2177 2268
2178 2269
2179 2270
2180 2271
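The counters read above are multi-byte little-endian values pulled one byte at a time from the data port: Good_TX_Frms, for instance, is a 24-bit frame counter sharing its 32-bit register with an 8-bit underrun counter in the top byte. A sketch of the byte assembly on a captured register image (the byte values are made up for illustration):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* bytes as they would come from TLAN_DIO_DATA+0..+3 */
		const uint8_t b[4] = { 0x10, 0x32, 0x54, 0x02 };
		uint32_t tx_good;
		uint32_t tx_under;

		tx_good  = b[0];
		tx_good += (uint32_t)b[1] << 8;
		tx_good += (uint32_t)b[2] << 16;	/* 24-bit frame counter */
		tx_under = b[3];			/* 8-bit underrun counter */

		printf("tx_good=%u (0x%06x) tx_under=%u\n",
		       tx_good, tx_good, tx_under);
		return 0;
	}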
2181 /*************************************************************** 2272/***************************************************************
2182 * TLan_Reset 2273 * tlan_reset_adapter
2183 * 2274 *
2184 * Returns: 2275 * Returns:
2185 * 0 2276 * 0
2186 * Parms: 2277 * Parms:
2187 * dev Pointer to device structure of adapter 2278 * dev Pointer to device structure of adapter
2188 * to be reset. 2279 * to be reset.
2189 * 2280 *
2190 * This function resets the adapter and its physical 2281 * This function resets the adapter and its physical
2191 * device. See Chap. 3, pp. 9-10 of the "ThunderLAN 2282 * device. See Chap. 3, pp. 9-10 of the "ThunderLAN
2192 * Programmer's Guide" for details. The routine tries to 2283 * Programmer's Guide" for details. The routine tries to
2193 * implement what is detailed there, though adjustments 2284 * implement what is detailed there, though adjustments
2194 * have been made. 2285 * have been made.
2195 * 2286 *
2196 **************************************************************/ 2287 **************************************************************/
2197 2288
2198static void 2289static void
2199TLan_ResetAdapter( struct net_device *dev ) 2290tlan_reset_adapter(struct net_device *dev)
2200{ 2291{
2201 TLanPrivateInfo *priv = netdev_priv(dev); 2292 struct tlan_priv *priv = netdev_priv(dev);
2202 int i; 2293 int i;
2203 u32 addr; 2294 u32 addr;
2204 u32 data; 2295 u32 data;
2205 u8 data8; 2296 u8 data8;
2206 2297
2207 priv->tlanFullDuplex = false; 2298 priv->tlan_full_duplex = false;
2208 priv->phyOnline=0; 2299 priv->phy_online = 0;
2209 netif_carrier_off(dev); 2300 netif_carrier_off(dev);
2210 2301
2211/* 1. Assert reset bit. */ 2302/* 1. Assert reset bit. */
@@ -2216,7 +2307,7 @@ TLan_ResetAdapter( struct net_device *dev )
2216 2307
2217 udelay(1000); 2308 udelay(1000);
2218 2309
2219/* 2. Turn off interrupts. ( Probably isn't necessary ) */ 2310/* 2. Turn off interrupts. (Probably isn't necessary) */
2220 2311
2221 data = inl(dev->base_addr + TLAN_HOST_CMD); 2312 data = inl(dev->base_addr + TLAN_HOST_CMD);
2222 data |= TLAN_HC_INT_OFF; 2313 data |= TLAN_HC_INT_OFF;
@@ -2224,207 +2315,208 @@ TLan_ResetAdapter( struct net_device *dev )
2224 2315
2225/* 3. Clear AREGs and HASHs. */ 2316/* 3. Clear AREGs and HASHs. */
2226 2317
2227 for ( i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4 ) { 2318 for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4)
2228 TLan_DioWrite32( dev->base_addr, (u16) i, 0 ); 2319 tlan_dio_write32(dev->base_addr, (u16) i, 0);
2229 }
2230 2320
2231/* 4. Setup NetConfig register. */ 2321/* 4. Setup NetConfig register. */
2232 2322
2233 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN; 2323 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
2234 TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data ); 2324 tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
2235 2325
2236/* 5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */ 2326/* 5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */
2237 2327
2238 outl( TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD ); 2328 outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD);
2239 outl( TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD ); 2329 outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD);
2240 2330
2241/* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */ 2331/* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */
2242 2332
2243 outw( TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR ); 2333 outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
2244 addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO; 2334 addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
2245 TLan_SetBit( TLAN_NET_SIO_NMRST, addr ); 2335 tlan_set_bit(TLAN_NET_SIO_NMRST, addr);
2246 2336
2247/* 7. Setup the remaining registers. */ 2337/* 7. Setup the remaining registers. */
2248 2338
2249 if ( priv->tlanRev >= 0x30 ) { 2339 if (priv->tlan_rev >= 0x30) {
2250 data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC; 2340 data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
2251 TLan_DioWrite8( dev->base_addr, TLAN_INT_DIS, data8 ); 2341 tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8);
2252 } 2342 }
2253 TLan_PhyDetect( dev ); 2343 tlan_phy_detect(dev);
2254 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN; 2344 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;
2255 2345
2256 if ( priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY ) { 2346 if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) {
2257 data |= TLAN_NET_CFG_BIT; 2347 data |= TLAN_NET_CFG_BIT;
2258 if ( priv->aui == 1 ) { 2348 if (priv->aui == 1) {
2259 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a ); 2349 tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a);
2260 } else if ( priv->duplex == TLAN_DUPLEX_FULL ) { 2350 } else if (priv->duplex == TLAN_DUPLEX_FULL) {
2261 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 ); 2351 tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00);
2262 priv->tlanFullDuplex = true; 2352 priv->tlan_full_duplex = true;
2263 } else { 2353 } else {
2264 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 ); 2354 tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08);
2265 } 2355 }
2266 } 2356 }
2267 2357
2268 if ( priv->phyNum == 0 ) { 2358 if (priv->phy_num == 0)
2269 data |= TLAN_NET_CFG_PHY_EN; 2359 data |= TLAN_NET_CFG_PHY_EN;
2270 } 2360 tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
2271 TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );
2272 2361
2273 if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) { 2362 if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY)
2274 TLan_FinishReset( dev ); 2363 tlan_finish_reset(dev);
2275 } else { 2364 else
2276 TLan_PhyPowerDown( dev ); 2365 tlan_phy_power_down(dev);
2277 }
2278 2366
2279} /* TLan_ResetAdapter */ 2367}
2280 2368
2281 2369
2282 2370
2283 2371
2284static void 2372static void
2285TLan_FinishReset( struct net_device *dev ) 2373tlan_finish_reset(struct net_device *dev)
2286{ 2374{
2287 TLanPrivateInfo *priv = netdev_priv(dev); 2375 struct tlan_priv *priv = netdev_priv(dev);
2288 u8 data; 2376 u8 data;
2289 u32 phy; 2377 u32 phy;
2290 u8 sio; 2378 u8 sio;
2291 u16 status; 2379 u16 status;
2292 u16 partner; 2380 u16 partner;
2293 u16 tlphy_ctl; 2381 u16 tlphy_ctl;
2294 u16 tlphy_par; 2382 u16 tlphy_par;
2295 u16 tlphy_id1, tlphy_id2; 2383 u16 tlphy_id1, tlphy_id2;
2296 int i; 2384 int i;
2297 2385
2298 phy = priv->phy[priv->phyNum]; 2386 phy = priv->phy[priv->phy_num];
2299 2387
2300 data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP; 2388 data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
2301 if ( priv->tlanFullDuplex ) { 2389 if (priv->tlan_full_duplex)
2302 data |= TLAN_NET_CMD_DUPLEX; 2390 data |= TLAN_NET_CMD_DUPLEX;
2303 } 2391 tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data);
2304 TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, data );
2305 data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5; 2392 data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
2306 if ( priv->phyNum == 0 ) { 2393 if (priv->phy_num == 0)
2307 data |= TLAN_NET_MASK_MASK7; 2394 data |= TLAN_NET_MASK_MASK7;
2308 } 2395 tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data);
2309 TLan_DioWrite8( dev->base_addr, TLAN_NET_MASK, data ); 2396 tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7);
2310 TLan_DioWrite16( dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7 ); 2397 tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1);
2311 TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &tlphy_id1 ); 2398 tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2);
2312 TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &tlphy_id2 );
2313 2399
2314 if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) || 2400 if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) ||
2315 ( priv->aui ) ) { 2401 (priv->aui)) {
2316 status = MII_GS_LINK; 2402 status = MII_GS_LINK;
2317 printk( "TLAN: %s: Link forced.\n", dev->name ); 2403 pr_info("TLAN: %s: Link forced.\n", dev->name);
2318 } else { 2404 } else {
2319 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); 2405 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
2320 udelay( 1000 ); 2406 udelay(1000);
2321 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); 2407 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
2322 if ( (status & MII_GS_LINK) && 2408 if ((status & MII_GS_LINK) &&
2323 /* We only support link info on Nat. Sem. PHYs */ 2409 /* We only support link info on Nat. Sem. PHYs */
2324 (tlphy_id1 == NAT_SEM_ID1) && 2410 (tlphy_id1 == NAT_SEM_ID1) &&
2325 (tlphy_id2 == NAT_SEM_ID2) ) { 2411 (tlphy_id2 == NAT_SEM_ID2)) {
2326 TLan_MiiReadReg( dev, phy, MII_AN_LPA, &partner ); 2412 tlan_mii_read_reg(dev, phy, MII_AN_LPA, &partner);
2327 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_PAR, &tlphy_par ); 2413 tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &tlphy_par);
2328 2414
2329 printk( "TLAN: %s: Link active with ", dev->name ); 2415 pr_info("TLAN: %s: Link active with ", dev->name);
2330 if (!(tlphy_par & TLAN_PHY_AN_EN_STAT)) { 2416 if (!(tlphy_par & TLAN_PHY_AN_EN_STAT)) {
2331 printk( "forced 10%sMbps %s-Duplex\n", 2417 pr_info("forced 10%sMbps %s-Duplex\n",
2332 tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0", 2418 tlphy_par & TLAN_PHY_SPEED_100
2333 tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half"); 2419 ? "" : "0",
2420 tlphy_par & TLAN_PHY_DUPLEX_FULL
2421 ? "Full" : "Half");
2334 } else { 2422 } else {
2335 printk( "AutoNegotiation enabled, at 10%sMbps %s-Duplex\n", 2423 pr_info("Autonegotiation enabled, at 10%sMbps %s-Duplex\n",
2336 tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0", 2424 tlphy_par & TLAN_PHY_SPEED_100
2337 tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half"); 2425 ? "" : "0",
2338 printk("TLAN: Partner capability: "); 2426 tlphy_par & TLAN_PHY_DUPLEX_FULL
2339 for (i = 5; i <= 10; i++) 2427 ? "Full" : "Half");
2340 if (partner & (1<<i)) 2428 pr_info("TLAN: Partner capability: ");
2341 printk("%s",media[i-5]); 2429 for (i = 5; i <= 10; i++)
2430 if (partner & (1<<i))
2431 printk("%s", media[i-5]);
2342 printk("\n"); 2432 printk("\n");
2343 } 2433 }
2344 2434
2345 TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK ); 2435 tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
2436 TLAN_LED_LINK);
2346#ifdef MONITOR 2437#ifdef MONITOR
2347 /* We have link beat... for now anyway */ 2438 /* We have link beat... for now anyway */
2348 priv->link = 1; 2439 priv->link = 1;
2349 /* Enabling link beat monitoring */ 2440 /* Enabling link beat monitoring */
2350 TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_LINK_BEAT ); 2441 tlan_set_timer(dev, (10*HZ), TLAN_TIMER_LINK_BEAT);
2351#endif 2442#endif
2352 } else if (status & MII_GS_LINK) { 2443 } else if (status & MII_GS_LINK) {
2353 printk( "TLAN: %s: Link active\n", dev->name ); 2444 pr_info("TLAN: %s: Link active\n", dev->name);
2354 TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK ); 2445 tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
2446 TLAN_LED_LINK);
2355 } 2447 }
2356 } 2448 }
2357 2449
2358 if ( priv->phyNum == 0 ) { 2450 if (priv->phy_num == 0) {
2359 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl ); 2451 tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
2360 tlphy_ctl |= TLAN_TC_INTEN; 2452 tlphy_ctl |= TLAN_TC_INTEN;
2361 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl ); 2453 tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
2362 sio = TLan_DioRead8( dev->base_addr, TLAN_NET_SIO ); 2454 sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO);
2363 sio |= TLAN_NET_SIO_MINTEN; 2455 sio |= TLAN_NET_SIO_MINTEN;
2364 TLan_DioWrite8( dev->base_addr, TLAN_NET_SIO, sio ); 2456 tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio);
2365 } 2457 }
2366 2458
2367 if ( status & MII_GS_LINK ) { 2459 if (status & MII_GS_LINK) {
2368 TLan_SetMac( dev, 0, dev->dev_addr ); 2460 tlan_set_mac(dev, 0, dev->dev_addr);
2369 priv->phyOnline = 1; 2461 priv->phy_online = 1;
2370 outb( ( TLAN_HC_INT_ON >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 ); 2462 outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1);
2371 if ( debug >= 1 && debug != TLAN_DEBUG_PROBE ) { 2463 if (debug >= 1 && debug != TLAN_DEBUG_PROBE)
2372 outb( ( TLAN_HC_REQ_INT >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 ); 2464 outb((TLAN_HC_REQ_INT >> 8),
2373 } 2465 dev->base_addr + TLAN_HOST_CMD + 1);
2374 outl( priv->rxListDMA, dev->base_addr + TLAN_CH_PARM ); 2466 outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
2375 outl( TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD ); 2467 outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
2376 netif_carrier_on(dev); 2468 netif_carrier_on(dev);
2377 } else { 2469 } else {
2378 printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n", 2470 pr_info("TLAN: %s: Link inactive, will retry in 10 secs...\n",
2379 dev->name ); 2471 dev->name);
2380 TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_FINISH_RESET ); 2472 tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET);
2381 return; 2473 return;
2382 } 2474 }
2383 TLan_SetMulticastList(dev); 2475 tlan_set_multicast_list(dev);
2384 2476
2385} /* TLan_FinishReset */ 2477}
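The partner-capability loop above walks bits 5-10 of the MII link-partner ability word and prints one entry from the driver's media[] table per set bit; the table itself is defined earlier in tlan.c, outside this hunk. A stand-alone sketch of the same decode, with illustrative strings in place of the real table and the bit meanings assumed to follow the standard ANLPA layout:

#include <stdio.h>
#include <stdint.h>

/* illustrative stand-ins for the driver's media[] strings */
static const char *media_names[6] = {
	"10BaseT-HD ", "10BaseT-FD ", "100BaseTx-HD ",
	"100BaseTx-FD ", "100BaseT4 ", "PAUSE ",
};

static void print_partner(uint16_t an_lpa)
{
	int i;

	for (i = 5; i <= 10; i++)	/* same loop bounds as above */
		if (an_lpa & (1 << i))
			printf("%s", media_names[i - 5]);
	printf("\n");
}

int main(void)
{
	print_partner(0x01e1);	/* partner offers 10/100, half and full */
	return 0;
}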
2386 2478
2387 2479
2388 2480
2389 2481
2390 /*************************************************************** 2482/***************************************************************
2391 * TLan_SetMac 2483 * tlan_set_mac
2392 * 2484 *
2393 * Returns: 2485 * Returns:
2394 * Nothing 2486 * Nothing
2395 * Parms: 2487 * Parms:
2396 * dev Pointer to device structure of adapter 2488 * dev Pointer to device structure of adapter
2397 * on which to change the AREG. 2489 * on which to change the AREG.
2398 * areg The AREG to set the address in (0 - 3). 2490 * areg The AREG to set the address in (0 - 3).
2399 * mac A pointer to an array of chars. Each 2491 * mac A pointer to an array of chars. Each
2400 * element stores one byte of the address. 2492 * element stores one byte of the address.
2401 * I.e., it isn't in ASCII. 2493 * I.e., it isn't in ASCII.
2402 * 2494 *
2403 * This function transfers a MAC address to one of the 2495 * This function transfers a MAC address to one of the
2404 * TLAN AREGs (address registers). The TLAN chip locks 2496 * TLAN AREGs (address registers). The TLAN chip locks
2405 * the register on writing to offset 0 and unlocks the 2497 * the register on writing to offset 0 and unlocks the
2406 * register after writing to offset 5. If NULL is passed 2498 * register after writing to offset 5. If NULL is passed
2407 * in mac, then the AREG is filled with 0's. 2499 * in mac, then the AREG is filled with 0's.
2408 * 2500 *
2409 **************************************************************/ 2501 **************************************************************/
2410 2502
2411static void TLan_SetMac( struct net_device *dev, int areg, char *mac ) 2503static void tlan_set_mac(struct net_device *dev, int areg, char *mac)
2412{ 2504{
2413 int i; 2505 int i;
2414 2506
2415 areg *= 6; 2507 areg *= 6;
2416 2508
2417 if ( mac != NULL ) { 2509 if (mac != NULL) {
2418 for ( i = 0; i < 6; i++ ) 2510 for (i = 0; i < 6; i++)
2419 TLan_DioWrite8( dev->base_addr, 2511 tlan_dio_write8(dev->base_addr,
2420 TLAN_AREG_0 + areg + i, mac[i] ); 2512 TLAN_AREG_0 + areg + i, mac[i]);
2421 } else { 2513 } else {
2422 for ( i = 0; i < 6; i++ ) 2514 for (i = 0; i < 6; i++)
2423 TLan_DioWrite8( dev->base_addr, 2515 tlan_dio_write8(dev->base_addr,
2424 TLAN_AREG_0 + areg + i, 0 ); 2516 TLAN_AREG_0 + areg + i, 0);
2425 } 2517 }
2426 2518
2427} /* TLan_SetMac */ 2519}
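tlan_set_mac() works because the chip locks an AREG on the write to offset 0 and unlocks it after offset 5, so the six bytes must be written strictly in order, as the loops above do. A small host-side model of the AREG file (four 6-byte registers, matching the "(0 - 3)" note in the header comment), showing the same write-in-order and NULL-clears behaviour:

#include <stdio.h>
#include <stdint.h>

static uint8_t areg_file[4 * 6];	/* model of AREG_0..AREG_3 */

/* mirrors tlan_set_mac(): bytes 0..5 in order; NULL clears the AREG */
static void model_set_mac(int areg, const uint8_t *mac)
{
	int i;

	areg *= 6;
	for (i = 0; i < 6; i++)
		areg_file[areg + i] = mac ? mac[i] : 0;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x50, 0x8b, 0x12, 0x34, 0x56 };

	model_set_mac(0, mac);	/* station address into AREG 0 */
	model_set_mac(1, NULL);	/* clear AREG 1 */
	printf("%02x:%02x:%02x\n", areg_file[0], areg_file[1], areg_file[2]);
	return 0;
}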
2428 2520
2429 2521
2430 2522
@@ -2432,205 +2524,202 @@ static void TLan_SetMac( struct net_device *dev, int areg, char *mac )
2432/***************************************************************************** 2524/*****************************************************************************
2433****************************************************************************** 2525******************************************************************************
2434 2526
2435 ThunderLAN Driver PHY Layer Routines 2527ThunderLAN driver PHY layer routines
2436 2528
2437****************************************************************************** 2529******************************************************************************
2438*****************************************************************************/ 2530*****************************************************************************/
2439 2531
2440 2532
2441 2533
2442 /********************************************************************* 2534/*********************************************************************
2443 * TLan_PhyPrint 2535 * tlan_phy_print
2444 * 2536 *
2445 * Returns: 2537 * Returns:
2446 * Nothing 2538 * Nothing
2447 * Parms: 2539 * Parms:
2448 * dev A pointer to the device structure of the 2540 * dev A pointer to the device structure of the
2449 * TLAN device having the PHYs to be detailed. 2541 * TLAN device having the PHYs to be detailed.
2450 * 2542 *
2451 * This function prints the registers of a PHY (aka transceiver). 2543 * This function prints the registers of a PHY (aka transceiver).
2452 * 2544 *
2453 ********************************************************************/ 2545 ********************************************************************/
2454 2546
2455static void TLan_PhyPrint( struct net_device *dev ) 2547static void tlan_phy_print(struct net_device *dev)
2456{ 2548{
2457 TLanPrivateInfo *priv = netdev_priv(dev); 2549 struct tlan_priv *priv = netdev_priv(dev);
2458 u16 i, data0, data1, data2, data3, phy; 2550 u16 i, data0, data1, data2, data3, phy;
2459 2551
2460 phy = priv->phy[priv->phyNum]; 2552 phy = priv->phy[priv->phy_num];
2461 2553
2462 if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) { 2554 if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
2463 printk( "TLAN: Device %s, Unmanaged PHY.\n", dev->name ); 2555 pr_info("TLAN: Device %s, Unmanaged PHY.\n", dev->name);
2464 } else if ( phy <= TLAN_PHY_MAX_ADDR ) { 2556 } else if (phy <= TLAN_PHY_MAX_ADDR) {
2465 printk( "TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy ); 2557 pr_info("TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy);
2466 printk( "TLAN: Off. +0 +1 +2 +3\n" ); 2558 pr_info("TLAN: Off. +0 +1 +2 +3\n");
2467 for ( i = 0; i < 0x20; i+= 4 ) { 2559 for (i = 0; i < 0x20; i += 4) {
2468 printk( "TLAN: 0x%02x", i ); 2560 pr_info("TLAN: 0x%02x", i);
2469 TLan_MiiReadReg( dev, phy, i, &data0 ); 2561 tlan_mii_read_reg(dev, phy, i, &data0);
2470 printk( " 0x%04hx", data0 ); 2562 printk(" 0x%04hx", data0);
2471 TLan_MiiReadReg( dev, phy, i + 1, &data1 ); 2563 tlan_mii_read_reg(dev, phy, i + 1, &data1);
2472 printk( " 0x%04hx", data1 ); 2564 printk(" 0x%04hx", data1);
2473 TLan_MiiReadReg( dev, phy, i + 2, &data2 ); 2565 tlan_mii_read_reg(dev, phy, i + 2, &data2);
2474 printk( " 0x%04hx", data2 ); 2566 printk(" 0x%04hx", data2);
2475 TLan_MiiReadReg( dev, phy, i + 3, &data3 ); 2567 tlan_mii_read_reg(dev, phy, i + 3, &data3);
2476 printk( " 0x%04hx\n", data3 ); 2568 printk(" 0x%04hx\n", data3);
2477 } 2569 }
2478 } else { 2570 } else {
2479 printk( "TLAN: Device %s, Invalid PHY.\n", dev->name ); 2571 pr_info("TLAN: Device %s, Invalid PHY.\n", dev->name);
2480 } 2572 }
2481 2573
2482} /* TLan_PhyPrint */ 2574}
2483 2575
2484 2576
2485 2577
2486 2578
2487 /********************************************************************* 2579/*********************************************************************
2488 * TLan_PhyDetect 2580 * tlan_phy_detect
2489 * 2581 *
2490 * Returns: 2582 * Returns:
2491 * Nothing 2583 * Nothing
2492 * Parms: 2584 * Parms:
2493 * dev A pointer to the device structure of the adapter 2585 * dev A pointer to the device structure of the adapter
2494 * for which the PHY needs to be determined. 2586 * for which the PHY needs to be determined.
2495 * 2587 *
2496 * So far I've found that adapters which have external PHYs 2588 * So far I've found that adapters which have external PHYs
2497 * may also use the internal PHY for part of the functionality. 2589 * may also use the internal PHY for part of the functionality.
2498 * (e.g., AUI/Thinnet). This function finds out if this TLAN 2590 * (e.g., AUI/Thinnet). This function finds out if this TLAN
2499 * chip has an internal PHY, and then finds the first external 2591 * chip has an internal PHY, and then finds the first external
2500 * PHY (starting from address 0), if it exists. 2592 * PHY (starting from address 0), if it exists.
2501 * 2593 *
2502 ********************************************************************/ 2594 ********************************************************************/
2503 2595
2504static void TLan_PhyDetect( struct net_device *dev ) 2596static void tlan_phy_detect(struct net_device *dev)
2505{ 2597{
2506 TLanPrivateInfo *priv = netdev_priv(dev); 2598 struct tlan_priv *priv = netdev_priv(dev);
2507 u16 control; 2599 u16 control;
2508 u16 hi; 2600 u16 hi;
2509 u16 lo; 2601 u16 lo;
2510 u32 phy; 2602 u32 phy;
2511 2603
2512 if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) { 2604 if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
2513 priv->phyNum = 0xFFFF; 2605 priv->phy_num = 0xffff;
2514 return; 2606 return;
2515 } 2607 }
2516 2608
2517 TLan_MiiReadReg( dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi ); 2609 tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi);
2518 2610
2519 if ( hi != 0xFFFF ) { 2611 if (hi != 0xffff)
2520 priv->phy[0] = TLAN_PHY_MAX_ADDR; 2612 priv->phy[0] = TLAN_PHY_MAX_ADDR;
2521 } else { 2613 else
2522 priv->phy[0] = TLAN_PHY_NONE; 2614 priv->phy[0] = TLAN_PHY_NONE;
2523 }
2524 2615
2525 priv->phy[1] = TLAN_PHY_NONE; 2616 priv->phy[1] = TLAN_PHY_NONE;
2526 for ( phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++ ) { 2617 for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) {
2527 TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &control ); 2618 tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control);
2528 TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &hi ); 2619 tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi);
2529 TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &lo ); 2620 tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo);
2530 if ( ( control != 0xFFFF ) || 2621 if ((control != 0xffff) ||
2531 ( hi != 0xFFFF ) || ( lo != 0xFFFF ) ) { 2622 (hi != 0xffff) || (lo != 0xffff)) {
2532 TLAN_DBG( TLAN_DEBUG_GNRL, 2623 TLAN_DBG(TLAN_DEBUG_GNRL,
2533 "PHY found at %02x %04x %04x %04x\n", 2624 "PHY found at %02x %04x %04x %04x\n",
2534 phy, control, hi, lo ); 2625 phy, control, hi, lo);
2535 if ( ( priv->phy[1] == TLAN_PHY_NONE ) && 2626 if ((priv->phy[1] == TLAN_PHY_NONE) &&
2536 ( phy != TLAN_PHY_MAX_ADDR ) ) { 2627 (phy != TLAN_PHY_MAX_ADDR)) {
2537 priv->phy[1] = phy; 2628 priv->phy[1] = phy;
2538 } 2629 }
2539 } 2630 }
2540 } 2631 }
2541 2632
2542 if ( priv->phy[1] != TLAN_PHY_NONE ) { 2633 if (priv->phy[1] != TLAN_PHY_NONE)
2543 priv->phyNum = 1; 2634 priv->phy_num = 1;
2544 } else if ( priv->phy[0] != TLAN_PHY_NONE ) { 2635 else if (priv->phy[0] != TLAN_PHY_NONE)
2545 priv->phyNum = 0; 2636 priv->phy_num = 0;
2546 } else { 2637 else
2547 printk( "TLAN: Cannot initialize device, no PHY was found!\n" ); 2638 pr_info("TLAN: Cannot initialize device, no PHY was found!\n");
2548 }
2549 2639
2550} /* TLan_PhyDetect */ 2640}
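The scan above treats a PHY address as populated whenever any of its control or ID registers reads back something other than 0xffff, the value an undriven MII bus floats to. The predicate, isolated as a stand-alone sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* a PHY answers if control or either ID register is not all ones */
static bool phy_present(uint16_t control, uint16_t hi, uint16_t lo)
{
	return (control != 0xffff) || (hi != 0xffff) || (lo != 0xffff);
}

int main(void)
{
	printf("%d\n", phy_present(0xffff, 0xffff, 0xffff));	/* 0: empty */
	printf("%d\n", phy_present(0x3100, 0x2000, 0x5c01));	/* 1: found */
	return 0;
}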
2551 2641
2552 2642
2553 2643
2554 2644
2555static void TLan_PhyPowerDown( struct net_device *dev ) 2645static void tlan_phy_power_down(struct net_device *dev)
2556{ 2646{
2557 TLanPrivateInfo *priv = netdev_priv(dev); 2647 struct tlan_priv *priv = netdev_priv(dev);
2558 u16 value; 2648 u16 value;
2559 2649
2560 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name ); 2650 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name);
2561 value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE; 2651 value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
2562 TLan_MiiSync( dev->base_addr ); 2652 tlan_mii_sync(dev->base_addr);
2563 TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value ); 2653 tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
2564 if ( ( priv->phyNum == 0 ) && 2654 if ((priv->phy_num == 0) &&
2565 ( priv->phy[1] != TLAN_PHY_NONE ) && 2655 (priv->phy[1] != TLAN_PHY_NONE) &&
2566 ( ! ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) ) ) { 2656 (!(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))) {
2567 TLan_MiiSync( dev->base_addr ); 2657 tlan_mii_sync(dev->base_addr);
2568 TLan_MiiWriteReg( dev, priv->phy[1], MII_GEN_CTL, value ); 2658 tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
2569 } 2659 }
2570 2660
2571 /* Wait for 50 ms and power up. 2661 /* Wait for 50 ms and power up.
2572 * This is arbitrary. It is intended to make sure the 2662 * This is arbitrary. It is intended to make sure the
2573 * transceiver settles. 2663 * transceiver settles.
2574 */ 2664 */
2575 TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_PUP ); 2665 tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP);
2576 2666
2577} /* TLan_PhyPowerDown */ 2667}
2578 2668
2579 2669
2580 2670
2581 2671
2582static void TLan_PhyPowerUp( struct net_device *dev ) 2672static void tlan_phy_power_up(struct net_device *dev)
2583{ 2673{
2584 TLanPrivateInfo *priv = netdev_priv(dev); 2674 struct tlan_priv *priv = netdev_priv(dev);
2585 u16 value; 2675 u16 value;
2586 2676
2587 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name ); 2677 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name);
2588 TLan_MiiSync( dev->base_addr ); 2678 tlan_mii_sync(dev->base_addr);
2589 value = MII_GC_LOOPBK; 2679 value = MII_GC_LOOPBK;
2590 TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value ); 2680 tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
2591 TLan_MiiSync(dev->base_addr); 2681 tlan_mii_sync(dev->base_addr);
2592 /* Wait for 500 ms and reset the 2682 /* Wait for 500 ms and reset the
2593 * transceiver. The TLAN docs say both 50 ms and 2683 * transceiver. The TLAN docs say both 50 ms and
2594 * 500 ms, so do the longer, just in case. 2684 * 500 ms, so do the longer, just in case.
2595 */ 2685 */
2596 TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_RESET ); 2686 tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET);
2597 2687
2598} /* TLan_PhyPowerUp */ 2688}
2599 2689
2600 2690
2601 2691
2602 2692
2603static void TLan_PhyReset( struct net_device *dev ) 2693static void tlan_phy_reset(struct net_device *dev)
2604{ 2694{
2605 TLanPrivateInfo *priv = netdev_priv(dev); 2695 struct tlan_priv *priv = netdev_priv(dev);
2606 u16 phy; 2696 u16 phy;
2607 u16 value; 2697 u16 value;
2608 2698
2609 phy = priv->phy[priv->phyNum]; 2699 phy = priv->phy[priv->phy_num];
2610 2700
2611 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name ); 2701 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name);
2612 TLan_MiiSync( dev->base_addr ); 2702 tlan_mii_sync(dev->base_addr);
2613 value = MII_GC_LOOPBK | MII_GC_RESET; 2703 value = MII_GC_LOOPBK | MII_GC_RESET;
2614 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, value ); 2704 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
2615 TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value ); 2705 tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
2616 while ( value & MII_GC_RESET ) { 2706 while (value & MII_GC_RESET)
2617 TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value ); 2707 tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
2618 }
2619 2708
2620 /* Wait for 500 ms and initialize. 2709 /* Wait for 500 ms and initialize.
2621 * I don't remember why I wait this long. 2710 * I don't remember why I wait this long.
2622 * I've changed this to 50ms, as it seems long enough. 2711 * I've changed this to 50ms, as it seems long enough.
2623 */ 2712 */
2624 TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_START_LINK ); 2713 tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK);
2625 2714
2626} /* TLan_PhyReset */ 2715}
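Note that tlan_phy_power_down(), tlan_phy_power_up() and tlan_phy_reset() never busy-wait for the transceiver; each stage arms a one-shot timer and returns, and the timer handler (tlan_timer(), outside this hunk) invokes the next stage. The chain, summarized from the tlan_set_timer() calls in the surrounding functions:

/* timer-driven PHY bring-up chain, with the delays armed above:
 *
 *   tlan_phy_power_down()  --HZ/20--> TLAN_TIMER_PHY_PUP
 *   tlan_phy_power_up()    --HZ/20--> TLAN_TIMER_PHY_RESET
 *   tlan_phy_reset()       --HZ/20--> TLAN_TIMER_PHY_START_LINK
 *   tlan_phy_start_link()  --2*HZ --> TLAN_TIMER_PHY_FINISH_AN
 *                       or --4*HZ --> TLAN_TIMER_FINISH_RESET
 */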
2627 2716
2628 2717
2629 2718
2630 2719
2631static void TLan_PhyStartLink( struct net_device *dev ) 2720static void tlan_phy_start_link(struct net_device *dev)
2632{ 2721{
2633 TLanPrivateInfo *priv = netdev_priv(dev); 2722 struct tlan_priv *priv = netdev_priv(dev);
2634 u16 ability; 2723 u16 ability;
2635 u16 control; 2724 u16 control;
2636 u16 data; 2725 u16 data;
@@ -2638,86 +2727,88 @@ static void TLan_PhyStartLink( struct net_device *dev )
2638 u16 status; 2727 u16 status;
2639 u16 tctl; 2728 u16 tctl;
2640 2729
2641 phy = priv->phy[priv->phyNum]; 2730 phy = priv->phy[priv->phy_num];
2642 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name ); 2731 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name);
2643 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); 2732 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
2644 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &ability ); 2733 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability);
2645 2734
2646 if ( ( status & MII_GS_AUTONEG ) && 2735 if ((status & MII_GS_AUTONEG) &&
2647 ( ! priv->aui ) ) { 2736 (!priv->aui)) {
2648 ability = status >> 11; 2737 ability = status >> 11;
2649 if ( priv->speed == TLAN_SPEED_10 && 2738 if (priv->speed == TLAN_SPEED_10 &&
2650 priv->duplex == TLAN_DUPLEX_HALF) { 2739 priv->duplex == TLAN_DUPLEX_HALF) {
2651 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0000); 2740 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000);
2652 } else if ( priv->speed == TLAN_SPEED_10 && 2741 } else if (priv->speed == TLAN_SPEED_10 &&
2653 priv->duplex == TLAN_DUPLEX_FULL) { 2742 priv->duplex == TLAN_DUPLEX_FULL) {
2654 priv->tlanFullDuplex = true; 2743 priv->tlan_full_duplex = true;
2655 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0100); 2744 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100);
2656 } else if ( priv->speed == TLAN_SPEED_100 && 2745 } else if (priv->speed == TLAN_SPEED_100 &&
2657 priv->duplex == TLAN_DUPLEX_HALF) { 2746 priv->duplex == TLAN_DUPLEX_HALF) {
2658 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2000); 2747 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000);
2659 } else if ( priv->speed == TLAN_SPEED_100 && 2748 } else if (priv->speed == TLAN_SPEED_100 &&
2660 priv->duplex == TLAN_DUPLEX_FULL) { 2749 priv->duplex == TLAN_DUPLEX_FULL) {
2661 priv->tlanFullDuplex = true; 2750 priv->tlan_full_duplex = true;
2662 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2100); 2751 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100);
2663 } else { 2752 } else {
2664 2753
2665 /* Set Auto-Neg advertisement */ 2754 /* Set Auto-Neg advertisement */
2666 TLan_MiiWriteReg( dev, phy, MII_AN_ADV, (ability << 5) | 1); 2755 tlan_mii_write_reg(dev, phy, MII_AN_ADV,
2756 (ability << 5) | 1);
2667 /* Enable Auto-Neg */ 2757 /* Enable Auto-Neg */
2668 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1000 ); 2758 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000);
2669 /* Restart Auto-Neg */ 2759 /* Restart Auto-Neg */
2670 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1200 ); 2760 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200);
2671 /* Wait for 4 sec for autonegotiation 2761 /* Wait for 4 sec for autonegotiation
2672 * to complete. The max spec time is less than this 2762 * to complete. The max spec time is less than this
2673 * but the card needs additional time to start AN. 2763 * but the card needs additional time to start AN.
2674 * 0.5 sec should be plenty extra. 2764 * 0.5 sec should be plenty extra.
2675 */ 2765 */
2676 printk( "TLAN: %s: Starting autonegotiation.\n", dev->name ); 2766 pr_info("TLAN: %s: Starting autonegotiation.\n",
2677 TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN ); 2767 dev->name);
2768 tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN);
2678 return; 2769 return;
2679 } 2770 }
2680 2771
2681 } 2772 }
2682 2773
2683 if ( ( priv->aui ) && ( priv->phyNum != 0 ) ) { 2774 if ((priv->aui) && (priv->phy_num != 0)) {
2684 priv->phyNum = 0; 2775 priv->phy_num = 0;
2685 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN; 2776 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
2686 TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data ); 2777 | TLAN_NET_CFG_PHY_EN;
2687 TLan_SetTimer( dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN ); 2778 tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
2779 tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN);
2688 return; 2780 return;
2689 } else if ( priv->phyNum == 0 ) { 2781 } else if (priv->phy_num == 0) {
2690 control = 0; 2782 control = 0;
2691 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tctl ); 2783 tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl);
2692 if ( priv->aui ) { 2784 if (priv->aui) {
2693 tctl |= TLAN_TC_AUISEL; 2785 tctl |= TLAN_TC_AUISEL;
2694 } else { 2786 } else {
2695 tctl &= ~TLAN_TC_AUISEL; 2787 tctl &= ~TLAN_TC_AUISEL;
2696 if ( priv->duplex == TLAN_DUPLEX_FULL ) { 2788 if (priv->duplex == TLAN_DUPLEX_FULL) {
2697 control |= MII_GC_DUPLEX; 2789 control |= MII_GC_DUPLEX;
2698 priv->tlanFullDuplex = true; 2790 priv->tlan_full_duplex = true;
2699 } 2791 }
2700 if ( priv->speed == TLAN_SPEED_100 ) { 2792 if (priv->speed == TLAN_SPEED_100)
2701 control |= MII_GC_SPEEDSEL; 2793 control |= MII_GC_SPEEDSEL;
2702 }
2703 } 2794 }
2704 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, control ); 2795 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control);
2705 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tctl ); 2796 tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl);
2706 } 2797 }
2707 2798
2708 /* Wait for 2 sec to give the transceiver time 2799 /* Wait for 2 sec to give the transceiver time
2709 * to establish link. 2800 * to establish link.
2710 */ 2801 */
2711 TLan_SetTimer( dev, (4*HZ), TLAN_TIMER_FINISH_RESET ); 2802 tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET);
2712 2803
2713} /* TLan_PhyStartLink */ 2804}
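The bare MII_GEN_CTL values written above are basic-mode control settings: 0x0000 forces 10 Mb half duplex, 0x0100 10 full, 0x2000 100 half, 0x2100 100 full, and 0x1000/0x1200 enable (and restart) autonegotiation. A stand-alone decoder, assuming the standard clause-22 bit positions the driver relies on:

#include <stdio.h>
#include <stdint.h>

/* assumed clause-22 BMCR bits behind the constants above */
#define BMCR_FULLDPLX	0x0100
#define BMCR_ANRESTART	0x0200
#define BMCR_ANENABLE	0x1000
#define BMCR_SPEED100	0x2000

static void decode_bmcr(uint16_t v)
{
	if (v & BMCR_ANENABLE)
		printf("autoneg%s\n", (v & BMCR_ANRESTART) ? "+restart" : "");
	else
		printf("forced %s %s duplex\n",
		       (v & BMCR_SPEED100) ? "100Mb" : "10Mb",
		       (v & BMCR_FULLDPLX) ? "full" : "half");
}

int main(void)
{
	decode_bmcr(0x0000);	/* 10 Mb half */
	decode_bmcr(0x2100);	/* 100 Mb full */
	decode_bmcr(0x1200);	/* autoneg + restart */
	return 0;
}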
2714 2805
2715 2806
2716 2807
2717 2808
2718static void TLan_PhyFinishAutoNeg( struct net_device *dev ) 2809static void tlan_phy_finish_auto_neg(struct net_device *dev)
2719{ 2810{
2720 TLanPrivateInfo *priv = netdev_priv(dev); 2811 struct tlan_priv *priv = netdev_priv(dev);
2721 u16 an_adv; 2812 u16 an_adv;
2722 u16 an_lpa; 2813 u16 an_lpa;
2723 u16 data; 2814 u16 data;
@@ -2725,115 +2816,118 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
2725 u16 phy; 2816 u16 phy;
2726 u16 status; 2817 u16 status;
2727 2818
2728 phy = priv->phy[priv->phyNum]; 2819 phy = priv->phy[priv->phy_num];
2729 2820
2730 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); 2821 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
2731 udelay( 1000 ); 2822 udelay(1000);
2732 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); 2823 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
2733 2824
2734 if ( ! ( status & MII_GS_AUTOCMPLT ) ) { 2825 if (!(status & MII_GS_AUTOCMPLT)) {
2735 /* Wait for 8 sec to give the process 2826 /* Wait for 8 sec to give the process
2736 * more time. Perhaps we should fail after a while. 2827 * more time. Perhaps we should fail after a while.
2737 */ 2828 */
2738 if (!priv->neg_be_verbose++) { 2829 if (!priv->neg_be_verbose++) {
2739 pr_info("TLAN: Giving autonegotiation more time.\n"); 2830 pr_info("TLAN: Giving autonegotiation more time.\n");
2740 pr_info("TLAN: Please check that your adapter has\n"); 2831 pr_info("TLAN: Please check that your adapter has\n");
2741 pr_info("TLAN: been properly connected to a HUB or Switch.\n"); 2832 pr_info("TLAN: been properly connected to a HUB or Switch.\n");
2742 pr_info("TLAN: Trying to establish link in the background...\n"); 2833 pr_info("TLAN: Trying to establish link in the background...\n");
2743 } 2834 }
2744 TLan_SetTimer( dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN ); 2835 tlan_set_timer(dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN);
2745 return; 2836 return;
2746 } 2837 }
2747 2838
2748 printk( "TLAN: %s: Autonegotiation complete.\n", dev->name ); 2839 pr_info("TLAN: %s: Autonegotiation complete.\n", dev->name);
2749 TLan_MiiReadReg( dev, phy, MII_AN_ADV, &an_adv ); 2840 tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv);
2750 TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa ); 2841 tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa);
2751 mode = an_adv & an_lpa & 0x03E0; 2842 mode = an_adv & an_lpa & 0x03E0;
2752 if ( mode & 0x0100 ) { 2843 if (mode & 0x0100)
2753 priv->tlanFullDuplex = true; 2844 priv->tlan_full_duplex = true;
2754 } else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) { 2845 else if (!(mode & 0x0080) && (mode & 0x0040))
2755 priv->tlanFullDuplex = true; 2846 priv->tlan_full_duplex = true;
2756 } 2847
2757 2848 if ((!(mode & 0x0180)) &&
2758 if ( ( ! ( mode & 0x0180 ) ) && 2849 (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
2759 ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) && 2850 (priv->phy_num != 0)) {
2760 ( priv->phyNum != 0 ) ) { 2851 priv->phy_num = 0;
2761 priv->phyNum = 0; 2852 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
2762 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN; 2853 | TLAN_NET_CFG_PHY_EN;
2763 TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data ); 2854 tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
2764 TLan_SetTimer( dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN ); 2855 tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN);
2765 return; 2856 return;
2766 } 2857 }
2767 2858
2768 if ( priv->phyNum == 0 ) { 2859 if (priv->phy_num == 0) {
2769 if ( ( priv->duplex == TLAN_DUPLEX_FULL ) || 2860 if ((priv->duplex == TLAN_DUPLEX_FULL) ||
2770 ( an_adv & an_lpa & 0x0040 ) ) { 2861 (an_adv & an_lpa & 0x0040)) {
2771 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 2862 tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
2772 MII_GC_AUTOENB | MII_GC_DUPLEX ); 2863 MII_GC_AUTOENB | MII_GC_DUPLEX);
2773 pr_info("TLAN: Starting internal PHY with FULL-DUPLEX\n" ); 2864 pr_info("TLAN: Starting internal PHY with FULL-DUPLEX\n");
2774 } else { 2865 } else {
2775 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB ); 2866 tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
2776 pr_info( "TLAN: Starting internal PHY with HALF-DUPLEX\n" ); 2867 MII_GC_AUTOENB);
2868 pr_info("TLAN: Starting internal PHY with HALF-DUPLEX\n");
2777 } 2869 }
2778 } 2870 }
2779 2871
2780 /* Wait for 100 ms. No reason in particular. 2872 /* Wait for 100 ms. No reason in particular.
2781 */ 2873 */
2782 TLan_SetTimer( dev, (HZ/10), TLAN_TIMER_FINISH_RESET ); 2874 tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET);
2783 2875
2784} /* TLan_PhyFinishAutoNeg */ 2876}
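The line mode = an_adv & an_lpa & 0x03e0 keeps only the technology bits both ends advertised; the two tests that follow then select full duplex when 100BaseTx-FD is common, or when 10BaseT-FD is common and 100BaseTx-HD is not. A stand-alone sketch of that decision, assuming the standard advertisement-register bit layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* assumed standard ANAR/ANLPA technology bits (the 0x03e0 mask) */
#define ADV_10HALF	0x0020
#define ADV_10FULL	0x0040
#define ADV_100HALF	0x0080
#define ADV_100FULL	0x0100
#define ADV_100T4	0x0200

/* mirrors the duplex decision in tlan_phy_finish_auto_neg() */
static bool negotiated_full_duplex(uint16_t an_adv, uint16_t an_lpa)
{
	uint16_t mode = an_adv & an_lpa & 0x03e0;

	if (mode & ADV_100FULL)
		return true;
	/* no 100 Mb mode in common, but 10 full is available */
	return !(mode & ADV_100HALF) && (mode & ADV_10FULL);
}

int main(void)
{
	printf("%d\n", negotiated_full_duplex(0x01e1, 0x0061)); /* 1 */
	printf("%d\n", negotiated_full_duplex(0x01e1, 0x00a1)); /* 0 */
	return 0;
}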
2785 2877
2786#ifdef MONITOR 2878#ifdef MONITOR
2787 2879
2788 /********************************************************************* 2880/*********************************************************************
2789 * 2881 *
2790 * TLan_phyMonitor 2882 * tlan_phy_monitor
2791 * 2883 *
2792 * Returns: 2884 * Returns:
2793 * None 2885 * None
2794 * 2886 *
2795 * Params: 2887 * Params:
2796 * dev The device structure of this device. 2888 * dev The device structure of this device.
2797 * 2889 *
2798 * 2890 *
2799 * This function monitors PHY condition by reading the status 2891 * This function monitors PHY condition by reading the status
2800 * register via the MII bus. This can be used to give info 2892 * register via the MII bus. This can be used to give info
2801 * about link changes (up/down), and a possible switch to alternate 2893 * about link changes (up/down), and a possible switch to alternate
2802 * media. 2894 * media.
2803 * 2895 *
2804 * ******************************************************************/ 2896 *******************************************************************/
2805 2897
2806void TLan_PhyMonitor( struct net_device *dev ) 2898void tlan_phy_monitor(struct net_device *dev)
2807{ 2899{
2808 TLanPrivateInfo *priv = netdev_priv(dev); 2900 struct tlan_priv *priv = netdev_priv(dev);
2809 u16 phy; 2901 u16 phy;
2810 u16 phy_status; 2902 u16 phy_status;
2811 2903
2812 phy = priv->phy[priv->phyNum]; 2904 phy = priv->phy[priv->phy_num];
2813 2905
2814 /* Get PHY status register */ 2906 /* Get PHY status register */
2815 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &phy_status ); 2907 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status);
2816 2908
2817 /* Check if link has been lost */ 2909 /* Check if link has been lost */
2818 if (!(phy_status & MII_GS_LINK)) { 2910 if (!(phy_status & MII_GS_LINK)) {
2819 if (priv->link) { 2911 if (priv->link) {
2820 priv->link = 0; 2912 priv->link = 0;
2821 printk(KERN_DEBUG "TLAN: %s has lost link\n", dev->name); 2913 printk(KERN_DEBUG "TLAN: %s has lost link\n",
2822 netif_carrier_off(dev); 2914 dev->name);
2823 TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT ); 2915 netif_carrier_off(dev);
2824 return; 2916 tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
2917 return;
2825 } 2918 }
2826 } 2919 }
2827 2920
2828 /* Link re-established? */ 2921 /* Link re-established? */
2829 if ((phy_status & MII_GS_LINK) && !priv->link) { 2922 if ((phy_status & MII_GS_LINK) && !priv->link) {
2830 priv->link = 1; 2923 priv->link = 1;
2831 printk(KERN_DEBUG "TLAN: %s has reestablished link\n", dev->name); 2924 printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
2925 dev->name);
2832 netif_carrier_on(dev); 2926 netif_carrier_on(dev);
2833 } 2927 }
2834 2928
2835 /* Setup a new monitor */ 2929 /* Setup a new monitor */
2836 TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT ); 2930 tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
2837} 2931}
2838 2932
2839#endif /* MONITOR */ 2933#endif /* MONITOR */
@@ -2842,47 +2936,48 @@ void TLan_PhyMonitor( struct net_device *dev )
2842/***************************************************************************** 2936/*****************************************************************************
2843****************************************************************************** 2937******************************************************************************
2844 2938
2845 ThunderLAN Driver MII Routines 2939ThunderLAN driver MII routines
2846 2940
2847 These routines are based on the information in Chap. 2 of the 2941these routines are based on the information in chap. 2 of the
2848 "ThunderLAN Programmer's Guide", pp. 15-24. 2942"ThunderLAN Programmer's Guide", pp. 15-24.
2849 2943
2850****************************************************************************** 2944******************************************************************************
2851*****************************************************************************/ 2945*****************************************************************************/
2852 2946
2853 2947
2854 /*************************************************************** 2948/***************************************************************
2855 * TLan_MiiReadReg 2949 * tlan_mii_read_reg
2856 * 2950 *
2857 * Returns: 2951 * Returns:
2858 * false if ack received ok 2952 * false if ack received ok
2859 * true if no ack received or other error 2953 * true if no ack received or other error
2860 * 2954 *
2861 * Parms: 2955 * Parms:
2862 * dev The device structure containing 2956 * dev The device structure containing
2863 * The io address and interrupt count 2957 * The io address and interrupt count
2864 * for this device. 2958 * for this device.
2865 * phy The address of the PHY to be queried. 2959 * phy The address of the PHY to be queried.
2866 * reg The register whose contents are to be 2960 * reg The register whose contents are to be
2867 * retrieved. 2961 * retrieved.
2868 * val A pointer to a variable to store the 2962 * val A pointer to a variable to store the
2869 * retrieved value. 2963 * retrieved value.
2870 * 2964 *
2871 * This function uses the TLAN's MII bus to retrieve the contents 2965 * This function uses the TLAN's MII bus to retrieve the contents
2872 * of a given register on a PHY. It sends the appropriate info 2966 * of a given register on a PHY. It sends the appropriate info
2873 * and then reads the 16-bit register value from the MII bus via 2967 * and then reads the 16-bit register value from the MII bus via
2874 * the TLAN SIO register. 2968 * the TLAN SIO register.
2875 * 2969 *
2876 **************************************************************/ 2970 **************************************************************/
2877 2971
2878static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val ) 2972static bool
2973tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
2879{ 2974{
2880 u8 nack; 2975 u8 nack;
2881 u16 sio, tmp; 2976 u16 sio, tmp;
2882 u32 i; 2977 u32 i;
2883 bool err; 2978 bool err;
2884 int minten; 2979 int minten;
2885 TLanPrivateInfo *priv = netdev_priv(dev); 2980 struct tlan_priv *priv = netdev_priv(dev);
2886 unsigned long flags = 0; 2981 unsigned long flags = 0;
2887 2982
2888 err = false; 2983 err = false;
@@ -2892,48 +2987,48 @@ static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val
2892 if (!in_irq()) 2987 if (!in_irq())
2893 spin_lock_irqsave(&priv->lock, flags); 2988 spin_lock_irqsave(&priv->lock, flags);
2894 2989
2895 TLan_MiiSync(dev->base_addr); 2990 tlan_mii_sync(dev->base_addr);
2896 2991
2897 minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio ); 2992 minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
2898 if ( minten ) 2993 if (minten)
2899 TLan_ClearBit(TLAN_NET_SIO_MINTEN, sio); 2994 tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
2900 2995
2901 TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Start ( 01b ) */ 2996 tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */
2902 TLan_MiiSendData( dev->base_addr, 0x2, 2 ); /* Read ( 10b ) */ 2997 tlan_mii_send_data(dev->base_addr, 0x2, 2); /* read (10b) */
2903 TLan_MiiSendData( dev->base_addr, phy, 5 ); /* Device # */ 2998 tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */
2904 TLan_MiiSendData( dev->base_addr, reg, 5 ); /* Register # */ 2999 tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */
2905 3000
2906 3001
2907 TLan_ClearBit(TLAN_NET_SIO_MTXEN, sio); /* Change direction */ 3002 tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio); /* change direction */
2908 3003
2909 TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Clock Idle bit */ 3004 tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* clock idle bit */
2910 TLan_SetBit(TLAN_NET_SIO_MCLK, sio); 3005 tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
2911 TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Wait 300ns */ 3006 tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* wait 300ns */
2912 3007
2913 nack = TLan_GetBit(TLAN_NET_SIO_MDATA, sio); /* Check for ACK */ 3008 nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio); /* check for ACK */
2914 TLan_SetBit(TLAN_NET_SIO_MCLK, sio); /* Finish ACK */ 3009 tlan_set_bit(TLAN_NET_SIO_MCLK, sio); /* finish ACK */
2915 if (nack) { /* No ACK, so fake it */ 3010 if (nack) { /* no ACK, so fake it */
2916 for (i = 0; i < 16; i++) { 3011 for (i = 0; i < 16; i++) {
2917 TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); 3012 tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
2918 TLan_SetBit(TLAN_NET_SIO_MCLK, sio); 3013 tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
2919 } 3014 }
2920 tmp = 0xffff; 3015 tmp = 0xffff;
2921 err = true; 3016 err = true;
2922 } else { /* ACK, so read data */ 3017 } else { /* ACK, so read data */
2923 for (tmp = 0, i = 0x8000; i; i >>= 1) { 3018 for (tmp = 0, i = 0x8000; i; i >>= 1) {
2924 TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); 3019 tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
2925 if (TLan_GetBit(TLAN_NET_SIO_MDATA, sio)) 3020 if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio))
2926 tmp |= i; 3021 tmp |= i;
2927 TLan_SetBit(TLAN_NET_SIO_MCLK, sio); 3022 tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
2928 } 3023 }
2929 } 3024 }
2930 3025
2931 3026
2932 TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Idle cycle */ 3027 tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */
2933 TLan_SetBit(TLAN_NET_SIO_MCLK, sio); 3028 tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
2934 3029
2935 if ( minten ) 3030 if (minten)
2936 TLan_SetBit(TLAN_NET_SIO_MINTEN, sio); 3031 tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
2937 3032
2938 *val = tmp; 3033 *val = tmp;
2939 3034
@@ -2942,116 +3037,117 @@ static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val
2942 3037
2943 return err; 3038 return err;
2944 3039
2945} /* TLan_MiiReadReg */ 3040}
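What tlan_mii_read_reg() bit-bangs through NetSio is an ordinary clause-22 management frame: 32 sync bits (tlan_mii_sync()), a 01 start, a 10 read opcode, 5-bit PHY and register addresses, then a turnaround during which the PHY drives the ACK, followed by 16 data bits. A stand-alone model of the host-driven header, using the same MSB-first shifting as tlan_mii_send_data():

#include <stdio.h>
#include <stdint.h>

/* emit 'bits' bits of 'data', MSB first, like tlan_mii_send_data() */
static void emit(uint32_t data, unsigned bits)
{
	uint32_t i;

	if (bits == 0)
		return;
	for (i = 1u << (bits - 1); i; i >>= 1)
		putchar((data & i) ? '1' : '0');
}

int main(void)
{
	unsigned phy = 0x1f, reg = 0x01;	/* example addresses */

	emit(0x1, 2);	/* start (01b) */
	emit(0x2, 2);	/* read (10b) */
	emit(phy, 5);	/* PHY address */
	emit(reg, 5);	/* register address */
	putchar('\n');	/* turnaround + 16 data bits are PHY-driven */
	return 0;
}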
2946 3041
2947 3042
2948 3043
2949 3044
2950 /*************************************************************** 3045/***************************************************************
2951 * TLan_MiiSendData 3046 * tlan_mii_send_data
2952 * 3047 *
2953 * Returns: 3048 * Returns:
2954 * Nothing 3049 * Nothing
2955 * Parms: 3050 * Parms:
2956 * base_port The base IO port of the adapter in 3051 * base_port The base IO port of the adapter in
2957 * question. 3052 * question.
2958 * dev The address of the PHY to be queried. 3053 * dev The address of the PHY to be queried.
2959 * data The value to be placed on the MII bus. 3054 * data The value to be placed on the MII bus.
2960 * num_bits The number of bits in data that are to 3055 * num_bits The number of bits in data that are to
2961 * be placed on the MII bus. 3056 * be placed on the MII bus.
2962 * 3057 *
2963 * This function sends a sequence of bits on the MII 3058 * This function sends a sequence of bits on the MII
2964 * configuration bus. 3059 * configuration bus.
2965 * 3060 *
2966 **************************************************************/ 3061 **************************************************************/
2967 3062
2968static void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits ) 3063static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits)
2969{ 3064{
2970 u16 sio; 3065 u16 sio;
2971 u32 i; 3066 u32 i;
2972 3067
2973 if ( num_bits == 0 ) 3068 if (num_bits == 0)
2974 return; 3069 return;
2975 3070
2976 outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR ); 3071 outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
2977 sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO; 3072 sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
2978 TLan_SetBit( TLAN_NET_SIO_MTXEN, sio ); 3073 tlan_set_bit(TLAN_NET_SIO_MTXEN, sio);
2979 3074
2980 for ( i = ( 0x1 << ( num_bits - 1 ) ); i; i >>= 1 ) { 3075 for (i = (0x1 << (num_bits - 1)); i; i >>= 1) {
2981 TLan_ClearBit( TLAN_NET_SIO_MCLK, sio ); 3076 tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
2982 (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio ); 3077 (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
2983 if ( data & i ) 3078 if (data & i)
2984 TLan_SetBit( TLAN_NET_SIO_MDATA, sio ); 3079 tlan_set_bit(TLAN_NET_SIO_MDATA, sio);
2985 else 3080 else
2986 TLan_ClearBit( TLAN_NET_SIO_MDATA, sio ); 3081 tlan_clear_bit(TLAN_NET_SIO_MDATA, sio);
2987 TLan_SetBit( TLAN_NET_SIO_MCLK, sio ); 3082 tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
2988 (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio ); 3083 (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
2989 } 3084 }
2990 3085
2991} /* TLan_MiiSendData */ 3086}
2992 3087
2993 3088
2994 3089
2995 3090
2996 /*************************************************************** 3091/***************************************************************
2997 * TLan_MiiSync 3092 * tlan_mii_sync
2998 * 3093 *
2999 * Returns: 3094 * Returns:
3000 * Nothing 3095 * Nothing
3001 * Parms: 3096 * Parms:
3002 * base_port The base IO port of the adapter in 3097 * base_port The base IO port of the adapter in
3003 * question. 3098 * question.
3004 * 3099 *
3005 * This function syncs all PHYs in terms of the MII configuration 3100 * This function syncs all PHYs in terms of the MII configuration
3006 * bus. 3101 * bus.
3007 * 3102 *
3008 **************************************************************/ 3103 **************************************************************/
3009 3104
3010static void TLan_MiiSync( u16 base_port ) 3105static void tlan_mii_sync(u16 base_port)
3011{ 3106{
3012 int i; 3107 int i;
3013 u16 sio; 3108 u16 sio;
3014 3109
3015 outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR ); 3110 outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
3016 sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO; 3111 sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
3017 3112
3018 TLan_ClearBit( TLAN_NET_SIO_MTXEN, sio ); 3113 tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);
3019 for ( i = 0; i < 32; i++ ) { 3114 for (i = 0; i < 32; i++) {
3020 TLan_ClearBit( TLAN_NET_SIO_MCLK, sio ); 3115 tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
3021 TLan_SetBit( TLAN_NET_SIO_MCLK, sio ); 3116 tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
3022 } 3117 }
3023 3118
3024} /* TLan_MiiSync */ 3119}
3025 3120
3026 3121
3027 3122
3028 3123
3029 /*************************************************************** 3124/***************************************************************
3030 * TLan_MiiWriteReg 3125 * tlan_mii_write_reg
3031 * 3126 *
3032 * Returns: 3127 * Returns:
3033 * Nothing 3128 * Nothing
3034 * Parms: 3129 * Parms:
3035 * dev The device structure for the device 3130 * dev The device structure for the device
3036 * to write to. 3131 * to write to.
3037 * phy The address of the PHY to be written to. 3132 * phy The address of the PHY to be written to.
3038 * reg The register whose contents are to be 3133 * reg The register whose contents are to be
3039 * written. 3134 * written.
3040 * val The value to be written to the register. 3135 * val The value to be written to the register.
3041 * 3136 *
3042 * This function uses the TLAN's MII bus to write the contents of a 3137 * This function uses the TLAN's MII bus to write the contents of a
3043 * given register on a PHY. It sends the appropriate info and then 3138 * given register on a PHY. It sends the appropriate info and then
3044 * writes the 16-bit register value onto the MII configuration bus 3139 * writes the 16-bit register value onto the MII configuration bus
3045 * via the TLAN SIO register. 3140 * via the TLAN SIO register.
3046 * 3141 *
3047 **************************************************************/ 3142 **************************************************************/
3048 3143
3049static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val ) 3144static void
3145tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
3050{ 3146{
3051 u16 sio; 3147 u16 sio;
3052 int minten; 3148 int minten;
3053 unsigned long flags = 0; 3149 unsigned long flags = 0;
3054 TLanPrivateInfo *priv = netdev_priv(dev); 3150 struct tlan_priv *priv = netdev_priv(dev);
3055 3151
3056 outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR); 3152 outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
3057 sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO; 3153 sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
@@ -3059,30 +3155,30 @@ static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val
3059 if (!in_irq()) 3155 if (!in_irq())
3060 spin_lock_irqsave(&priv->lock, flags); 3156 spin_lock_irqsave(&priv->lock, flags);
3061 3157
3062 TLan_MiiSync( dev->base_addr ); 3158 tlan_mii_sync(dev->base_addr);
3063 3159
3064 minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio ); 3160 minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
3065 if ( minten ) 3161 if (minten)
3066 TLan_ClearBit( TLAN_NET_SIO_MINTEN, sio ); 3162 tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
3067 3163
3068 TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Start ( 01b ) */ 3164 tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */
3069 TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Write ( 01b ) */ 3165 tlan_mii_send_data(dev->base_addr, 0x1, 2); /* write (01b) */
3070 TLan_MiiSendData( dev->base_addr, phy, 5 ); /* Device # */ 3166 tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */
3071 TLan_MiiSendData( dev->base_addr, reg, 5 ); /* Register # */ 3167 tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */
3072 3168
3073 TLan_MiiSendData( dev->base_addr, 0x2, 2 ); /* Send ACK */ 3169 tlan_mii_send_data(dev->base_addr, 0x2, 2); /* send ACK */
3074 TLan_MiiSendData( dev->base_addr, val, 16 ); /* Send Data */ 3170 tlan_mii_send_data(dev->base_addr, val, 16); /* send data */
3075 3171
3076 TLan_ClearBit( TLAN_NET_SIO_MCLK, sio ); /* Idle cycle */ 3172 tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */
3077 TLan_SetBit( TLAN_NET_SIO_MCLK, sio ); 3173 tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
3078 3174
3079 if ( minten ) 3175 if (minten)
3080 TLan_SetBit( TLAN_NET_SIO_MINTEN, sio ); 3176 tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
3081 3177
3082 if (!in_irq()) 3178 if (!in_irq())
3083 spin_unlock_irqrestore(&priv->lock, flags); 3179 spin_unlock_irqrestore(&priv->lock, flags);
3084 3180
3085} /* TLan_MiiWriteReg */ 3181}
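For comparison with the read path, the write frame assembled above is host-driven from start to finish: sync, a 01 start, a 01 write opcode, the 5-bit PHY and register addresses, a 10 turnaround sent by the host (the "send ACK" step), and then all 16 data bits, so MDATA never has to change direction.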
3086 3182
3087 3183
3088 3184
@@ -3090,229 +3186,226 @@ static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val
3090/***************************************************************************** 3186/*****************************************************************************
3091****************************************************************************** 3187******************************************************************************
3092 3188
3093 ThunderLAN Driver Eeprom routines 3189 ThunderLAN driver EEPROM routines
3094 3190
3095 The Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A 3191 the Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A
3096 EEPROM. These functions are based on information in Microchip's 3192 EEPROM. these functions are based on information in Microchip's
3097 data sheet. I don't know how well these functions will work with 3193 data sheet. I don't know how well these functions will work with
3098 other EEPROMs. 3194 other EEPROMs.
3099 3195
3100****************************************************************************** 3196******************************************************************************
3101*****************************************************************************/ 3197*****************************************************************************/
3102 3198
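The 24C02A is addressed with an I2C-style two-wire sequence on the EDATA/ECLOK lines, and the three primitives that follow (start, send byte, receive byte) are enough to compose the part's standard random read. A sketch of that composition, mirroring what the driver's tlan_ee_read_byte() does later in the file (outside this hunk); 0xa0/0xa1 are the 24C02 device-select bytes for write and read, and TLAN_EEPROM_ACK/TLAN_EEPROM_STOP are the stop-flag values from tlan.h:

/* 24C02A random read built from the primitives below (sketch):
 *
 *	tlan_ee_send_start(io_base);
 *	tlan_ee_send_byte(io_base, 0xa0, TLAN_EEPROM_ACK);   device select, write
 *	tlan_ee_send_byte(io_base, addr, TLAN_EEPROM_ACK);   word address
 *	tlan_ee_send_start(io_base);                         repeated start
 *	tlan_ee_send_byte(io_base, 0xa1, TLAN_EEPROM_ACK);   device select, read
 *	tlan_ee_receive_byte(io_base, &val, TLAN_EEPROM_STOP);
 */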
3103 3199
3104 /*************************************************************** 3200/***************************************************************
3105 * TLan_EeSendStart 3201 * tlan_ee_send_start
3106 * 3202 *
3107 * Returns: 3203 * Returns:
3108 * Nothing 3204 * Nothing
3109 * Parms: 3205 * Parms:
3110 * io_base The IO port base address for the 3206 * io_base The IO port base address for the
3111 * TLAN device with the EEPROM to 3207 * TLAN device with the EEPROM to
3112 * use. 3208 * use.
3113 * 3209 *
3114 * This function sends a start cycle to an EEPROM attached 3210 * This function sends a start cycle to an EEPROM attached
3115 * to a TLAN chip. 3211 * to a TLAN chip.
3116 * 3212 *
3117 **************************************************************/ 3213 **************************************************************/
3118 3214
3119static void TLan_EeSendStart( u16 io_base ) 3215static void tlan_ee_send_start(u16 io_base)
3120{ 3216{
3121 u16 sio; 3217 u16 sio;
3122 3218
3123 outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR ); 3219 outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
3124 sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO; 3220 sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
3125 3221
3126 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3222 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3127 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); 3223 tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
3128 TLan_SetBit( TLAN_NET_SIO_ETXEN, sio ); 3224 tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
3129 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); 3225 tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
3130 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3226 tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
3131 3227
3132} /* TLan_EeSendStart */ 3228}
3133 3229
3134 3230
3135 3231
3136 3232
3137 /*************************************************************** 3233/***************************************************************
3138 * TLan_EeSendByte 3234 * tlan_ee_send_byte
3139 * 3235 *
3140 * Returns: 3236 * Returns:
3141 * If the correct ack was received, 0, otherwise 1 3237 * If the correct ack was received, 0, otherwise 1
3142 * Parms: io_base The IO port base address for the 3238 * Parms: io_base The IO port base address for the
3143 * TLAN device with the EEPROM to 3239 * TLAN device with the EEPROM to
3144 * use. 3240 * use.
3145 * data The 8 bits of information to 3241 * data The 8 bits of information to
3146 * send to the EEPROM. 3242 * send to the EEPROM.
3147 * stop If TLAN_EEPROM_STOP is passed, a 3243 * stop If TLAN_EEPROM_STOP is passed, a
3148 * stop cycle is sent after the 3244 * stop cycle is sent after the
3149	 *			byte is sent and the ack is 3245	 *			byte is sent and the ack is
3150 * read. 3246 * read.
3151 * 3247 *
3152 * This function sends a byte on the serial EEPROM line, 3248 * This function sends a byte on the serial EEPROM line,
3153 * driving the clock to send each bit. The function then 3249 * driving the clock to send each bit. The function then
3154 * reverses transmission direction and reads an acknowledge 3250 * reverses transmission direction and reads an acknowledge
3155 * bit. 3251 * bit.
3156 * 3252 *
3157 **************************************************************/ 3253 **************************************************************/
3158 3254
3159static int TLan_EeSendByte( u16 io_base, u8 data, int stop ) 3255static int tlan_ee_send_byte(u16 io_base, u8 data, int stop)
3160{ 3256{
3161 int err; 3257 int err;
3162 u8 place; 3258 u8 place;
3163 u16 sio; 3259 u16 sio;
3164 3260
3165 outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR ); 3261 outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
3166 sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO; 3262 sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
3167 3263
3168 /* Assume clock is low, tx is enabled; */ 3264 /* Assume clock is low, tx is enabled; */
3169 for ( place = 0x80; place != 0; place >>= 1 ) { 3265 for (place = 0x80; place != 0; place >>= 1) {
3170 if ( place & data ) 3266 if (place & data)
3171 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); 3267 tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
3172 else 3268 else
3173 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); 3269 tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
3174 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3270 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3175 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3271 tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
3176 } 3272 }
3177 TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio ); 3273 tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
3178 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3274 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3179 err = TLan_GetBit( TLAN_NET_SIO_EDATA, sio ); 3275 err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio);
3180 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3276 tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
3181 TLan_SetBit( TLAN_NET_SIO_ETXEN, sio ); 3277 tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
3182 3278
3183 if ( ( ! err ) && stop ) { 3279 if ((!err) && stop) {
3184 /* STOP, raise data while clock is high */ 3280 /* STOP, raise data while clock is high */
3185 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); 3281 tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
3186 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3282 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3187 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); 3283 tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
3188 } 3284 }
3189 3285
3190 return err; 3286 return err;
3191 3287
3192} /* TLan_EeSendByte */ 3288}
3193 3289
3194 3290
3195 3291
3196 3292
3197 /*************************************************************** 3293/***************************************************************
3198 * TLan_EeReceiveByte 3294 * tlan_ee_receive_byte
3199 * 3295 *
3200 * Returns: 3296 * Returns:
3201 * Nothing 3297 * Nothing
3202 * Parms: 3298 * Parms:
3203 * io_base The IO port base address for the 3299 * io_base The IO port base address for the
3204 * TLAN device with the EEPROM to 3300 * TLAN device with the EEPROM to
3205 * use. 3301 * use.
3206 * data An address to a char to hold the 3302 * data An address to a char to hold the
3207 * data sent from the EEPROM. 3303 * data sent from the EEPROM.
3208 * stop If TLAN_EEPROM_STOP is passed, a 3304 * stop If TLAN_EEPROM_STOP is passed, a
3209 * stop cycle is sent after the 3305 * stop cycle is sent after the
3210 * byte is received, and no ack is 3306 * byte is received, and no ack is
3211 * sent. 3307 * sent.
3212 * 3308 *
3213 * This function receives 8 bits of data from the EEPROM 3309 * This function receives 8 bits of data from the EEPROM
3214	 *	over the serial link.  It then sends an ack bit, or no 3310	 *	over the serial link. It then sends an ack bit, or no
3215 * ack and a stop bit. This function is used to retrieve 3311 * ack and a stop bit. This function is used to retrieve
3216 * data after the address of a byte in the EEPROM has been 3312 * data after the address of a byte in the EEPROM has been
3217 * sent. 3313 * sent.
3218 * 3314 *
3219 **************************************************************/ 3315 **************************************************************/
3220 3316
3221static void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop ) 3317static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop)
3222{ 3318{
3223 u8 place; 3319 u8 place;
3224 u16 sio; 3320 u16 sio;
3225 3321
3226 outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR ); 3322 outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
3227 sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO; 3323 sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
3228 *data = 0; 3324 *data = 0;
3229 3325
3230 /* Assume clock is low, tx is enabled; */ 3326 /* Assume clock is low, tx is enabled; */
3231 TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio ); 3327 tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
3232 for ( place = 0x80; place; place >>= 1 ) { 3328 for (place = 0x80; place; place >>= 1) {
3233 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3329 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3234 if ( TLan_GetBit( TLAN_NET_SIO_EDATA, sio ) ) 3330 if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio))
3235 *data |= place; 3331 *data |= place;
3236 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3332 tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
3237 } 3333 }
3238 3334
3239 TLan_SetBit( TLAN_NET_SIO_ETXEN, sio ); 3335 tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
3240 if ( ! stop ) { 3336 if (!stop) {
3241 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* Ack = 0 */ 3337 tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); /* ack = 0 */
3242 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3338 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3243 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3339 tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
3244 } else { 3340 } else {
3245 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); /* No ack = 1 (?) */ 3341 tlan_set_bit(TLAN_NET_SIO_EDATA, sio); /* no ack = 1 (?) */
3246 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3342 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3247 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3343 tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
3248 /* STOP, raise data while clock is high */ 3344 /* STOP, raise data while clock is high */
3249 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); 3345 tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
3250 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3346 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3251 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); 3347 tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
3252 } 3348 }
3253 3349
3254} /* TLan_EeReceiveByte */ 3350}
3255 3351
3256 3352
3257 3353
3258 3354
3259 /*************************************************************** 3355/***************************************************************
3260 * TLan_EeReadByte 3356 * tlan_ee_read_byte
3261 * 3357 *
3262 * Returns: 3358 * Returns:
3263 * No error = 0, else, the stage at which the error 3359 * No error = 0, else, the stage at which the error
3264 * occurred. 3360 * occurred.
3265 * Parms: 3361 * Parms:
3266 * io_base The IO port base address for the 3362 * io_base The IO port base address for the
3267 * TLAN device with the EEPROM to 3363 * TLAN device with the EEPROM to
3268 * use. 3364 * use.
3269 * ee_addr The address of the byte in the 3365 * ee_addr The address of the byte in the
3270 * EEPROM whose contents are to be 3366 * EEPROM whose contents are to be
3271 * retrieved. 3367 * retrieved.
3272 * data An address to a char to hold the 3368 * data An address to a char to hold the
3273 * data obtained from the EEPROM. 3369 * data obtained from the EEPROM.
3274 * 3370 *
3275	 *	This function reads a byte of information from a byte 3371	 *	This function reads a byte of information from a byte
3276 * cell in the EEPROM. 3372 * cell in the EEPROM.
3277 * 3373 *
3278 **************************************************************/ 3374 **************************************************************/
3279 3375
3280static int TLan_EeReadByte( struct net_device *dev, u8 ee_addr, u8 *data ) 3376static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data)
3281{ 3377{
3282 int err; 3378 int err;
3283 TLanPrivateInfo *priv = netdev_priv(dev); 3379 struct tlan_priv *priv = netdev_priv(dev);
3284 unsigned long flags = 0; 3380 unsigned long flags = 0;
3285 int ret=0; 3381 int ret = 0;
3286 3382
3287 spin_lock_irqsave(&priv->lock, flags); 3383 spin_lock_irqsave(&priv->lock, flags);
3288 3384
3289 TLan_EeSendStart( dev->base_addr ); 3385 tlan_ee_send_start(dev->base_addr);
3290 err = TLan_EeSendByte( dev->base_addr, 0xA0, TLAN_EEPROM_ACK ); 3386 err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK);
3291 if (err) 3387 if (err) {
3292 { 3388 ret = 1;
3293 ret=1;
3294 goto fail; 3389 goto fail;
3295 } 3390 }
3296 err = TLan_EeSendByte( dev->base_addr, ee_addr, TLAN_EEPROM_ACK ); 3391 err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK);
3297 if (err) 3392 if (err) {
3298 { 3393 ret = 2;
3299 ret=2;
3300 goto fail; 3394 goto fail;
3301 } 3395 }
3302 TLan_EeSendStart( dev->base_addr ); 3396 tlan_ee_send_start(dev->base_addr);
3303 err = TLan_EeSendByte( dev->base_addr, 0xA1, TLAN_EEPROM_ACK ); 3397 err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK);
3304 if (err) 3398 if (err) {
3305 { 3399 ret = 3;
3306 ret=3;
3307 goto fail; 3400 goto fail;
3308 } 3401 }
3309 TLan_EeReceiveByte( dev->base_addr, data, TLAN_EEPROM_STOP ); 3402 tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP);
3310fail: 3403fail:
3311 spin_unlock_irqrestore(&priv->lock, flags); 3404 spin_unlock_irqrestore(&priv->lock, flags);
3312 3405
3313 return ret; 3406 return ret;
3314 3407
3315} /* TLan_EeReadByte */ 3408}
3316 3409
3317 3410
3318 3411
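Taken together, the four routines above bit-bang the 24C02A's I2C-style
protocol through the TLAN NetSio register: 0xa0 addresses the EEPROM for a
write (latching the byte address), 0xa1 re-addresses it for a read, and the
non-zero return codes 1-3 from tlan_ee_read_byte() identify the stage whose
ack was missing. A minimal sketch of how they compose into a multi-byte read
(not part of the patch; the ee_offset parameter and helper name are
illustrative):

static int example_read_station_addr(struct net_device *dev, u8 ee_offset,
				     u8 *addr)
{
	int i, err;

	/* tlan_ee_read_byte() performs a complete random read per byte:
	 * start, 0xa0 + byte address, repeated start, 0xa1, data, stop. */
	for (i = 0; i < 6; i++) {
		err = tlan_ee_read_byte(dev, ee_offset + i, &addr[i]);
		if (err)
			return err;	/* stage (1-3) that failed to ack */
	}
	return 0;
}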
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index 3315ced774e2..5fc98a8e4889 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -20,8 +20,8 @@
20 ********************************************************************/ 20 ********************************************************************/
21 21
22 22
23#include <asm/io.h> 23#include <linux/io.h>
24#include <asm/types.h> 24#include <linux/types.h>
25#include <linux/netdevice.h> 25#include <linux/netdevice.h>
26 26
27 27
@@ -40,8 +40,11 @@
40#define TLAN_IGNORE 0 40#define TLAN_IGNORE 0
41#define TLAN_RECORD 1 41#define TLAN_RECORD 1
42 42
43#define TLAN_DBG(lvl, format, args...) \ 43#define TLAN_DBG(lvl, format, args...) \
44 do { if (debug&lvl) printk(KERN_DEBUG "TLAN: " format, ##args ); } while(0) 44 do { \
45 if (debug&lvl) \
46 printk(KERN_DEBUG "TLAN: " format, ##args); \
47 } while (0)
45 48
46#define TLAN_DEBUG_GNRL 0x0001 49#define TLAN_DEBUG_GNRL 0x0001
47#define TLAN_DEBUG_TX 0x0002 50#define TLAN_DEBUG_TX 0x0002
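The do { ... } while (0) wrapper added to TLAN_DBG is what lets the macro
expand to a single statement, so it stays safe in unbraced if/else arms. A
hedged usage sketch (the level and messages are illustrative):

	if (priv->phy_online)
		TLAN_DBG(TLAN_DEBUG_GNRL, "%s: PHY online\n", dev->name);
	else
		TLAN_DBG(TLAN_DEBUG_GNRL, "%s: PHY offline\n", dev->name);

With a bare { ... } block instead of do/while, the semicolon after the first
TLAN_DBG() call would terminate the if statement and orphan the else.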
@@ -50,7 +53,8 @@
50#define TLAN_DEBUG_PROBE 0x0010 53#define TLAN_DEBUG_PROBE 0x0010
51 54
52#define TX_TIMEOUT (10*HZ) /* We need time for auto-neg */ 55#define TX_TIMEOUT (10*HZ) /* We need time for auto-neg */
53#define MAX_TLAN_BOARDS 8 /* Max number of boards installed at a time */ 56#define MAX_TLAN_BOARDS 8 /* Max number of boards installed
57 at a time */
54 58
55 59
56 /***************************************************************** 60 /*****************************************************************
@@ -70,13 +74,13 @@
70#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014 74#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014
71#endif 75#endif
72 76
73typedef struct tlan_adapter_entry { 77struct tlan_adapter_entry {
74 u16 vendorId; 78 u16 vendor_id;
75 u16 deviceId; 79 u16 device_id;
76 char *deviceLabel; 80 char *device_label;
77 u32 flags; 81 u32 flags;
78 u16 addrOfs; 82 u16 addr_ofs;
79} TLanAdapterEntry; 83};
80 84
81#define TLAN_ADAPTER_NONE 0x00000000 85#define TLAN_ADAPTER_NONE 0x00000000
82#define TLAN_ADAPTER_UNMANAGED_PHY 0x00000001 86#define TLAN_ADAPTER_UNMANAGED_PHY 0x00000001
@@ -129,18 +133,18 @@ typedef struct tlan_adapter_entry {
129#define TLAN_CSTAT_DP_PR 0x0100 133#define TLAN_CSTAT_DP_PR 0x0100
130 134
131 135
132typedef struct tlan_buffer_ref_tag { 136struct tlan_buffer {
133 u32 count; 137 u32 count;
134 u32 address; 138 u32 address;
135} TLanBufferRef; 139};
136 140
137 141
138typedef struct tlan_list_tag { 142struct tlan_list {
139 u32 forward; 143 u32 forward;
140 u16 cStat; 144 u16 c_stat;
141 u16 frameSize; 145 u16 frame_size;
142 TLanBufferRef buffer[TLAN_BUFFERS_PER_LIST]; 146 struct tlan_buffer buffer[TLAN_BUFFERS_PER_LIST];
143} TLanList; 147};
144 148
145 149
146typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE]; 150typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
@@ -164,49 +168,49 @@ typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
164 * 168 *
165 ****************************************************************/ 169 ****************************************************************/
166 170
167typedef struct tlan_private_tag { 171struct tlan_priv {
168 struct net_device *nextDevice; 172 struct net_device *next_device;
169 struct pci_dev *pciDev; 173 struct pci_dev *pci_dev;
170 struct net_device *dev; 174 struct net_device *dev;
171 void *dmaStorage; 175 void *dma_storage;
172 dma_addr_t dmaStorageDMA; 176 dma_addr_t dma_storage_dma;
173 unsigned int dmaSize; 177 unsigned int dma_size;
174 u8 *padBuffer; 178 u8 *pad_buffer;
175 TLanList *rxList; 179 struct tlan_list *rx_list;
176 dma_addr_t rxListDMA; 180 dma_addr_t rx_list_dma;
177 u8 *rxBuffer; 181 u8 *rx_buffer;
178 dma_addr_t rxBufferDMA; 182 dma_addr_t rx_buffer_dma;
179 u32 rxHead; 183 u32 rx_head;
180 u32 rxTail; 184 u32 rx_tail;
181 u32 rxEocCount; 185 u32 rx_eoc_count;
182 TLanList *txList; 186 struct tlan_list *tx_list;
183 dma_addr_t txListDMA; 187 dma_addr_t tx_list_dma;
184 u8 *txBuffer; 188 u8 *tx_buffer;
185 dma_addr_t txBufferDMA; 189 dma_addr_t tx_buffer_dma;
186 u32 txHead; 190 u32 tx_head;
187 u32 txInProgress; 191 u32 tx_in_progress;
188 u32 txTail; 192 u32 tx_tail;
189 u32 txBusyCount; 193 u32 tx_busy_count;
190 u32 phyOnline; 194 u32 phy_online;
191 u32 timerSetAt; 195 u32 timer_set_at;
192 u32 timerType; 196 u32 timer_type;
193 struct timer_list timer; 197 struct timer_list timer;
194 struct board *adapter; 198 struct board *adapter;
195 u32 adapterRev; 199 u32 adapter_rev;
196 u32 aui; 200 u32 aui;
197 u32 debug; 201 u32 debug;
198 u32 duplex; 202 u32 duplex;
199 u32 phy[2]; 203 u32 phy[2];
200 u32 phyNum; 204 u32 phy_num;
201 u32 speed; 205 u32 speed;
202 u8 tlanRev; 206 u8 tlan_rev;
203 u8 tlanFullDuplex; 207 u8 tlan_full_duplex;
204 spinlock_t lock; 208 spinlock_t lock;
205 u8 link; 209 u8 link;
206 u8 is_eisa; 210 u8 is_eisa;
207 struct work_struct tlan_tqueue; 211 struct work_struct tlan_tqueue;
208 u8 neg_be_verbose; 212 u8 neg_be_verbose;
209} TLanPrivateInfo; 213};
210 214
211 215
212 216
@@ -247,7 +251,7 @@ typedef struct tlan_private_tag {
247 ****************************************************************/ 251 ****************************************************************/
248 252
249#define TLAN_HOST_CMD 0x00 253#define TLAN_HOST_CMD 0x00
250#define TLAN_HC_GO 0x80000000 254#define TLAN_HC_GO 0x80000000
251#define TLAN_HC_STOP 0x40000000 255#define TLAN_HC_STOP 0x40000000
252#define TLAN_HC_ACK 0x20000000 256#define TLAN_HC_ACK 0x20000000
253#define TLAN_HC_CS_MASK 0x1FE00000 257#define TLAN_HC_CS_MASK 0x1FE00000
@@ -283,7 +287,7 @@ typedef struct tlan_private_tag {
283#define TLAN_NET_CMD_TRFRAM 0x02 287#define TLAN_NET_CMD_TRFRAM 0x02
284#define TLAN_NET_CMD_TXPACE 0x01 288#define TLAN_NET_CMD_TXPACE 0x01
285#define TLAN_NET_SIO 0x01 289#define TLAN_NET_SIO 0x01
286#define TLAN_NET_SIO_MINTEN 0x80 290#define TLAN_NET_SIO_MINTEN 0x80
287#define TLAN_NET_SIO_ECLOK 0x40 291#define TLAN_NET_SIO_ECLOK 0x40
288#define TLAN_NET_SIO_ETXEN 0x20 292#define TLAN_NET_SIO_ETXEN 0x20
289#define TLAN_NET_SIO_EDATA 0x10 293#define TLAN_NET_SIO_EDATA 0x10
@@ -304,7 +308,7 @@ typedef struct tlan_private_tag {
304#define TLAN_NET_MASK_MASK4 0x10 308#define TLAN_NET_MASK_MASK4 0x10
305#define TLAN_NET_MASK_RSRVD 0x0F 309#define TLAN_NET_MASK_RSRVD 0x0F
306#define TLAN_NET_CONFIG 0x04 310#define TLAN_NET_CONFIG 0x04
307#define TLAN_NET_CFG_RCLK 0x8000 311#define TLAN_NET_CFG_RCLK 0x8000
308#define TLAN_NET_CFG_TCLK 0x4000 312#define TLAN_NET_CFG_TCLK 0x4000
309#define TLAN_NET_CFG_BIT 0x2000 313#define TLAN_NET_CFG_BIT 0x2000
310#define TLAN_NET_CFG_RXCRC 0x1000 314#define TLAN_NET_CFG_RXCRC 0x1000
@@ -372,7 +376,7 @@ typedef struct tlan_private_tag {
372/* Generic MII/PHY Registers */ 376/* Generic MII/PHY Registers */
373 377
374#define MII_GEN_CTL 0x00 378#define MII_GEN_CTL 0x00
375#define MII_GC_RESET 0x8000 379#define MII_GC_RESET 0x8000
376#define MII_GC_LOOPBK 0x4000 380#define MII_GC_LOOPBK 0x4000
377#define MII_GC_SPEEDSEL 0x2000 381#define MII_GC_SPEEDSEL 0x2000
378#define MII_GC_AUTOENB 0x1000 382#define MII_GC_AUTOENB 0x1000
@@ -397,9 +401,9 @@ typedef struct tlan_private_tag {
397#define MII_GS_EXTCAP 0x0001 401#define MII_GS_EXTCAP 0x0001
398#define MII_GEN_ID_HI 0x02 402#define MII_GEN_ID_HI 0x02
399#define MII_GEN_ID_LO 0x03 403#define MII_GEN_ID_LO 0x03
400#define MII_GIL_OUI 0xFC00 404#define MII_GIL_OUI 0xFC00
401#define MII_GIL_MODEL 0x03F0 405#define MII_GIL_MODEL 0x03F0
402#define MII_GIL_REVISION 0x000F 406#define MII_GIL_REVISION 0x000F
403#define MII_AN_ADV 0x04 407#define MII_AN_ADV 0x04
404#define MII_AN_LPA 0x05 408#define MII_AN_LPA 0x05
405#define MII_AN_EXP 0x06 409#define MII_AN_EXP 0x06
@@ -408,7 +412,7 @@ typedef struct tlan_private_tag {
408 412
409#define TLAN_TLPHY_ID 0x10 413#define TLAN_TLPHY_ID 0x10
410#define TLAN_TLPHY_CTL 0x11 414#define TLAN_TLPHY_CTL 0x11
411#define TLAN_TC_IGLINK 0x8000 415#define TLAN_TC_IGLINK 0x8000
412#define TLAN_TC_SWAPOL 0x4000 416#define TLAN_TC_SWAPOL 0x4000
413#define TLAN_TC_AUISEL 0x2000 417#define TLAN_TC_AUISEL 0x2000
414#define TLAN_TC_SQEEN 0x1000 418#define TLAN_TC_SQEEN 0x1000
@@ -435,41 +439,41 @@ typedef struct tlan_private_tag {
435#define LEVEL1_ID1 0x7810 439#define LEVEL1_ID1 0x7810
436#define LEVEL1_ID2 0x0000 440#define LEVEL1_ID2 0x0000
437 441
438#define CIRC_INC( a, b ) if ( ++a >= b ) a = 0 442#define CIRC_INC(a, b) if (++a >= b) a = 0
439 443
440/* Routines to access internal registers. */ 444/* Routines to access internal registers. */
441 445
442static inline u8 TLan_DioRead8(u16 base_addr, u16 internal_addr) 446static inline u8 tlan_dio_read8(u16 base_addr, u16 internal_addr)
443{ 447{
444 outw(internal_addr, base_addr + TLAN_DIO_ADR); 448 outw(internal_addr, base_addr + TLAN_DIO_ADR);
445 return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3)); 449 return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3));
446 450
447} /* TLan_DioRead8 */ 451}
448 452
449 453
450 454
451 455
452static inline u16 TLan_DioRead16(u16 base_addr, u16 internal_addr) 456static inline u16 tlan_dio_read16(u16 base_addr, u16 internal_addr)
453{ 457{
454 outw(internal_addr, base_addr + TLAN_DIO_ADR); 458 outw(internal_addr, base_addr + TLAN_DIO_ADR);
455 return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2)); 459 return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2));
456 460
457} /* TLan_DioRead16 */ 461}
458 462
459 463
460 464
461 465
462static inline u32 TLan_DioRead32(u16 base_addr, u16 internal_addr) 466static inline u32 tlan_dio_read32(u16 base_addr, u16 internal_addr)
463{ 467{
464 outw(internal_addr, base_addr + TLAN_DIO_ADR); 468 outw(internal_addr, base_addr + TLAN_DIO_ADR);
465 return inl(base_addr + TLAN_DIO_DATA); 469 return inl(base_addr + TLAN_DIO_DATA);
466 470
467} /* TLan_DioRead32 */ 471}
468 472
469 473
470 474
471 475
472static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data) 476static inline void tlan_dio_write8(u16 base_addr, u16 internal_addr, u8 data)
473{ 477{
474 outw(internal_addr, base_addr + TLAN_DIO_ADR); 478 outw(internal_addr, base_addr + TLAN_DIO_ADR);
475 outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3)); 479 outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3));
@@ -479,7 +483,7 @@ static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data)
479 483
480 484
481 485
482static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data) 486static inline void tlan_dio_write16(u16 base_addr, u16 internal_addr, u16 data)
483{ 487{
484 outw(internal_addr, base_addr + TLAN_DIO_ADR); 488 outw(internal_addr, base_addr + TLAN_DIO_ADR);
485 outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2)); 489 outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
@@ -489,16 +493,16 @@ static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data)
489 493
490 494
491 495
492static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data) 496static inline void tlan_dio_write32(u16 base_addr, u16 internal_addr, u32 data)
493{ 497{
494 outw(internal_addr, base_addr + TLAN_DIO_ADR); 498 outw(internal_addr, base_addr + TLAN_DIO_ADR);
495 outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2)); 499 outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
496 500
497} 501}
498 502
499#define TLan_ClearBit( bit, port ) outb_p(inb_p(port) & ~bit, port) 503#define tlan_clear_bit(bit, port) outb_p(inb_p(port) & ~bit, port)
500#define TLan_GetBit( bit, port ) ((int) (inb_p(port) & bit)) 504#define tlan_get_bit(bit, port) ((int) (inb_p(port) & bit))
501#define TLan_SetBit( bit, port ) outb_p(inb_p(port) | bit, port) 505#define tlan_set_bit(bit, port) outb_p(inb_p(port) | bit, port)
502 506
503/* 507/*
504 * given 6 bytes, view them as 8 6-bit numbers and return the XOR of those 508 * given 6 bytes, view them as 8 6-bit numbers and return the XOR of those
@@ -506,37 +510,37 @@ static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data)
506 * 510 *
507 * The original code was: 511 * The original code was:
508 * 512 *
509 * u32 xor( u32 a, u32 b ) {	return ( ( a && ! b ) || ( ! a && b ) ); } 513 * u32 xor(u32 a, u32 b) { return ((a && !b) || (!a && b)); }
510 * 514 *
511 * #define XOR8( a, b, c, d, e, f, g, h ) \ 515 * #define XOR8(a, b, c, d, e, f, g, h) \
512 *	xor( a, xor( b, xor( c, xor( d, xor( e, xor( f, xor( g, h ) ) ) ) ) ) ) 516 *	xor(a, xor(b, xor(c, xor(d, xor(e, xor(f, xor(g, h)))))))
513 * #define DA( a, bit ) ( ( (u8) a[bit/8] ) & ( (u8) ( 1 << bit%8 ) ) ) 517 * #define DA(a, bit) (((u8)a[bit/8]) & ((u8)(1 << bit%8)))
514 * 518 *
515 * hash = XOR8( DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24), 519 * hash = XOR8(DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
516 * DA(a,30), DA(a,36), DA(a,42) ); 520 * DA(a,30), DA(a,36), DA(a,42));
517 * hash |= XOR8( DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25), 521 * hash |= XOR8(DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
518 * DA(a,31), DA(a,37), DA(a,43) ) << 1; 522 * DA(a,31), DA(a,37), DA(a,43)) << 1;
519 * hash |= XOR8( DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26), 523 * hash |= XOR8(DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
520 * DA(a,32), DA(a,38), DA(a,44) ) << 2; 524 * DA(a,32), DA(a,38), DA(a,44)) << 2;
521 * hash |= XOR8( DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27), 525 * hash |= XOR8(DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
522 * DA(a,33), DA(a,39), DA(a,45) ) << 3; 526 * DA(a,33), DA(a,39), DA(a,45)) << 3;
523 * hash |= XOR8( DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28), 527 * hash |= XOR8(DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
524 * DA(a,34), DA(a,40), DA(a,46) ) << 4; 528 * DA(a,34), DA(a,40), DA(a,46)) << 4;
525 * hash |= XOR8( DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29), 529 * hash |= XOR8(DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
526 * DA(a,35), DA(a,41), DA(a,47) ) << 5; 530 * DA(a,35), DA(a,41), DA(a,47)) << 5;
527 * 531 *
528 */ 532 */
529static inline u32 TLan_HashFunc( const u8 *a ) 533static inline u32 tlan_hash_func(const u8 *a)
530{ 534{
531 u8 hash; 535 u8 hash;
532 536
533 hash = (a[0]^a[3]); /* & 077 */ 537 hash = (a[0]^a[3]); /* & 077 */
534 hash ^= ((a[0]^a[3])>>6); /* & 003 */ 538 hash ^= ((a[0]^a[3])>>6); /* & 003 */
535 hash ^= ((a[1]^a[4])<<2); /* & 074 */ 539 hash ^= ((a[1]^a[4])<<2); /* & 074 */
536 hash ^= ((a[1]^a[4])>>4); /* & 017 */ 540 hash ^= ((a[1]^a[4])>>4); /* & 017 */
537 hash ^= ((a[2]^a[5])<<4); /* & 060 */ 541 hash ^= ((a[2]^a[5])<<4); /* & 060 */
538 hash ^= ((a[2]^a[5])>>2); /* & 077 */ 542 hash ^= ((a[2]^a[5])>>2); /* & 077 */
539 543
540 return hash & 077; 544 return hash & 077;
541} 545}
542#endif 546#endif
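The shift-based tlan_hash_func() above is a folded form of the XOR8/DA
formulation quoted in the comment: XORing bytes 0^3, 1^4 and 2^5 combines
address bits that lie 24 positions apart (a multiple of 6), and the shifts
fold the 8-bit results onto the 6 hash positions. A small stand-alone
harness to compare the two (userspace C, an editor's sketch, not part of
the patch):

#include <stdint.h>
#include <stdio.h>

/* Reference implementation, taken from the comment above. */
static uint32_t xor1(uint32_t a, uint32_t b) { return (a && !b) || (!a && b); }
#define XOR8(a, b, c, d, e, f, g, h) \
	xor1(a, xor1(b, xor1(c, xor1(d, xor1(e, xor1(f, xor1(g, h)))))))
#define DA(a, bit) (((uint8_t)(a)[(bit)/8]) & ((uint8_t)(1 << (bit)%8)))

static uint32_t hash_ref(const uint8_t *a)
{
	uint32_t hash, i;

	/* hash bit i is the XOR of address bits i, i+6, ..., i+42 */
	for (hash = 0, i = 0; i < 6; i++)
		hash |= XOR8(DA(a, i), DA(a, i + 6), DA(a, i + 12),
			     DA(a, i + 18), DA(a, i + 24), DA(a, i + 30),
			     DA(a, i + 36), DA(a, i + 42)) << i;
	return hash;
}

static uint32_t hash_fast(const uint8_t *a)	/* as in tlan.h above */
{
	uint8_t hash;

	hash  = (a[0] ^ a[3]);
	hash ^= ((a[0] ^ a[3]) >> 6);
	hash ^= ((a[1] ^ a[4]) << 2);
	hash ^= ((a[1] ^ a[4]) >> 4);
	hash ^= ((a[2] ^ a[5]) << 4);
	hash ^= ((a[2] ^ a[5]) >> 2);
	return hash & 077;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x80, 0x5f, 0x12, 0x34, 0x56 };

	/* both print the same 6-bit value, in octal like the kernel mask */
	printf("fast=%02o ref=%02o\n", hash_fast(mac), hash_ref(mac));
	return 0;
}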
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b100bd50a0d7..55786a0efc41 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1142,7 +1142,7 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun,
1142 * privs required. */ 1142 * privs required. */
1143static int set_offload(struct net_device *dev, unsigned long arg) 1143static int set_offload(struct net_device *dev, unsigned long arg)
1144{ 1144{
1145 unsigned int old_features, features; 1145 u32 old_features, features;
1146 1146
1147 old_features = dev->features; 1147 old_features = dev->features;
1148 /* Unset features, set them as we chew on the arg. */ 1148 /* Unset features, set them as we chew on the arg. */
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index a3c46f6a15e7..7fa5ec2de942 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -123,12 +123,11 @@ static const int multicast_filter_limit = 32;
123#include <linux/in6.h> 123#include <linux/in6.h>
124#include <linux/dma-mapping.h> 124#include <linux/dma-mapping.h>
125#include <linux/firmware.h> 125#include <linux/firmware.h>
126#include <generated/utsrelease.h>
127 126
128#include "typhoon.h" 127#include "typhoon.h"
129 128
130MODULE_AUTHOR("David Dillow <dave@thedillows.org>"); 129MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
131MODULE_VERSION(UTS_RELEASE); 130MODULE_VERSION("1.0");
132MODULE_LICENSE("GPL"); 131MODULE_LICENSE("GPL");
133MODULE_FIRMWARE(FIRMWARE_NAME); 132MODULE_FIRMWARE(FIRMWARE_NAME);
134MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)"); 133MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index d776c4a8d3c1..7113168473cf 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * cdc_ncm.c 2 * cdc_ncm.c
3 * 3 *
4 * Copyright (C) ST-Ericsson 2010 4 * Copyright (C) ST-Ericsson 2010-2011
5 * Contact: Alexey Orishko <alexey.orishko@stericsson.com> 5 * Contact: Alexey Orishko <alexey.orishko@stericsson.com>
6 * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com> 6 * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
7 * 7 *
@@ -54,7 +54,7 @@
54#include <linux/usb/usbnet.h> 54#include <linux/usb/usbnet.h>
55#include <linux/usb/cdc.h> 55#include <linux/usb/cdc.h>
56 56
57#define DRIVER_VERSION "30-Nov-2010" 57#define DRIVER_VERSION "7-Feb-2011"
58 58
59/* CDC NCM subclass 3.2.1 */ 59/* CDC NCM subclass 3.2.1 */
60#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 60#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
@@ -77,6 +77,9 @@
77 */ 77 */
78#define CDC_NCM_DPT_DATAGRAMS_MAX 32 78#define CDC_NCM_DPT_DATAGRAMS_MAX 32
79 79
80/* Maximum number of IN datagrams in NTB */
81#define CDC_NCM_DPT_DATAGRAMS_IN_MAX 0 /* unlimited */
82
80/* Restart the timer, if amount of datagrams is less than given value */ 83/* Restart the timer, if amount of datagrams is less than given value */
81#define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3 84#define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3
82 85
@@ -85,11 +88,6 @@
85 (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \ 88 (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
86 (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16)) 89 (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
87 90
88struct connection_speed_change {
89 __le32 USBitRate; /* holds 3GPP downlink value, bits per second */
90 __le32 DSBitRate; /* holds 3GPP uplink value, bits per second */
91} __attribute__ ((packed));
92
93struct cdc_ncm_data { 91struct cdc_ncm_data {
94 struct usb_cdc_ncm_nth16 nth16; 92 struct usb_cdc_ncm_nth16 nth16;
95 struct usb_cdc_ncm_ndp16 ndp16; 93 struct usb_cdc_ncm_ndp16 ndp16;
@@ -198,10 +196,10 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
198{ 196{
199 struct usb_cdc_notification req; 197 struct usb_cdc_notification req;
200 u32 val; 198 u32 val;
201 __le16 max_datagram_size;
202 u8 flags; 199 u8 flags;
203 u8 iface_no; 200 u8 iface_no;
204 int err; 201 int err;
202 u16 ntb_fmt_supported;
205 203
206 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; 204 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
207 205
@@ -223,6 +221,9 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
223 ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder); 221 ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
224 ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor); 222 ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
225 ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment); 223 ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
224 /* devices prior to NCM Errata shall set this field to zero */
225 ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
226 ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
226 227
227 if (ctx->func_desc != NULL) 228 if (ctx->func_desc != NULL)
228 flags = ctx->func_desc->bmNetworkCapabilities; 229 flags = ctx->func_desc->bmNetworkCapabilities;
@@ -231,22 +232,58 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
231 232
232 pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u " 233 pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
233 "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u " 234 "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
234 "wNdpOutAlignment=%u flags=0x%x\n", 235 "wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
235 ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus, 236 ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
236 ctx->tx_ndp_modulus, flags); 237 ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags);
237 238
238 /* max count of tx datagrams without terminating NULL entry */ 239 /* max count of tx datagrams */
239 ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX; 240 if ((ctx->tx_max_datagrams == 0) ||
241 (ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX))
242 ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
240 243
241 /* verify maximum size of received NTB in bytes */ 244 /* verify maximum size of received NTB in bytes */
242 if ((ctx->rx_max < 245 if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) {
243 (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) || 246 pr_debug("Using min receive length=%d\n",
244 (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX)) { 247 USB_CDC_NCM_NTB_MIN_IN_SIZE);
248 ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE;
249 }
250
251 if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) {
245 pr_debug("Using default maximum receive length=%d\n", 252 pr_debug("Using default maximum receive length=%d\n",
246 CDC_NCM_NTB_MAX_SIZE_RX); 253 CDC_NCM_NTB_MAX_SIZE_RX);
247 ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX; 254 ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
248 } 255 }
249 256
257 /* inform device about NTB input size changes */
258 if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
259 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
260 USB_RECIP_INTERFACE;
261 req.bNotificationType = USB_CDC_SET_NTB_INPUT_SIZE;
262 req.wValue = 0;
263 req.wIndex = cpu_to_le16(iface_no);
264
265 if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) {
266 struct usb_cdc_ncm_ndp_input_size ndp_in_sz;
267
268 req.wLength = 8;
269 ndp_in_sz.dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
270 ndp_in_sz.wNtbInMaxDatagrams =
271 cpu_to_le16(CDC_NCM_DPT_DATAGRAMS_MAX);
272 ndp_in_sz.wReserved = 0;
273 err = cdc_ncm_do_request(ctx, &req, &ndp_in_sz, 0, NULL,
274 1000);
275 } else {
276 __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
277
278 req.wLength = 4;
279 err = cdc_ncm_do_request(ctx, &req, &dwNtbInMaxSize, 0,
280 NULL, 1000);
281 }
282
283 if (err)
284 pr_debug("Setting NTB Input Size failed\n");
285 }
286
250 /* verify maximum size of transmitted NTB in bytes */ 287 /* verify maximum size of transmitted NTB in bytes */
251 if ((ctx->tx_max < 288 if ((ctx->tx_max <
252 (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) || 289 (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
@@ -297,47 +334,84 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
297 /* additional configuration */ 334 /* additional configuration */
298 335
299 /* set CRC Mode */ 336 /* set CRC Mode */
300 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE; 337 if (flags & USB_CDC_NCM_NCAP_CRC_MODE) {
301 req.bNotificationType = USB_CDC_SET_CRC_MODE; 338 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
302 req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED); 339 USB_RECIP_INTERFACE;
303 req.wIndex = cpu_to_le16(iface_no); 340 req.bNotificationType = USB_CDC_SET_CRC_MODE;
304 req.wLength = 0; 341 req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
305 342 req.wIndex = cpu_to_le16(iface_no);
306 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); 343 req.wLength = 0;
307 if (err) 344
308 pr_debug("Setting CRC mode off failed\n"); 345 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
346 if (err)
347 pr_debug("Setting CRC mode off failed\n");
348 }
309 349
310 /* set NTB format */ 350 /* set NTB format, if both formats are supported */
311 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE; 351 if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
312 req.bNotificationType = USB_CDC_SET_NTB_FORMAT; 352 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
313 req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT); 353 USB_RECIP_INTERFACE;
314 req.wIndex = cpu_to_le16(iface_no); 354 req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
315 req.wLength = 0; 355 req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
356 req.wIndex = cpu_to_le16(iface_no);
357 req.wLength = 0;
358
359 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
360 if (err)
361 pr_debug("Setting NTB format to 16-bit failed\n");
362 }
316 363
317 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); 364 ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
318 if (err)
319 pr_debug("Setting NTB format to 16-bit failed\n");
320 365
321 /* set Max Datagram Size (MTU) */ 366 /* set Max Datagram Size (MTU) */
322 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE; 367 if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
323 req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE; 368 __le16 max_datagram_size;
324 req.wValue = 0; 369 u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
325 req.wIndex = cpu_to_le16(iface_no); 370
326 req.wLength = cpu_to_le16(2); 371 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN |
372 USB_RECIP_INTERFACE;
373 req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
374 req.wValue = 0;
375 req.wIndex = cpu_to_le16(iface_no);
376 req.wLength = cpu_to_le16(2);
377
378 err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL,
379 1000);
380 if (err) {
381 pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
382 CDC_NCM_MIN_DATAGRAM_SIZE);
383 } else {
384 ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
385 /* Check Eth descriptor value */
386 if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) {
387 if (ctx->max_datagram_size > eth_max_sz)
388 ctx->max_datagram_size = eth_max_sz;
389 } else {
390 if (ctx->max_datagram_size >
391 CDC_NCM_MAX_DATAGRAM_SIZE)
392 ctx->max_datagram_size =
393 CDC_NCM_MAX_DATAGRAM_SIZE;
394 }
327 395
328 err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, 1000); 396 if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
329 if (err) { 397 ctx->max_datagram_size =
330 pr_debug(" GET_MAX_DATAGRAM_SIZE failed, using size=%u\n", 398 CDC_NCM_MIN_DATAGRAM_SIZE;
331 CDC_NCM_MIN_DATAGRAM_SIZE); 399
332 /* use default */ 400 /* if value changed, update device */
333 ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE; 401 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
334 } else { 402 USB_RECIP_INTERFACE;
335 ctx->max_datagram_size = le16_to_cpu(max_datagram_size); 403 req.bNotificationType = USB_CDC_SET_MAX_DATAGRAM_SIZE;
404 req.wValue = 0;
405 req.wIndex = cpu_to_le16(iface_no);
406 req.wLength = 2;
407 max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
408
409 err = cdc_ncm_do_request(ctx, &req, &max_datagram_size,
410 0, NULL, 1000);
411 if (err)
412 pr_debug("SET_MAX_DATAGRAM_SIZE failed\n");
413 }
336 414
337 if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
338 ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
339 else if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
340 ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
341 } 415 }
342 416
343 if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN)) 417 if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN))
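The max-datagram negotiation above reduces to a clamp: the device-reported
value is bounded by the Ethernet descriptor's wMaxSegmentSize when that is
itself below the driver ceiling, by CDC_NCM_MAX_DATAGRAM_SIZE otherwise, and
is never allowed under CDC_NCM_MIN_DATAGRAM_SIZE. The same logic as a pure
helper (a sketch mirroring the patch, not code from it):

static u16 ncm_clamp_datagram_size(u16 reported, u16 eth_max_sz)
{
	u16 size = reported;

	if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) {
		/* the Ethernet descriptor is the tighter bound */
		if (size > eth_max_sz)
			size = eth_max_sz;
	} else if (size > CDC_NCM_MAX_DATAGRAM_SIZE) {
		size = CDC_NCM_MAX_DATAGRAM_SIZE;
	}

	if (size < CDC_NCM_MIN_DATAGRAM_SIZE)
		size = CDC_NCM_MIN_DATAGRAM_SIZE;

	return size;
}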
@@ -466,19 +540,13 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
466 540
467 ctx->ether_desc = 541 ctx->ether_desc =
468 (const struct usb_cdc_ether_desc *)buf; 542 (const struct usb_cdc_ether_desc *)buf;
469
470 dev->hard_mtu = 543 dev->hard_mtu =
471 le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); 544 le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
472 545
473 if (dev->hard_mtu < 546 if (dev->hard_mtu < CDC_NCM_MIN_DATAGRAM_SIZE)
474 (CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN)) 547 dev->hard_mtu = CDC_NCM_MIN_DATAGRAM_SIZE;
475 dev->hard_mtu = 548 else if (dev->hard_mtu > CDC_NCM_MAX_DATAGRAM_SIZE)
476 CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN; 549 dev->hard_mtu = CDC_NCM_MAX_DATAGRAM_SIZE;
477
478 else if (dev->hard_mtu >
479 (CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN))
480 dev->hard_mtu =
481 CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN;
482 break; 550 break;
483 551
484 case USB_CDC_NCM_TYPE: 552 case USB_CDC_NCM_TYPE:
@@ -628,13 +696,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
628 u32 offset; 696 u32 offset;
629 u32 last_offset; 697 u32 last_offset;
630 u16 n = 0; 698 u16 n = 0;
631 u8 timeout = 0; 699 u8 ready2send = 0;
632 700
633 /* if there is a remaining skb, it gets priority */ 701 /* if there is a remaining skb, it gets priority */
634 if (skb != NULL) 702 if (skb != NULL)
635 swap(skb, ctx->tx_rem_skb); 703 swap(skb, ctx->tx_rem_skb);
636 else 704 else
637 timeout = 1; 705 ready2send = 1;
638 706
639 /* 707 /*
640 * +----------------+ 708 * +----------------+
@@ -682,9 +750,10 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
682 750
683 for (; n < ctx->tx_max_datagrams; n++) { 751 for (; n < ctx->tx_max_datagrams; n++) {
684 /* check if end of transmit buffer is reached */ 752 /* check if end of transmit buffer is reached */
685 if (offset >= ctx->tx_max) 753 if (offset >= ctx->tx_max) {
754 ready2send = 1;
686 break; 755 break;
687 756 }
688 /* compute maximum buffer size */ 757 /* compute maximum buffer size */
689 rem = ctx->tx_max - offset; 758 rem = ctx->tx_max - offset;
690 759
@@ -711,9 +780,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
711 } 780 }
712 ctx->tx_rem_skb = skb; 781 ctx->tx_rem_skb = skb;
713 skb = NULL; 782 skb = NULL;
714 783 ready2send = 1;
715 /* loop one more time */
716 timeout = 1;
717 } 784 }
718 break; 785 break;
719 } 786 }
@@ -756,7 +823,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
756 ctx->tx_curr_last_offset = last_offset; 823 ctx->tx_curr_last_offset = last_offset;
757 goto exit_no_skb; 824 goto exit_no_skb;
758 825
759 } else if ((n < ctx->tx_max_datagrams) && (timeout == 0)) { 826 } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) {
760 /* wait for more frames */ 827 /* wait for more frames */
761 /* push variables */ 828 /* push variables */
762 ctx->tx_curr_skb = skb_out; 829 ctx->tx_curr_skb = skb_out;
@@ -813,7 +880,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
813 cpu_to_le16(sizeof(ctx->tx_ncm.nth16)); 880 cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
814 ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq); 881 ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
815 ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset); 882 ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
816 ctx->tx_ncm.nth16.wFpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16), 883 ctx->tx_ncm.nth16.wNdpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
817 ctx->tx_ndp_modulus); 884 ctx->tx_ndp_modulus);
818 885
819 memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16)); 886 memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
@@ -825,13 +892,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
825 rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) * 892 rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) *
826 sizeof(struct usb_cdc_ncm_dpe16)); 893 sizeof(struct usb_cdc_ncm_dpe16));
827 ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem); 894 ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
828 ctx->tx_ncm.ndp16.wNextFpIndex = 0; /* reserved */ 895 ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */
829 896
830 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex, 897 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex,
831 &(ctx->tx_ncm.ndp16), 898 &(ctx->tx_ncm.ndp16),
832 sizeof(ctx->tx_ncm.ndp16)); 899 sizeof(ctx->tx_ncm.ndp16));
833 900
834 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex + 901 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex +
835 sizeof(ctx->tx_ncm.ndp16), 902 sizeof(ctx->tx_ncm.ndp16),
836 &(ctx->tx_ncm.dpe16), 903 &(ctx->tx_ncm.dpe16),
837 (ctx->tx_curr_frame_num + 1) * 904 (ctx->tx_curr_frame_num + 1) *
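For orientation, the 16-bit NTB assembled above has this shape (field names
per the NCM errata naming this patch adopts; the NDP offset is the ALIGN()
of the NTH16 size to ctx->tx_ndp_modulus, as computed above):

	offset 0 ................ struct usb_cdc_ncm_nth16
	                          (dwSignature, wHeaderLength, wSequence,
	                           wBlockLength, wNdpIndex)
	offset nth16.wNdpIndex .. struct usb_cdc_ncm_ndp16
	                          (dwSignature, wLength, wNextNdpIndex = 0)
	                          followed by (tx_curr_frame_num + 1)
	                          struct usb_cdc_ncm_dpe16 entries, the
	                          last one zeroed as the terminator
	elsewhere in the NTB .... the datagrams themselves, at the
	                          offset/length pairs recorded in dpe16[]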
@@ -868,15 +935,19 @@ static void cdc_ncm_tx_timeout(unsigned long arg)
868 if (ctx->tx_timer_pending != 0) { 935 if (ctx->tx_timer_pending != 0) {
869 ctx->tx_timer_pending--; 936 ctx->tx_timer_pending--;
870 restart = 1; 937 restart = 1;
871 } else 938 } else {
872 restart = 0; 939 restart = 0;
940 }
873 941
874 spin_unlock(&ctx->mtx); 942 spin_unlock(&ctx->mtx);
875 943
876 if (restart) 944 if (restart) {
945 spin_lock(&ctx->mtx);
877 cdc_ncm_tx_timeout_start(ctx); 946 cdc_ncm_tx_timeout_start(ctx);
878 else if (ctx->netdev != NULL) 947 spin_unlock(&ctx->mtx);
948 } else if (ctx->netdev != NULL) {
879 usbnet_start_xmit(NULL, ctx->netdev); 949 usbnet_start_xmit(NULL, ctx->netdev);
950 }
880} 951}
881 952
882static struct sk_buff * 953static struct sk_buff *
@@ -900,7 +971,6 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
900 skb_out = cdc_ncm_fill_tx_frame(ctx, skb); 971 skb_out = cdc_ncm_fill_tx_frame(ctx, skb);
901 if (ctx->tx_curr_skb != NULL) 972 if (ctx->tx_curr_skb != NULL)
902 need_timer = 1; 973 need_timer = 1;
903 spin_unlock(&ctx->mtx);
904 974
905 /* Start timer, if there is a remaining skb */ 975 /* Start timer, if there is a remaining skb */
906 if (need_timer) 976 if (need_timer)
@@ -908,6 +978,8 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
908 978
909 if (skb_out) 979 if (skb_out)
910 dev->net->stats.tx_packets += ctx->tx_curr_frame_num; 980 dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
981
982 spin_unlock(&ctx->mtx);
911 return skb_out; 983 return skb_out;
912 984
913error: 985error:
@@ -956,7 +1028,7 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
956 goto error; 1028 goto error;
957 } 1029 }
958 1030
959 temp = le16_to_cpu(ctx->rx_ncm.nth16.wFpIndex); 1031 temp = le16_to_cpu(ctx->rx_ncm.nth16.wNdpIndex);
960 if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) { 1032 if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) {
961 pr_debug("invalid DPT16 index\n"); 1033 pr_debug("invalid DPT16 index\n");
962 goto error; 1034 goto error;
@@ -1020,8 +1092,8 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
1020 if (((offset + temp) > actlen) || 1092 if (((offset + temp) > actlen) ||
1021 (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) { 1093 (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) {
1022 pr_debug("invalid frame detected (ignored)" 1094 pr_debug("invalid frame detected (ignored)"
1023 "offset[%u]=%u, length=%u, skb=%p\n", 1095 "offset[%u]=%u, length=%u, skb=%p\n",
1024 x, offset, temp, skb_in); 1096 x, offset, temp, skb_in);
1025 if (!x) 1097 if (!x)
1026 goto error; 1098 goto error;
1027 break; 1099 break;
@@ -1043,10 +1115,10 @@ error:
1043 1115
1044static void 1116static void
1045cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx, 1117cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx,
1046 struct connection_speed_change *data) 1118 struct usb_cdc_speed_change *data)
1047{ 1119{
1048 uint32_t rx_speed = le32_to_cpu(data->USBitRate); 1120 uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
1049 uint32_t tx_speed = le32_to_cpu(data->DSBitRate); 1121 uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
1050 1122
1051 /* 1123 /*
1052 * Currently the USB-NET API does not support reporting the actual 1124 * Currently the USB-NET API does not support reporting the actual
@@ -1087,7 +1159,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
1087 /* test for split data in 8-byte chunks */ 1159 /* test for split data in 8-byte chunks */
1088 if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) { 1160 if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
1089 cdc_ncm_speed_change(ctx, 1161 cdc_ncm_speed_change(ctx,
1090 (struct connection_speed_change *)urb->transfer_buffer); 1162 (struct usb_cdc_speed_change *)urb->transfer_buffer);
1091 return; 1163 return;
1092 } 1164 }
1093 1165
@@ -1115,12 +1187,12 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
1115 break; 1187 break;
1116 1188
1117 case USB_CDC_NOTIFY_SPEED_CHANGE: 1189 case USB_CDC_NOTIFY_SPEED_CHANGE:
1118 if (urb->actual_length < 1190 if (urb->actual_length < (sizeof(*event) +
1119 (sizeof(*event) + sizeof(struct connection_speed_change))) 1191 sizeof(struct usb_cdc_speed_change)))
1120 set_bit(EVENT_STS_SPLIT, &dev->flags); 1192 set_bit(EVENT_STS_SPLIT, &dev->flags);
1121 else 1193 else
1122 cdc_ncm_speed_change(ctx, 1194 cdc_ncm_speed_change(ctx,
1123 (struct connection_speed_change *) &event[1]); 1195 (struct usb_cdc_speed_change *) &event[1]);
1124 break; 1196 break;
1125 1197
1126 default: 1198 default:
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index bed8fcedff49..6d83812603b6 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2628,15 +2628,15 @@ exit:
2628 2628
2629static void hso_free_tiomget(struct hso_serial *serial) 2629static void hso_free_tiomget(struct hso_serial *serial)
2630{ 2630{
2631 struct hso_tiocmget *tiocmget = serial->tiocmget; 2631 struct hso_tiocmget *tiocmget;
2632 if (!serial)
2633 return;
2634 tiocmget = serial->tiocmget;
2632 if (tiocmget) { 2635 if (tiocmget) {
2633 if (tiocmget->urb) { 2636 usb_free_urb(tiocmget->urb);
2634 usb_free_urb(tiocmget->urb); 2637 tiocmget->urb = NULL;
2635 tiocmget->urb = NULL;
2636 }
2637 serial->tiocmget = NULL; 2638 serial->tiocmget = NULL;
2638 kfree(tiocmget); 2639 kfree(tiocmget);
2639
2640 } 2640 }
2641} 2641}
2642 2642
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 5e98643a4a21..7dc84971f26f 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -406,6 +406,7 @@ static int kaweth_download_firmware(struct kaweth_device *kaweth,
406 406
407 if (fw->size > KAWETH_FIRMWARE_BUF_SIZE) { 407 if (fw->size > KAWETH_FIRMWARE_BUF_SIZE) {
408 err("Firmware too big: %zu", fw->size); 408 err("Firmware too big: %zu", fw->size);
409 release_firmware(fw);
409 return -ENOSPC; 410 return -ENOSPC;
410 } 411 }
411 data_len = fw->size; 412 data_len = fw->size;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ed9a41643ff4..95c41d56631c 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -931,8 +931,10 @@ fail_halt:
931 if (urb != NULL) { 931 if (urb != NULL) {
932 clear_bit (EVENT_RX_MEMORY, &dev->flags); 932 clear_bit (EVENT_RX_MEMORY, &dev->flags);
933 status = usb_autopm_get_interface(dev->intf); 933 status = usb_autopm_get_interface(dev->intf);
934 if (status < 0) 934 if (status < 0) {
935 usb_free_urb(urb);
935 goto fail_lowmem; 936 goto fail_lowmem;
937 }
936 if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK) 938 if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
937 resched = 0; 939 resched = 0;
938 usb_autopm_put_interface(dev->intf); 940 usb_autopm_put_interface(dev->intf);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index cc83fa71c3ff..105d7f0630cc 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -403,17 +403,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
403 if (tb[IFLA_ADDRESS] == NULL) 403 if (tb[IFLA_ADDRESS] == NULL)
404 random_ether_addr(dev->dev_addr); 404 random_ether_addr(dev->dev_addr);
405 405
406 if (tb[IFLA_IFNAME])
407 nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
408 else
409 snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
410
411 if (strchr(dev->name, '%')) {
412 err = dev_alloc_name(dev, dev->name);
413 if (err < 0)
414 goto err_alloc_name;
415 }
416
417 err = register_netdevice(dev); 406 err = register_netdevice(dev);
418 if (err < 0) 407 if (err < 0)
419 goto err_register_dev; 408 goto err_register_dev;
@@ -433,7 +422,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
433 422
434err_register_dev: 423err_register_dev:
435 /* nothing to do */ 424 /* nothing to do */
436err_alloc_name:
437err_configure_peer: 425err_configure_peer:
438 unregister_netdevice(peer); 426 unregister_netdevice(peer);
439 return err; 427 return err;
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 09cac704fdd7..0d6fec6b7d93 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2923,6 +2923,7 @@ static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
2923static int velocity_set_wol(struct velocity_info *vptr) 2923static int velocity_set_wol(struct velocity_info *vptr)
2924{ 2924{
2925 struct mac_regs __iomem *regs = vptr->mac_regs; 2925 struct mac_regs __iomem *regs = vptr->mac_regs;
2926 enum speed_opt spd_dpx = vptr->options.spd_dpx;
2926 static u8 buf[256]; 2927 static u8 buf[256];
2927 int i; 2928 int i;
2928 2929
@@ -2968,6 +2969,12 @@ static int velocity_set_wol(struct velocity_info *vptr)
2968 2969
2969 writew(0x0FFF, &regs->WOLSRClr); 2970 writew(0x0FFF, &regs->WOLSRClr);
2970 2971
2972 if (spd_dpx == SPD_DPX_1000_FULL)
2973 goto mac_done;
2974
2975 if (spd_dpx != SPD_DPX_AUTO)
2976 goto advertise_done;
2977
2971 if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) { 2978 if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
2972 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) 2979 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
2973 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs); 2980 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
@@ -2978,6 +2985,7 @@ static int velocity_set_wol(struct velocity_info *vptr)
2978 if (vptr->mii_status & VELOCITY_SPEED_1000) 2985 if (vptr->mii_status & VELOCITY_SPEED_1000)
2979 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs); 2986 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
2980 2987
2988advertise_done:
2981 BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR); 2989 BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
2982 2990
2983 { 2991 {
@@ -2987,6 +2995,7 @@ static int velocity_set_wol(struct velocity_info *vptr)
2987 writeb(GCR, &regs->CHIPGCR); 2995 writeb(GCR, &regs->CHIPGCR);
2988 } 2996 }
2989 2997
2998mac_done:
2990 BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR); 2999 BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
2991 /* Turn on SWPTAG just before entering power mode */ 3000 /* Turn on SWPTAG just before entering power mode */
2992 BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW); 3001 BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index aa2e69b9ff61..d7227539484e 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -361,7 +361,7 @@ enum velocity_owner {
361#define MAC_REG_CHIPGSR 0x9C 361#define MAC_REG_CHIPGSR 0x9C
362#define MAC_REG_TESTCFG 0x9D 362#define MAC_REG_TESTCFG 0x9D
363#define MAC_REG_DEBUG 0x9E 363#define MAC_REG_DEBUG 0x9E
364#define MAC_REG_CHIPGCR 0x9F 364#define MAC_REG_CHIPGCR 0x9F /* Chip Operation and Diagnostic Control */
365#define MAC_REG_WOLCR0_SET 0xA0 365#define MAC_REG_WOLCR0_SET 0xA0
366#define MAC_REG_WOLCR1_SET 0xA1 366#define MAC_REG_WOLCR1_SET 0xA1
367#define MAC_REG_PWCFG_SET 0xA2 367#define MAC_REG_PWCFG_SET 0xA2
@@ -848,10 +848,10 @@ enum velocity_owner {
848 * Bits in CHIPGCR register 848 * Bits in CHIPGCR register
849 */ 849 */
850 850
851#define CHIPGCR_FCGMII 0x80 /* enable GMII mode */ 851#define CHIPGCR_FCGMII 0x80 /* force GMII (else MII only) */
852#define CHIPGCR_FCFDX 0x40 852#define CHIPGCR_FCFDX 0x40 /* force full duplex */
853#define CHIPGCR_FCRESV 0x20 853#define CHIPGCR_FCRESV 0x20
854#define CHIPGCR_FCMODE 0x10 854#define CHIPGCR_FCMODE 0x10 /* enable MAC forced mode */
855#define CHIPGCR_LPSOPT 0x08 855#define CHIPGCR_LPSOPT 0x08
856#define CHIPGCR_TM1US 0x04 856#define CHIPGCR_TM1US 0x04
857#define CHIPGCR_TM0US 0x02 857#define CHIPGCR_TM0US 0x02
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 90a23e410d1b..82dba5aaf423 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -446,6 +446,20 @@ static void skb_recv_done(struct virtqueue *rvq)
446 } 446 }
447} 447}
448 448
449static void virtnet_napi_enable(struct virtnet_info *vi)
450{
451 napi_enable(&vi->napi);
452
453 /* If all buffers were filled by other side before we napi_enabled, we
454 * won't get another interrupt, so process any outstanding packets
455	 * now. virtnet_poll wants to re-enable the queue, so we disable here.
456 * We synchronize against interrupts via NAPI_STATE_SCHED */
457 if (napi_schedule_prep(&vi->napi)) {
458 virtqueue_disable_cb(vi->rvq);
459 __napi_schedule(&vi->napi);
460 }
461}
462
449static void refill_work(struct work_struct *work) 463static void refill_work(struct work_struct *work)
450{ 464{
451 struct virtnet_info *vi; 465 struct virtnet_info *vi;
@@ -454,7 +468,7 @@ static void refill_work(struct work_struct *work)
454 vi = container_of(work, struct virtnet_info, refill.work); 468 vi = container_of(work, struct virtnet_info, refill.work);
455 napi_disable(&vi->napi); 469 napi_disable(&vi->napi);
456 still_empty = !try_fill_recv(vi, GFP_KERNEL); 470 still_empty = !try_fill_recv(vi, GFP_KERNEL);
457 napi_enable(&vi->napi); 471 virtnet_napi_enable(vi);
458 472
459 /* In theory, this can happen: if we don't get any buffers in 473 /* In theory, this can happen: if we don't get any buffers in
460 * we will *never* try to fill again. */ 474 * we will *never* try to fill again. */
@@ -638,16 +652,7 @@ static int virtnet_open(struct net_device *dev)
638{ 652{
639 struct virtnet_info *vi = netdev_priv(dev); 653 struct virtnet_info *vi = netdev_priv(dev);
640 654
641 napi_enable(&vi->napi); 655 virtnet_napi_enable(vi);
642
643 /* If all buffers were filled by other side before we napi_enabled, we
644 * won't get another interrupt, so process any outstanding packets
645	 * now. virtnet_poll wants to re-enable the queue, so we disable here.
646 * We synchronize against interrupts via NAPI_STATE_SCHED */
647 if (napi_schedule_prep(&vi->napi)) {
648 virtqueue_disable_cb(vi->rvq);
649 __napi_schedule(&vi->napi);
650 }
651 return 0; 656 return 0;
652} 657}
653 658
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d143e8b72b5b..cc14b4a75048 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -48,6 +48,9 @@ static atomic_t devices_found;
48static int enable_mq = 1; 48static int enable_mq = 1;
49static int irq_share_mode; 49static int irq_share_mode;
50 50
51static void
52vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
53
51/* 54/*
52 * Enable/Disable the given intr 55 * Enable/Disable the given intr
53 */ 56 */
@@ -139,9 +142,13 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
 {
     u32 ret;
     int i;
+    unsigned long flags;
 
+    spin_lock_irqsave(&adapter->cmd_lock, flags);
     VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
     ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+    spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
     adapter->link_speed = ret >> 16;
     if (ret & 1) { /* Link is up. */
         printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
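VMXNET3_REG_CMD is a single shared doorbell register: the driver writes a command code and immediately reads the same register back for the result, so two contexts interleaving their write/read pairs would each collect the other's answer. Every command issued in this patch is therefore bracketed by the new adapter->cmd_lock. A sketch of the discipline, with issue_cmd() as a hypothetical helper name:

    /* Hypothetical wrapper showing the cmd_lock discipline; the lock
     * serializes the write/read pair on the shared CMD register. */
    static u32 issue_cmd(struct vmxnet3_adapter *adapter, u32 cmd)
    {
        unsigned long flags;
        u32 ret;

        spin_lock_irqsave(&adapter->cmd_lock, flags);
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, cmd);
        ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
        return ret;
    }

One path (vmxnet3_process_events, next hunk) takes plain spin_lock() rather than the irqsave variant, presumably because it already runs from the interrupt path.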
@@ -183,8 +190,10 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
 
     /* Check if there is an error on xmit/recv queues */
     if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
+        spin_lock(&adapter->cmd_lock);
         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                VMXNET3_CMD_GET_QUEUE_STATUS);
+        spin_unlock(&adapter->cmd_lock);
 
         for (i = 0; i < adapter->num_tx_queues; i++)
             if (adapter->tqd_start[i].status.stopped)
@@ -804,30 +813,25 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
                 skb_transport_header(skb))->doff * 4;
         ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
     } else {
-        unsigned int pull_size;
-
         if (skb->ip_summed == CHECKSUM_PARTIAL) {
             ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
 
             if (ctx->ipv4) {
                 struct iphdr *iph = (struct iphdr *)
                     skb_network_header(skb);
-                if (iph->protocol == IPPROTO_TCP) {
-                    pull_size = ctx->eth_ip_hdr_size +
-                        sizeof(struct tcphdr);
-
-                    if (unlikely(!pskb_may_pull(skb,
-                                    pull_size))) {
-                        goto err;
-                    }
+                if (iph->protocol == IPPROTO_TCP)
                     ctx->l4_hdr_size = ((struct tcphdr *)
                         skb_transport_header(skb))->doff * 4;
-                } else if (iph->protocol == IPPROTO_UDP) {
+                else if (iph->protocol == IPPROTO_UDP)
+                    /*
+                     * Use tcp header size so that bytes to
+                     * be copied are more than required by
+                     * the device.
+                     */
                     ctx->l4_hdr_size =
-                        sizeof(struct udphdr);
-                } else {
+                        sizeof(struct tcphdr);
+                else
                     ctx->l4_hdr_size = 0;
-                }
             } else {
                 /* for simplicity, don't copy L4 headers */
                 ctx->l4_hdr_size = 0;
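With the pskb_may_pull() call gone, the CHECKSUM_PARTIAL path no longer forces the L4 header into the linear area before sizing it; instead it sizes the copy generously, charging sizeof(struct tcphdr) even for UDP so the copied bytes always cover what the device wants to parse. A standalone sketch of the sizing rule (l4_proto and tcp_doff are assumed inputs; the real code reads them from the skb):

    #include <linux/in.h>
    #include <linux/tcp.h>

    /* Bytes of header to copy for the device, per the hunk above. */
    static unsigned int hdr_copy_size(unsigned int eth_ip_hdr_size,
                                      u8 l4_proto, unsigned int tcp_doff)
    {
        unsigned int l4_hdr_size;

        if (l4_proto == IPPROTO_TCP)
            l4_hdr_size = tcp_doff * 4;          /* actual TCP hdr length */
        else if (l4_proto == IPPROTO_UDP)
            l4_hdr_size = sizeof(struct tcphdr); /* deliberate over-copy */
        else
            l4_hdr_size = 0;

        return eth_ip_hdr_size + l4_hdr_size;
    }

Copying a few extra bytes for UDP is harmless; copying too few would leave the device parsing headers it cannot see.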
@@ -1859,18 +1863,14 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
     struct vmxnet3_adapter *adapter = netdev_priv(netdev);
     struct Vmxnet3_DriverShared *shared = adapter->shared;
     u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+    unsigned long flags;
 
     if (grp) {
         /* add vlan rx stripping. */
         if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
             int i;
-            struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
             adapter->vlan_grp = grp;
 
-            /* update FEATURES to device */
-            devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
-            VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
-                                   VMXNET3_CMD_UPDATE_FEATURE);
             /*
              * Clear entire vfTable; then enable untagged pkts.
              * Note: setting one entry in vfTable to non-zero turns
@@ -1880,8 +1880,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
                 vfTable[i] = 0;
 
             VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
+            spin_lock_irqsave(&adapter->cmd_lock, flags);
             VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                    VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+            spin_unlock_irqrestore(&adapter->cmd_lock, flags);
         } else {
             printk(KERN_ERR "%s: vlan_rx_register when device has "
                    "no NETIF_F_HW_VLAN_RX\n", netdev->name);
@@ -1900,13 +1902,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
                  */
                 vfTable[i] = 0;
             }
+            spin_lock_irqsave(&adapter->cmd_lock, flags);
             VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                    VMXNET3_CMD_UPDATE_VLAN_FILTERS);
-
-            /* update FEATURES to device */
-            devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
-            VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
-                                   VMXNET3_CMD_UPDATE_FEATURE);
+            spin_unlock_irqrestore(&adapter->cmd_lock, flags);
         }
     }
 }
@@ -1939,10 +1938,13 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
     struct vmxnet3_adapter *adapter = netdev_priv(netdev);
     u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+    unsigned long flags;
 
     VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
+    spin_lock_irqsave(&adapter->cmd_lock, flags);
     VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                            VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+    spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 }
 
 
@@ -1951,10 +1953,13 @@ vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
     struct vmxnet3_adapter *adapter = netdev_priv(netdev);
     u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+    unsigned long flags;
 
     VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
+    spin_lock_irqsave(&adapter->cmd_lock, flags);
     VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                            VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+    spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 }
 
 
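vfTable is the device's 4096-bit VLAN filter, laid out as an array of u32 words, so a VLAN id maps to word vid >> 5 and bit vid & 31. A plausible expansion of the two helpers (the authoritative macros live in vmxnet3_int.h):

    /* Plausible expansion of the vfTable helpers used above. */
    static inline void set_vftable_entry(u32 *vfTable, u16 vid)
    {
        vfTable[vid >> 5] |= 1U << (vid & 31);
    }

    static inline void clear_vftable_entry(u32 *vfTable, u16 vid)
    {
        vfTable[vid >> 5] &= ~(1U << (vid & 31));
    }

Flipping a bit is just a memory write into the shared area; the VMXNET3_CMD_UPDATE_VLAN_FILTERS doorbell afterwards is what tells the device to re-read the table, which is why only the doorbell needs cmd_lock.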
@@ -1985,6 +1990,7 @@ static void
 vmxnet3_set_mc(struct net_device *netdev)
 {
     struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+    unsigned long flags;
     struct Vmxnet3_RxFilterConf *rxConf =
         &adapter->shared->devRead.rxFilterConf;
     u8 *new_table = NULL;
@@ -2020,6 +2026,7 @@ vmxnet3_set_mc(struct net_device *netdev)
         rxConf->mfTablePA = 0;
     }
 
+    spin_lock_irqsave(&adapter->cmd_lock, flags);
     if (new_mode != rxConf->rxMode) {
         rxConf->rxMode = cpu_to_le32(new_mode);
         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -2028,6 +2035,7 @@ vmxnet3_set_mc(struct net_device *netdev)
 
     VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                            VMXNET3_CMD_UPDATE_MAC_FILTERS);
+    spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
     kfree(new_table);
 }
@@ -2080,10 +2088,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
         devRead->misc.uptFeatures |= UPT1_F_LRO;
         devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
     }
-    if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) &&
-            adapter->vlan_grp) {
+    if (adapter->netdev->features & NETIF_F_HW_VLAN_RX)
         devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
-    }
 
     devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
     devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
@@ -2168,6 +2174,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
     /* rx filter settings */
     devRead->rxFilterConf.rxMode = 0;
     vmxnet3_restore_vlan(adapter);
+    vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
+
     /* the rest are already zeroed */
 }
 
@@ -2177,6 +2185,7 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
 {
     int err, i;
     u32 ret;
+    unsigned long flags;
 
     dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
         " ring sizes %u %u %u\n", adapter->netdev->name,
@@ -2206,9 +2215,11 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
                    adapter->shared_pa));
     VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
                    adapter->shared_pa));
+    spin_lock_irqsave(&adapter->cmd_lock, flags);
     VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                            VMXNET3_CMD_ACTIVATE_DEV);
     ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+    spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
     if (ret != 0) {
         printk(KERN_ERR "Failed to activate dev %s: error %u\n",
@@ -2255,7 +2266,10 @@ rq_err:
 void
 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
 {
+    unsigned long flags;
+    spin_lock_irqsave(&adapter->cmd_lock, flags);
     VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
+    spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 }
 
 
@@ -2263,12 +2277,15 @@ int
 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
 {
     int i;
+    unsigned long flags;
     if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
         return 0;
 
 
+    spin_lock_irqsave(&adapter->cmd_lock, flags);
     VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                            VMXNET3_CMD_QUIESCE_DEV);
+    spin_unlock_irqrestore(&adapter->cmd_lock, flags);
     vmxnet3_disable_all_intrs(adapter);
 
     for (i = 0; i < adapter->num_rx_queues; i++)
@@ -2426,7 +2443,7 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
     sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
     ring0_size = adapter->rx_queue[0].rx_ring[0].size;
     ring0_size = (ring0_size + sz - 1) / sz * sz;
-    ring0_size = min_t(u32, rq->rx_ring[0].size, VMXNET3_RX_RING_MAX_SIZE /
+    ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
                sz * sz);
     ring1_size = adapter->rx_queue[0].rx_ring[1].size;
     comp_size = ring0_size + ring1_size;
@@ -2695,7 +2712,7 @@ vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
             break;
         } else {
             /* If fails to enable required number of MSI-x vectors
-             * try enabling 3 of them. One each for rx, tx and event
+             * try enabling minimum number of vectors required.
              */
             vectors = vector_threshold;
             printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
@@ -2718,9 +2735,11 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
     u32 cfg;
 
     /* intr settings */
+    spin_lock(&adapter->cmd_lock);
     VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                            VMXNET3_CMD_GET_CONF_INTR);
     cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+    spin_unlock(&adapter->cmd_lock);
     adapter->intr.type = cfg & 0x3;
     adapter->intr.mask_mode = (cfg >> 2) & 0x3;
 
@@ -2755,7 +2774,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
      */
     if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
         if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
-            || adapter->num_rx_queues != 2) {
+            || adapter->num_rx_queues != 1) {
             adapter->share_intr = VMXNET3_INTR_TXSHARE;
             printk(KERN_ERR "Number of rx queues : 1\n");
             adapter->num_rx_queues = 1;
@@ -2905,6 +2924,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
     adapter->netdev = netdev;
     adapter->pdev = pdev;
 
+    spin_lock_init(&adapter->cmd_lock);
     adapter->shared = pci_alloc_consistent(adapter->pdev,
               sizeof(struct Vmxnet3_DriverShared),
               &adapter->shared_pa);
@@ -3108,11 +3128,15 @@ vmxnet3_suspend(struct device *device)
     u8 *arpreq;
     struct in_device *in_dev;
     struct in_ifaddr *ifa;
+    unsigned long flags;
     int i = 0;
 
     if (!netif_running(netdev))
         return 0;
 
+    for (i = 0; i < adapter->num_rx_queues; i++)
+        napi_disable(&adapter->rx_queue[i].napi);
+
     vmxnet3_disable_all_intrs(adapter);
     vmxnet3_free_irqs(adapter);
     vmxnet3_free_intr_resources(adapter);
@@ -3188,8 +3212,10 @@ skip_arp:
     adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
                                   pmConf));
 
+    spin_lock_irqsave(&adapter->cmd_lock, flags);
     VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                            VMXNET3_CMD_UPDATE_PMCFG);
+    spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
     pci_save_state(pdev);
     pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
@@ -3204,7 +3230,8 @@ skip_arp:
 static int
 vmxnet3_resume(struct device *device)
 {
-    int err;
+    int err, i = 0;
+    unsigned long flags;
     struct pci_dev *pdev = to_pci_dev(device);
     struct net_device *netdev = pci_get_drvdata(pdev);
     struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -3232,10 +3259,14 @@ vmxnet3_resume(struct device *device)
 
     pci_enable_wake(pdev, PCI_D0, 0);
 
+    spin_lock_irqsave(&adapter->cmd_lock, flags);
     VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                            VMXNET3_CMD_UPDATE_PMCFG);
+    spin_unlock_irqrestore(&adapter->cmd_lock, flags);
     vmxnet3_alloc_intr_resources(adapter);
     vmxnet3_request_irqs(adapter);
+    for (i = 0; i < adapter->num_rx_queues; i++)
+        napi_enable(&adapter->rx_queue[i].napi);
     vmxnet3_enable_all_intrs(adapter);
 
     return 0;
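The suspend path now parks every rx queue's NAPI context before tearing down interrupts, and resume brings NAPI back only after IRQs are requested again, so no poll loop can run against a quiesced device. The ordering, condensed (assuming the adapter fields used in the hunks above):

    /* Quiesce: stop polling first, then kill the interrupt sources. */
    for (i = 0; i < adapter->num_rx_queues; i++)
        napi_disable(&adapter->rx_queue[i].napi);  /* waits for poll */
    vmxnet3_disable_all_intrs(adapter);
    vmxnet3_free_irqs(adapter);

    /* Restart: mirror image, interrupts back before polling. */
    vmxnet3_alloc_intr_resources(adapter);
    vmxnet3_request_irqs(adapter);
    for (i = 0; i < adapter->num_rx_queues; i++)
        napi_enable(&adapter->rx_queue[i].napi);
    vmxnet3_enable_all_intrs(adapter);

napi_disable() blocks until any in-flight poll finishes, which is what makes the subsequent teardown safe.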
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 8e17fc8a7fe7..81254be85b92 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -45,6 +45,7 @@ static int
 vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
 {
     struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+    unsigned long flags;
 
     if (adapter->rxcsum != val) {
         adapter->rxcsum = val;
@@ -56,8 +57,10 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
             adapter->shared->devRead.misc.uptFeatures &=
                 ~UPT1_F_RXCSUM;
 
+            spin_lock_irqsave(&adapter->cmd_lock, flags);
             VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                    VMXNET3_CMD_UPDATE_FEATURE);
+            spin_unlock_irqrestore(&adapter->cmd_lock, flags);
         }
     }
     return 0;
@@ -68,76 +71,78 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
 static const struct vmxnet3_stat_desc
 vmxnet3_tq_dev_stats[] = {
     /* description, offset */
-    { "TSO pkts tx",        offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
-    { "TSO bytes tx",       offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
-    { "ucast pkts tx",      offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
-    { "ucast bytes tx",     offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
-    { "mcast pkts tx",      offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
-    { "mcast bytes tx",     offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
-    { "bcast pkts tx",      offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
-    { "bcast bytes tx",     offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
-    { "pkts tx err",        offsetof(struct UPT1_TxStats, pktsTxError) },
-    { "pkts tx discard",    offsetof(struct UPT1_TxStats, pktsTxDiscard) },
+    { "Tx Queue#",          0 },
+    { "  TSO pkts tx",      offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
+    { "  TSO bytes tx",     offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
+    { "  ucast pkts tx",    offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
+    { "  ucast bytes tx",   offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
+    { "  mcast pkts tx",    offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
+    { "  mcast bytes tx",   offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
+    { "  bcast pkts tx",    offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
+    { "  bcast bytes tx",   offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
+    { "  pkts tx err",      offsetof(struct UPT1_TxStats, pktsTxError) },
+    { "  pkts tx discard",  offsetof(struct UPT1_TxStats, pktsTxDiscard) },
 };
 
 /* per tq stats maintained by the driver */
 static const struct vmxnet3_stat_desc
 vmxnet3_tq_driver_stats[] = {
     /* description, offset */
-    {"drv dropped tx total",    offsetof(struct vmxnet3_tq_driver_stats,
+    {"  drv dropped tx total",  offsetof(struct vmxnet3_tq_driver_stats,
                     drop_total) },
     { "   too many frags",  offsetof(struct vmxnet3_tq_driver_stats,
                     drop_too_many_frags) },
     { "   giant hdr",       offsetof(struct vmxnet3_tq_driver_stats,
                     drop_oversized_hdr) },
     { "   hdr err",         offsetof(struct vmxnet3_tq_driver_stats,
                     drop_hdr_inspect_err) },
     { "   tso",             offsetof(struct vmxnet3_tq_driver_stats,
                     drop_tso) },
-    { "ring full",          offsetof(struct vmxnet3_tq_driver_stats,
+    { "  ring full",        offsetof(struct vmxnet3_tq_driver_stats,
                     tx_ring_full) },
-    { "pkts linearized",    offsetof(struct vmxnet3_tq_driver_stats,
+    { "  pkts linearized",  offsetof(struct vmxnet3_tq_driver_stats,
                     linearized) },
-    { "hdr cloned",         offsetof(struct vmxnet3_tq_driver_stats,
+    { "  hdr cloned",       offsetof(struct vmxnet3_tq_driver_stats,
                     copy_skb_header) },
-    { "giant hdr",          offsetof(struct vmxnet3_tq_driver_stats,
+    { "  giant hdr",        offsetof(struct vmxnet3_tq_driver_stats,
                     oversized_hdr) },
 };
 
 /* per rq stats maintained by the device */
 static const struct vmxnet3_stat_desc
 vmxnet3_rq_dev_stats[] = {
-    { "LRO pkts rx",        offsetof(struct UPT1_RxStats, LROPktsRxOK) },
-    { "LRO byte rx",        offsetof(struct UPT1_RxStats, LROBytesRxOK) },
-    { "ucast pkts rx",      offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
-    { "ucast bytes rx",     offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
-    { "mcast pkts rx",      offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
-    { "mcast bytes rx",     offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
-    { "bcast pkts rx",      offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
-    { "bcast bytes rx",     offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
-    { "pkts rx out of buf", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
-    { "pkts rx err",        offsetof(struct UPT1_RxStats, pktsRxError) },
+    { "Rx Queue#",          0 },
+    { "  LRO pkts rx",      offsetof(struct UPT1_RxStats, LROPktsRxOK) },
+    { "  LRO byte rx",      offsetof(struct UPT1_RxStats, LROBytesRxOK) },
+    { "  ucast pkts rx",    offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
+    { "  ucast bytes rx",   offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
+    { "  mcast pkts rx",    offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
+    { "  mcast bytes rx",   offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
+    { "  bcast pkts rx",    offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
+    { "  bcast bytes rx",   offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
+    { "  pkts rx OOB",      offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
+    { "  pkts rx err",      offsetof(struct UPT1_RxStats, pktsRxError) },
 };
 
 /* per rq stats maintained by the driver */
 static const struct vmxnet3_stat_desc
 vmxnet3_rq_driver_stats[] = {
     /* description, offset */
-    { "drv dropped rx total",   offsetof(struct vmxnet3_rq_driver_stats,
+    { "  drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
                     drop_total) },
     { "   err",             offsetof(struct vmxnet3_rq_driver_stats,
                     drop_err) },
     { "   fcs",             offsetof(struct vmxnet3_rq_driver_stats,
                     drop_fcs) },
-    { "rx buf alloc fail",  offsetof(struct vmxnet3_rq_driver_stats,
+    { "  rx buf alloc fail",    offsetof(struct vmxnet3_rq_driver_stats,
                     rx_buf_alloc_failure) },
 };
 
 /* gloabl stats maintained by the driver */
 static const struct vmxnet3_stat_desc
 vmxnet3_global_stats[] = {
     /* description, offset */
     { "tx timeout count",   offsetof(struct vmxnet3_adapter,
                      tx_timeout_count) }
 };
 
@@ -151,12 +156,15 @@ vmxnet3_get_stats(struct net_device *netdev)
     struct UPT1_TxStats *devTxStats;
     struct UPT1_RxStats *devRxStats;
     struct net_device_stats *net_stats = &netdev->stats;
+    unsigned long flags;
     int i;
 
     adapter = netdev_priv(netdev);
 
     /* Collect the dev stats into the shared area */
+    spin_lock_irqsave(&adapter->cmd_lock, flags);
     VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+    spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
     memset(net_stats, 0, sizeof(*net_stats));
     for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -193,12 +201,15 @@ vmxnet3_get_stats(struct net_device *netdev)
 static int
 vmxnet3_get_sset_count(struct net_device *netdev, int sset)
 {
+    struct vmxnet3_adapter *adapter = netdev_priv(netdev);
     switch (sset) {
     case ETH_SS_STATS:
-        return ARRAY_SIZE(vmxnet3_tq_dev_stats) +
-            ARRAY_SIZE(vmxnet3_tq_driver_stats) +
-            ARRAY_SIZE(vmxnet3_rq_dev_stats) +
-            ARRAY_SIZE(vmxnet3_rq_driver_stats) +
+        return (ARRAY_SIZE(vmxnet3_tq_dev_stats) +
+            ARRAY_SIZE(vmxnet3_tq_driver_stats)) *
+            adapter->num_tx_queues +
+            (ARRAY_SIZE(vmxnet3_rq_dev_stats) +
+            ARRAY_SIZE(vmxnet3_rq_driver_stats)) *
+            adapter->num_rx_queues +
             ARRAY_SIZE(vmxnet3_global_stats);
     default:
         return -EOPNOTSUPP;
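The string table and the value buffer must agree on length, so the count is now a function of the active queue counts. Worked example with the tables above: each tx queue contributes 11 device + 9 driver entries, each rx queue 11 device + 4 driver entries, plus 1 global entry; a 4-tx/4-rx adapter therefore reports 4*20 + 4*15 + 1 = 141 statistics.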
@@ -206,10 +217,16 @@ vmxnet3_get_sset_count(struct net_device *netdev, int sset)
 }
 
 
+/* Should be multiple of 4 */
+#define NUM_TX_REGS 8
+#define NUM_RX_REGS 12
+
 static int
 vmxnet3_get_regs_len(struct net_device *netdev)
 {
-    return 20 * sizeof(u32);
+    struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+    return (adapter->num_tx_queues * NUM_TX_REGS * sizeof(u32) +
+        adapter->num_rx_queues * NUM_RX_REGS * sizeof(u32));
 }
 
 
@@ -240,29 +257,37 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
 static void
 vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
 {
+    struct vmxnet3_adapter *adapter = netdev_priv(netdev);
     if (stringset == ETH_SS_STATS) {
-        int i;
-
-        for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
-            memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
-                   ETH_GSTRING_LEN);
-            buf += ETH_GSTRING_LEN;
-        }
-        for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) {
-            memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
-                   ETH_GSTRING_LEN);
-            buf += ETH_GSTRING_LEN;
-        }
-        for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
-            memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
-                   ETH_GSTRING_LEN);
-            buf += ETH_GSTRING_LEN;
-        }
-        for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) {
-            memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
-                   ETH_GSTRING_LEN);
-            buf += ETH_GSTRING_LEN;
-        }
+        int i, j;
+        for (j = 0; j < adapter->num_tx_queues; j++) {
+            for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
+                memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
+                       ETH_GSTRING_LEN);
+                buf += ETH_GSTRING_LEN;
+            }
+            for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats);
+                 i++) {
+                memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
+                       ETH_GSTRING_LEN);
+                buf += ETH_GSTRING_LEN;
+            }
+        }
+
+        for (j = 0; j < adapter->num_rx_queues; j++) {
+            for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
+                memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
+                       ETH_GSTRING_LEN);
+                buf += ETH_GSTRING_LEN;
+            }
+            for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats);
+                 i++) {
+                memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
+                       ETH_GSTRING_LEN);
+                buf += ETH_GSTRING_LEN;
+            }
+        }
+
         for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) {
             memcpy(buf, vmxnet3_global_stats[i].desc,
                    ETH_GSTRING_LEN);
@@ -277,6 +302,7 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
     struct vmxnet3_adapter *adapter = netdev_priv(netdev);
     u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
     u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
+    unsigned long flags;
 
     if (data & ~ETH_FLAG_LRO)
         return -EOPNOTSUPP;
@@ -292,8 +318,10 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
         else
             adapter->shared->devRead.misc.uptFeatures &=
                 ~UPT1_F_LRO;
+        spin_lock_irqsave(&adapter->cmd_lock, flags);
         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                VMXNET3_CMD_UPDATE_FEATURE);
+        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
     }
     return 0;
 }
@@ -303,30 +331,41 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev,
               struct ethtool_stats *stats, u64 *buf)
 {
     struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+    unsigned long flags;
     u8 *base;
     int i;
     int j = 0;
 
+    spin_lock_irqsave(&adapter->cmd_lock, flags);
     VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+    spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
     /* this does assume each counter is 64-bit wide */
-/* TODO change this for multiple queues */
-
-    base = (u8 *)&adapter->tqd_start[j].stats;
-    for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
-        *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset);
-
-    base = (u8 *)&adapter->tx_queue[j].stats;
-    for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
-        *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset);
-
-    base = (u8 *)&adapter->rqd_start[j].stats;
-    for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
-        *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset);
+    for (j = 0; j < adapter->num_tx_queues; j++) {
+        base = (u8 *)&adapter->tqd_start[j].stats;
+        *buf++ = (u64)j;
+        for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
+            *buf++ = *(u64 *)(base +
+                      vmxnet3_tq_dev_stats[i].offset);
+
+        base = (u8 *)&adapter->tx_queue[j].stats;
+        for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
+            *buf++ = *(u64 *)(base +
+                      vmxnet3_tq_driver_stats[i].offset);
+    }
 
-    base = (u8 *)&adapter->rx_queue[j].stats;
-    for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
-        *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset);
+    for (j = 0; j < adapter->num_rx_queues; j++) {
+        base = (u8 *)&adapter->rqd_start[j].stats;
+        *buf++ = (u64) j;
+        for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
+            *buf++ = *(u64 *)(base +
+                      vmxnet3_rq_dev_stats[i].offset);
+
+        base = (u8 *)&adapter->rx_queue[j].stats;
+        for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
+            *buf++ = *(u64 *)(base +
+                      vmxnet3_rq_driver_stats[i].offset);
+    }
 
     base = (u8 *)adapter;
     for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
@@ -339,7 +378,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
 {
     struct vmxnet3_adapter *adapter = netdev_priv(netdev);
     u32 *buf = p;
-    int i = 0;
+    int i = 0, j = 0;
 
     memset(p, 0, vmxnet3_get_regs_len(netdev));
 
@@ -348,31 +387,35 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
     /* Update vmxnet3_get_regs_len if we want to dump more registers */
 
     /* make each ring use multiple of 16 bytes */
-/* TODO change this for multiple queues */
-    buf[0] = adapter->tx_queue[i].tx_ring.next2fill;
-    buf[1] = adapter->tx_queue[i].tx_ring.next2comp;
-    buf[2] = adapter->tx_queue[i].tx_ring.gen;
-    buf[3] = 0;
-
-    buf[4] = adapter->tx_queue[i].comp_ring.next2proc;
-    buf[5] = adapter->tx_queue[i].comp_ring.gen;
-    buf[6] = adapter->tx_queue[i].stopped;
-    buf[7] = 0;
-
-    buf[8] = adapter->rx_queue[i].rx_ring[0].next2fill;
-    buf[9] = adapter->rx_queue[i].rx_ring[0].next2comp;
-    buf[10] = adapter->rx_queue[i].rx_ring[0].gen;
-    buf[11] = 0;
-
-    buf[12] = adapter->rx_queue[i].rx_ring[1].next2fill;
-    buf[13] = adapter->rx_queue[i].rx_ring[1].next2comp;
-    buf[14] = adapter->rx_queue[i].rx_ring[1].gen;
-    buf[15] = 0;
-
-    buf[16] = adapter->rx_queue[i].comp_ring.next2proc;
-    buf[17] = adapter->rx_queue[i].comp_ring.gen;
-    buf[18] = 0;
-    buf[19] = 0;
+    for (i = 0; i < adapter->num_tx_queues; i++) {
+        buf[j++] = adapter->tx_queue[i].tx_ring.next2fill;
+        buf[j++] = adapter->tx_queue[i].tx_ring.next2comp;
+        buf[j++] = adapter->tx_queue[i].tx_ring.gen;
+        buf[j++] = 0;
+
+        buf[j++] = adapter->tx_queue[i].comp_ring.next2proc;
+        buf[j++] = adapter->tx_queue[i].comp_ring.gen;
+        buf[j++] = adapter->tx_queue[i].stopped;
+        buf[j++] = 0;
+    }
+
+    for (i = 0; i < adapter->num_rx_queues; i++) {
+        buf[j++] = adapter->rx_queue[i].rx_ring[0].next2fill;
+        buf[j++] = adapter->rx_queue[i].rx_ring[0].next2comp;
+        buf[j++] = adapter->rx_queue[i].rx_ring[0].gen;
+        buf[j++] = 0;
+
+        buf[j++] = adapter->rx_queue[i].rx_ring[1].next2fill;
+        buf[j++] = adapter->rx_queue[i].rx_ring[1].next2comp;
+        buf[j++] = adapter->rx_queue[i].rx_ring[1].gen;
+        buf[j++] = 0;
+
+        buf[j++] = adapter->rx_queue[i].comp_ring.next2proc;
+        buf[j++] = adapter->rx_queue[i].comp_ring.gen;
+        buf[j++] = 0;
+        buf[j++] = 0;
+    }
+
 }
 
 
@@ -574,6 +617,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
                const struct ethtool_rxfh_indir *p)
 {
     unsigned int i;
+    unsigned long flags;
     struct vmxnet3_adapter *adapter = netdev_priv(netdev);
     struct UPT1_RSSConf *rssConf = adapter->rss_conf;
 
@@ -592,8 +636,10 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
     for (i = 0; i < rssConf->indTableSize; i++)
         rssConf->indTable[i] = p->ring_index[i];
 
+    spin_lock_irqsave(&adapter->cmd_lock, flags);
     VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                            VMXNET3_CMD_UPDATE_RSSIDT);
+    spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
     return 0;
 
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 7fadeed37f03..fb5d245ac878 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,10 +68,10 @@
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.0.16.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.0.25.0-k"
 
 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01001000
+#define VMXNET3_DRIVER_VERSION_NUM      0x01001900
 
 #if defined(CONFIG_PCI_MSI)
     /* RSS only makes sense if MSI-X is supported. */
@@ -289,7 +289,7 @@ struct vmxnet3_rx_queue {
 
 #define VMXNET3_LINUX_MAX_MSIX_VECT     (VMXNET3_DEVICE_MAX_TX_QUEUES + \
                                          VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
-#define VMXNET3_LINUX_MIN_MSIX_VECT     3 /* 1 for each : tx, rx and event */
+#define VMXNET3_LINUX_MIN_MSIX_VECT     2 /* 1 for tx-rx pair and 1 for event */
 
 
 struct vmxnet3_intr {
@@ -317,6 +317,7 @@ struct vmxnet3_adapter {
     struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
     struct vlan_group *vlan_grp;
     struct vmxnet3_intr intr;
+    spinlock_t cmd_lock;
     struct Vmxnet3_DriverShared *shared;
     struct Vmxnet3_PMConf *pm_conf;
     struct Vmxnet3_TxQueueDesc *tqd_start;  /* all tx queue desc */
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 01c05f53e2f9..e74e4b42592d 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -387,8 +387,8 @@ vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
     data1 = steer_ctrl = 0;
 
     status = vxge_hw_vpath_fw_api(vpath,
-            VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
             VXGE_HW_FW_API_GET_EPROM_REV,
+            VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
             0, &data0, &data1, &steer_ctrl);
     if (status != VXGE_HW_OK)
         break;
@@ -2868,6 +2868,8 @@ __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
     ring->rxd_init = attr->rxd_init;
     ring->rxd_term = attr->rxd_term;
     ring->buffer_mode = config->buffer_mode;
+    ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
+    ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
     ring->rxds_limit = config->rxds_limit;
 
     ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
@@ -3511,6 +3513,8 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
 
     /* apply "interrupts per txdl" attribute */
     fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
+    fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
+    fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;
 
     if (fifo->config->intr)
         fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
@@ -3690,7 +3694,7 @@ __vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
     if (status != VXGE_HW_OK)
         goto exit;
 
-    if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
+    if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
         (rts_table !=
          VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
         *data1 = 0;
@@ -4377,6 +4381,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
         }
 
         writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+        vpath->tim_tti_cfg1_saved = val64;
+
         val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
 
         if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4433,6 +4439,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
         }
 
         writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
+        vpath->tim_tti_cfg3_saved = val64;
     }
 
     if (config->ring.enable == VXGE_HW_RING_ENABLE) {
@@ -4481,6 +4488,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
         }
 
         writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
+        vpath->tim_rti_cfg1_saved = val64;
+
         val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
 
         if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4537,6 +4546,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
         }
 
         writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
+        vpath->tim_rti_cfg3_saved = val64;
     }
 
     val64 = 0;
@@ -4555,26 +4565,6 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
     return status;
 }
 
-void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
-{
-    struct __vxge_hw_virtualpath *vpath;
-    struct vxge_hw_vpath_reg __iomem *vp_reg;
-    struct vxge_hw_vp_config *config;
-    u64 val64;
-
-    vpath = &hldev->virtual_paths[vp_id];
-    vp_reg = vpath->vp_reg;
-    config = vpath->vp_config;
-
-    if (config->fifo.enable == VXGE_HW_FIFO_ENABLE &&
-        config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
-        config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
-        val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
-        val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
-        writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
-    }
-}
-
 /*
  * __vxge_hw_vpath_initialize
  * This routine is the final phase of init which initializes the
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index e249e288d160..3c53aa732c9d 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -682,6 +682,10 @@ struct __vxge_hw_virtualpath {
     u32 vsport_number;
     u32 max_kdfc_db;
     u32 max_nofl_db;
+    u64 tim_tti_cfg1_saved;
+    u64 tim_tti_cfg3_saved;
+    u64 tim_rti_cfg1_saved;
+    u64 tim_rti_cfg3_saved;
 
     struct __vxge_hw_ring *____cacheline_aligned ringh;
     struct __vxge_hw_fifo *____cacheline_aligned fifoh;
@@ -921,6 +925,9 @@ struct __vxge_hw_ring {
     u32 doorbell_cnt;
     u32 total_db_cnt;
     u64 rxds_limit;
+    u32 rtimer;
+    u64 tim_rti_cfg1_saved;
+    u64 tim_rti_cfg3_saved;
 
     enum vxge_hw_status (*callback)(
             struct __vxge_hw_ring *ringh,
@@ -1000,6 +1007,9 @@ struct __vxge_hw_fifo {
     u32 per_txdl_space;
     u32 vp_id;
     u32 tx_intr_num;
+    u32 rtimer;
+    u64 tim_tti_cfg1_saved;
+    u64 tim_tti_cfg3_saved;
 
     enum vxge_hw_status (*callback)(
             struct __vxge_hw_fifo *fifo_handle,
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index c81a6512c683..e40f619b62b1 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -371,9 +371,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
     struct vxge_hw_ring_rxd_info ext_info;
     vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
         ring->ndev->name, __func__, __LINE__);
-    ring->pkts_processed = 0;
-
-    vxge_hw_ring_replenish(ringh);
 
     do {
         prefetch((char *)dtr + L1_CACHE_BYTES);
@@ -1588,6 +1585,36 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
     return ret;
 }
 
+/* Configure CI */
+static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
+{
+    int i = 0;
+
+    /* Enable CI for RTI */
+    if (vdev->config.intr_type == MSI_X) {
+        for (i = 0; i < vdev->no_of_vpath; i++) {
+            struct __vxge_hw_ring *hw_ring;
+
+            hw_ring = vdev->vpaths[i].ring.handle;
+            vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
+        }
+    }
+
+    /* Enable CI for TTI */
+    for (i = 0; i < vdev->no_of_vpath; i++) {
+        struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
+        vxge_hw_vpath_tti_ci_set(hw_fifo);
+        /*
+         * For INTA (with or without napi), set CI ON for only one
+         * vpath (there is only one free-running timer).
+         */
+        if ((vdev->config.intr_type == INTA) && (i == 0))
+            break;
+    }
+
+    return;
+}
+
 static int do_vxge_reset(struct vxgedev *vdev, int event)
 {
     enum vxge_hw_status status;
@@ -1753,6 +1780,9 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
         netif_tx_wake_all_queues(vdev->ndev);
     }
 
+    /* configure CI */
+    vxge_config_ci_for_tti_rti(vdev);
+
 out:
     vxge_debug_entryexit(VXGE_TRACE,
         "%s:%d Exiting...", __func__, __LINE__);
@@ -1793,22 +1823,29 @@ static void vxge_reset(struct work_struct *work)
  */
 static int vxge_poll_msix(struct napi_struct *napi, int budget)
 {
-    struct vxge_ring *ring =
-        container_of(napi, struct vxge_ring, napi);
+    struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
+    int pkts_processed;
     int budget_org = budget;
-    ring->budget = budget;
 
+    ring->budget = budget;
+    ring->pkts_processed = 0;
     vxge_hw_vpath_poll_rx(ring->handle);
+    pkts_processed = ring->pkts_processed;
 
     if (ring->pkts_processed < budget_org) {
         napi_complete(napi);
+
         /* Re enable the Rx interrupts for the vpath */
         vxge_hw_channel_msix_unmask(
             (struct __vxge_hw_channel *)ring->handle,
             ring->rx_vector_no);
+        mmiowb();
     }
 
-    return ring->pkts_processed;
+    /* Return a local copy: once the MSI-X vector is unmasked above, the
+     * interrupt can fire right away and preempt this NAPI thread */
+    return pkts_processed;
 }
 
 static int vxge_poll_inta(struct napi_struct *napi, int budget)
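Returning ring->pkts_processed directly was racy: the moment the vector is unmasked, a fresh interrupt can preempt this thread, schedule another poll, and zero pkts_processed before the return executes, making NAPI think no budget was consumed. Snapshotting into a local before the unmask closes the window:

    /* Condensed shape of the fixed poll routine. */
    static int demo_poll_msix(struct napi_struct *napi, int budget)
    {
        struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
        int done;

        ring->pkts_processed = 0;
        vxge_hw_vpath_poll_rx(ring->handle);
        done = ring->pkts_processed;    /* snapshot before unmask */

        if (done < budget) {
            napi_complete(napi);
            vxge_hw_channel_msix_unmask(
                (struct __vxge_hw_channel *)ring->handle,
                ring->rx_vector_no);    /* irq may fire from here on */
        }
        return done;                    /* never the live field */
    }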
@@ -1824,6 +1861,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
     for (i = 0; i < vdev->no_of_vpath; i++) {
         ring = &vdev->vpaths[i].ring;
         ring->budget = budget;
+        ring->pkts_processed = 0;
         vxge_hw_vpath_poll_rx(ring->handle);
         pkts_processed += ring->pkts_processed;
         budget -= ring->pkts_processed;
@@ -2054,6 +2092,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
             netdev_get_tx_queue(vdev->ndev, 0);
         vpath->fifo.indicate_max_pkts =
             vdev->config.fifo_indicate_max_pkts;
+        vpath->fifo.tx_vector_no = 0;
         vpath->ring.rx_vector_no = 0;
         vpath->ring.rx_csum = vdev->rx_csum;
         vpath->ring.rx_hwts = vdev->rx_hwts;
@@ -2079,6 +2118,61 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
     return VXGE_HW_OK;
 }
 
+/**
+ * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
+ * if the interrupts are not within a range
+ * @fifo: pointer to transmit fifo structure
+ * Description: The function changes the boundary timer and restriction timer
+ * value depending on the traffic
+ * Return Value: None
+ */
+static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
+{
+    fifo->interrupt_count++;
+    if (jiffies > fifo->jiffies + HZ / 100) {
+        struct __vxge_hw_fifo *hw_fifo = fifo->handle;
+
+        fifo->jiffies = jiffies;
+        if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
+            hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
+            hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
+            vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
+        } else if (hw_fifo->rtimer != 0) {
+            hw_fifo->rtimer = 0;
+            vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
+        }
+        fifo->interrupt_count = 0;
+    }
+}
+
+/**
+ * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
+ * if the interrupts are not within a range
+ * @ring: pointer to receive ring structure
+ * Description: The function increases or decreases the packet counts within
+ * the ranges of traffic utilization, if the interrupts due to this ring are
+ * not within a fixed range.
+ * Return Value: Nothing
+ */
+static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
+{
+    ring->interrupt_count++;
+    if (jiffies > ring->jiffies + HZ / 100) {
+        struct __vxge_hw_ring *hw_ring = ring->handle;
+
+        ring->jiffies = jiffies;
+        if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
+            hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
+            hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
+            vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
+        } else if (hw_ring->rtimer != 0) {
+            hw_ring->rtimer = 0;
+            vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
+        }
+        ring->interrupt_count = 0;
+    }
+}
+
 /*
  * vxge_isr_napi
  * @irq: the irq of the device.
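Both helpers implement the same bang-bang controller: count interrupts, and once per HZ/100 jiffies window (about 10 ms) compare the count against a threshold; above it, program the restriction timer to the adaptive value so the hardware spaces interrupts out, below it, drop the timer back to zero. The controller in generic form, with max_per_window and adapt_val standing in for the VXGE_T1A_MAX_*_INTERRUPT_COUNT and VXGE_*_RTIMER_ADAPT_VAL constants:

    struct irq_rate {
        unsigned long last_jiffies;
        unsigned int count;
        unsigned int rtimer;    /* value currently programmed */
    };

    /* Generic form of the adaptive-coalescing check above; program()
     * stands in for the vxge_hw_vpath_dynamic_*_rtimer_set() calls. */
    static void rate_update(struct irq_rate *r, unsigned int max_per_window,
                unsigned int adapt_val,
                void (*program)(unsigned int rtimer))
    {
        r->count++;
        if (time_after(jiffies, r->last_jiffies + HZ / 100)) {
            r->last_jiffies = jiffies;
            if (r->count > max_per_window && r->rtimer != adapt_val) {
                r->rtimer = adapt_val;  /* too hot: throttle */
                program(r->rtimer);
            } else if (r->rtimer != 0) {
                r->rtimer = 0;          /* cooled off: run free */
                program(r->rtimer);
            }
            r->count = 0;
        }
    }

Reprogramming only on threshold crossings keeps register writes rare even though the check runs on every interrupt.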
@@ -2139,24 +2233,39 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
 
 #ifdef CONFIG_PCI_MSI
 
-static irqreturn_t
-vxge_tx_msix_handle(int irq, void *dev_id)
+static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
 {
     struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
 
+    adaptive_coalesce_tx_interrupts(fifo);
+
+    vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
+                  fifo->tx_vector_no);
+
+    vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
+                   fifo->tx_vector_no);
+
     VXGE_COMPLETE_VPATH_TX(fifo);
 
+    vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
+                    fifo->tx_vector_no);
+
+    mmiowb();
+
     return IRQ_HANDLED;
 }
 
-static irqreturn_t
-vxge_rx_msix_napi_handle(int irq, void *dev_id)
+static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
 {
     struct vxge_ring *ring = (struct vxge_ring *)dev_id;
 
-    /* MSIX_IDX for Rx is 1 */
+    adaptive_coalesce_rx_interrupts(ring);
+
     vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
                   ring->rx_vector_no);
+
+    vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
+                   ring->rx_vector_no);
 
     napi_schedule(&ring->napi);
     return IRQ_HANDLED;
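The reworked handlers run mask, clear, work, unmask. Masking first turns any event that arrives mid-handler into a pending bit rather than a lost edge; clearing acknowledges the edge that got us here; the final unmask redelivers whatever became pending in between, so no tx completion is dropped and no spurious re-entry occurs while the fifo is being reaped. The mmiowb() orders the posted unmask write before the handler returns on platforms that need it. The tx handler reduced to that skeleton:

    static irqreturn_t demo_tx_irq(int irq, void *dev_id)
    {
        struct vxge_fifo *fifo = dev_id;
        struct __vxge_hw_channel *ch =
            (struct __vxge_hw_channel *)fifo->handle;

        vxge_hw_channel_msix_mask(ch, fifo->tx_vector_no);   /* new events pend */
        vxge_hw_channel_msix_clear(ch, fifo->tx_vector_no);  /* ack this edge */

        VXGE_COMPLETE_VPATH_TX(fifo);                        /* reap completions */

        vxge_hw_channel_msix_unmask(ch, fifo->tx_vector_no); /* replay pending */
        mmiowb();
        return IRQ_HANDLED;
    }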
@@ -2173,14 +2282,20 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
2173 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; 2282 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
2174 2283
2175 for (i = 0; i < vdev->no_of_vpath; i++) { 2284 for (i = 0; i < vdev->no_of_vpath; i++) {
 2285 /* Reduce the chance of losing alarm interrupts by masking
 2286 * the vector. A pending bit is set if an alarm is generated
 2287 * while masked, and the interrupt fires on unmask.
2288 */
2176 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id); 2289 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
2290 vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
2291 mmiowb();
2177 2292
2178 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle, 2293 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
2179 vdev->exec_mode); 2294 vdev->exec_mode);
2180 if (status == VXGE_HW_OK) { 2295 if (status == VXGE_HW_OK) {
2181
2182 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle, 2296 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
2183 msix_id); 2297 msix_id);
2298 mmiowb();
2184 continue; 2299 continue;
2185 } 2300 }
2186 vxge_debug_intr(VXGE_ERR, 2301 vxge_debug_intr(VXGE_ERR,
@@ -2299,6 +2414,9 @@ static int vxge_enable_msix(struct vxgedev *vdev)
2299 vpath->ring.rx_vector_no = (vpath->device_id * 2414 vpath->ring.rx_vector_no = (vpath->device_id *
2300 VXGE_HW_VPATH_MSIX_ACTIVE) + 1; 2415 VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
2301 2416
2417 vpath->fifo.tx_vector_no = (vpath->device_id *
2418 VXGE_HW_VPATH_MSIX_ACTIVE);
2419
2302 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id, 2420 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
2303 VXGE_ALARM_MSIX_ID); 2421 VXGE_ALARM_MSIX_ID);
2304 } 2422 }
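
Each vpath now owns a fixed block of vectors: tx at offset 0, rx at offset 1, and the alarm at offset VXGE_ALARM_MSIX_ID. A quick sketch of the resulting numbering, assuming VXGE_HW_VPATH_MSIX_ACTIVE is 4 and VXGE_ALARM_MSIX_ID is 2 as defined elsewhere in the driver (neither value appears in this hunk):

#include <stdio.h>

/* Assumed values from the vxge headers (not shown in this diff). */
#define VXGE_HW_VPATH_MSIX_ACTIVE 4
#define VXGE_ALARM_MSIX_ID        2

int main(void)
{
	for (int device_id = 0; device_id < 3; device_id++) {
		int base = device_id * VXGE_HW_VPATH_MSIX_ACTIVE;

		/* tx_vector_no = base, rx_vector_no = base + 1 as set above */
		printf("vpath %d: tx=%d rx=%d alarm=%d\n", device_id,
		       base, base + 1, base + VXGE_ALARM_MSIX_ID);
	}
	return 0;
}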
@@ -2474,8 +2592,9 @@ INTA_MODE:
2474 "%s:vxge:INTA", vdev->ndev->name); 2592 "%s:vxge:INTA", vdev->ndev->name);
2475 vxge_hw_device_set_intr_type(vdev->devh, 2593 vxge_hw_device_set_intr_type(vdev->devh,
2476 VXGE_HW_INTR_MODE_IRQLINE); 2594 VXGE_HW_INTR_MODE_IRQLINE);
2477 vxge_hw_vpath_tti_ci_set(vdev->devh, 2595
2478 vdev->vpaths[0].device_id); 2596 vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
2597
2479 ret = request_irq((int) vdev->pdev->irq, 2598 ret = request_irq((int) vdev->pdev->irq,
2480 vxge_isr_napi, 2599 vxge_isr_napi,
2481 IRQF_SHARED, vdev->desc[0], vdev); 2600 IRQF_SHARED, vdev->desc[0], vdev);
@@ -2745,6 +2864,10 @@ static int vxge_open(struct net_device *dev)
2745 } 2864 }
2746 2865
2747 netif_tx_start_all_queues(vdev->ndev); 2866 netif_tx_start_all_queues(vdev->ndev);
2867
2868 /* configure CI */
2869 vxge_config_ci_for_tti_rti(vdev);
2870
2748 goto out0; 2871 goto out0;
2749 2872
2750out2: 2873out2:
@@ -3348,7 +3471,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3348 vxge_debug_init(VXGE_ERR, 3471 vxge_debug_init(VXGE_ERR,
3349 "%s: vpath memory allocation failed", 3472 "%s: vpath memory allocation failed",
3350 vdev->ndev->name); 3473 vdev->ndev->name);
3351 ret = -ENODEV; 3474 ret = -ENOMEM;
3352 goto _out1; 3475 goto _out1;
3353 } 3476 }
3354 3477
@@ -3369,11 +3492,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3369 if (vdev->config.gro_enable) 3492 if (vdev->config.gro_enable)
3370 ndev->features |= NETIF_F_GRO; 3493 ndev->features |= NETIF_F_GRO;
3371 3494
3372 if (register_netdev(ndev)) { 3495 ret = register_netdev(ndev);
3496 if (ret) {
3373 vxge_debug_init(vxge_hw_device_trace_level_get(hldev), 3497 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3374 "%s: %s : device registration failed!", 3498 "%s: %s : device registration failed!",
3375 ndev->name, __func__); 3499 ndev->name, __func__);
3376 ret = -ENODEV;
3377 goto _out2; 3500 goto _out2;
3378 } 3501 }
3379 3502
@@ -3444,6 +3567,11 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev)
3444 /* in 2.6 will call stop() if device is up */ 3567 /* in 2.6 will call stop() if device is up */
3445 unregister_netdev(dev); 3568 unregister_netdev(dev);
3446 3569
3570 kfree(vdev->vpaths);
3571
3572 /* we are safe to free it now */
3573 free_netdev(dev);
3574
3447 vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered", 3575 vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
3448 buf); 3576 buf);
3449 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf, 3577 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
@@ -3799,7 +3927,7 @@ static void __devinit vxge_device_config_init(
3799 break; 3927 break;
3800 3928
3801 case MSI_X: 3929 case MSI_X:
3802 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX; 3930 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
3803 break; 3931 break;
3804 } 3932 }
3805 3933
@@ -4335,10 +4463,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4335 goto _exit1; 4463 goto _exit1;
4336 } 4464 }
4337 4465
4338 if (pci_request_region(pdev, 0, VXGE_DRIVER_NAME)) { 4466 ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
4467 if (ret) {
4339 vxge_debug_init(VXGE_ERR, 4468 vxge_debug_init(VXGE_ERR,
4340 "%s : request regions failed", __func__); 4469 "%s : request regions failed", __func__);
4341 ret = -ENODEV;
4342 goto _exit1; 4470 goto _exit1;
4343 } 4471 }
4344 4472
@@ -4446,7 +4574,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4446 if (!img[i].is_valid) 4574 if (!img[i].is_valid)
4447 break; 4575 break;
4448 vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version " 4576 vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
4449 "%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i, 4577 "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
4450 VXGE_EPROM_IMG_MAJOR(img[i].version), 4578 VXGE_EPROM_IMG_MAJOR(img[i].version),
4451 VXGE_EPROM_IMG_MINOR(img[i].version), 4579 VXGE_EPROM_IMG_MINOR(img[i].version),
4452 VXGE_EPROM_IMG_FIX(img[i].version), 4580 VXGE_EPROM_IMG_FIX(img[i].version),
@@ -4643,8 +4771,9 @@ _exit6:
4643_exit5: 4771_exit5:
4644 vxge_device_unregister(hldev); 4772 vxge_device_unregister(hldev);
4645_exit4: 4773_exit4:
4646 pci_disable_sriov(pdev); 4774 pci_set_drvdata(pdev, NULL);
4647 vxge_hw_device_terminate(hldev); 4775 vxge_hw_device_terminate(hldev);
4776 pci_disable_sriov(pdev);
4648_exit3: 4777_exit3:
4649 iounmap(attr.bar0); 4778 iounmap(attr.bar0);
4650_exit2: 4779_exit2:
@@ -4655,7 +4784,7 @@ _exit0:
4655 kfree(ll_config); 4784 kfree(ll_config);
4656 kfree(device_config); 4785 kfree(device_config);
4657 driver_config->config_dev_cnt--; 4786 driver_config->config_dev_cnt--;
4658 pci_set_drvdata(pdev, NULL); 4787 driver_config->total_dev_cnt--;
4659 return ret; 4788 return ret;
4660} 4789}
4661 4790
@@ -4668,45 +4797,34 @@ _exit0:
4668static void __devexit vxge_remove(struct pci_dev *pdev) 4797static void __devexit vxge_remove(struct pci_dev *pdev)
4669{ 4798{
4670 struct __vxge_hw_device *hldev; 4799 struct __vxge_hw_device *hldev;
4671 struct vxgedev *vdev = NULL; 4800 struct vxgedev *vdev;
4672 struct net_device *dev; 4801 int i;
4673 int i = 0;
4674 4802
4675 hldev = pci_get_drvdata(pdev); 4803 hldev = pci_get_drvdata(pdev);
4676
4677 if (hldev == NULL) 4804 if (hldev == NULL)
4678 return; 4805 return;
4679 4806
4680 dev = hldev->ndev; 4807 vdev = netdev_priv(hldev->ndev);
4681 vdev = netdev_priv(dev);
4682 4808
4683 vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__); 4809 vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
4684
4685 vxge_debug_init(vdev->level_trace, "%s : removing PCI device...", 4810 vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
4686 __func__); 4811 __func__);
4687 vxge_device_unregister(hldev);
4688 4812
4689 for (i = 0; i < vdev->no_of_vpath; i++) { 4813 for (i = 0; i < vdev->no_of_vpath; i++)
4690 vxge_free_mac_add_list(&vdev->vpaths[i]); 4814 vxge_free_mac_add_list(&vdev->vpaths[i]);
4691 vdev->vpaths[i].mcast_addr_cnt = 0;
4692 vdev->vpaths[i].mac_addr_cnt = 0;
4693 }
4694
4695 kfree(vdev->vpaths);
4696 4815
4816 vxge_device_unregister(hldev);
4817 pci_set_drvdata(pdev, NULL);
4818 /* Do not call pci_disable_sriov here, as it will break child devices */
4819 vxge_hw_device_terminate(hldev);
4697 iounmap(vdev->bar0); 4820 iounmap(vdev->bar0);
4698 4821 pci_release_region(pdev, 0);
4699 /* we are safe to free it now */ 4822 pci_disable_device(pdev);
4700 free_netdev(dev); 4823 driver_config->config_dev_cnt--;
4824 driver_config->total_dev_cnt--;
4701 4825
4702 vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered", 4826 vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
4703 __func__, __LINE__); 4827 __func__, __LINE__);
4704
4705 vxge_hw_device_terminate(hldev);
4706
4707 pci_disable_device(pdev);
4708 pci_release_region(pdev, 0);
4709 pci_set_drvdata(pdev, NULL);
4710 vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__, 4828 vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
4711 __LINE__); 4829 __LINE__);
4712} 4830}
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 5746fedc356f..40474f0da576 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -59,11 +59,13 @@
59#define VXGE_TTI_LTIMER_VAL 1000 59#define VXGE_TTI_LTIMER_VAL 1000
60#define VXGE_T1A_TTI_LTIMER_VAL 80 60#define VXGE_T1A_TTI_LTIMER_VAL 80
61#define VXGE_TTI_RTIMER_VAL 0 61#define VXGE_TTI_RTIMER_VAL 0
62#define VXGE_TTI_RTIMER_ADAPT_VAL 10
62#define VXGE_T1A_TTI_RTIMER_VAL 400 63#define VXGE_T1A_TTI_RTIMER_VAL 400
63#define VXGE_RTI_BTIMER_VAL 250 64#define VXGE_RTI_BTIMER_VAL 250
64#define VXGE_RTI_LTIMER_VAL 100 65#define VXGE_RTI_LTIMER_VAL 100
65#define VXGE_RTI_RTIMER_VAL 0 66#define VXGE_RTI_RTIMER_VAL 0
66#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH 67#define VXGE_RTI_RTIMER_ADAPT_VAL 15
68#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
67#define VXGE_ISR_POLLING_CNT 8 69#define VXGE_ISR_POLLING_CNT 8
68#define VXGE_MAX_CONFIG_DEV 0xFF 70#define VXGE_MAX_CONFIG_DEV 0xFF
69#define VXGE_EXEC_MODE_DISABLE 0 71#define VXGE_EXEC_MODE_DISABLE 0
@@ -107,6 +109,14 @@
107#define RTI_T1A_RX_UFC_C 50 109#define RTI_T1A_RX_UFC_C 50
108#define RTI_T1A_RX_UFC_D 60 110#define RTI_T1A_RX_UFC_D 60
109 111
112/*
 113 * The static moderation parameters hold the interrupt rate near 3k per
 114 * second for most, but not all, traffic. The counts below are the maximum
 115 * interrupts allowed per function with INTA, or per vector with MSI-X,
 116 * in a 10 millisecond window. Enabled only for Titan 1A.
117 */
118#define VXGE_T1A_MAX_INTERRUPT_COUNT 100
119#define VXGE_T1A_MAX_TX_INTERRUPT_COUNT 200
110 120
111/* Milli secs timer period */ 121/* Milli secs timer period */
112#define VXGE_TIMER_DELAY 10000 122#define VXGE_TIMER_DELAY 10000
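
The window used by the adaptive helpers is HZ / 100, i.e. 10 ms, so these counts translate to thresholds of 10,000 RX and 20,000 TX interrupts per second before the restriction timer engages, comfortably above the ~3k per second the static moderation targets. A quick check of that arithmetic:

#include <stdio.h>

#define WINDOW_MS                        10  /* HZ / 100 */
#define VXGE_T1A_MAX_INTERRUPT_COUNT    100
#define VXGE_T1A_MAX_TX_INTERRUPT_COUNT 200

int main(void)
{
	printf("rx threshold: %d irqs/s\n",
	       VXGE_T1A_MAX_INTERRUPT_COUNT * 1000 / WINDOW_MS);
	printf("tx threshold: %d irqs/s\n",
	       VXGE_T1A_MAX_TX_INTERRUPT_COUNT * 1000 / WINDOW_MS);
	return 0;
}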
@@ -247,6 +257,11 @@ struct vxge_fifo {
247 int tx_steering_type; 257 int tx_steering_type;
248 int indicate_max_pkts; 258 int indicate_max_pkts;
249 259
260 /* Adaptive interrupt moderation parameters used in T1A */
261 unsigned long interrupt_count;
262 unsigned long jiffies;
263
264 u32 tx_vector_no;
250 /* Tx stats */ 265 /* Tx stats */
251 struct vxge_fifo_stats stats; 266 struct vxge_fifo_stats stats;
252} ____cacheline_aligned; 267} ____cacheline_aligned;
@@ -271,6 +286,10 @@ struct vxge_ring {
271 */ 286 */
272 int driver_id; 287 int driver_id;
273 288
289 /* Adaptive interrupt moderation parameters used in T1A */
290 unsigned long interrupt_count;
291 unsigned long jiffies;
292
274 /* copy of the flag indicating whether rx_csum is to be used */ 293 /* copy of the flag indicating whether rx_csum is to be used */
275 u32 rx_csum:1, 294 u32 rx_csum:1,
276 rx_hwts:1; 295 rx_hwts:1;
@@ -286,7 +305,7 @@ struct vxge_ring {
286 305
287 int vlan_tag_strip; 306 int vlan_tag_strip;
288 struct vlan_group *vlgrp; 307 struct vlan_group *vlgrp;
289 int rx_vector_no; 308 u32 rx_vector_no;
290 enum vxge_hw_status last_status; 309 enum vxge_hw_status last_status;
291 310
292 /* Rx stats */ 311 /* Rx stats */
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 4c10d6c4075f..8674f331311c 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -218,6 +218,68 @@ exit:
218 return status; 218 return status;
219} 219}
220 220
221void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
222{
223 struct vxge_hw_vpath_reg __iomem *vp_reg;
224 struct vxge_hw_vp_config *config;
225 u64 val64;
226
227 if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
228 return;
229
230 vp_reg = fifo->vp_reg;
231 config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
232
233 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
234 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
235 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
236 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
237 fifo->tim_tti_cfg1_saved = val64;
238 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
239 }
240}
241
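
vxge_hw_vpath_tti_ci_set() recovers the enclosing vxge_hw_vp_config from the embedded fifo config with container_of(). A small user-space sketch of that recovery, using hypothetical struct names in place of the driver's:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fifo_config { int enable; };
struct vp_config   { int id; struct fifo_config fifo; };  /* hypothetical */

int main(void)
{
	struct vp_config vp = { .id = 7 };
	struct fifo_config *f = &vp.fifo;

	/* Walk back from the embedded member to its container. */
	struct vp_config *owner = container_of(f, struct vp_config, fifo);

	printf("vpath id = %d\n", owner->id);
	return 0;
}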
242void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
243{
244 u64 val64 = ring->tim_rti_cfg1_saved;
245
246 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
247 ring->tim_rti_cfg1_saved = val64;
248 writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
249}
250
251void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
252{
253 u64 val64 = fifo->tim_tti_cfg3_saved;
254 u64 timer = (fifo->rtimer * 1000) / 272;
255
256 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
257 if (timer)
258 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
259 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
260
261 writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
262 /* tti_cfg3_saved is not updated again because it is
263 * initialized at one place only - init time.
264 */
265}
266
267void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
268{
269 u64 val64 = ring->tim_rti_cfg3_saved;
270 u64 timer = (ring->rtimer * 1000) / 272;
271
272 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
273 if (timer)
274 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
275 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
276
277 writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
278 /* rti_cfg3_saved is not updated again because it is
279 * initialized at one place only - init time.
280 */
281}
282
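
Both rtimer setters scale the value as (rtimer * 1000) / 272 before writing the RTIMER_VAL field, which reads as a microseconds-to-ticks conversion for a hardware timer tick of roughly 272 ns; that unit is an inference from the scaling, not something this diff states. On that assumption, the adapt values correspond to restriction windows of about 10 us and 15 us:

#include <stdio.h>

#define VXGE_TTI_RTIMER_ADAPT_VAL 10
#define VXGE_RTI_RTIMER_ADAPT_VAL 15

/* Mirrors the scaling in vxge_hw_vpath_dynamic_*_rtimer_set(); assumes
 * rtimer is in microseconds and one hardware tick is ~272 ns. */
static unsigned long rtimer_to_ticks(unsigned long rtimer_us)
{
	return rtimer_us * 1000 / 272;
}

int main(void)
{
	printf("tti adapt: %lu ticks\n", rtimer_to_ticks(VXGE_TTI_RTIMER_ADAPT_VAL));
	printf("rti adapt: %lu ticks\n", rtimer_to_ticks(VXGE_RTI_RTIMER_ADAPT_VAL));
	return 0;
}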
221/** 283/**
222 * vxge_hw_channel_msix_mask - Mask MSIX Vector. 284 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
223 * @channel: Channel for rx or tx handle 285
@@ -254,6 +316,23 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
254} 316}
255 317
256/** 318/**
 319 * vxge_hw_channel_msix_clear - Clear the MSIX Vector.
 320 * @channel: Channel for rx or tx handle
 321 * @msix_id: MSI ID
 322 *
 323 * The function clears the msix interrupt for the given msix_id
 324 * when configured in MSIX one-shot mode
 325 *
 326 * Returns: Nothing
327 */
328void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
329{
330 __vxge_hw_pio_mem_write32_upper(
331 (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
332 &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
333}
334
335/**
257 * vxge_hw_device_set_intr_type - Updates the configuration 336 * vxge_hw_device_set_intr_type - Updates the configuration
258 * with new interrupt type. 337 * with new interrupt type.
259 * @hldev: HW device handle. 338 * @hldev: HW device handle.
@@ -2191,19 +2270,14 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2191 if (vpath->hldev->config.intr_mode == 2270 if (vpath->hldev->config.intr_mode ==
2192 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) { 2271 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2193 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( 2272 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2273 VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
2274 0, 32), &vp_reg->one_shot_vect0_en);
2275 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2194 VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN, 2276 VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2195 0, 32), &vp_reg->one_shot_vect1_en); 2277 0, 32), &vp_reg->one_shot_vect1_en);
2196 }
2197
2198 if (vpath->hldev->config.intr_mode ==
2199 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2200 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( 2278 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2201 VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN, 2279 VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2202 0, 32), &vp_reg->one_shot_vect2_en); 2280 0, 32), &vp_reg->one_shot_vect2_en);
2203
2204 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2205 VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
2206 0, 32), &vp_reg->one_shot_vect3_en);
2207 } 2281 }
2208} 2282}
2209 2283
@@ -2229,6 +2303,32 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2229} 2303}
2230 2304
2231/** 2305/**
2306 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
2307 * @vp: Virtual Path handle.
2308 * @msix_id: MSI ID
2309 *
2310 * The function clears the msix interrupt for the given msix_id
2311 *
 2312 * Returns: Nothing.
 2313 *
 2314 * See also: vxge_hw_vpath_msix_mask() and
 2315 * vxge_hw_vpath_msix_unmask().
2316 */
2317void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2318{
2319 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2320
2321 if ((hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT))
2322 __vxge_hw_pio_mem_write32_upper(
2323 (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2324 &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
2325 else
2326 __vxge_hw_pio_mem_write32_upper(
2327 (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2328 &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
2329}
2330
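
Both clear helpers decompose msix_id the same way: the 32-bit register is picked by msix_id % 4 and the bit within it by msix_id >> 2, with the upper 32 bits of the 64-bit mask written via __vxge_hw_pio_mem_write32_upper(). A sketch of the index math, assuming the MSB-first vxge_mBIT() definition from the register headers (not shown in this diff):

#include <stdint.h>
#include <stdio.h>

/* Assumed MSB-first bit macro, as used throughout the vxge registers. */
#define vxge_mBIT(loc) (0x8000000000000000ULL >> (loc))

int main(void)
{
	for (int msix_id = 0; msix_id < 8; msix_id++) {
		uint64_t mask = vxge_mBIT(msix_id >> 2);

		printf("msix_id %d -> clr_msix_one_shot_vec[%d], upper32=0x%08x\n",
		       msix_id, msix_id % 4, (uint32_t)(mask >> 32));
	}
	return 0;
}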
2331/**
2232 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector. 2332 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2233 * @vp: Virtual Path handle. 2333 * @vp: Virtual Path handle.
2234 * @msix_id: MSI ID 2334 * @msix_id: MSI ID
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index d48486d6afa1..9d9dfda4c7ab 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -2142,6 +2142,10 @@ void vxge_hw_device_clear_tx_rx(
2142 * Virtual Paths 2142 * Virtual Paths
2143 */ 2143 */
2144 2144
2145void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring);
2146
2147void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo);
2148
2145u32 vxge_hw_vpath_id( 2149u32 vxge_hw_vpath_id(
2146 struct __vxge_hw_vpath_handle *vpath_handle); 2150 struct __vxge_hw_vpath_handle *vpath_handle);
2147 2151
@@ -2245,6 +2249,8 @@ void
2245vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle, 2249vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
2246 int msix_id); 2250 int msix_id);
2247 2251
2252void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id);
2253
2248void vxge_hw_device_flush_io(struct __vxge_hw_device *devh); 2254void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
2249 2255
2250void 2256void
@@ -2270,6 +2276,9 @@ void
2270vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id); 2276vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
2271 2277
2272void 2278void
2279vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channelh, int msix_id);
2280
2281void
2273vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, 2282vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
2274 void **dtrh); 2283 void **dtrh);
2275 2284
@@ -2282,7 +2291,8 @@ vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
2282int 2291int
2283vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel); 2292vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
2284 2293
2285void 2294void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo);
2286vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id); 2295
2296void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring);
2287 2297
2288#endif 2298#endif
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index ad2f99b9bcf3..581e21525e85 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -16,8 +16,8 @@
16 16
17#define VXGE_VERSION_MAJOR "2" 17#define VXGE_VERSION_MAJOR "2"
18#define VXGE_VERSION_MINOR "5" 18#define VXGE_VERSION_MINOR "5"
19#define VXGE_VERSION_FIX "1" 19#define VXGE_VERSION_FIX "2"
20#define VXGE_VERSION_BUILD "22082" 20#define VXGE_VERSION_BUILD "22259"
21#define VXGE_VERSION_FOR "k" 21#define VXGE_VERSION_FOR "k"
22 22
23#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld)) 23#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 5b6932c2193a..166e9f742596 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -402,72 +402,6 @@ static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
402} 402}
403#endif 403#endif
404 404
405/**
406 * iwl3945_good_plcp_health - checks for plcp error.
407 *
408 * When the plcp error is exceeding the thresholds, reset the radio
409 * to improve the throughput.
410 */
411static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
412 struct iwl_rx_packet *pkt)
413{
414 bool rc = true;
415 struct iwl3945_notif_statistics current_stat;
416 int combined_plcp_delta;
417 unsigned int plcp_msec;
418 unsigned long plcp_received_jiffies;
419
420 if (priv->cfg->base_params->plcp_delta_threshold ==
421 IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
422 IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
423 return rc;
424 }
425 memcpy(&current_stat, pkt->u.raw, sizeof(struct
426 iwl3945_notif_statistics));
427 /*
428 * check for plcp_err and trigger radio reset if it exceeds
429 * the plcp error threshold plcp_delta.
430 */
431 plcp_received_jiffies = jiffies;
432 plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
433 (long) priv->plcp_jiffies);
434 priv->plcp_jiffies = plcp_received_jiffies;
435 /*
436 * check to make sure plcp_msec is not 0 to prevent division
437 * by zero.
438 */
439 if (plcp_msec) {
440 combined_plcp_delta =
441 (le32_to_cpu(current_stat.rx.ofdm.plcp_err) -
442 le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err));
443
444 if ((combined_plcp_delta > 0) &&
445 ((combined_plcp_delta * 100) / plcp_msec) >
446 priv->cfg->base_params->plcp_delta_threshold) {
447 /*
448 * if plcp_err exceed the threshold, the following
449 * data is printed in csv format:
450 * Text: plcp_err exceeded %d,
451 * Received ofdm.plcp_err,
452 * Current ofdm.plcp_err,
453 * combined_plcp_delta,
454 * plcp_msec
455 */
456 IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
457 "%u, %d, %u mSecs\n",
458 priv->cfg->base_params->plcp_delta_threshold,
459 le32_to_cpu(current_stat.rx.ofdm.plcp_err),
460 combined_plcp_delta, plcp_msec);
461 /*
462 * Reset the RF radio due to the high plcp
463 * error rate
464 */
465 rc = false;
466 }
467 }
468 return rc;
469}
470
471void iwl3945_hw_rx_statistics(struct iwl_priv *priv, 405void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
472 struct iwl_rx_mem_buffer *rxb) 406 struct iwl_rx_mem_buffer *rxb)
473{ 407{
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 546de5749824..da1f12120346 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -120,6 +120,9 @@ struct netfront_info {
120 unsigned long rx_pfn_array[NET_RX_RING_SIZE]; 120 unsigned long rx_pfn_array[NET_RX_RING_SIZE];
121 struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; 121 struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
122 struct mmu_update rx_mmu[NET_RX_RING_SIZE]; 122 struct mmu_update rx_mmu[NET_RX_RING_SIZE];
123
124 /* Statistics */
125 int rx_gso_checksum_fixup;
123}; 126};
124 127
125struct netfront_rx_info { 128struct netfront_rx_info {
@@ -770,11 +773,29 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
770 return cons; 773 return cons;
771} 774}
772 775
773static int skb_checksum_setup(struct sk_buff *skb) 776static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
774{ 777{
775 struct iphdr *iph; 778 struct iphdr *iph;
776 unsigned char *th; 779 unsigned char *th;
777 int err = -EPROTO; 780 int err = -EPROTO;
781 int recalculate_partial_csum = 0;
782
783 /*
784 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
785 * peers can fail to set NETRXF_csum_blank when sending a GSO
786 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
787 * recalculate the partial checksum.
788 */
789 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
790 struct netfront_info *np = netdev_priv(dev);
791 np->rx_gso_checksum_fixup++;
792 skb->ip_summed = CHECKSUM_PARTIAL;
793 recalculate_partial_csum = 1;
794 }
795
796 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
797 if (skb->ip_summed != CHECKSUM_PARTIAL)
798 return 0;
778 799
779 if (skb->protocol != htons(ETH_P_IP)) 800 if (skb->protocol != htons(ETH_P_IP))
780 goto out; 801 goto out;
@@ -788,9 +809,23 @@ static int skb_checksum_setup(struct sk_buff *skb)
788 switch (iph->protocol) { 809 switch (iph->protocol) {
789 case IPPROTO_TCP: 810 case IPPROTO_TCP:
790 skb->csum_offset = offsetof(struct tcphdr, check); 811 skb->csum_offset = offsetof(struct tcphdr, check);
812
813 if (recalculate_partial_csum) {
814 struct tcphdr *tcph = (struct tcphdr *)th;
815 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
816 skb->len - iph->ihl*4,
817 IPPROTO_TCP, 0);
818 }
791 break; 819 break;
792 case IPPROTO_UDP: 820 case IPPROTO_UDP:
793 skb->csum_offset = offsetof(struct udphdr, check); 821 skb->csum_offset = offsetof(struct udphdr, check);
822
823 if (recalculate_partial_csum) {
824 struct udphdr *udph = (struct udphdr *)th;
825 udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
826 skb->len - iph->ihl*4,
827 IPPROTO_UDP, 0);
828 }
794 break; 829 break;
795 default: 830 default:
796 if (net_ratelimit()) 831 if (net_ratelimit())
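
The fixup seeds the transport checksum field with ~csum_tcpudp_magic(), i.e. the folded but not yet complemented sum over the IPv4 pseudo header, leaving whoever completes the CHECKSUM_PARTIAL skb to add the payload and apply the final inversion. A hedged user-space sketch of that pseudo-header sum (illustrative host-order values; the kernel operates on network byte order):

#include <stdint.h>
#include <stdio.h>

/* Folded (not yet complemented) one's-complement sum over the IPv4
 * pseudo header; this is the value the fixup stores in the checksum
 * field, cf. ~csum_tcpudp_magic() above. */
static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
			       uint8_t proto, uint16_t l4len)
{
	uint64_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += proto;
	sum += l4len;
	while (sum >> 16)                 /* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	/* 10.0.0.1 -> 10.0.0.2, TCP (6), 1460-byte segment: illustrative. */
	printf("seed = 0x%04x\n",
	       pseudo_hdr_sum(0x0a000001, 0x0a000002, 6, 1460));
	return 0;
}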
@@ -829,13 +864,11 @@ static int handle_incoming_queue(struct net_device *dev,
829 /* Ethernet work: Delayed to here as it peeks the header. */ 864 /* Ethernet work: Delayed to here as it peeks the header. */
830 skb->protocol = eth_type_trans(skb, dev); 865 skb->protocol = eth_type_trans(skb, dev);
831 866
832 if (skb->ip_summed == CHECKSUM_PARTIAL) { 867 if (checksum_setup(dev, skb)) {
833 if (skb_checksum_setup(skb)) { 868 kfree_skb(skb);
834 kfree_skb(skb); 869 packets_dropped++;
835 packets_dropped++; 870 dev->stats.rx_errors++;
836 dev->stats.rx_errors++; 871 continue;
837 continue;
838 }
839 } 872 }
840 873
841 dev->stats.rx_packets++; 874 dev->stats.rx_packets++;
@@ -1632,12 +1665,59 @@ static void netback_changed(struct xenbus_device *dev,
1632 } 1665 }
1633} 1666}
1634 1667
1668static const struct xennet_stat {
1669 char name[ETH_GSTRING_LEN];
1670 u16 offset;
1671} xennet_stats[] = {
1672 {
1673 "rx_gso_checksum_fixup",
1674 offsetof(struct netfront_info, rx_gso_checksum_fixup)
1675 },
1676};
1677
1678static int xennet_get_sset_count(struct net_device *dev, int string_set)
1679{
1680 switch (string_set) {
1681 case ETH_SS_STATS:
1682 return ARRAY_SIZE(xennet_stats);
1683 default:
1684 return -EINVAL;
1685 }
1686}
1687
1688static void xennet_get_ethtool_stats(struct net_device *dev,
 1689 struct ethtool_stats *stats, u64 *data)
1690{
1691 void *np = netdev_priv(dev);
1692 int i;
1693
1694 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1695 data[i] = *(int *)(np + xennet_stats[i].offset);
1696}
1697
 1698static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1699{
1700 int i;
1701
1702 switch (stringset) {
1703 case ETH_SS_STATS:
1704 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1705 memcpy(data + i * ETH_GSTRING_LEN,
1706 xennet_stats[i].name, ETH_GSTRING_LEN);
1707 break;
1708 }
1709}
1710
1635static const struct ethtool_ops xennet_ethtool_ops = 1711static const struct ethtool_ops xennet_ethtool_ops =
1636{ 1712{
1637 .set_tx_csum = ethtool_op_set_tx_csum, 1713 .set_tx_csum = ethtool_op_set_tx_csum,
1638 .set_sg = xennet_set_sg, 1714 .set_sg = xennet_set_sg,
1639 .set_tso = xennet_set_tso, 1715 .set_tso = xennet_set_tso,
1640 .get_link = ethtool_op_get_link, 1716 .get_link = ethtool_op_get_link,
1717
1718 .get_sset_count = xennet_get_sset_count,
1719 .get_ethtool_stats = xennet_get_ethtool_stats,
1720 .get_strings = xennet_get_strings,
1641}; 1721};
1642 1722
1643#ifdef CONFIG_SYSFS 1723#ifdef CONFIG_SYSFS
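
With get_sset_count, get_strings and get_ethtool_stats wired up, the new counter surfaces through the standard interface, e.g. "ethtool -S ethN". The table-driven pattern generalizes to any number of counters; a minimal user-space sketch with one hypothetical extra stat:

#include <stddef.h>
#include <stdio.h>

#define ETH_GSTRING_LEN 32

/* Hypothetical private struct; only rx_gso_checksum_fixup exists in the
 * patch, tx_dummy is invented for illustration. */
struct priv { int rx_gso_checksum_fixup; int tx_dummy; };

static const struct stat_desc {
	char name[ETH_GSTRING_LEN];
	size_t offset;
} stats[] = {
	{ "rx_gso_checksum_fixup", offsetof(struct priv, rx_gso_checksum_fixup) },
	{ "tx_dummy",              offsetof(struct priv, tx_dummy) },
};

int main(void)
{
	struct priv p = { .rx_gso_checksum_fixup = 3, .tx_dummy = 7 };

	/* What xennet_get_ethtool_stats() does: walk the table and read
	 * each counter through its recorded offset. */
	for (size_t i = 0; i < sizeof(stats) / sizeof(stats[0]); i++)
		printf("%-24s %d\n", stats[i].name,
		       *(const int *)((const char *)&p + stats[i].offset));
	return 0;
}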